iothread: replace fair_mutex with a condition variable
This conveys the intention better, and scales to more than one thread
contending for the mutex with the iothread (as long as all of them have
a "quiescent point" like the TCG thread has).  Also, on Mac OS X the
fair_mutex somehow did not work as intended and deadlocked.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Tested-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
parent 49e40b6627
commit 46daff13c8

 cpus.c | 24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)
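Note (not part of the patch): the handshake described in the commit message
can be sketched in isolation with bare pthreads.  Every name below
(big_lock, io_wants_lock, io_proceeded, vcpu_quiescent_point, the demo main)
is invented for the illustration and does not exist in cpus.c; the real code
uses qemu_global_mutex, iothread_requesting_mutex and qemu_io_proceeded_cond,
as shown in the diff that follows.

/* Illustrative sketch only.  Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  io_proceeded = PTHREAD_COND_INITIALIZER;
static bool io_wants_lock;   /* plays the role of iothread_requesting_mutex */

/* Iothread side: announce the request, take the lock, then let waiters go.
 * (In cpus.c the middle step is trylock + qemu_cpu_kick_thread + lock.) */
static void iothread_lock(void)
{
    io_wants_lock = true;                /* written before holding big_lock,
                                            mirroring the patch             */
    pthread_mutex_lock(&big_lock);
    io_wants_lock = false;
    pthread_cond_broadcast(&io_proceeded);
}

static void iothread_unlock(void)
{
    pthread_mutex_unlock(&big_lock);
}

/* Contending thread side (the TCG thread in cpus.c): at its quiescent point,
 * with big_lock held, it waits on the condition variable instead of the old
 * unlock / fair_mutex lock+unlock / re-lock dance.  The wait drops big_lock
 * atomically, so the iothread cannot be starved. */
static void vcpu_quiescent_point(void)
{
    while (io_wants_lock) {
        pthread_cond_wait(&io_proceeded, &big_lock);
    }
}

/* Tiny demo: a "vcpu" runs with big_lock held and periodically reaches a
 * quiescent point; the main thread plays the iothread. */
static void *vcpu_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&big_lock);
    for (int i = 0; i < 200; i++) {
        usleep(1000);                    /* stand-in for running guest code */
        vcpu_quiescent_point();
    }
    pthread_mutex_unlock(&big_lock);
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, vcpu_thread, NULL);
    for (int i = 0; i < 5; i++) {
        iothread_lock();
        printf("iothread acquired big_lock (%d)\n", i);
        iothread_unlock();
        usleep(10000);
    }
    pthread_join(tid, NULL);
    return 0;
}

The key point is that the condition-variable wait releases the big lock
atomically while the contending thread sleeps, so the iothread can no longer
be starved; any additional thread contending for the lock only needs the same
wait loop at its own quiescent point.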
cpus.c
@@ -636,7 +636,8 @@ void vm_stop(int reason)
 #else /* CONFIG_IOTHREAD */
 
 QemuMutex qemu_global_mutex;
-static QemuMutex qemu_fair_mutex;
+static QemuCond qemu_io_proceeded_cond;
+static bool iothread_requesting_mutex;
 
 static QemuThread io_thread;
 
@@ -672,7 +673,7 @@ int qemu_init_main_loop(void)
     qemu_cond_init(&qemu_system_cond);
     qemu_cond_init(&qemu_pause_cond);
     qemu_cond_init(&qemu_work_cond);
-    qemu_mutex_init(&qemu_fair_mutex);
+    qemu_cond_init(&qemu_io_proceeded_cond);
     qemu_mutex_init(&qemu_global_mutex);
     qemu_mutex_lock(&qemu_global_mutex);
 
@@ -755,17 +756,9 @@ static void qemu_tcg_wait_io_event(void)
         qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
     }
 
-    qemu_mutex_unlock(&qemu_global_mutex);
-
-    /*
-     * Users of qemu_global_mutex can be starved, having no chance
-     * to acquire it since this path will get to it first.
-     * So use another lock to provide fairness.
-     */
-    qemu_mutex_lock(&qemu_fair_mutex);
-    qemu_mutex_unlock(&qemu_fair_mutex);
-
-    qemu_mutex_lock(&qemu_global_mutex);
+    while (iothread_requesting_mutex) {
+        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
+    }
 
     for (env = first_cpu; env != NULL; env = env->next_cpu) {
         qemu_wait_io_event_common(env);
@@ -908,12 +901,13 @@ void qemu_mutex_lock_iothread(void)
     if (kvm_enabled()) {
         qemu_mutex_lock(&qemu_global_mutex);
     } else {
-        qemu_mutex_lock(&qemu_fair_mutex);
+        iothread_requesting_mutex = true;
         if (qemu_mutex_trylock(&qemu_global_mutex)) {
             qemu_cpu_kick_thread(first_cpu);
             qemu_mutex_lock(&qemu_global_mutex);
         }
-        qemu_mutex_unlock(&qemu_fair_mutex);
+        iothread_requesting_mutex = false;
+        qemu_cond_broadcast(&qemu_io_proceeded_cond);
     }
 }
 