[1/9] main-loop: use qemu_mutex_lock_iothread consistently

Message ID 1435825251-674-2-git-send-email-pbonzini@redhat.com
State New

Commit Message

Paolo Bonzini July 2, 2015, 8:20 a.m. UTC
The next patch will require the BQL to always be taken through
qemu_mutex_lock_iothread(), which is not the case right now.

Outside TCG mode this is not a problem.  In TCG mode, we need to be
careful and avoid the "prod out of compiled code" step if already
in a VCPU thread.  This is easily done with a check on current_cpu,
i.e. qemu_in_vcpu_thread().
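
For reference, the check amounts to comparing the calling thread against
the thread that owns current_cpu.  A minimal sketch of what
qemu_in_vcpu_thread() does (the exact body lives in cpus.c and may differ
slightly):

  bool qemu_in_vcpu_thread(void)
  {
      /* True when called from the thread that is running current_cpu.  */
      return current_cpu && current_cpu->thread_id == qemu_get_thread_id();
  }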

Hopefully, multithreaded TCG will get rid of the whole logic to kick
VCPUs whenever an I/O event occurs!

Cc: Frederic Konrad <fred.konrad@greensocs.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 cpus.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

Patch

diff --git a/cpus.c b/cpus.c
index 4f0e54d..c09fbef 100644
--- a/cpus.c
+++ b/cpus.c
@@ -954,7 +954,7 @@  static void *qemu_kvm_cpu_thread_fn(void *arg)
     CPUState *cpu = arg;
     int r;
 
-    qemu_mutex_lock(&qemu_global_mutex);
+    qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->can_do_io = 1;
@@ -1034,10 +1034,10 @@  static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
 
+    qemu_mutex_lock_iothread();
     qemu_tcg_init_cpu_signals();
     qemu_thread_get_self(cpu->thread);
 
-    qemu_mutex_lock(&qemu_global_mutex);
     CPU_FOREACH(cpu) {
         cpu->thread_id = qemu_get_thread_id();
         cpu->created = true;
@@ -1149,7 +1149,11 @@  bool qemu_in_vcpu_thread(void)
 void qemu_mutex_lock_iothread(void)
 {
     atomic_inc(&iothread_requesting_mutex);
-    if (!tcg_enabled() || !first_cpu || !first_cpu->thread) {
+    /* In the simple case there is no need to bump the VCPU thread out of
+     * TCG code execution.
+     */
+    if (!tcg_enabled() || qemu_in_vcpu_thread() ||
+        !first_cpu || !first_cpu->thread) {
         qemu_mutex_lock(&qemu_global_mutex);
         atomic_dec(&iothread_requesting_mutex);
     } else {
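
The else branch that follows (outside the hunk's context) is the slow path:
when a VCPU thread may be sitting in compiled code with the BQL held, it is
prodded out of the TCG execution loop before the caller blocks on the mutex.
A rough sketch of that path, assuming the qemu_cpu_kick_thread() helper and
the qemu_io_proceeded_cond condition variable found in cpus.c of this era
(not part of this patch):

      } else {
          if (qemu_mutex_trylock(&qemu_global_mutex)) {
              /* trylock failed: a VCPU likely holds the BQL; prod it out of
               * compiled code, then block on the mutex.  */
              qemu_cpu_kick_thread(first_cpu);
              qemu_mutex_lock(&qemu_global_mutex);
          }
          atomic_dec(&iothread_requesting_mutex);
          qemu_cond_broadcast(&qemu_io_proceeded_cond);
      }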