[v2,04/11] qemu_flush_work for remote vcpu execution

Message ID 1259671897-22232-5-git-send-email-glommer@redhat.com
State New

Commit Message

Glauber Costa Dec. 1, 2009, 12:51 p.m. UTC
This function is similar to qemu-kvm's on_vcpu mechanism. It is fully
synchronous and guarantees that a given function will be executed on the
specified vcpu.
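
As a usage sketch (do_work and example_caller below are illustrative
names, not part of the patch):

static void do_work(void *data)
{
    /* Runs on env's vcpu thread, under qemu_global_mutex. */
    int *result = data;
    *result = 42;
}

static void example_caller(CPUState *env)
{
    int result = 0;

    /* Blocks until do_work() has run on env's thread; if the caller
       already runs on env's thread, the function is invoked directly. */
    qemu_queue_work(env, do_work, &result);
    /* The call is fully synchronous, so result is valid here. */
}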

This patch also converts the usage within the breakpoints system.

Signed-off-by: Glauber Costa <glommer@redhat.com>
---
 cpu-all.h  |    3 ++
 cpu-defs.h |   14 +++++++++++++
 kvm-all.c  |   17 +--------------
 vl.c       |   61 +++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 4 files changed, 75 insertions(+), 20 deletions(-)

Comments

Marcelo Tosatti Dec. 2, 2009, 1:27 p.m. UTC | #1
On Tue, Dec 01, 2009 at 10:51:30AM -0200, Glauber Costa wrote:
> This function is similar to qemu-kvm's on_vcpu mechanism. It is fully synchronous
> and guarantees that a given function will be executed on the specified vcpu.
> 
> This patch also converts the usage within the breakpoints system.
> 
> Signed-off-by: Glauber Costa <glommer@redhat.com>
> ---

> @@ -3436,8 +3441,7 @@ static int tcg_has_work(void);
>  
>  static pthread_key_t current_env;
>  
> -CPUState *qemu_get_current_env(void);
> -CPUState *qemu_get_current_env(void)
> +static CPUState *qemu_get_current_env(void)
>  {
>      return pthread_getspecific(current_env);
>  }
> @@ -3474,8 +3478,10 @@ static int qemu_init_main_loop(void)
>  
>  static void qemu_wait_io_event(CPUState *env)
>  {
> -    while (!tcg_has_work())
> +    while (!tcg_has_work()) {

This checks all cpus, while for KVM it should check only 
the current cpu.
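
A per-cpu variant would look roughly like this (a sketch only; it
assumes the existing per-cpu cpu_has_work() helper that tcg_has_work()
iterates over in vl.c):

static void qemu_kvm_wait_io_event(CPUState *env)
{
    /* Wait only on this vcpu's work instead of scanning all cpus. */
    while (!cpu_has_work(env)) {
        qemu_flush_work(env);
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
    }
}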

> +        qemu_flush_work(env);
>          qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
> +    }

KVM vcpu threads should block SIGUSR1, set the in-kernel signal mask 
with KVM_SET_SIGNAL_MASK ioctl, and eat the signal in
qemu_wait_io_event (qemu_flush_work should run after eating
the signal). Similarly to qemu-kvm's kvm_main_loop_wait.

Otherwise a vcpu thread can lose a signal (say handle SIGUSR1 when not
holding qemu_global_mutex before kernel entry).

I think this is the source of the problems patch 8 attempts to fix.
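
A minimal sketch of that pattern, modeled on qemu-kvm's
kvm_main_loop_wait(); the helper names are illustrative, not from this
series, and it assumes <linux/kvm.h> plus QEMU's kvm_vcpu_ioctl(),
qemu_malloc() and qemu_free():

#include <linux/kvm.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <time.h>

/* Run once per vcpu thread: block SIGUSR1 in userspace and hand it to
   the kernel with KVM_SET_SIGNAL_MASK, so a pending SIGUSR1 forces an
   exit from KVM_RUN while the thread itself only consumes it via
   sigtimedwait() below. */
static void kvm_init_vcpu_signals(CPUState *env)
{
    struct kvm_signal_mask *sigmask;
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(set));
    sigmask->len = 8; /* size of the kernel's sigset */
    memcpy(sigmask->sigset, &set, sizeof(set));
    kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    qemu_free(sigmask);
}

/* Eat every pending SIGUSR1 without sleeping, then run queued work.
   Called with qemu_global_mutex held from qemu_wait_io_event(), so a
   kick can no longer be lost between dropping the mutex and entering
   the kernel. */
static void kvm_eat_signal(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t info;
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    while (sigtimedwait(&set, &info, &ts) > 0) {
        continue;
    }
    qemu_flush_work(env);
}
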
Glauber Costa Dec. 2, 2009, 1:41 p.m. UTC | #2
On Wed, Dec 02, 2009 at 11:27:53AM -0200, Marcelo Tosatti wrote:
> On Tue, Dec 01, 2009 at 10:51:30AM -0200, Glauber Costa wrote:
> > This function is similar to qemu-kvm's on_vcpu mechanism. It is fully synchronous
> > and guarantees that a given function will be executed on the specified vcpu.
> > 
> > This patch also converts the usage within the breakpoints system.
> > 
> > Signed-off-by: Glauber Costa <glommer@redhat.com>
> > ---
> 
> > @@ -3436,8 +3441,7 @@ static int tcg_has_work(void);
> >  
> >  static pthread_key_t current_env;
> >  
> > -CPUState *qemu_get_current_env(void);
> > -CPUState *qemu_get_current_env(void)
> > +static CPUState *qemu_get_current_env(void)
> >  {
> >      return pthread_getspecific(current_env);
> >  }
> > @@ -3474,8 +3478,10 @@ static int qemu_init_main_loop(void)
> >  
> >  static void qemu_wait_io_event(CPUState *env)
> >  {
> > -    while (!tcg_has_work())
> > +    while (!tcg_has_work()) {
> 
> This checks all cpus, while for KVM it should check only 
> the current cpu.
> 
> > +        qemu_flush_work(env);
> >          qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
> > +    }
> 
> KVM vcpu threads should block SIGUSR1, set the in-kernel signal mask 
> with KVM_SET_SIGNAL_MASK ioctl, and eat the signal in
> qemu_wait_io_event (qemu_flush_work should run after eating
> the signal). Similarly to qemu-kvm's kvm_main_loop_wait.
> 
> Otherwise a vcpu thread can lose a signal (say handle SIGUSR1 when not
> holding qemu_global_mutex before kernel entry).
> 
> I think this is the source of the problems patch 8 attempts to fix.

I remember you had patches for both of these fixes.

Can you resend them on top of this series?
Marcelo Tosatti Dec. 2, 2009, 1:54 p.m. UTC | #3
On Wed, Dec 02, 2009 at 11:41:19AM -0200, Glauber Costa wrote:
> > KVM vcpu threads should block SIGUSR1, set the in-kernel signal mask 
> > with KVM_SET_SIGNAL_MASK ioctl, and eat the signal in
> > qemu_wait_io_event (qemu_flush_work should run after eating
> > the signal). Similarly to qemu-kvm's kvm_main_loop_wait.
> > 
> > Otherwise a vcpu thread can lose a signal (say handle SIGUSR1 when not
> > holding qemu_global_mutex before kernel entry).
> > 
> > I think this is the source of the problems patch 8 attempts to fix.
> 
> I remember you had patches for both of these fixes.

Only the qemu_wait_io_event split.

> Can you resend them on top of this series?

There is not much sense in splitting qemu_wait_io_event after what your
patch set does (it should be split before).
Glauber Costa Dec. 2, 2009, 2:01 p.m. UTC | #4
On Wed, Dec 02, 2009 at 11:54:45AM -0200, Marcelo Tosatti wrote:
> On Wed, Dec 02, 2009 at 11:41:19AM -0200, Glauber Costa wrote:
> > > KVM vcpu threads should block SIGUSR1, set the in-kernel signal mask 
> > > with KVM_SET_SIGNAL_MASK ioctl, and eat the signal in
> > > qemu_wait_io_event (qemu_flush_work should run after eating
> > > the signal). Similarly to qemu-kvm's kvm_main_loop_wait.
> > > 
> > > Otherwise a vcpu thread can lose a signal (say handle SIGUSR1 when not
> > > holding qemu_global_mutex before kernel entry).
> > > 
> > > I think this is the source of the problems patch 8 attempts to fix.
> > 
> > I remember you had patches for both of these fixes.
> 
> Only the qemu_wait_io_event split.
> 
> > Can you resend them on top of this series?
> 
> There is not much sense in splitting qemu_wait_io_event after what your
> patch set does (it should be split before).

What's the difference here?

Patch

diff --git a/cpu-all.h b/cpu-all.h
index e214374..8270d43 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -763,6 +763,9 @@  extern CPUState *cpu_single_env;
 extern int64_t qemu_icount;
 extern int use_icount;
 
+void qemu_queue_work(CPUState *env, void (*func)(void *data), void *data);
+void qemu_flush_work(CPUState *env);
+
 #define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
 #define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
 #define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
diff --git a/cpu-defs.h b/cpu-defs.h
index 95068b5..18792fc 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -31,6 +31,8 @@ 
 #include "qemu-queue.h"
 #include "targphys.h"
 
+#include "qemu-thread.h"
+
 #ifndef TARGET_LONG_BITS
 #error TARGET_LONG_BITS must be defined before including this header
 #endif
@@ -134,6 +136,13 @@  typedef struct CPUWatchpoint {
     QTAILQ_ENTRY(CPUWatchpoint) entry;
 } CPUWatchpoint;
 
+typedef struct QemuWorkItem {
+    void (*func)(void *data);
+    void *data;
+    int done;
+} QemuWorkItem;
+
+
 #define CPU_TEMP_BUF_NLONGS 128
 #define CPU_COMMON                                                      \
     struct TranslationBlock *current_tb; /* currently executing TB  */  \
@@ -175,6 +184,10 @@  typedef struct CPUWatchpoint {
     QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;            \
     CPUWatchpoint *watchpoint_hit;                                      \
                                                                         \
+    QemuWorkItem queued_work;                                           \
+    uint64_t queued_local, queued_total;                                \
+    struct QemuMutex queue_lock;                                        \
+                                                                        \
     struct GDBRegisterState *gdb_regs;                                  \
                                                                         \
     /* Core interrupt code */                                           \
@@ -194,6 +207,7 @@  typedef struct CPUWatchpoint {
     uint32_t created;                                                   \
     struct QemuThread *thread;                                          \
     struct QemuCond *halt_cond;                                         \
+    struct QemuCond work_cond;                                          \
     const char *cpu_model_str;                                          \
     struct KVMState *kvm_state;                                         \
     struct kvm_run *kvm_run;                                            \
diff --git a/kvm-all.c b/kvm-all.c
index 0c0b3c3..40203f0 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -636,7 +636,7 @@  int kvm_cpu_exec(CPUState *env)
     struct kvm_run *run = env->kvm_run;
     int ret;
 
-    dprintf("kvm_cpu_exec()\n");
+    dprintf("kvm_cpu_exec() %d\n", env->cpu_index);
 
     do {
         if (env->exit_request) {
@@ -951,19 +951,6 @@  void kvm_setup_guest_memory(void *start, size_t size)
 }
 
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
-{
-#ifdef CONFIG_IOTHREAD
-    if (env == cpu_single_env) {
-        func(data);
-        return;
-    }
-    abort();
-#else
-    func(data);
-#endif
-}
-
 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                  target_ulong pc)
 {
@@ -1011,7 +998,7 @@  int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
     data.dbg.control |= reinject_trap;
     data.env = env;
 
-    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
+    qemu_queue_work(env, kvm_invoke_set_guest_debug, &data);
     return data.err;
 }
 
diff --git a/vl.c b/vl.c
index 321b18d..c7b46a9 100644
--- a/vl.c
+++ b/vl.c
@@ -3403,6 +3403,11 @@  void qemu_notify_event(void)
     }
 }
 
+void qemu_queue_work(CPUState *env, void (*func)(void *data), void *data)
+{
+        func(data);
+}
+
 void qemu_mutex_lock_iothread(void) {}
 void qemu_mutex_unlock_iothread(void) {}
 
@@ -3436,8 +3441,7 @@  static int tcg_has_work(void);
 
 static pthread_key_t current_env;
 
-CPUState *qemu_get_current_env(void);
-CPUState *qemu_get_current_env(void)
+static CPUState *qemu_get_current_env(void)
 {
     return pthread_getspecific(current_env);
 }
@@ -3474,8 +3478,10 @@  static int qemu_init_main_loop(void)
 
 static void qemu_wait_io_event(CPUState *env)
 {
-    while (!tcg_has_work())
+    while (!tcg_has_work()) {
+        qemu_flush_work(env);
         qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
+    }
 
     qemu_mutex_unlock(&qemu_global_mutex);
 
@@ -3488,6 +3494,7 @@  static void qemu_wait_io_event(CPUState *env)
     qemu_mutex_unlock(&qemu_fair_mutex);
 
     qemu_mutex_lock(&qemu_global_mutex);
+
     if (env->stop) {
         env->stop = 0;
         env->stopped = 1;
@@ -3514,8 +3521,11 @@  static void *kvm_cpu_thread_fn(void *arg)
     qemu_cond_signal(&qemu_cpu_cond);
 
     /* and wait for machine initialization */
-    while (!qemu_system_ready)
+    while (!qemu_system_ready) {
+        /* system reset and initialization is a heavy caller of queue_work() */
+        qemu_flush_work(env);
         qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
+    }
 
     while (1) {
         if (cpu_can_run(env))
@@ -3542,8 +3552,9 @@  static void *tcg_cpu_thread_fn(void *arg)
     qemu_cond_signal(&qemu_cpu_cond);
 
     /* and wait for machine initialization */
-    while (!qemu_system_ready)
+    while (!qemu_system_ready) {
         qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
+    }
 
     while (1) {
         tcg_cpu_exec();
@@ -3561,6 +3572,44 @@  void qemu_cpu_kick(void *_env)
         qemu_thread_signal(env->thread, SIGUSR1);
 }
 
+void qemu_queue_work(CPUState *env, void (*func)(void *data), void *data)
+{
+    QemuWorkItem *wii;
+
+    env->queued_total++;
+
+    if (env == qemu_get_current_env()) {
+        env->queued_local++;
+        func(data);
+        return;
+    }
+
+    wii = &env->queued_work;
+    wii->func = func;
+    wii->data = data;
+    wii->done = 0;
+
+    qemu_thread_signal(env->thread, SIGUSR1);
+
+    while (!wii->done) {
+        qemu_cond_wait(&env->work_cond, &qemu_global_mutex);
+    }
+
+    wii->func = NULL;
+}
+
+void qemu_flush_work(CPUState *env)
+{
+    QemuWorkItem *wi;
+
+    wi = &env->queued_work;
+    if (wi->func) {
+        wi->func(wi->data);
+        wi->done = 1;
+    }
+    qemu_cond_broadcast(&env->work_cond);
+}
+
 int qemu_cpu_self(void *_env)
 {
     CPUState *env = _env;
@@ -3719,6 +3768,8 @@  void qemu_init_vcpu(void *_env)
 {
     CPUState *env = _env;
 
+    qemu_cond_init(&env->work_cond);
+
     if (kvm_enabled())
         kvm_start_vcpu(env);
     else