
[RFC,V2,3/3] cpus: introduce async_run_safe_work_on_cpu.

Message ID 1436544486-31169-4-git-send-email-fred.konrad@greensocs.com
State New

Commit Message

fred.konrad@greensocs.com July 10, 2015, 4:08 p.m. UTC
From: KONRAD Frederic <fred.konrad@greensocs.com>

We already have async_run_on_cpu, but we need all VCPUs to be outside their
execution loop in order to execute some tb_flush/invalidate tasks:

async_run_safe_work_on_cpu schedules work on a VCPU, but the work only starts
once no VCPU is executing code.
While safe work is pending, cpu_has_work returns true, so cpu_exec returns and
the VCPUs can't re-enter their execution loop. cpu_thread_is_idle returns
false, so once all VCPUs are stop || stopped the safe work queue can be
flushed.
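
For illustration, a minimal sketch of a caller (the worker function and its
payload are hypothetical, not part of this patch):

    static void my_safe_work(void *data)
    {
        /* Every VCPU is outside cpu_exec() when this runs, so shared
         * translation state (e.g. for a tb_flush) can be touched safely. */
    }

    /* Queue it on one vCPU; all vCPUs get kicked out of their loop. */
    async_run_safe_work_on_cpu(cpu, my_safe_work, some_data);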

Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>

Changes V2 -> V3:
  * Unlock the mutex while executing the callback.
Changes V1 -> V2:
  * Move qemu_cpu_kick_thread to avoid prototype declaration.
  * Use the work_mutex lock to protect the queued_safe_work_* structures.
---
 cpu-exec.c        |   5 ++
 cpus.c            | 153 ++++++++++++++++++++++++++++++++++++++++--------------
 include/qom/cpu.h |  24 ++++++++-
 3 files changed, 141 insertions(+), 41 deletions(-)

Comments

Alex Bennée July 13, 2015, 4:20 p.m. UTC | #1
fred.konrad@greensocs.com writes:

> From: KONRAD Frederic <fred.konrad@greensocs.com>
>
> We already have async_run_on_cpu, but we need all VCPUs to be outside their
> execution loop in order to execute some tb_flush/invalidate tasks:
>
> async_run_safe_work_on_cpu schedules work on a VCPU, but the work only starts
> once no VCPU is executing code.
> While safe work is pending, cpu_has_work returns true, so cpu_exec returns and
> the VCPUs can't re-enter their execution loop. cpu_thread_is_idle returns
> false, so once all VCPUs are stop || stopped the safe work queue can be
> flushed.
>
> Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
>
> Changes V2 -> V3:
>   * Unlock the mutex while executing the callback.
> Changes V1 -> V2:
>   * Move qemu_cpu_kick_thread to avoid prototype declaration.
>   * Use the work_mutex lock to protect the queued_safe_work_* structures.
> ---
>  cpu-exec.c        |   5 ++
>  cpus.c            | 153 ++++++++++++++++++++++++++++++++++++++++--------------
>  include/qom/cpu.h |  24 ++++++++-
>  3 files changed, 141 insertions(+), 41 deletions(-)
>
> diff --git a/cpu-exec.c b/cpu-exec.c
> index 2fdf89d..7581f76 100644
> --- a/cpu-exec.c
> +++ b/cpu-exec.c
> @@ -363,6 +363,11 @@ int cpu_exec(CPUState *cpu)
>      /* This must be volatile so it is not trashed by longjmp() */
>      volatile bool have_tb_lock = false;
>  
> +    if (async_safe_work_pending()) {
> +        cpu->exit_request = 1;
> +        return 0;
> +    }
> +
>      if (cpu->halted) {
>          if (!cpu_has_work(cpu)) {
>              return EXCP_HALTED;
> diff --git a/cpus.c b/cpus.c
> index eabd4b1..4321380 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -76,7 +76,7 @@ bool cpu_is_stopped(CPUState *cpu)
>  
>  static bool cpu_thread_is_idle(CPUState *cpu)
>  {
> -    if (cpu->stop || cpu->queued_work_first) {
> +    if (cpu->stop || cpu->queued_work_first || cpu->queued_safe_work_first) {
>          return false;
>      }
>      if (cpu_is_stopped(cpu)) {
> @@ -833,6 +833,45 @@ void qemu_init_cpu_loop(void)
>      qemu_thread_get_self(&io_thread);
>  }
>  
> +static void qemu_cpu_kick_thread(CPUState *cpu)
> +{
> +#ifndef _WIN32
> +    int err;
> +
> +    err = pthread_kill(cpu->thread->thread, SIG_IPI);
> +    if (err) {
> +        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
> +        exit(1);
> +    }
> +#else /* _WIN32 */
> +    if (!qemu_cpu_is_self(cpu)) {
> +        CONTEXT tcgContext;
> +
> +        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
> +            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
> +                    GetLastError());
> +            exit(1);
> +        }
> +
> +        /* On multi-core systems, we are not sure that the thread is actually
> +         * suspended until we can get the context.
> +         */
> +        tcgContext.ContextFlags = CONTEXT_CONTROL;
> +        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
> +            continue;
> +        }
> +
> +        cpu_signal(0);
> +
> +        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
> +            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
> +                    GetLastError());
> +            exit(1);
> +        }
> +    }
> +#endif

I'm going to go out on a limb and guess these sort of implementation
specifics should be in the posix/win32 utility files, that is unless
Glib abstracts enough of it for us.

> +}
> +
>  void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
>  {
>      struct qemu_work_item wi;
> @@ -894,6 +933,76 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
>      qemu_cpu_kick(cpu);
>  }
>  
> +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
> +                                void *data)
> +{
> +    struct qemu_work_item *wi;
> +
> +    wi = g_malloc0(sizeof(struct qemu_work_item));
> +    wi->func = func;
> +    wi->data = data;

Is there anything that prevents the user calling the function with the
same payload for multiple CPUs?

> +    wi->free = true;
> +
> +    qemu_mutex_lock(&cpu->work_mutex);
> +    if (cpu->queued_safe_work_first == NULL) {
> +        cpu->queued_safe_work_first = wi;
> +    } else {
> +        cpu->queued_safe_work_last->next = wi;
> +    }
> +    cpu->queued_safe_work_last = wi;

I'm surprised we haven't added some helpers to the qemu_work_queue API
for all this identical boilerplate but whatever...


> +    wi->next = NULL;
> +    wi->done = false;
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +
> +    CPU_FOREACH(cpu) {
> +        qemu_cpu_kick_thread(cpu);
> +    }
> +}
> +
> +static void flush_queued_safe_work(CPUState *cpu)
> +{
> +    struct qemu_work_item *wi;
> +    CPUState *other_cpu;
> +
> +    if (cpu->queued_safe_work_first == NULL) {
> +        return;
> +    }
> +
> +    CPU_FOREACH(other_cpu) {
> +        if (other_cpu->tcg_executing != 0) {
> +            return;
> +        }
> +    }
> +
> +    qemu_mutex_lock(&cpu->work_mutex);
> +    while ((wi = cpu->queued_safe_work_first)) {
> +        cpu->queued_safe_work_first = wi->next;
> +        qemu_mutex_unlock(&cpu->work_mutex);
> +        wi->func(wi->data);
> +        qemu_mutex_lock(&cpu->work_mutex);
> +        wi->done = true;
> +        if (wi->free) {
> +            g_free(wi);
> +        }
> +    }
> +    cpu->queued_safe_work_last = NULL;
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +    qemu_cond_broadcast(&qemu_work_cond);
> +}
> +
> +/* FIXME: add a counter to avoid this CPU_FOREACH() */
> +bool async_safe_work_pending(void)
> +{
> +    CPUState *cpu;
> +
> +    CPU_FOREACH(cpu) {
> +        if (cpu->queued_safe_work_first) {
> +            return true;
> +        }
> +    }
> +    return false;
> +}
> +
>  static void flush_queued_work(CPUState *cpu)
>  {
>      struct qemu_work_item *wi;
> @@ -926,6 +1035,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
>          cpu->stopped = true;
>          qemu_cond_signal(&qemu_pause_cond);
>      }
> +    qemu_mutex_unlock_iothread();
> +    flush_queued_safe_work(cpu);
> +    qemu_mutex_lock_iothread();
>      flush_queued_work(cpu);
>      cpu->thread_kicked = false;
>  }
> @@ -1085,45 +1197,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>      return NULL;
>  }
>  
> -static void qemu_cpu_kick_thread(CPUState *cpu)
> -{
> -#ifndef _WIN32
> -    int err;
> -
> -    err = pthread_kill(cpu->thread->thread, SIG_IPI);
> -    if (err) {
> -        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
> -        exit(1);
> -    }
> -#else /* _WIN32 */
> -    if (!qemu_cpu_is_self(cpu)) {
> -        CONTEXT tcgContext;
> -
> -        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
> -            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
> -                    GetLastError());
> -            exit(1);
> -        }
> -
> -        /* On multi-core systems, we are not sure that the thread is actually
> -         * suspended until we can get the context.
> -         */
> -        tcgContext.ContextFlags = CONTEXT_CONTROL;
> -        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
> -            continue;
> -        }
> -
> -        cpu_signal(0);
> -
> -        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
> -            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
> -                    GetLastError());
> -            exit(1);
> -        }
> -    }
> -#endif
> -}
> -

Ahh here it is, could you not just have put a function declaration at
the top to avoid churning the code too much?

>  void qemu_cpu_kick(CPUState *cpu)
>  {
>      qemu_cond_broadcast(cpu->halt_cond);
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index a2de536..692d0d3 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -243,8 +243,9 @@ struct kvm_run;
>   * @mem_io_pc: Host Program Counter at which the memory was accessed.
>   * @mem_io_vaddr: Target virtual address at which the memory was accessed.
>   * @kvm_fd: vCPU file descriptor for KVM.
> - * @work_mutex: Lock to prevent multiple access to queued_work_*.
> + * @work_mutex: Lock to prevent multiple access to queued_* qemu_work_item.
>   * @queued_work_first: First asynchronous work pending.
> + * @queued_safe_work_first: First item of safe work pending.
>   *
>   * State of one CPU core or thread.
>   */
> @@ -267,6 +268,7 @@ struct CPUState {
>      struct QemuCond *halt_cond;
>      QemuMutex work_mutex;
>      struct qemu_work_item *queued_work_first, *queued_work_last;
> +    struct qemu_work_item *queued_safe_work_first, *queued_safe_work_last;
>      bool thread_kicked;
>      bool created;
>      bool stop;
> @@ -546,6 +548,26 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
>  void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
>  
>  /**
> + * async_run_safe_work_on_cpu:
> + * @cpu: The vCPU to run on.
> + * @func: The function to be executed.
> + * @data: Data to pass to the function.
> + *
> + * Schedules the function @func for execution on the vCPU @cpu asynchronously
> + * when all the VCPUs are outside their loop.
> + */
> +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
> +                                void *data);

We should probably document the fact that the user should allocate a
data structure for each CPU they send async work to.
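
Something along these lines would do it (MyData and my_func are hypothetical):

    /* One payload per CPU: each item may run after the caller has moved
     * on, and nothing stops the same pointer being queued on several
     * CPUs -- so a shared payload must be read-only or known to outlive
     * all of them. */
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        MyData *d = g_new0(MyData, 1);
        d->cpu = cpu;
        async_run_safe_work_on_cpu(cpu, my_func, d);
        /* my_func() g_free()s d when it eventually runs. */
    }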

> +
> +/**
> + * async_safe_work_pending:
> + *
> + * Check whether any safe work is pending on any VCPUs.
> + * Returns: @true if a safe work is pending, @false otherwise.
> + */
> +bool async_safe_work_pending(void);
> +
> +/**
>   * qemu_get_cpu:
>   * @index: The CPUState@cpu_index value of the CPU to obtain.
>   *

Otherwise looks pretty reasonable to me.
Paolo Bonzini July 13, 2015, 10:56 p.m. UTC | #2
On 13/07/2015 18:20, Alex Bennée wrote:
>> +static void qemu_cpu_kick_thread(CPUState *cpu)
>> +{
>> +#ifndef _WIN32
>> +    int err;
>> +
>> +    err = pthread_kill(cpu->thread->thread, SIG_IPI);
>> +    if (err) {
>> +        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
>> +        exit(1);
>> +    }
>> +#else /* _WIN32 */
>> +    if (!qemu_cpu_is_self(cpu)) {
>> +        CONTEXT tcgContext;
>> +
>> +        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
>> +            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
>> +                    GetLastError());
>> +            exit(1);
>> +        }
>> +
>> +        /* On multi-core systems, we are not sure that the thread is actually
>> +         * suspended until we can get the context.
>> +         */
>> +        tcgContext.ContextFlags = CONTEXT_CONTROL;
>> +        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
>> +            continue;
>> +        }
>> +
>> +        cpu_signal(0);
>> +
>> +        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
>> +            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
>> +                    GetLastError());
>> +            exit(1);
>> +        }
>> +    }
>> +#endif
> 
> I'm going to go out on a limb and guess these sort of implementation
> specifics should be in the posix/win32 utility files, that is unless
> Glib abstracts enough of it for us.

As you found later, this part of the patch is just moving code around.
However, ultimately getting rid of this is basically the reason why I'm
interested in MTTCG. :)

>> +}
>> +
>>  void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
>>  {
>>      struct qemu_work_item wi;
>> @@ -894,6 +933,76 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
>>      qemu_cpu_kick(cpu);
>>  }
>>  
>> +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
>> +                                void *data)
>> +{
>> +    struct qemu_work_item *wi;
>> +
>> +    wi = g_malloc0(sizeof(struct qemu_work_item));
>> +    wi->func = func;
>> +    wi->data = data;
> 
> Is there anything that prevents the user calling the function with the
> same payload for multiple CPUs?

Why?  The data could be read only, or could be known to outlive the CPUs.

>> +    wi->free = true;
>> +
>> +    qemu_mutex_lock(&cpu->work_mutex);
>> +    if (cpu->queued_safe_work_first == NULL) {
>> +        cpu->queued_safe_work_first = wi;
>> +    } else {
>> +        cpu->queued_safe_work_last->next = wi;
>> +    }
>> +    cpu->queued_safe_work_last = wi;
> 
> I'm surprised we haven't added some helpers to the qemu_work_queue API
> for all this identical boilerplate but whatever...

Yes, it should be using QSIMPLEQ.  Can be done later.
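
For reference, a sketch of what that could look like with the macros from
qemu/queue.h (field and member names here are hypothetical):

    /* In struct qemu_work_item: */
    QSIMPLEQ_ENTRY(qemu_work_item) node;

    /* In struct CPUState: */
    QSIMPLEQ_HEAD(, qemu_work_item) queued_safe_work;

    /* Enqueue, replacing the hand-rolled first/last pointer pair: */
    qemu_mutex_lock(&cpu->work_mutex);
    QSIMPLEQ_INSERT_TAIL(&cpu->queued_safe_work, wi, node);
    qemu_mutex_unlock(&cpu->work_mutex);

    /* Dequeue in flush_queued_safe_work(): */
    while (!QSIMPLEQ_EMPTY(&cpu->queued_safe_work)) {
        wi = QSIMPLEQ_FIRST(&cpu->queued_safe_work);
        QSIMPLEQ_REMOVE_HEAD(&cpu->queued_safe_work, node);
        /* drop work_mutex, run wi->func(wi->data), retake it */
    }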

It is indeed reasonable, but it should be used with great care.

Paolo
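
Regarding the FIXME about the CPU_FOREACH() in async_safe_work_pending(), one
possible shape for the suggested counter (a sketch; the counter and its
update sites are hypothetical, not part of this patch):

    /* Bumped when an item is queued, dropped as each item completes,
     * so the hot-path check in cpu_exec() becomes O(1). */
    static int safe_work_pending_count;

    /* In async_run_safe_work_on_cpu(), after queuing wi: */
    atomic_inc(&safe_work_pending_count);

    /* In flush_queued_safe_work(), after each wi->func() returns: */
    atomic_dec(&safe_work_pending_count);

    bool async_safe_work_pending(void)
    {
        return atomic_read(&safe_work_pending_count) != 0;
    }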