Patchwork [06/17] aio: introduce AioContext, move bottom halves there

login
register
mail settings
Submitter Paolo Bonzini
Date Sept. 25, 2012, 12:55 p.m.
Message ID <1348577763-12920-7-git-send-email-pbonzini@redhat.com>
Download mbox | patch
Permalink /patch/186814/
State New
Headers show

Comments

Paolo Bonzini - Sept. 25, 2012, 12:55 p.m.
Start introducing AioContext, which will let us remove globals from
aio.c/async.c, and introduce multiple I/O threads.

The bottom half functions now take an additional AioContext argument.
A bottom half is created with a specific AioContext that remains the
same throughout the lifetime.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 async.c               | 30 +++++++++----------
 hw/hw.h               |  1 +
 iohandler.c           |  1 +
 main-loop.c           | 18 +++++++++++-
 main-loop.h           | 54 ++---------------------------------
 qemu-aio.h            | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 qemu-char.h           |  1 +
 qemu-common.h         |  1 +
 qemu-coroutine-lock.c |  2 +-
 9 files changed, 118 insertions(+), 69 deletions(-)
Anthony Liguori - Sept. 25, 2012, 9:51 p.m.
Paolo Bonzini <pbonzini@redhat.com> writes:

> Start introducing AioContext, which will let us remove globals from
> aio.c/async.c, and introduce multiple I/O threads.
>
> The bottom half functions now take an additional AioContext argument.
> A bottom half is created with a specific AioContext that remains the
> same throughout the lifetime.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  async.c               | 30 +++++++++----------
>  hw/hw.h               |  1 +
>  iohandler.c           |  1 +
>  main-loop.c           | 18 +++++++++++-
>  main-loop.h           | 54 ++---------------------------------
>  qemu-aio.h            | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++-
>  qemu-char.h           |  1 +
>  qemu-common.h         |  1 +
>  qemu-coroutine-lock.c |  2 +-
>  9 file modificati, 118 inserzioni(+), 69 rimozioni(-)
>
> diff --git a/async.c b/async.c
> index 85cc641..189ee1b 100644
> --- a/async.c
> +++ b/async.c
> @@ -26,9 +26,6 @@
>  #include "qemu-aio.h"
>  #include "main-loop.h"
>  
> -/* Anchor of the list of Bottom Halves belonging to the context */
> -static struct QEMUBH *first_bh;
> -
>  /***********************************************************/
>  /* bottom halves (can be seen as timers which expire ASAP) */
>  
> @@ -41,27 +38,26 @@ struct QEMUBH {
>      bool deleted;
>  };
>  
> -QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
> +QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
>  {
>      QEMUBH *bh;
>      bh = g_malloc0(sizeof(QEMUBH));
>      bh->cb = cb;
>      bh->opaque = opaque;
> -    bh->next = first_bh;
> -    first_bh = bh;
> +    bh->next = ctx->first_bh;
> +    ctx->first_bh = bh;
>      return bh;
>  }
>  
> -int qemu_bh_poll(void)
> +int aio_bh_poll(AioContext *ctx)
>  {
>      QEMUBH *bh, **bhp, *next;
>      int ret;
> -    static int nesting = 0;
>  
> -    nesting++;
> +    ctx->walking_bh++;
>  
>      ret = 0;
> -    for (bh = first_bh; bh; bh = next) {
> +    for (bh = ctx->first_bh; bh; bh = next) {
>          next = bh->next;
>          if (!bh->deleted && bh->scheduled) {
>              bh->scheduled = 0;
> @@ -72,11 +68,11 @@ int qemu_bh_poll(void)
>          }
>      }
>  
> -    nesting--;
> +    ctx->walking_bh--;
>  
>      /* remove deleted bhs */
> -    if (!nesting) {
> -        bhp = &first_bh;
> +    if (!ctx->walking_bh) {
> +        bhp = &ctx->first_bh;
>          while (*bhp) {
>              bh = *bhp;
>              if (bh->deleted) {
> @@ -120,11 +116,11 @@ void qemu_bh_delete(QEMUBH *bh)
>      bh->deleted = 1;
>  }
>  
> -void qemu_bh_update_timeout(uint32_t *timeout)
> +void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout)
>  {
>      QEMUBH *bh;
>  
> -    for (bh = first_bh; bh; bh = bh->next) {
> +    for (bh = ctx->first_bh; bh; bh = bh->next) {
>          if (!bh->deleted && bh->scheduled) {
>              if (bh->idle) {
>                  /* idle bottom halves will be polled at least
> @@ -140,3 +136,7 @@ void qemu_bh_update_timeout(uint32_t *timeout)
>      }
>  }
>  
> +AioContext *aio_context_new(void)
> +{
> +    return g_new0(AioContext, 1);
> +}
> diff --git a/hw/hw.h b/hw/hw.h
> index e5cb9bf..acb718d 100644
> --- a/hw/hw.h
> +++ b/hw/hw.h
> @@ -10,6 +10,7 @@
>  
>  #include "ioport.h"
>  #include "irq.h"
> +#include "qemu-aio.h"
>  #include "qemu-file.h"
>  #include "vmstate.h"
>  
> diff --git a/iohandler.c b/iohandler.c
> index a2d871b..60460a6 100644
> --- a/iohandler.c
> +++ b/iohandler.c
> @@ -26,6 +26,7 @@
>  #include "qemu-common.h"
>  #include "qemu-char.h"
>  #include "qemu-queue.h"
> +#include "qemu-aio.h"
>  #include "main-loop.h"
>  
>  #ifndef _WIN32
> diff --git a/main-loop.c b/main-loop.c
> index eb3b6e6..f0bc515 100644
> --- a/main-loop.c
> +++ b/main-loop.c
> @@ -26,6 +26,7 @@
>  #include "qemu-timer.h"
>  #include "slirp/slirp.h"
>  #include "main-loop.h"
> +#include "qemu-aio.h"
>  
>  #ifndef _WIN32
>  
> @@ -199,6 +200,8 @@ static int qemu_signal_init(void)
>  }
>  #endif
>  
> +static AioContext *qemu_aio_context;
> +
>  int main_loop_init(void)
>  {
>      int ret;
> @@ -215,6 +218,7 @@ int main_loop_init(void)
>          return ret;
>      }
>  
> +    qemu_aio_context = aio_context_new();
>      return 0;
>  }
>  
> @@ -478,7 +482,7 @@ int main_loop_wait(int nonblocking)
>      if (nonblocking) {
>          timeout = 0;
>      } else {
> -        qemu_bh_update_timeout(&timeout);
> +        aio_bh_update_timeout(qemu_aio_context, &timeout);
>      }
>  
>      /* poll any events */
> @@ -507,3 +511,15 @@ int main_loop_wait(int nonblocking)
>  
>      return ret;
>  }
> +
> +/* Functions to operate on the main QEMU AioContext.  */
> +
> +QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
> +{
> +    return aio_bh_new(qemu_aio_context, cb, opaque);
> +}
> +
> +int qemu_bh_poll(void)
> +{
> +    return aio_bh_poll(qemu_aio_context);
> +}
> diff --git a/main-loop.h b/main-loop.h
> index dce1cd9..47644ce 100644
> --- a/main-loop.h
> +++ b/main-loop.h
> @@ -25,6 +25,8 @@
>  #ifndef QEMU_MAIN_LOOP_H
>  #define QEMU_MAIN_LOOP_H 1
>  
> +#include "qemu-aio.h"
> +
>  #define SIG_IPI SIGUSR1
>  
>  /**
> @@ -173,7 +175,6 @@ void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque);
>  
>  typedef void IOReadHandler(void *opaque, const uint8_t *buf, int size);
>  typedef int IOCanReadHandler(void *opaque);
> -typedef void IOHandler(void *opaque);
>  
>  /**
>   * qemu_set_fd_handler2: Register a file descriptor with the main loop
> @@ -254,56 +255,6 @@ int qemu_set_fd_handler(int fd,
>                          IOHandler *fd_write,
>                          void *opaque);
>  
> -typedef struct QEMUBH QEMUBH;
> -typedef void QEMUBHFunc(void *opaque);
> -
> -/**
> - * qemu_bh_new: Allocate a new bottom half structure.
> - *
> - * Bottom halves are lightweight callbacks whose invocation is guaranteed
> - * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
> - * is opaque and must be allocated prior to its use.
> - */
> -QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
> -
> -/**
> - * qemu_bh_schedule: Schedule a bottom half.
> - *
> - * Scheduling a bottom half interrupts the main loop and causes the
> - * execution of the callback that was passed to qemu_bh_new.
> - *
> - * Bottom halves that are scheduled from a bottom half handler are instantly
> - * invoked.  This can create an infinite loop if a bottom half handler
> - * schedules itself.
> - *
> - * @bh: The bottom half to be scheduled.
> - */
> -void qemu_bh_schedule(QEMUBH *bh);
> -
> -/**
> - * qemu_bh_cancel: Cancel execution of a bottom half.
> - *
> - * Canceling execution of a bottom half undoes the effect of calls to
> - * qemu_bh_schedule without freeing its resources yet.  While cancellation
> - * itself is also wait-free and thread-safe, it can of course race with the
> - * loop that executes bottom halves unless you are holding the iothread
> - * mutex.  This makes it mostly useless if you are not holding the mutex.
> - *
> - * @bh: The bottom half to be canceled.
> - */
> -void qemu_bh_cancel(QEMUBH *bh);
> -
> -/**
> - *qemu_bh_delete: Cancel execution of a bottom half and free its resources.
> - *
> - * Deleting a bottom half frees the memory that was allocated for it by
> - * qemu_bh_new.  It also implies canceling the bottom half if it was
> - * scheduled.
> - *
> - * @bh: The bottom half to be deleted.
> - */
> -void qemu_bh_delete(QEMUBH *bh);
> -
>  #ifdef CONFIG_POSIX
>  /**
>   * qemu_add_child_watch: Register a child process for reaping.
> @@ -359,6 +310,7 @@ void qemu_fd_register(int fd);
>  void qemu_iohandler_fill(int *pnfds, fd_set *readfds, fd_set *writefds, fd_set *xfds);
>  void qemu_iohandler_poll(fd_set *readfds, fd_set *writefds, fd_set *xfds, int rc);
>  
> +QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
>  void qemu_bh_schedule_idle(QEMUBH *bh);
>  int qemu_bh_poll(void);
>  void qemu_bh_update_timeout(uint32_t *timeout);
> diff --git a/qemu-aio.h b/qemu-aio.h
> index dc416a5..2ed6ad3 100644
> --- a/qemu-aio.h
> +++ b/qemu-aio.h
> @@ -15,7 +15,6 @@
>  #define QEMU_AIO_H
>  
>  #include "qemu-common.h"
> -#include "qemu-char.h"
>  #include "event_notifier.h"
>  
>  typedef struct BlockDriverAIOCB BlockDriverAIOCB;
> @@ -39,9 +38,87 @@ void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
>                     BlockDriverCompletionFunc *cb, void *opaque);
>  void qemu_aio_release(void *p);
>  
> +typedef struct AioHandler AioHandler;
> +typedef void QEMUBHFunc(void *opaque);
> +typedef void IOHandler(void *opaque);
> +
> +typedef struct AioContext {
> +    /* Anchor of the list of Bottom Halves belonging to the context */
> +    struct QEMUBH *first_bh;
> +
> +    /* A simple lock used to protect the first_bh list, and ensure that
> +     * no callbacks are removed while we're walking and dispatching callbacks.
> +     */
> +    int walking_bh;
> +} AioContext;
> +
>  /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
>  typedef int (AioFlushEventNotifierHandler)(EventNotifier *e);
>  
> +/**
> + * aio_context_new: Allocate a new AioContext.
> + *
> + * AioContext provide a mini event-loop that can be waited on synchronously.
> + * They also provide bottom halves, a service to execute a piece of code
> + * as soon as possible.
> + */
> +AioContext *aio_context_new(void);
> +
> +/**
> + * aio_bh_new: Allocate a new bottom half structure.
> + *
> + * Bottom halves are lightweight callbacks whose invocation is guaranteed
> + * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
> + * is opaque and must be allocated prior to its use.
> + */
> +QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
> +
> +/**
> + * aio_bh_poll: Poll bottom halves for an AioContext.
> + *
> + * These are internal functions used by the QEMU main loop.
> + */
> +int aio_bh_poll(AioContext *ctx);
> +void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout);
> +
> +/**
> + * qemu_bh_schedule: Schedule a bottom half.
> + *
> + * Scheduling a bottom half interrupts the main loop and causes the
> + * execution of the callback that was passed to qemu_bh_new.
> + *
> + * Bottom halves that are scheduled from a bottom half handler are instantly
> + * invoked.  This can create an infinite loop if a bottom half handler
> + * schedules itself.
> + *
> + * @bh: The bottom half to be scheduled.
> + */
> +void qemu_bh_schedule(QEMUBH *bh);
> +
> +/**
> + * qemu_bh_cancel: Cancel execution of a bottom half.
> + *
> + * Canceling execution of a bottom half undoes the effect of calls to
> + * qemu_bh_schedule without freeing its resources yet.  While cancellation
> + * itself is also wait-free and thread-safe, it can of course race with the
> + * loop that executes bottom halves unless you are holding the iothread
> + * mutex.  This makes it mostly useless if you are not holding the mutex.
> + *
> + * @bh: The bottom half to be canceled.
> + */
> +void qemu_bh_cancel(QEMUBH *bh);
> +
> +/**
> + *qemu_bh_delete: Cancel execution of a bottom half and free its resources.
> + *
> + * Deleting a bottom half frees the memory that was allocated for it by
> + * qemu_bh_new.  It also implies canceling the bottom half if it was
> + * scheduled.
> + *
> + * @bh: The bottom half to be deleted.
> + */
> +void qemu_bh_delete(QEMUBH *bh);
> +
>  /* Flush any pending AIO operation. This function will block until all
>   * outstanding AIO operations have been completed or cancelled. */
>  void qemu_aio_flush(void);
> diff --git a/qemu-char.h b/qemu-char.h
> index 486644b..5087168 100644
> --- a/qemu-char.h
> +++ b/qemu-char.h
> @@ -5,6 +5,7 @@
>  #include "qemu-queue.h"
>  #include "qemu-option.h"
>  #include "qemu-config.h"
> +#include "qemu-aio.h"
>  #include "qobject.h"
>  #include "qstring.h"
>  #include "main-loop.h"
> diff --git a/qemu-common.h b/qemu-common.h
> index e5c2bcd..ac44657 100644
> --- a/qemu-common.h
> +++ b/qemu-common.h
> @@ -14,6 +14,7 @@
>  
>  typedef struct QEMUTimer QEMUTimer;
>  typedef struct QEMUFile QEMUFile;
> +typedef struct QEMUBH QEMUBH;
>  typedef struct DeviceState DeviceState;
>  
>  struct Monitor;

Any reason to do this here vs. just #include "qemu-aio.h" in
qemu-common.h?

I don't see an obvious dependency on qemu-common.h in qemu-aio.h other
than this typedef.

Regards,

Anthony Liguori

> diff --git a/qemu-coroutine-lock.c b/qemu-coroutine-lock.c
> index 26ad76b..9dda3f8 100644
> --- a/qemu-coroutine-lock.c
> +++ b/qemu-coroutine-lock.c
> @@ -26,7 +26,7 @@
>  #include "qemu-coroutine.h"
>  #include "qemu-coroutine-int.h"
>  #include "qemu-queue.h"
> -#include "main-loop.h"
> +#include "qemu-aio.h"
>  #include "trace.h"
>  
>  static QTAILQ_HEAD(, Coroutine) unlock_bh_queue =
> -- 
> 1.7.12
Paolo Bonzini - Sept. 26, 2012, 6:30 a.m.
On 25/09/2012 23:51, Anthony Liguori wrote:
>> >  typedef struct QEMUTimer QEMUTimer;
>> >  typedef struct QEMUFile QEMUFile;
>> > +typedef struct QEMUBH QEMUBH;
>> >  typedef struct DeviceState DeviceState;
>> >  
>> >  struct Monitor;
> Any reason to do this here vs. just #include "qemu-aio.h" in
> qemu-common.h?
> 
> I don't see an obvious dependency on qemu-common.h in qemu-aio.h other
> than this typedef.

I thought we were moving away from including everything in
qemu-common.h.  In fact, the only includes from QEMU in qemu-common.h are:

#ifdef _WIN32
#include "qemu-os-win32.h"
#endif

#ifdef CONFIG_POSIX
#include "qemu-os-posix.h"
#endif

#include "osdep.h"
#include "bswap.h"

#include "cpu.h"

where cpu.h could probably be removed---perhaps should.

Paolo

Patch

diff --git a/async.c b/async.c
index 85cc641..189ee1b 100644
--- a/async.c
+++ b/async.c
@@ -26,9 +26,6 @@ 
 #include "qemu-aio.h"
 #include "main-loop.h"
 
-/* Anchor of the list of Bottom Halves belonging to the context */
-static struct QEMUBH *first_bh;
-
 /***********************************************************/
 /* bottom halves (can be seen as timers which expire ASAP) */
 
@@ -41,27 +38,26 @@  struct QEMUBH {
     bool deleted;
 };
 
-QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
+QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
     bh = g_malloc0(sizeof(QEMUBH));
     bh->cb = cb;
     bh->opaque = opaque;
-    bh->next = first_bh;
-    first_bh = bh;
+    bh->next = ctx->first_bh;
+    ctx->first_bh = bh;
     return bh;
 }
 
-int qemu_bh_poll(void)
+int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
     int ret;
-    static int nesting = 0;
 
-    nesting++;
+    ctx->walking_bh++;
 
     ret = 0;
-    for (bh = first_bh; bh; bh = next) {
+    for (bh = ctx->first_bh; bh; bh = next) {
         next = bh->next;
         if (!bh->deleted && bh->scheduled) {
             bh->scheduled = 0;
@@ -72,11 +68,11 @@  int qemu_bh_poll(void)
         }
     }
 
-    nesting--;
+    ctx->walking_bh--;
 
     /* remove deleted bhs */
-    if (!nesting) {
-        bhp = &first_bh;
+    if (!ctx->walking_bh) {
+        bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
             if (bh->deleted) {
@@ -120,11 +116,11 @@  void qemu_bh_delete(QEMUBH *bh)
     bh->deleted = 1;
 }
 
-void qemu_bh_update_timeout(uint32_t *timeout)
+void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout)
 {
     QEMUBH *bh;
 
-    for (bh = first_bh; bh; bh = bh->next) {
+    for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
@@ -140,3 +136,7 @@  void qemu_bh_update_timeout(uint32_t *timeout)
     }
 }
 
+AioContext *aio_context_new(void)
+{
+    return g_new0(AioContext, 1);
+}
diff --git a/hw/hw.h b/hw/hw.h
index e5cb9bf..acb718d 100644
--- a/hw/hw.h
+++ b/hw/hw.h
@@ -10,6 +10,7 @@ 
 
 #include "ioport.h"
 #include "irq.h"
+#include "qemu-aio.h"
 #include "qemu-file.h"
 #include "vmstate.h"
 
diff --git a/iohandler.c b/iohandler.c
index a2d871b..60460a6 100644
--- a/iohandler.c
+++ b/iohandler.c
@@ -26,6 +26,7 @@ 
 #include "qemu-common.h"
 #include "qemu-char.h"
 #include "qemu-queue.h"
+#include "qemu-aio.h"
 #include "main-loop.h"
 
 #ifndef _WIN32
diff --git a/main-loop.c b/main-loop.c
index eb3b6e6..f0bc515 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -26,6 +26,7 @@ 
 #include "qemu-timer.h"
 #include "slirp/slirp.h"
 #include "main-loop.h"
+#include "qemu-aio.h"
 
 #ifndef _WIN32
 
@@ -199,6 +200,8 @@  static int qemu_signal_init(void)
 }
 #endif
 
+static AioContext *qemu_aio_context;
+
 int main_loop_init(void)
 {
     int ret;
@@ -215,6 +218,7 @@  int main_loop_init(void)
         return ret;
     }
 
+    qemu_aio_context = aio_context_new();
     return 0;
 }
 
@@ -478,7 +482,7 @@  int main_loop_wait(int nonblocking)
     if (nonblocking) {
         timeout = 0;
     } else {
-        qemu_bh_update_timeout(&timeout);
+        aio_bh_update_timeout(qemu_aio_context, &timeout);
     }
 
     /* poll any events */
@@ -507,3 +511,15 @@  int main_loop_wait(int nonblocking)
 
     return ret;
 }
+
+/* Functions to operate on the main QEMU AioContext.  */
+
+QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
+{
+    return aio_bh_new(qemu_aio_context, cb, opaque);
+}
+
+int qemu_bh_poll(void)
+{
+    return aio_bh_poll(qemu_aio_context);
+}
diff --git a/main-loop.h b/main-loop.h
index dce1cd9..47644ce 100644
--- a/main-loop.h
+++ b/main-loop.h
@@ -25,6 +25,8 @@ 
 #ifndef QEMU_MAIN_LOOP_H
 #define QEMU_MAIN_LOOP_H 1
 
+#include "qemu-aio.h"
+
 #define SIG_IPI SIGUSR1
 
 /**
@@ -173,7 +175,6 @@  void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque);
 
 typedef void IOReadHandler(void *opaque, const uint8_t *buf, int size);
 typedef int IOCanReadHandler(void *opaque);
-typedef void IOHandler(void *opaque);
 
 /**
  * qemu_set_fd_handler2: Register a file descriptor with the main loop
@@ -254,56 +255,6 @@  int qemu_set_fd_handler(int fd,
                         IOHandler *fd_write,
                         void *opaque);
 
-typedef struct QEMUBH QEMUBH;
-typedef void QEMUBHFunc(void *opaque);
-
-/**
- * qemu_bh_new: Allocate a new bottom half structure.
- *
- * Bottom halves are lightweight callbacks whose invocation is guaranteed
- * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
- * is opaque and must be allocated prior to its use.
- */
-QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
-
-/**
- * qemu_bh_schedule: Schedule a bottom half.
- *
- * Scheduling a bottom half interrupts the main loop and causes the
- * execution of the callback that was passed to qemu_bh_new.
- *
- * Bottom halves that are scheduled from a bottom half handler are instantly
- * invoked.  This can create an infinite loop if a bottom half handler
- * schedules itself.
- *
- * @bh: The bottom half to be scheduled.
- */
-void qemu_bh_schedule(QEMUBH *bh);
-
-/**
- * qemu_bh_cancel: Cancel execution of a bottom half.
- *
- * Canceling execution of a bottom half undoes the effect of calls to
- * qemu_bh_schedule without freeing its resources yet.  While cancellation
- * itself is also wait-free and thread-safe, it can of course race with the
- * loop that executes bottom halves unless you are holding the iothread
- * mutex.  This makes it mostly useless if you are not holding the mutex.
- *
- * @bh: The bottom half to be canceled.
- */
-void qemu_bh_cancel(QEMUBH *bh);
-
-/**
- *qemu_bh_delete: Cancel execution of a bottom half and free its resources.
- *
- * Deleting a bottom half frees the memory that was allocated for it by
- * qemu_bh_new.  It also implies canceling the bottom half if it was
- * scheduled.
- *
- * @bh: The bottom half to be deleted.
- */
-void qemu_bh_delete(QEMUBH *bh);
-
 #ifdef CONFIG_POSIX
 /**
  * qemu_add_child_watch: Register a child process for reaping.
@@ -359,6 +310,7 @@  void qemu_fd_register(int fd);
 void qemu_iohandler_fill(int *pnfds, fd_set *readfds, fd_set *writefds, fd_set *xfds);
 void qemu_iohandler_poll(fd_set *readfds, fd_set *writefds, fd_set *xfds, int rc);
 
+QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
 void qemu_bh_schedule_idle(QEMUBH *bh);
 int qemu_bh_poll(void);
 void qemu_bh_update_timeout(uint32_t *timeout);
diff --git a/qemu-aio.h b/qemu-aio.h
index dc416a5..2ed6ad3 100644
--- a/qemu-aio.h
+++ b/qemu-aio.h
@@ -15,7 +15,6 @@ 
 #define QEMU_AIO_H
 
 #include "qemu-common.h"
-#include "qemu-char.h"
 #include "event_notifier.h"
 
 typedef struct BlockDriverAIOCB BlockDriverAIOCB;
@@ -39,9 +38,87 @@  void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
                    BlockDriverCompletionFunc *cb, void *opaque);
 void qemu_aio_release(void *p);
 
+typedef struct AioHandler AioHandler;
+typedef void QEMUBHFunc(void *opaque);
+typedef void IOHandler(void *opaque);
+
+typedef struct AioContext {
+    /* Anchor of the list of Bottom Halves belonging to the context */
+    struct QEMUBH *first_bh;
+
+    /* A simple lock used to protect the first_bh list, and ensure that
+     * no callbacks are removed while we're walking and dispatching callbacks.
+     */
+    int walking_bh;
+} AioContext;
+
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
 typedef int (AioFlushEventNotifierHandler)(EventNotifier *e);
 
+/**
+ * aio_context_new: Allocate a new AioContext.
+ *
+ * AioContext provide a mini event-loop that can be waited on synchronously.
+ * They also provide bottom halves, a service to execute a piece of code
+ * as soon as possible.
+ */
+AioContext *aio_context_new(void);
+
+/**
+ * aio_bh_new: Allocate a new bottom half structure.
+ *
+ * Bottom halves are lightweight callbacks whose invocation is guaranteed
+ * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
+ * is opaque and must be allocated prior to its use.
+ */
+QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
+
+/**
+ * aio_bh_poll: Poll bottom halves for an AioContext.
+ *
+ * These are internal functions used by the QEMU main loop.
+ */
+int aio_bh_poll(AioContext *ctx);
+void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout);
+
+/**
+ * qemu_bh_schedule: Schedule a bottom half.
+ *
+ * Scheduling a bottom half interrupts the main loop and causes the
+ * execution of the callback that was passed to qemu_bh_new.
+ *
+ * Bottom halves that are scheduled from a bottom half handler are instantly
+ * invoked.  This can create an infinite loop if a bottom half handler
+ * schedules itself.
+ *
+ * @bh: The bottom half to be scheduled.
+ */
+void qemu_bh_schedule(QEMUBH *bh);
+
+/**
+ * qemu_bh_cancel: Cancel execution of a bottom half.
+ *
+ * Canceling execution of a bottom half undoes the effect of calls to
+ * qemu_bh_schedule without freeing its resources yet.  While cancellation
+ * itself is also wait-free and thread-safe, it can of course race with the
+ * loop that executes bottom halves unless you are holding the iothread
+ * mutex.  This makes it mostly useless if you are not holding the mutex.
+ *
+ * @bh: The bottom half to be canceled.
+ */
+void qemu_bh_cancel(QEMUBH *bh);
+
+/**
+ *qemu_bh_delete: Cancel execution of a bottom half and free its resources.
+ *
+ * Deleting a bottom half frees the memory that was allocated for it by
+ * qemu_bh_new.  It also implies canceling the bottom half if it was
+ * scheduled.
+ *
+ * @bh: The bottom half to be deleted.
+ */
+void qemu_bh_delete(QEMUBH *bh);
+
 /* Flush any pending AIO operation. This function will block until all
  * outstanding AIO operations have been completed or cancelled. */
 void qemu_aio_flush(void);
diff --git a/qemu-char.h b/qemu-char.h
index 486644b..5087168 100644
--- a/qemu-char.h
+++ b/qemu-char.h
@@ -5,6 +5,7 @@ 
 #include "qemu-queue.h"
 #include "qemu-option.h"
 #include "qemu-config.h"
+#include "qemu-aio.h"
 #include "qobject.h"
 #include "qstring.h"
 #include "main-loop.h"
diff --git a/qemu-common.h b/qemu-common.h
index e5c2bcd..ac44657 100644
--- a/qemu-common.h
+++ b/qemu-common.h
@@ -14,6 +14,7 @@ 
 
 typedef struct QEMUTimer QEMUTimer;
 typedef struct QEMUFile QEMUFile;
+typedef struct QEMUBH QEMUBH;
 typedef struct DeviceState DeviceState;
 
 struct Monitor;
diff --git a/qemu-coroutine-lock.c b/qemu-coroutine-lock.c
index 26ad76b..9dda3f8 100644
--- a/qemu-coroutine-lock.c
+++ b/qemu-coroutine-lock.c
@@ -26,7 +26,7 @@ 
 #include "qemu-coroutine.h"
 #include "qemu-coroutine-int.h"
 #include "qemu-queue.h"
-#include "main-loop.h"
+#include "qemu-aio.h"
 #include "trace.h"
 
 static QTAILQ_HEAD(, Coroutine) unlock_bh_queue =