[v4,5/5] dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel

Message ID 1426496617-10702-6-git-send-email-famz@redhat.com

Commit Message

Fam Zheng March 16, 2015, 9:03 a.m. UTC
If DMA's owning thread cancels the IO while the bounce buffer's owning thread
is notifying the "cpu client list", a use-after-free happens:

     continue_after_map_failure               dma_aio_cancel
     ------------------------------------------------------------------
     aio_bh_new
                                              qemu_bh_delete
     qemu_bh_schedule (use after free)

Also, the old code doesn't run the bh in the right AioContext.

Fix both problems by passing a QEMUBH to cpu_register_map_client.
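In short: the BH is now created up front in the device's AioContext, and
cancellation removes it from the client list (under map_client_list_lock)
before freeing it. Condensed from the hunks below, the new lifecycle is:

    /* map-failure path (dma_blk_cb): create the BH in our own AioContext
     * and hand it to the memory core, which only ever schedules it */
    dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
                         reschedule_dma, dbs);
    cpu_register_map_client(dbs->bh);

    /* cancel path (dma_aio_cancel): unregister first, then delete, so
     * cpu_notify_map_clients_locked() can never schedule a freed BH */
    cpu_unregister_map_client(dbs->bh);
    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;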

Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
---
 dma-helpers.c             | 17 ++++++++---------
 exec.c                    | 33 +++++++++++++++++++++------------
 include/exec/cpu-common.h |  3 ++-
 3 files changed, 31 insertions(+), 22 deletions(-)

Patch

diff --git a/dma-helpers.c b/dma-helpers.c
index 6918572..1fddf6a 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque)
     dma_blk_cb(dbs, 0);
 }
 
-static void continue_after_map_failure(void *opaque)
-{
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-
-    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
-    qemu_bh_schedule(dbs->bh);
-}
-
 static void dma_blk_unmap(DMAAIOCB *dbs)
 {
     int i;
@@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret)
 
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        cpu_register_map_client(dbs, continue_after_map_failure);
+        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
+                             reschedule_dma, dbs);
+        cpu_register_map_client(dbs->bh);
         return;
     }
 
@@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
     if (dbs->acb) {
         blk_aio_cancel_async(dbs->acb);
     }
+    if (dbs->bh) {
+        cpu_unregister_map_client(dbs->bh);
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
 }
 
 
diff --git a/exec.c b/exec.c
index 0fa7487..0f81358 100644
--- a/exec.c
+++ b/exec.c
@@ -2480,8 +2480,7 @@ typedef struct {
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
@@ -2489,31 +2488,29 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
-static void cpu_unregister_map_client(void *_client);
+static void cpu_unregister_map_client_do(MapClient *client);
 static void cpu_notify_map_clients_locked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+void cpu_register_map_client(QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
 
     qemu_mutex_lock(&map_client_list_lock);
-    client->opaque = opaque;
-    client->callback = callback;
+    client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
     if (!atomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
-    return client;
 }
 
 void cpu_exec_init_all(void)
@@ -2526,14 +2523,26 @@ void cpu_exec_init_all(void)
     qemu_mutex_init(&map_client_list_lock);
 }
 
-static void cpu_unregister_map_client(void *_client)
+static void cpu_unregister_map_client_do(MapClient *client)
 {
-    MapClient *client = (MapClient *)_client;
-
     QLIST_REMOVE(client, link);
     g_free(client);
 }
 
+void cpu_unregister_map_client(QEMUBH *bh)
+{
+    MapClient *client;
+
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
+}
+
 static void cpu_notify_map_clients(void)
 {
     qemu_mutex_lock(&map_client_list_lock);
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index fcc3162..43428bd 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr,
                               int is_write);
 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                                int is_write, hwaddr access_len);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);
 
 bool cpu_physical_memory_is_io(hwaddr phys_addr);
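
After this change, any other consumer of cpu_physical_memory_map() that has
to wait for the bounce buffer would follow the same pattern: allocate a BH in
the AioContext the retry must run in, register it, and on cancellation
unregister it before deleting it. A minimal sketch of such a client (the
MapWaiter struct and callbacks here are illustrative, not part of this patch;
it assumes QEMU's AioContext/QEMUBH API as of this series):

    /* Hypothetical client: park a retry BH on the map-client list and
     * tear it down safely if the request is cancelled. */
    typedef struct MapWaiter {
        AioContext *ctx;   /* context the retry must run in */
        QEMUBH *bh;        /* owned here; exec.c only schedules it */
    } MapWaiter;

    static void map_retry(void *opaque)
    {
        MapWaiter *w = opaque;

        /* the bounce buffer may be free again; retry
         * cpu_physical_memory_map() from w->ctx here */
        (void)w;
    }

    static void map_wait(MapWaiter *w)
    {
        w->bh = aio_bh_new(w->ctx, map_retry, w);
        cpu_register_map_client(w->bh);
    }

    static void map_cancel(MapWaiter *w)
    {
        if (w->bh) {
            cpu_unregister_map_client(w->bh);  /* off the list first... */
            qemu_bh_delete(w->bh);             /* ...then free: no use-after-free */
            w->bh = NULL;
        }
    }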