
[03/10] dma-helpers: rewrite completion/cancellation

Message ID: 1312478089-806-4-git-send-email-pbonzini@redhat.com
State: New

Commit Message

Paolo Bonzini Aug. 4, 2011, 5:14 p.m. UTC
This fixes various problems with completion/cancellation:

* If DMA encounters a bounce buffer conflict and the operation is
canceled before the bottom half fires, the bottom half still runs and
resumes the supposedly canceled transfer.

* Memory is not unmapped after cancellation, again causing problems
when doing DMA to I/O areas.

* Cancellation could leak the iovec.

There are probably more that I've missed.  The patch fixes them by
sharing the cleanup code between completion and cancellation.
dma_bdrv_cb now returns a completed/not-completed indication, and the
new dma_continue wrapper takes care of the tasks to do upon completion.
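
For reference, the return-value contract of the reworked dma_bdrv_cb
is as follows (my summary of the patch below, not part of the patch
itself):

    /* dma_bdrv_cb return values, as implemented below:
     *
     *   -EAGAIN  an asynchronous step is in flight; dma_continue will
     *            be re-entered from reschedule_dma or dma_bdrv_aio_cb
     *   0        the whole scatter/gather list has been transferred
     *   < 0      the transfer failed with this errno
     *
     * Anything other than -EAGAIN makes dma_continue call dma_complete,
     * which unmaps the buffers, invokes the guest callback (unless the
     * operation was canceled), and releases the AIOCB.  */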

Most of these bugs are basically impossible to hit in practice, but
the resulting code is also a better base for the upcoming introduction
of dma_buf_read and dma_buf_write.
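
As a sketch of where this is heading, a synchronous, memory-only
helper could look roughly like the code below.  Nothing here is part
of this patch: the signature is my guess at the later patches in this
series, and the sketch assumes QEMU's cpu_physical_memory_rw, the
QEMUSGList/ScatterGatherEntry types from dma.h, and the MIN macro:

    /* Hypothetical shape of a synchronous sglist-to-buffer copy.
     * dma_buf_write would be identical with the direction flipped.  */
    uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
    {
        uint64_t resid = sg->size;
        int i = 0;

        len = MIN(len, resid);
        while (len > 0) {
            ScatterGatherEntry entry = sg->sg[i++];
            int32_t xfer = MIN(len, entry.len);

            /* A guest "read" moves data from the device's buffer
             * into guest memory, hence is_write = 1.  */
            cpu_physical_memory_rw(entry.base, ptr, xfer, 1);
            ptr += xfer;
            len -= xfer;
            resid -= xfer;
        }
        return resid;    /* bytes of the sglist left untouched */
    }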

One note: since memory-based DMA will not use dbs->acb, this patch
switches to dbs->common.cb == NULL as the marker for a canceled
operation.
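
Concretely, both dma_aio_cancel and the submission-failure path clear
the callback, and dma_complete treats a NULL callback as "do not
notify the guest" (excerpted from the patch below):

    /* In dma_aio_cancel: mark the operation canceled, then run the
     * shared cleanup.  */
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);

    /* In dma_complete: skip the guest callback for canceled ops.  */
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }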

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 dma-helpers.c |   90 ++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 61 insertions(+), 29 deletions(-)

Patch

diff --git a/dma-helpers.c b/dma-helpers.c
index 6a59f59..d716524 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -90,7 +90,49 @@ typedef struct {
     DMAIOFunc *io_func;
 } DMAAIOCB;
 
-static void dma_bdrv_cb(void *opaque, int ret);
+static int dma_bdrv_cb(DMAAIOCB *opaque, int ret);
+
+static void dma_bdrv_unmap(DMAAIOCB *dbs)
+{
+    int i;
+
+    for (i = 0; i < dbs->iov.niov; ++i) {
+        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
+                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
+                                  dbs->iov.iov[i].iov_len);
+    }
+    qemu_iovec_reset(&dbs->iov);
+}
+
+static void dma_complete(DMAAIOCB *dbs, int ret)
+{
+    dma_bdrv_unmap(dbs);
+    if (dbs->common.cb) {
+        dbs->common.cb(dbs->common.opaque, ret);
+    }
+    qemu_iovec_destroy(&dbs->iov);
+    if (dbs->bh) {
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
+    qemu_aio_release(dbs);
+}
+
+static BlockDriverAIOCB *dma_continue(DMAAIOCB *dbs, int ret)
+{
+    assert(ret != -EAGAIN);
+    if (ret == 0) {
+        /* No error so far, try doing more DMA.  If dma_bdrv_cb starts an
+           asynchronous operation, it returns -EAGAIN and we will be
+           called again by either reschedule_dma or dma_bdrv_aio_cb.
+           If not, call the BlockDriverCompletionFunc.  */
+        ret = dma_bdrv_cb(dbs, ret);
+    }
+    if (ret != -EAGAIN) {
+        dma_complete(dbs, ret);
+    }
+    return &dbs->common;
+}
 
 static void reschedule_dma(void *opaque)
 {
@@ -98,7 +140,7 @@ static void reschedule_dma(void *opaque)
 
     qemu_bh_delete(dbs->bh);
     dbs->bh = NULL;
-    dma_bdrv_cb(opaque, 0);
+    dma_continue(dbs, 0);
 }
 
 static void continue_after_map_failure(void *opaque)
@@ -109,33 +151,23 @@ static void continue_after_map_failure(void *opaque)
     qemu_bh_schedule(dbs->bh);
 }
 
-static void dma_bdrv_unmap(DMAAIOCB *dbs)
+static void dma_bdrv_aio_cb(void *opaque, int ret)
 {
-    int i;
-
-    for (i = 0; i < dbs->iov.niov; ++i) {
-        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
-                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
-                                  dbs->iov.iov[i].iov_len);
-    }
+    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
+    dma_continue(dbs, ret);
 }
 
-static void dma_bdrv_cb(void *opaque, int ret)
+static int dma_bdrv_cb(DMAAIOCB *dbs, int ret)
 {
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
     void *mem;
     target_phys_addr_t cur_len;
 
     dbs->acb = NULL;
     dbs->sector_num += dbs->iov.size / 512;
     dma_bdrv_unmap(dbs);
-    qemu_iovec_reset(&dbs->iov);
 
     if (dbs->sg->cur_index == dbs->sg->nsg || ret < 0) {
-        dbs->common.cb(dbs->common.opaque, ret);
-        qemu_iovec_destroy(&dbs->iov);
-        qemu_aio_release(dbs);
-        return;
+        return ret;
     }
 
     while ((mem = qemu_sglist_map_segment(dbs->sg, &cur_len,
@@ -145,16 +177,17 @@ static void dma_bdrv_cb(void *opaque, int ret)
 
     if (dbs->iov.size == 0) {
         cpu_register_map_client(dbs, continue_after_map_failure);
-        return;
+        return -EAGAIN;
     }
 
     dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
-                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
+                            dbs->iov.size / 512, dma_bdrv_aio_cb, dbs);
     if (!dbs->acb) {
-        dma_bdrv_unmap(dbs);
-        qemu_iovec_destroy(&dbs->iov);
-        return;
+        dbs->common.cb = NULL;
+        return -EIO;
     }
+
+    return -EAGAIN;
 }
 
 static void dma_aio_cancel(BlockDriverAIOCB *acb)
@@ -162,8 +195,12 @@ static void dma_aio_cancel(BlockDriverAIOCB *acb)
     DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
 
     if (dbs->acb) {
-        bdrv_aio_cancel(dbs->acb);
+        BlockDriverAIOCB *acb = dbs->acb;
+        dbs->acb = NULL;
+        bdrv_aio_cancel(acb);
     }
+    dbs->common.cb = NULL;
+    dma_complete(dbs, 0);
 }
 
 static AIOPool dma_aio_pool = {
@@ -187,12 +224,7 @@ BlockDriverAIOCB *dma_bdrv_io(
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
     qemu_sglist_rewind(sg);
-    dma_bdrv_cb(dbs, 0);
-    if (!dbs->acb) {
-        qemu_aio_release(dbs);
-        return NULL;
-    }
-    return &dbs->common;
+    return dma_continue(dbs, 0);
 }
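
For completeness, here is how the change looks from a caller's point
of view.  The device code below is hypothetical, and dma_bdrv_read's
signature is assumed from the tree at the time rather than shown in
this diff:

    /* Hypothetical device submitting a scatter/gather read.  */
    static void mydev_dma_done(void *opaque, int ret)
    {
        MyDevState *s = opaque;    /* hypothetical device state */

        /* The DMA memory is already unmapped when this runs;
         * ret < 0 reports an I/O error from the block layer.  */
        mydev_finish_request(s, ret);    /* hypothetical */
    }

    static void mydev_submit(MyDevState *s)
    {
        s->acb = dma_bdrv_read(s->bs, &s->sg, s->sector,
                               mydev_dma_done, s);
        /* Before this patch, callers had to cope with a NULL return
         * when the first chunk could not be submitted; dma_bdrv_io
         * now always returns the AIOCB and routes every outcome
         * through the shared dma_complete path.  */
    }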