
[v3,2/8] block: Add bdrv_aio_cancel_async

Message ID 1409107756-5967-3-git-send-email-famz@redhat.com
State New

Commit Message

Fam Zheng Aug. 27, 2014, 2:49 a.m. UTC
This is the async version of bdrv_aio_cancel, which doesn't block the
caller. It guarantees that the cb is called either before returning or
some time later.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 block.c               | 26 ++++++++++++++++++++++++++
 include/block/aio.h   |  1 +
 include/block/block.h |  1 +
 3 files changed, 28 insertions(+)

Comments

Stefan Hajnoczi Sept. 2, 2014, 10:55 a.m. UTC | #1
On Wed, Aug 27, 2014 at 10:49:10AM +0800, Fam Zheng wrote:
> +/* Async version of aio cancel. The caller is not blocked if the acb implements
> + * cancel_async, otherwise fall back to bdrv_aio_cancel. In both cases, acb->cb
> + * is guaranteed to be called, before or after the function returns. */
> +void bdrv_aio_cancel_async(BlockDriverAIOCB *acb)
> +{
> +    if (acb->aiocb_info->cancel_async) {
> +        acb->aiocb_info->cancel_async(acb);
> +    } else {
> +        /* Mask the cb and cancel; we'll call it manually once the synchronous
> +         * cancel is done. */
> +        BlockDriverCompletionFunc *cb = acb->cb;
> +        void *opaque = acb->opaque;
> +        acb->cb = bdrv_aio_cancel_cb_nop;
> +        acb->opaque = NULL;
> +        qemu_aio_ref(acb);
> +        acb->aiocb_info->cancel(acb);
> +        cb(opaque, -ECANCELED);
> +        qemu_aio_release(acb);
> +    }
> +}

It is not totally obvious why we hijack the callback.  If you respin,
please rephrase the comment along the lines of:

/* bdrv_aio_cancel() does not guarantee to invoke cb() so mask it during
 * bdrv_aio_cancel() and always invoke it ourselves.
 */

Stefan
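
For illustration, a minimal driver-side sketch of the new cancel_async hook.
The driver, struct and function names below are hypothetical and not part of
this series; only BlockDriverAIOCB, AIOCBInfo, qemu_aio_release() and the new
cancel_async field come from the code under review, and the usual QEMU block
layer headers (e.g. include/block/aio.h) are assumed to be included.

/* Hypothetical driver-side sketch -- not part of this patch.  A driver whose
 * requests complete asynchronously can implement cancel_async by flagging
 * the request and returning immediately; the normal completion path then
 * reports -ECANCELED instead of the real result, so bdrv_aio_cancel_async()
 * never blocks its caller. */
typedef struct ExampleAIOCB {
    BlockDriverAIOCB common;    /* embeds cb/opaque; assumed first member */
    bool cancelled;             /* checked on the completion path */
} ExampleAIOCB;

static void example_aio_cancel_async(BlockDriverAIOCB *blockacb)
{
    ExampleAIOCB *acb = (ExampleAIOCB *)blockacb;

    /* Only record the cancellation request; completion still happens later
     * and is responsible for invoking acb->common.cb() exactly once. */
    acb->cancelled = true;
}

static const AIOCBInfo example_aiocb_info = {
    .aiocb_size   = sizeof(ExampleAIOCB),
    /* a real driver would also keep .cancel for the synchronous path */
    .cancel_async = example_aio_cancel_async,
};

/* Completion path (e.g. a bottom half) of the hypothetical driver. */
static void example_aio_complete(ExampleAIOCB *acb, int ret)
{
    acb->common.cb(acb->common.opaque,
                   acb->cancelled ? -ECANCELED : ret);
    qemu_aio_release(&acb->common);
}

This mirrors the contract stated in the patch: the completion callback is
always invoked, just not necessarily before bdrv_aio_cancel_async() returns.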

Patch

diff --git a/block.c b/block.c
index f8e342f..f4c77ec 100644
--- a/block.c
+++ b/block.c
@@ -4612,6 +4612,32 @@  void bdrv_aio_cancel(BlockDriverAIOCB *acb)
     acb->aiocb_info->cancel(acb);
 }
 
+static void bdrv_aio_cancel_cb_nop(void *opaque, int ret)
+{
+    /* nop */
+}
+
+/* Async version of aio cancel. The caller is not blocked if the acb implements
+ * cancel_async, otherwise fall back to bdrv_aio_cancel. In both cases, acb->cb
+ * is guaranteed to be called, before or after the function returns. */
+void bdrv_aio_cancel_async(BlockDriverAIOCB *acb)
+{
+    if (acb->aiocb_info->cancel_async) {
+        acb->aiocb_info->cancel_async(acb);
+    } else {
+        /* Mask the cb and cancel; we'll call it manually once the synchronous
+         * cancel is done. */
+        BlockDriverCompletionFunc *cb = acb->cb;
+        void *opaque = acb->opaque;
+        acb->cb = bdrv_aio_cancel_cb_nop;
+        acb->opaque = NULL;
+        qemu_aio_ref(acb);
+        acb->aiocb_info->cancel(acb);
+        cb(opaque, -ECANCELED);
+        qemu_aio_release(acb);
+    }
+}
+
 /**************************************************************/
 /* async block device emulation */
 
diff --git a/include/block/aio.h b/include/block/aio.h
index 8c216f6..c434466 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -27,6 +27,7 @@  typedef void BlockDriverCompletionFunc(void *opaque, int ret);
 
 typedef struct AIOCBInfo {
     void (*cancel)(BlockDriverAIOCB *acb);
+    void (*cancel_async)(BlockDriverAIOCB *acb);
     size_t aiocb_size;
 } AIOCBInfo;
 
diff --git a/include/block/block.h b/include/block/block.h
index 8f4ad16..35a2448 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -336,6 +336,7 @@  BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    BlockDriverCompletionFunc *cb, void *opaque);
 void bdrv_aio_cancel(BlockDriverAIOCB *acb);
+void bdrv_aio_cancel_async(BlockDriverAIOCB *acb);
 
 typedef struct BlockRequest {
     /* Fields to be filled by multiwrite caller */
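
For completeness, a caller-side sketch of the resulting contract.  The
callback and helper names below are hypothetical and not part of the patch;
the callback is assumed to have been registered when the request was issued
(e.g. via one of the bdrv_aio_* submission functions).

/* Hypothetical caller-side sketch -- not part of this patch.  Whatever path
 * is taken inside bdrv_aio_cancel_async(), this callback still runs: either
 * from inside the call (fallback path, with -ECANCELED) or once the driver
 * finishes the request (cancel_async path). */
static void example_request_cb(void *opaque, int ret)
{
    if (ret == -ECANCELED) {
        /* the request was cancelled before it could complete */
    } else if (ret < 0) {
        /* the request failed */
    } else {
        /* the request completed normally despite the cancel attempt */
    }
}

static void example_cancel_request(BlockDriverAIOCB *acb)
{
    /* Unlike bdrv_aio_cancel(), this does not block the caller. */
    bdrv_aio_cancel_async(acb);
}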