diff mbox

[3/5] ide: move buffered DMA cancel to core

Message ID 1453179117-17909-4-git-send-email-jsnow@redhat.com
State New
Headers show

Commit Message

John Snow Jan. 19, 2016, 4:51 a.m. UTC
Buffered DMA cancellation was added to ATAPI devices and implemented
for the BMDMA HBA. Move the code over to common IDE code and allow
it to be used for any HBA.

Signed-off-by: John Snow <jsnow@redhat.com>
---
 hw/ide/core.c     | 45 +++++++++++++++++++++++++++++++++++++++++++++
 hw/ide/internal.h |  1 +
 hw/ide/pci.c      | 36 +-----------------------------------
 3 files changed, 47 insertions(+), 35 deletions(-)

Comments

Paolo Bonzini Jan. 19, 2016, 11:50 a.m. UTC | #1
On 19/01/2016 05:51, John Snow wrote:
> Buffered DMA cancellation was added to ATAPI devices and implemented
> for the BMDMA HBA. Move the code over to common IDE code and allow
> it to be used for any HBA.
> 
> Signed-off-by: John Snow <jsnow@redhat.com>
> ---
>  hw/ide/core.c     | 45 +++++++++++++++++++++++++++++++++++++++++++++
>  hw/ide/internal.h |  1 +
>  hw/ide/pci.c      | 36 +-----------------------------------
>  3 files changed, 47 insertions(+), 35 deletions(-)
> 
> diff --git a/hw/ide/core.c b/hw/ide/core.c
> index 75486c2..5d81840 100644
> --- a/hw/ide/core.c
> +++ b/hw/ide/core.c
> @@ -608,6 +608,51 @@ BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
>      return aioreq;
>  }
>  
> +/**
> + * Cancel all pending DMA requests.
> + * Any buffered DMA requests are instantly canceled,
> + * but any pending unbuffered DMA requests must be waited on.
> + */
> +void ide_cancel_dma_sync(IDEState *s)
> +{
> +    IDEBufferedRequest *req;
> +
> +    /* First invoke the callbacks of all buffered requests
> +     * and flag those requests as orphaned. Ideally there
> +     * are no unbuffered (Scatter Gather DMA Requests or
> +     * write requests) pending and we can avoid draining. */
> +    QLIST_FOREACH(req, &s->buffered_requests, list) {
> +        if (!req->orphaned) {
> +#ifdef DEBUG_IDE
> +            printf("%s: invoking cb %p of buffered request %p with"
> +                   " -ECANCELED\n", __func__, req->original_cb, req);
> +#endif
> +            req->original_cb(req->original_opaque, -ECANCELED);
> +        }
> +        req->orphaned = true;
> +    }
> +
> +    /*
> +     * We can't cancel Scatter Gather DMA in the middle of the
> +     * operation or a partial (not full) DMA transfer would reach
> +     * the storage, so we wait for completion instead (we behave
> +     * as if the DMA had completed by the time the guest tried
> +     * to cancel the DMA via bmdma_cmd_writeb with BM_CMD_START not
> +     * set).
> +     *
> +     * In the future we'll be able to safely cancel the I/O if the
> +     * whole DMA operation will be submitted to disk with a single
> +     * aio operation with preadv/pwritev.
> +     */
> +    if (s->bus->dma->aiocb) {
> +#ifdef DEBUG_IDE
> +        printf("%s: draining all remaining requests", __func__);
> +#endif
> +        blk_drain_all();

As a separate patch you can change this to blk_drain(s->blk), which is
already an improvement.

Paolo

> +        assert(s->bus->dma->aiocb == NULL);
> +    }
> +}
> +
>  static void ide_sector_read(IDEState *s);
>  
>  static void ide_sector_read_cb(void *opaque, int ret)
> diff --git a/hw/ide/internal.h b/hw/ide/internal.h
> index 2d1e2d2..86bde26 100644
> --- a/hw/ide/internal.h
> +++ b/hw/ide/internal.h
> @@ -586,6 +586,7 @@ BlockAIOCB *ide_issue_trim(BlockBackend *blk,
>  BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
>                                 QEMUIOVector *iov, int nb_sectors,
>                                 BlockCompletionFunc *cb, void *opaque);
> +void ide_cancel_dma_sync(IDEState *s);
>  
>  /* hw/ide/atapi.c */
>  void ide_atapi_cmd(IDEState *s);
> diff --git a/hw/ide/pci.c b/hw/ide/pci.c
> index 37dbc29..6b780b8 100644
> --- a/hw/ide/pci.c
> +++ b/hw/ide/pci.c
> @@ -233,41 +233,7 @@ void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
>      /* Ignore writes to SSBM if it keeps the old value */
>      if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
>          if (!(val & BM_CMD_START)) {
> -            /* First invoke the callbacks of all buffered requests
> -             * and flag those requests as orphaned. Ideally there
> -             * are no unbuffered (Scatter Gather DMA Requests or
> -             * write requests) pending and we can avoid to drain. */
> -            IDEBufferedRequest *req;
> -            IDEState *s = idebus_active_if(bm->bus);
> -            QLIST_FOREACH(req, &s->buffered_requests, list) {
> -                if (!req->orphaned) {
> -#ifdef DEBUG_IDE
> -                    printf("%s: invoking cb %p of buffered request %p with"
> -                           " -ECANCELED\n", __func__, req->original_cb, req);
> -#endif
> -                    req->original_cb(req->original_opaque, -ECANCELED);
> -                }
> -                req->orphaned = true;
> -            }
> -            /*
> -             * We can't cancel Scatter Gather DMA in the middle of the
> -             * operation or a partial (not full) DMA transfer would reach
> -             * the storage so we wait for completion instead (we beahve
> -             * like if the DMA was completed by the time the guest trying
> -             * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
> -             * set).
> -             *
> -             * In the future we'll be able to safely cancel the I/O if the
> -             * whole DMA operation will be submitted to disk with a single
> -             * aio operation with preadv/pwritev.
> -             */
> -            if (bm->bus->dma->aiocb) {
> -#ifdef DEBUG_IDE
> -                printf("%s: draining all remaining requests", __func__);
> -#endif
> -                blk_drain_all();
> -                assert(bm->bus->dma->aiocb == NULL);
> -            }
> +            ide_cancel_dma_sync(idebus_active_if(bm->bus));
>              bm->status &= ~BM_STATUS_DMAING;
>          } else {
>              bm->cur_addr = bm->addr;
>
diff mbox

Patch

diff --git a/hw/ide/core.c b/hw/ide/core.c
index 75486c2..5d81840 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -608,6 +608,51 @@  BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
     return aioreq;
 }
 
+/**
+ * Cancel all pending DMA requests.
+ * Any buffered DMA requests are instantly canceled,
+ * but any pending unbuffered DMA requests must be waited on.
+ */
+void ide_cancel_dma_sync(IDEState *s)
+{
+    IDEBufferedRequest *req;
+
+    /* First invoke the callbacks of all buffered requests
+     * and flag those requests as orphaned. Ideally there
+     * are no unbuffered (Scatter Gather DMA Requests or
+     * write requests) pending and we can avoid draining. */
+    QLIST_FOREACH(req, &s->buffered_requests, list) {
+        if (!req->orphaned) {
+#ifdef DEBUG_IDE
+            printf("%s: invoking cb %p of buffered request %p with"
+                   " -ECANCELED\n", __func__, req->original_cb, req);
+#endif
+            req->original_cb(req->original_opaque, -ECANCELED);
+        }
+        req->orphaned = true;
+    }
+
+    /*
+     * We can't cancel Scatter Gather DMA in the middle of the
+     * operation or a partial (not full) DMA transfer would reach
+     * the storage, so we wait for completion instead (we behave
+     * as if the DMA had completed by the time the guest tried
+     * to cancel the DMA via bmdma_cmd_writeb with BM_CMD_START not
+     * set).
+     *
+     * In the future we'll be able to safely cancel the I/O if the
+     * whole DMA operation will be submitted to disk with a single
+     * aio operation with preadv/pwritev.
+     */
+    if (s->bus->dma->aiocb) {
+#ifdef DEBUG_IDE
+        printf("%s: draining all remaining requests", __func__);
+#endif
+        blk_drain_all();
+        assert(s->bus->dma->aiocb == NULL);
+    }
+}
+
 static void ide_sector_read(IDEState *s);
 
 static void ide_sector_read_cb(void *opaque, int ret)
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index 2d1e2d2..86bde26 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -586,6 +586,7 @@  BlockAIOCB *ide_issue_trim(BlockBackend *blk,
 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
                                QEMUIOVector *iov, int nb_sectors,
                                BlockCompletionFunc *cb, void *opaque);
+void ide_cancel_dma_sync(IDEState *s);
 
 /* hw/ide/atapi.c */
 void ide_atapi_cmd(IDEState *s);
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 37dbc29..6b780b8 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -233,41 +233,7 @@  void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
     /* Ignore writes to SSBM if it keeps the old value */
     if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
         if (!(val & BM_CMD_START)) {
-            /* First invoke the callbacks of all buffered requests
-             * and flag those requests as orphaned. Ideally there
-             * are no unbuffered (Scatter Gather DMA Requests or
-             * write requests) pending and we can avoid to drain. */
-            IDEBufferedRequest *req;
-            IDEState *s = idebus_active_if(bm->bus);
-            QLIST_FOREACH(req, &s->buffered_requests, list) {
-                if (!req->orphaned) {
-#ifdef DEBUG_IDE
-                    printf("%s: invoking cb %p of buffered request %p with"
-                           " -ECANCELED\n", __func__, req->original_cb, req);
-#endif
-                    req->original_cb(req->original_opaque, -ECANCELED);
-                }
-                req->orphaned = true;
-            }
-            /*
-             * We can't cancel Scatter Gather DMA in the middle of the
-             * operation or a partial (not full) DMA transfer would reach
-             * the storage so we wait for completion instead (we beahve
-             * like if the DMA was completed by the time the guest trying
-             * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
-             * set).
-             *
-             * In the future we'll be able to safely cancel the I/O if the
-             * whole DMA operation will be submitted to disk with a single
-             * aio operation with preadv/pwritev.
-             */
-            if (bm->bus->dma->aiocb) {
-#ifdef DEBUG_IDE
-                printf("%s: draining all remaining requests", __func__);
-#endif
-                blk_drain_all();
-                assert(bm->bus->dma->aiocb == NULL);
-            }
+            ide_cancel_dma_sync(idebus_active_if(bm->bus));
             bm->status &= ~BM_STATUS_DMAING;
         } else {
             bm->cur_addr = bm->addr;