[09/12] iommu: Add facility to cancel in-use dma memory maps

Message ID 1337142445-26548-10-git-send-email-benh@kernel.crashing.org
State New

Commit Message

Benjamin Herrenschmidt May 16, 2012, 4:27 a.m. UTC
From: David Gibson <david@gibson.dropbear.id.au>

One new complication raised by IOMMU support, compared with handling DMA
only to physical addresses, is the dma_memory_map() case (replacing
cpu_physical_memory_map()): the IOMMU translations for the IOVAs covered
by such a map may be invalidated or changed while the map is still
active.  This should never happen with correct guest software, but we do
need to handle buggy guests.  This case might also occur during handovers
between different guest software stages if the handover protocols aren't
fully seamless.

The IOMMU implementation will have to wait for maps to be removed
before it can "complete" an invalidation of a translation, which can
take a long time.  In order to make it possible to speed that process
up, we add a "cancel" callback to the map function, which clients can
optionally provide.
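
For illustration only (this is not part of the patch; MyDeviceState,
my_device_start_dma, my_device_cancel_dma and my_device_abort_io are
hypothetical names), a client that wants its in-flight I/O to be
cancellable would pass the callback roughly like this:

    /* Hypothetical sketch of a client using the new cancel callback. */
    static void my_device_cancel_dma(void *opaque)
    {
        MyDeviceState *s = opaque;

        /* Abort whatever async operation still references the mapping,
         * so the IOMMU invalidation can complete promptly. */
        my_device_abort_io(s);
    }

    static void my_device_start_dma(MyDeviceState *s, DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
    {
        void *buf;

        buf = dma_memory_map_with_cancel(dma, my_device_cancel_dma, s,
                                         addr, &len, dir);
        if (!buf) {
            return;    /* mapping failed */
        }
        /* ... start async I/O on buf, unmap it on completion ... */
    }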

The core itself makes no use of the callback, but an IOMMU backend
implementation may choose to keep track of active maps and call the
respective cancel callback whenever a translation within a map is
removed, allowing the driver to do things like cancel asynchronous I/O.
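
As a rough sketch only (again not part of this series; MyIOMMUState,
LiveMap, live_maps and my_iommu_invalidate are invented names), such a
backend might record each live map and walk the list when a translation
is torn down:

    /* Hypothetical per-map bookkeeping in an IOMMU backend. */
    typedef struct LiveMap {
        dma_addr_t iova;
        dma_addr_t len;
        DMACancelMapFunc *cancel;
        void *cancel_opaque;
        QLIST_ENTRY(LiveMap) list;
    } LiveMap;

    typedef struct MyIOMMUState {
        QLIST_HEAD(, LiveMap) live_maps;
        /* ... translation tables, etc. ... */
    } MyIOMMUState;

    static void my_iommu_invalidate(MyIOMMUState *s, dma_addr_t iova,
                                    dma_addr_t len)
    {
        LiveMap *m, *next;

        QLIST_FOREACH_SAFE(m, &s->live_maps, list, next) {
            /* Does this map overlap the invalidated IOVA range? */
            if (m->iova < iova + len && iova < m->iova + m->len) {
                if (m->cancel) {
                    /* Ask the client to drop the map early, e.g. by
                     * cancelling the async I/O that still uses it. */
                    m->cancel(m->cancel_opaque);
                }
            }
        }
    }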

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
 dma-helpers.c |   49 ++++++++++++++++++++++++++++---------------------
 dma.h         |   23 +++++++++++++++++++----
 2 files changed, 47 insertions(+), 25 deletions(-)

Patch

diff --git a/dma-helpers.c b/dma-helpers.c
index b4ee827..6e6c7b3 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -107,6 +107,28 @@  static void dma_complete(DMAAIOCB *dbs, int ret)
     }
 }
 
+static void dma_aio_cancel(BlockDriverAIOCB *acb)
+{
+    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
+
+    trace_dma_aio_cancel(dbs);
+
+    if (dbs->acb) {
+        BlockDriverAIOCB *acb = dbs->acb;
+        dbs->acb = NULL;
+        dbs->in_cancel = true;
+        bdrv_aio_cancel(acb);
+        dbs->in_cancel = false;
+    }
+    dbs->common.cb = NULL;
+    dma_complete(dbs, 0);
+}
+
+static void dma_bdrv_cancel_cb(void *opaque)
+{
+    dma_aio_cancel(&((DMAAIOCB *)opaque)->common);
+}
+
 static void dma_bdrv_cb(void *opaque, int ret)
 {
     DMAAIOCB *dbs = (DMAAIOCB *)opaque;
@@ -127,7 +149,8 @@  static void dma_bdrv_cb(void *opaque, int ret)
     while (dbs->sg_cur_index < dbs->sg->nsg) {
         cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
         cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
-        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
+        mem = dma_memory_map_with_cancel(dbs->sg->dma, dma_bdrv_cancel_cb, dbs,
+                                         cur_addr, &cur_len, dbs->dir);
         if (!mem)
             break;
         qemu_iovec_add(&dbs->iov, mem, cur_len);
@@ -149,23 +172,6 @@  static void dma_bdrv_cb(void *opaque, int ret)
     assert(dbs->acb);
 }
 
-static void dma_aio_cancel(BlockDriverAIOCB *acb)
-{
-    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
-
-    trace_dma_aio_cancel(dbs);
-
-    if (dbs->acb) {
-        BlockDriverAIOCB *acb = dbs->acb;
-        dbs->acb = NULL;
-        dbs->in_cancel = true;
-        bdrv_aio_cancel(acb);
-        dbs->in_cancel = false;
-    }
-    dbs->common.cb = NULL;
-    dma_complete(dbs, 0);
-}
-
 static AIOPool dma_aio_pool = {
     .aiocb_size         = sizeof(DMAAIOCB),
     .cancel             = dma_aio_cancel,
@@ -353,7 +359,9 @@  void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
     dma->unmap = unmap;
 }
 
-void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
+void *iommu_dma_memory_map(DMAContext *dma,
+                           DMACancelMapFunc cb, void *cb_opaque,
+                           dma_addr_t addr, dma_addr_t *len,
                            DMADirection dir)
 {
     int err;
@@ -361,7 +369,7 @@  void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
     void *buf;
 
     if (dma->map) {
-        return dma->map(dma, addr, len, dir);
+        return dma->map(dma, cb, cb_opaque, addr, len, dir);
     }
 
     plen = *len;
@@ -397,5 +405,4 @@  void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
     cpu_physical_memory_unmap(buffer, len,
                               dir == DMA_DIRECTION_FROM_DEVICE,
                               access_len);
-
 }
diff --git a/dma.h b/dma.h
index 14fe17d..f1fcb71 100644
--- a/dma.h
+++ b/dma.h
@@ -49,10 +49,15 @@  typedef int DMATranslateFunc(DMAContext *dma,
                              target_phys_addr_t *paddr,
                              target_phys_addr_t *len,
                              DMADirection dir);
+
+typedef void DMACancelMapFunc(void *);
 typedef void* DMAMapFunc(DMAContext *dma,
+                         DMACancelMapFunc cb,
+                         void *cb_opaque,
                          dma_addr_t addr,
                          dma_addr_t *len,
                          DMADirection dir);
+
 typedef void DMAUnmapFunc(DMAContext *dma,
                           void *buffer,
                           dma_addr_t len,
@@ -129,11 +134,15 @@  static inline int dma_memory_set(DMAContext *dma, dma_addr_t addr,
 }
 
 void *iommu_dma_memory_map(DMAContext *dma,
+                           DMACancelMapFunc *cb, void *opaque,
                            dma_addr_t addr, dma_addr_t *len,
                            DMADirection dir);
-static inline void *dma_memory_map(DMAContext *dma,
-                                   dma_addr_t addr, dma_addr_t *len,
-                                   DMADirection dir)
+static inline void *dma_memory_map_with_cancel(DMAContext *dma,
+                                               DMACancelMapFunc *cb,
+                                               void *opaque,
+                                               dma_addr_t addr,
+                                               dma_addr_t *len,
+                                               DMADirection dir)
 {
     if (!dma_has_iommu(dma)) {
         target_phys_addr_t xlen = *len;
@@ -144,9 +153,15 @@  static inline void *dma_memory_map(DMAContext *dma,
         *len = xlen;
         return p;
     } else {
-        return iommu_dma_memory_map(dma, addr, len, dir);
+        return iommu_dma_memory_map(dma, cb, opaque, addr, len, dir);
     }
 }
+static inline void *dma_memory_map(DMAContext *dma,
+                                   dma_addr_t addr, dma_addr_t *len,
+                                   DMADirection dir)
+{
+    return dma_memory_map_with_cancel(dma, NULL, NULL, addr, len, dir);
+}
 
 void iommu_dma_memory_unmap(DMAContext *dma,
                             void *buffer, dma_addr_t len,