@@ -107,6 +107,28 @@ static void dma_complete(DMAAIOCB *dbs, int ret)
}
}
+static void dma_aio_cancel(BlockDriverAIOCB *acb)
+{
+ DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
+
+ trace_dma_aio_cancel(dbs);
+
+ if (dbs->acb) {
+ BlockDriverAIOCB *acb = dbs->acb;
+ dbs->acb = NULL;
+ dbs->in_cancel = true;
+ bdrv_aio_cancel(acb);
+ dbs->in_cancel = false;
+ }
+ dbs->common.cb = NULL;
+ dma_complete(dbs, 0);
+}
+
+static void dma_bdrv_cancel_cb(void *opaque)
+{
+ dma_aio_cancel(&((DMAAIOCB *)opaque)->common);
+}
+
static void dma_bdrv_cb(void *opaque, int ret)
{
DMAAIOCB *dbs = (DMAAIOCB *)opaque;
@@ -127,7 +149,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
while (dbs->sg_cur_index < dbs->sg->nsg) {
cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
- mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
+ mem = dma_memory_map_with_cancel(dbs->sg->dma, dma_bdrv_cancel_cb, dbs,
+ cur_addr, &cur_len, dbs->dir);
if (!mem)
break;
qemu_iovec_add(&dbs->iov, mem, cur_len);
@@ -149,23 +172,6 @@ static void dma_bdrv_cb(void *opaque, int ret)
assert(dbs->acb);
}
-static void dma_aio_cancel(BlockDriverAIOCB *acb)
-{
- DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
-
- trace_dma_aio_cancel(dbs);
-
- if (dbs->acb) {
- BlockDriverAIOCB *acb = dbs->acb;
- dbs->acb = NULL;
- dbs->in_cancel = true;
- bdrv_aio_cancel(acb);
- dbs->in_cancel = false;
- }
- dbs->common.cb = NULL;
- dma_complete(dbs, 0);
-}
-
static AIOPool dma_aio_pool = {
.aiocb_size = sizeof(DMAAIOCB),
.cancel = dma_aio_cancel,
@@ -353,7 +359,9 @@ void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
dma->unmap = unmap;
}
-void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
+void *iommu_dma_memory_map(DMAContext *dma,
+ DMACancelMapFunc cb, void *cb_opaque,
+ dma_addr_t addr, dma_addr_t *len,
DMADirection dir)
{
int err;
@@ -361,7 +369,7 @@ void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
void *buf;
if (dma->map) {
- return dma->map(dma, addr, len, dir);
+ return dma->map(dma, cb, cb_opaque, addr, len, dir);
}
plen = *len;
@@ -397,5 +405,4 @@ void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
cpu_physical_memory_unmap(buffer, len,
dir == DMA_DIRECTION_FROM_DEVICE,
access_len);
-
}
@@ -49,10 +49,15 @@ typedef int DMATranslateFunc(DMAContext *dma,
target_phys_addr_t *paddr,
target_phys_addr_t *len,
DMADirection dir);
+
+typedef void DMACancelMapFunc(void *opaque);
typedef void* DMAMapFunc(DMAContext *dma,
+ DMACancelMapFunc cb,
+ void *cb_opaque,
dma_addr_t addr,
dma_addr_t *len,
DMADirection dir);
+
typedef void DMAUnmapFunc(DMAContext *dma,
void *buffer,
dma_addr_t len,
@@ -129,11 +134,15 @@ static inline int dma_memory_set(DMAContext *dma, dma_addr_t addr,
}
void *iommu_dma_memory_map(DMAContext *dma,
+                           DMACancelMapFunc cb, void *cb_opaque,
dma_addr_t addr, dma_addr_t *len,
DMADirection dir);
-static inline void *dma_memory_map(DMAContext *dma,
- dma_addr_t addr, dma_addr_t *len,
- DMADirection dir)
+static inline void *dma_memory_map_with_cancel(DMAContext *dma,
+                                               DMACancelMapFunc cb,
+ void *opaque,
+ dma_addr_t addr,
+ dma_addr_t *len,
+ DMADirection dir)
{
if (!dma_has_iommu(dma)) {
target_phys_addr_t xlen = *len;
@@ -144,9 +153,15 @@ static inline void *dma_memory_map(DMAContext *dma,
*len = xlen;
return p;
} else {
- return iommu_dma_memory_map(dma, addr, len, dir);
+ return iommu_dma_memory_map(dma, cb, opaque, addr, len, dir);
}
}
+static inline void *dma_memory_map(DMAContext *dma,
+ dma_addr_t addr, dma_addr_t *len,
+ DMADirection dir)
+{
+ return dma_memory_map_with_cancel(dma, NULL, NULL, addr, len, dir);
+}
void iommu_dma_memory_unmap(DMAContext *dma,
void *buffer, dma_addr_t len,