
[2/3] scsi: Introduce scsi_req_cancel_async

Message ID 1411007799-23199-3-git-send-email-famz@redhat.com
State New

Commit Message

Fam Zheng Sept. 18, 2014, 2:36 a.m. UTC
Devices can call this function to start an asynchronous cancellation;
bus->info->cancel will be called later, when the cancellation completes.

Two fields are added to SCSIRequest to keep track of, respectively:

 1) The list of (TMF) requests that are waiting for this request to be
    canceled.

 2) The number of (IO) requests that this request (as a TMF request) is
    waiting for.

When scsi_req_cancel_async is called, the TMF request is tracked with
these fields. When the cancellation is done, we check whether the bus
should be notified.

If this is the last canceled request the TMF request is waiting for
(i.e. cancel_dep_count drops to 0), we call .cancel_dep_complete so the
bus can complete it.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 hw/scsi/scsi-bus.c     | 50 ++++++++++++++++++++++++++++++++++++++++++++++++--
 include/hw/scsi/scsi.h | 14 ++++++++++++++
 2 files changed, 62 insertions(+), 2 deletions(-)
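
To make the intended flow concrete, here is a minimal sketch of how an HBA
could drive this API (hba_abort_task_set and hba_complete_tmf are
hypothetical names, not part of this patch; the review below points out a
caveat when cancellation completes synchronously):

    /* Hypothetical HBA handler for an ABORT TASK SET style TMF. */
    static void hba_abort_task_set(SCSIDevice *d, SCSIRequest *tmf_req)
    {
        SCSIRequest *r, *r_next;

        QTAILQ_FOREACH_SAFE(r, &d->requests, next, r_next) {
            /* Links tmf_req into r->cancel_deps and increments
             * tmf_req->cancel_dep_count. */
            scsi_req_cancel_async(r, tmf_req);
        }
    }

    /* SCSIBusInfo.cancel_dep_complete: invoked once cancel_dep_count drops
     * back to zero, i.e. every request tmf_req was waiting on has been
     * canceled. */
    static void hba_cancel_dep_complete(SCSIRequest *tmf_req)
    {
        hba_complete_tmf(tmf_req);   /* hypothetical completion helper */
    }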

Comments

Paolo Bonzini Sept. 18, 2014, 9:17 a.m. UTC | #1
On 18/09/2014 04:36, Fam Zheng wrote:
> +    QTAILQ_FOREACH_SAFE(ref, &req->cancel_deps, next, next) {
> +        SCSIRequest *r = ref->req;
> +        assert(r->cancel_dep_count);
> +        r->cancel_dep_count--;
> +        if (!r->cancel_dep_count && req->bus->info->cancel_dep_complete) {
> +            req->bus->info->cancel_dep_complete(r);
> +        }
> +        QTAILQ_REMOVE(&req->cancel_deps, ref, next);
> +        scsi_req_unref(r);
> +        g_free(ref);
> +    }

I think there is one problem here.

scsi_req_cancel_async can actually be synchronous if you're unlucky
(because bdrv_aio_cancel_async can be synchronous too, for example in
the case of a linux-aio AIOCB).  So you could end up calling
cancel_dep_complete even if the caller intends to cancel more requests.

I think it's better to track the count in virtio-scsi instead.  You can
initialize it similarly to bdrv_aio_multiwrite:

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;

and decrement the count on every call to the notifier.
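
In code, the suggested counting would look roughly like this (CancelTracker,
cancel_one_done and complete_tmf are illustrative names, not existing QEMU
code; cancel_one_done would be driven by the per-request cancel
notification):

    typedef struct CancelTracker {
        SCSIRequest *tmf_req;    /* TMF to complete once all cancels finish */
        int num_requests;        /* cancellations still outstanding */
    } CancelTracker;

    /* Called once per canceled request, possibly synchronously from
     * within the loop in cancel_all(). */
    static void cancel_one_done(CancelTracker *t)
    {
        if (--t->num_requests == 0) {
            complete_tmf(t->tmf_req);   /* hypothetical HBA helper */
            g_free(t);
        }
    }

    static void cancel_all(SCSIRequest *tmf_req, SCSIRequest **reqs,
                           int num_reqs)
    {
        /* t would be reachable from the per-request cancel notification,
         * e.g. via hba_private. */
        CancelTracker *t = g_new0(CancelTracker, 1);
        int i;

        t->tmf_req = tmf_req;
        /* Fix the count up front, as bdrv_aio_multiwrite does with
         * mcb->num_requests, so a synchronous cancellation cannot drive
         * it to zero before every request has been submitted. */
        t->num_requests = num_reqs;
        for (i = 0; i < num_reqs; i++) {
            scsi_req_cancel_async(reqs[i], NULL);   /* count lives in the HBA */
        }
    }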

This is independent of the choice to make bdrv_aio_cancel_async
semantics stricter.  In the case of bdrv_aio_multiwrite, we know that
bdrv_co_aio_rw_vector is never synchronous, but the code is simply nicer
that way. :)

Paolo
Paolo Bonzini Sept. 18, 2014, 9:18 a.m. UTC | #2
On 18/09/2014 04:36, Fam Zheng wrote:
> @@ -552,7 +552,7 @@ SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
>      SCSIBus *bus = scsi_bus_from_device(d);
>      BusState *qbus = BUS(bus);
>  
> -    req = g_malloc0(reqops->size);
> +    req = g_malloc0(reqops ? reqops->size : sizeof(SCSIRequest));

Let's just add a NotifierList to SCSIRequest instead, and pass a
Notifier to scsi_req_cancel_async.  There can be multiple aborts for the
same SCSIRequest, so we can leave it to the HBA to create a Notifier for
each TMF.

The notifier can be embedded in a struct to track the request count, as
mentioned in the other message I just sent.
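
A rough sketch of that shape, assuming scsi_req_cancel_async is changed to
take a Notifier * (Notifier comes from QEMU's include/qemu/notify.h and
container_of from its common headers; TMFState, CancelNotifier and
complete_tmf are hypothetical names):

    typedef struct TMFState {          /* hypothetical per-TMF bookkeeping */
        SCSIRequest *tmf_req;
        int remaining;                 /* cancellations still outstanding */
    } TMFState;

    /* One of these is allocated per request being canceled; the embedded
     * Notifier is what gets registered with scsi_req_cancel_async. */
    typedef struct CancelNotifier {
        Notifier notifier;
        TMFState *tmf;
    } CancelNotifier;

    static void cancel_notify(Notifier *notifier, void *data)
    {
        CancelNotifier *n = container_of(notifier, CancelNotifier, notifier);

        if (--n->tmf->remaining == 0) {
            complete_tmf(n->tmf->tmf_req);   /* hypothetical helper */
        }
        g_free(n);
    }

Each canceled request gets its own CancelNotifier (with notifier.notify set
to cancel_notify), while TMFState.remaining is initialized up front as in
the previous message.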

Paolo

Patch

diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index 74172cc..47ad0b4 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -552,7 +552,7 @@  SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
     SCSIBus *bus = scsi_bus_from_device(d);
     BusState *qbus = BUS(bus);
 
-    req = g_malloc0(reqops->size);
+    req = g_malloc0(reqops ? reqops->size : sizeof(SCSIRequest));
     req->refcount = 1;
     req->bus = bus;
     req->dev = d;
@@ -564,6 +564,7 @@  SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
     req->ops = reqops;
     object_ref(OBJECT(d));
     object_ref(OBJECT(qbus->parent));
+    QTAILQ_INIT(&req->cancel_deps);
     trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
     return req;
 }
@@ -1598,7 +1599,7 @@  void scsi_req_unref(SCSIRequest *req)
         if (bus->info->free_request && req->hba_private) {
             bus->info->free_request(bus, req->hba_private);
         }
-        if (req->ops->free_req) {
+        if (req->ops && req->ops->free_req) {
             req->ops->free_req(req);
         }
         object_unref(OBJECT(req->dev));
@@ -1717,13 +1718,58 @@  void scsi_req_complete(SCSIRequest *req, int status)
 /* Called by the devices when the request is canceled. */
 void scsi_req_canceled(SCSIRequest *req)
 {
+    SCSIRequestReference *ref, *next;
     assert(req->io_canceled);
     if (req->bus->info->cancel) {
         req->bus->info->cancel(req);
     }
+    QTAILQ_FOREACH_SAFE(ref, &req->cancel_deps, next, next) {
+        SCSIRequest *r = ref->req;
+        assert(r->cancel_dep_count);
+        r->cancel_dep_count--;
+        if (!r->cancel_dep_count && req->bus->info->cancel_dep_complete) {
+            req->bus->info->cancel_dep_complete(r);
+        }
+        QTAILQ_REMOVE(&req->cancel_deps, ref, next);
+        scsi_req_unref(r);
+        g_free(ref);
+    }
     scsi_req_unref(req);
 }
 
+static void scsi_req_cancel_dep_add(SCSIRequest *req, SCSIRequest *tmf_req)
+{
+    SCSIRequestReference *ref = g_new0(SCSIRequestReference, 1);
+    ref->req = tmf_req;
+    tmf_req->cancel_dep_count++;
+    scsi_req_ref(tmf_req);
+    QTAILQ_INSERT_TAIL(&req->cancel_deps, ref, next);
+}
+
+/* Cancel @req asynchronously.
+ * @tmf_req is added to @req's cancellation dependency list; the bus will be
+ * notified with @tmf_req when all the requests it depends on are canceled.
+ *
+ * @req:     The request to cancel.
+ * @tmf_req: The TMF request which cancels @req.
+ */
+void scsi_req_cancel_async(SCSIRequest *req, SCSIRequest *tmf_req)
+{
+    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
+    if (tmf_req) {
+        scsi_req_cancel_dep_add(req, tmf_req);
+    }
+    if (req->io_canceled) {
+        return;
+    }
+    scsi_req_ref(req);
+    scsi_req_dequeue(req);
+    req->io_canceled = true;
+    if (req->aiocb) {
+        bdrv_aio_cancel_async(req->aiocb);
+    }
+}
+
 void scsi_req_cancel(SCSIRequest *req)
 {
     trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index 25a5617..d0e0fb4 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -42,6 +42,13 @@  struct SCSICommand {
     enum SCSIXferMode mode;
 };
 
+struct SCSIRequest;
+
+typedef struct SCSIRequestReference {
+    struct SCSIRequest *req;
+    QTAILQ_ENTRY(SCSIRequestReference) next;
+} SCSIRequestReference;
+
 struct SCSIRequest {
     SCSIBus           *bus;
     SCSIDevice        *dev;
@@ -62,6 +69,11 @@  struct SCSIRequest {
     bool retry;
     void *hba_private;
     QTAILQ_ENTRY(SCSIRequest) next;
+
+    /* List of requests that depend on this one */
+    QTAILQ_HEAD(, SCSIRequestReference) cancel_deps;
+    /* The number of requests this one depends on */
+    int cancel_dep_count;
 };
 
 #define TYPE_SCSI_DEVICE "scsi-device"
@@ -137,6 +149,7 @@  struct SCSIBusInfo {
     void (*transfer_data)(SCSIRequest *req, uint32_t arg);
     void (*complete)(SCSIRequest *req, uint32_t arg, size_t resid);
     void (*cancel)(SCSIRequest *req);
+    void (*cancel_dep_complete)(SCSIRequest *req);
     void (*hotplug)(SCSIBus *bus, SCSIDevice *dev);
     void (*hot_unplug)(SCSIBus *bus, SCSIDevice *dev);
     void (*change)(SCSIBus *bus, SCSIDevice *dev, SCSISense sense);
@@ -260,6 +273,7 @@  int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len);
 void scsi_req_abort(SCSIRequest *req, int status);
 void scsi_req_canceled(SCSIRequest *req);
 void scsi_req_cancel(SCSIRequest *req);
+void scsi_req_cancel_async(SCSIRequest *req, SCSIRequest *tmf_req);
 void scsi_req_retry(SCSIRequest *req);
 void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense);
 void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense);