
[RFC,3/6] block: wait for overlapping requests

Message ID 1318866452-30026-4-git-send-email-stefanha@linux.vnet.ibm.com
State New

Commit Message

Stefan Hajnoczi Oct. 17, 2011, 3:47 p.m. UTC
When copy-on-read is enabled it is necessary to wait for overlapping
requests before issuing new requests.  This prevents races between the
copy-on-read and a write request.

Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
 block.c |   39 +++++++++++++++++++++++++++++++++++++++
 1 files changed, 39 insertions(+), 0 deletions(-)
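
Without this ordering, a copy-on-read operation that populates sectors
from the backing file can complete after a guest write to the same
sectors and overwrite the newer data with stale backing-file contents.
Note that tracked_request_overlaps() is still a stub in this RFC (it
always returns false), so nothing is serialized yet. A minimal range
check, sketched here for illustration and not part of the patch, needs
only the fields BdrvTrackedRequest already carries:

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t sector_num, int nb_sectors)
{
    /* Half-open sector ranges [a, a+n) and [b, b+m) overlap iff
     * each one starts before the other ends */
    return sector_num < req->sector_num + req->nb_sectors &&
           req->sector_num < sector_num + nb_sectors;
}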

Comments

Marcelo Tosatti Oct. 18, 2011, 1:48 p.m. UTC | #1
On Mon, Oct 17, 2011 at 04:47:29PM +0100, Stefan Hajnoczi wrote:
> When copy-on-read is enabled it is necessary to wait for overlapping
> requests before issuing new requests.  This prevents races between the
> copy-on-read and a write request.
> 
> Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
> ---
>  block.c |   39 +++++++++++++++++++++++++++++++++++++++
>  1 files changed, 39 insertions(+), 0 deletions(-)
> 
> diff --git a/block.c b/block.c
> index e624ac3..cc3202c 100644
> --- a/block.c
> +++ b/block.c
> @@ -1001,6 +1001,7 @@ struct BdrvTrackedRequest {
>      int nb_sectors;
>      bool is_write;
>      QLIST_ENTRY(BdrvTrackedRequest) list;
> +    CoQueue wait_queue; /* coroutines blocked on this request */
>  };
>  
>  /**
> @@ -1014,6 +1015,12 @@ static void tracked_request_remove(BdrvTrackedRequest *req)
>  {
>      if (req) {
>          QLIST_REMOVE(req, list);
> +
> +        /* Wake up all coroutines blocked on this request */
> +        while (qemu_co_queue_next(&req->wait_queue)) {
> +            /* Do nothing */
> +        }
> +
>          g_free(req);
>      }
>  }
> @@ -1038,12 +1045,36 @@ static BdrvTrackedRequest *tracked_request_add(BlockDriverState *bs,
>      req->sector_num = sector_num;
>      req->nb_sectors = nb_sectors;
>      req->is_write = is_write;
> +    qemu_co_queue_init(&req->wait_queue);
>  
>      QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
>  
>      return req;
>  }
>  
> +static bool tracked_request_overlaps(BdrvTrackedRequest *req,
> +                                     int64_t sector_num, int nb_sectors) {
> +    return false; /* not yet implemented */
> +}
> +
> +static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
> +        int64_t sector_num, int nb_sectors)
> +{
> +    BdrvTrackedRequest *req;
> +    bool retry;
> +
> +    do {
> +        retry = false;
> +        QLIST_FOREACH(req, &bs->tracked_requests, list) {
> +            if (tracked_request_overlaps(req, sector_num, nb_sectors)) {
> +                qemu_co_queue_wait(&req->wait_queue);
> +                retry = true;

What prevents overlapping requests (by the waiter's criteria) from
being inserted into the queue again while there are waiters?

That is, why is it not possible for a waiter to wait indefinitely?
Stefan Hajnoczi Oct. 20, 2011, 5:34 p.m. UTC | #2
On Tue, Oct 18, 2011 at 6:48 AM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> On Mon, Oct 17, 2011 at 04:47:29PM +0100, Stefan Hajnoczi wrote:
>> [...]
>
> What prevents overlapping requests (by the waiter's criteria) from
> being inserted into the queue again while there are waiters?
>
> That is, why is it not possible for a waiter to wait indefinitely?

CoQueue is FIFO, so overlapping requests are processed in order and
will not wait indefinitely.

Stefan
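
To spell out why FIFO ordering is sufficient (an illustrative
walkthrough, not from the thread): suppose request A is in
bs->tracked_requests and in flight, and waiter W overlaps it, so W
blocks in qemu_co_queue_wait(&A->wait_queue). A later overlapping
request R queues behind W on the same CoQueue. When A completes,
tracked_request_remove() wakes the queue in FIFO order: W runs first,
re-scans tracked_requests, finds no overlap (R has not reached
tracked_request_add() yet), and inserts its own tracked request. When
R runs next, it overlaps W's request and waits on W instead. New
arrivals always queue behind existing waiters, so no waiter can be
overtaken indefinitely.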
Kevin Wolf Nov. 3, 2011, 2:17 p.m. UTC | #3
Am 17.10.2011 17:47, schrieb Stefan Hajnoczi:
> When copy-on-read is enabled it is necessary to wait for overlapping
> requests before issuing new requests.  This prevents races between the
> copy-on-read and a write request.
> 
> Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>

This doesn't only order guest requests against COR requests, but also
makes guest requests wait on each other. It's probably not a big
problem, but if we have to optimise performance with COR later, this
is something to remember.

An optimisation where only requests of a different type are ordered
wouldn't be too hard, though avoiding starvation of the other type
might get a bit harder.

Let's leave it as it is for now.

Kevin
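
A sketch of the optimisation Kevin describes, using a hypothetical
tracked_request_conflicts() helper that is not part of this series:
keep the wait loop unchanged, but treat only requests of the opposite
type as conflicting, so concurrent reads (or concurrent writes) no
longer serialize on each other.

static bool tracked_request_conflicts(BdrvTrackedRequest *req,
                                      int64_t sector_num, int nb_sectors,
                                      bool is_write)
{
    if (req->is_write == is_write) {
        return false; /* same type: no read-vs-write race to order */
    }
    return sector_num < req->sector_num + req->nb_sectors &&
           req->sector_num < sector_num + nb_sectors;
}

The starvation Kevin hints at follows directly: with cross-type-only
ordering, a steady stream of reads would keep inserting tracked
requests without ever waiting, and an overlapping write could be woken
and re-blocked forever.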

Patch

diff --git a/block.c b/block.c
index e624ac3..cc3202c 100644
--- a/block.c
+++ b/block.c
@@ -1001,6 +1001,7 @@ struct BdrvTrackedRequest {
     int nb_sectors;
     bool is_write;
     QLIST_ENTRY(BdrvTrackedRequest) list;
+    CoQueue wait_queue; /* coroutines blocked on this request */
 };
 
 /**
@@ -1014,6 +1015,12 @@ static void tracked_request_remove(BdrvTrackedRequest *req)
 {
     if (req) {
         QLIST_REMOVE(req, list);
+
+        /* Wake up all coroutines blocked on this request */
+        while (qemu_co_queue_next(&req->wait_queue)) {
+            /* Do nothing */
+        }
+
         g_free(req);
     }
 }
@@ -1038,12 +1045,36 @@ static BdrvTrackedRequest *tracked_request_add(BlockDriverState *bs,
     req->sector_num = sector_num;
     req->nb_sectors = nb_sectors;
     req->is_write = is_write;
+    qemu_co_queue_init(&req->wait_queue);
 
     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
 
     return req;
 }
 
+static bool tracked_request_overlaps(BdrvTrackedRequest *req,
+                                     int64_t sector_num, int nb_sectors) {
+    return false; /* not yet implemented */
+}
+
+static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors)
+{
+    BdrvTrackedRequest *req;
+    bool retry;
+
+    do {
+        retry = false;
+        QLIST_FOREACH(req, &bs->tracked_requests, list) {
+            if (tracked_request_overlaps(req, sector_num, nb_sectors)) {
+                qemu_co_queue_wait(&req->wait_queue);
+                retry = true;
+                break;
+            }
+        }
+    } while (retry);
+}
+
 /**
  * Enable tracking of incoming requests
  *
@@ -1360,6 +1391,10 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
         return -EIO;
     }
 
+    if (bs->copy_on_read) {
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
+    }
+
     req = tracked_request_add(bs, sector_num, nb_sectors, false);
     ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
     tracked_request_remove(req);
@@ -1394,6 +1429,10 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         return -EIO;
     }
 
+    if (bs->copy_on_read) {
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
+    }
+
     req = tracked_request_add(bs, sector_num, nb_sectors, true);
 
     ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);