[RFC,5/6] block: core copy-on-read logic

Message ID 1318866452-30026-6-git-send-email-stefanha@linux.vnet.ibm.com
State New

Commit Message

Stefan Hajnoczi Oct. 17, 2011, 3:47 p.m. UTC
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
 block.c      |   69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 trace-events |    1 +
 2 files changed, 70 insertions(+), 0 deletions(-)

Comments

Marcelo Tosatti Oct. 18, 2011, 2 p.m. UTC | #1
On Mon, Oct 17, 2011 at 04:47:31PM +0100, Stefan Hajnoczi wrote:
> Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
> ---
>  block.c      |   69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  trace-events |    1 +
>  2 files changed, 70 insertions(+), 0 deletions(-)
> 
> diff --git a/block.c b/block.c
> index 0c22741..2aec6b4 100644
> --- a/block.c
> +++ b/block.c
> @@ -1409,6 +1409,55 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
>      return 0;
>  }
>  
> +static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
> +        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
> +{
> +    void *bounce_buffer;
> +    struct iovec iov;
> +    QEMUIOVector bounce_qiov;
> +    int64_t cluster_sector_num;
> +    int cluster_nb_sectors;
> +    size_t skip_bytes;
> +    int ret;
> +
> +    /* Cover entire cluster so no additional backing file I/O is required when
> +     * allocating cluster in the image file.
> +     */
> +    round_to_clusters(bs, sector_num, nb_sectors,
> +                      &cluster_sector_num, &cluster_nb_sectors);
> +
> +    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors,
> +                                cluster_sector_num, cluster_nb_sectors);
> +
> +    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
> +    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
> +    qemu_iovec_init_external(&bounce_qiov, &iov, 1);
> +
> +    ret = bs->drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
> +                                 &bounce_qiov);
> +    if (ret < 0) {
> +        goto err;
> +    }
> +
> +    ret = bs->drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
> +                                  &bounce_qiov);
> +    if (ret < 0) {
> +        /* It might be okay to ignore write errors for guest requests.  If this
> +         * is a deliberate copy-on-read then we don't want to ignore the error.
> +         * Simply report it in all cases.
> +         */
> +        goto err;
> +    }
> +
> +    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
> +    qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
> +                           nb_sectors * BDRV_SECTOR_SIZE);
> +
> +err:
> +    qemu_vfree(bounce_buffer);
> +    return ret;
> +}
> +
>  /*
>   * Handle a read request in coroutine context
>   */
> @@ -1431,7 +1480,27 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
>      }
>  
>      req = tracked_request_add(bs, sector_num, nb_sectors, false);

Should the tracked request include the cluster rounding info?
Marcelo Tosatti Oct. 18, 2011, 2:03 p.m. UTC | #2
On Mon, Oct 17, 2011 at 04:47:31PM +0100, Stefan Hajnoczi wrote:
> Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
> ---
>  block.c      |   69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  trace-events |    1 +
>  2 files changed, 70 insertions(+), 0 deletions(-)
> 
> diff --git a/block.c b/block.c
> index 0c22741..2aec6b4 100644
> --- a/block.c
> +++ b/block.c
> @@ -1409,6 +1409,55 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
>      return 0;
>  }
>  
> +static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
> +        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
> +{
> +    void *bounce_buffer;
> +    struct iovec iov;
> +    QEMUIOVector bounce_qiov;
> +    int64_t cluster_sector_num;
> +    int cluster_nb_sectors;
> +    size_t skip_bytes;
> +    int ret;
> +
> +    /* Cover entire cluster so no additional backing file I/O is required when
> +     * allocating cluster in the image file.
> +     */
> +    round_to_clusters(bs, sector_num, nb_sectors,
> +                      &cluster_sector_num, &cluster_nb_sectors);
> +
> +    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors,
> +                                cluster_sector_num, cluster_nb_sectors);
> +
> +    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
> +    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
> +    qemu_iovec_init_external(&bounce_qiov, &iov, 1);
> +
> +    ret = bs->drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
> +                                 &bounce_qiov);
> +    if (ret < 0) {
> +        goto err;
> +    }
> +
> +    ret = bs->drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
> +                                  &bounce_qiov);
> +    if (ret < 0) {
> +        /* It might be okay to ignore write errors for guest requests.  If this
> +         * is a deliberate copy-on-read then we don't want to ignore the error.
> +         * Simply report it in all cases.
> +         */
> +        goto err;
> +    }
> +
> +    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
> +    qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
> +                           nb_sectors * BDRV_SECTOR_SIZE);
> +
> +err:
> +    qemu_vfree(bounce_buffer);
> +    return ret;
> +}
> +
>  /*
>   * Handle a read request in coroutine context
>   */
> @@ -1431,7 +1480,27 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
>      }
>  
>      req = tracked_request_add(bs, sector_num, nb_sectors, false);
> +
> +    if (bs->copy_on_read) {
> +        int pnum;
> +
> +        /* TODO it is not safe to call bdrv_is_allocated() in coroutine context
> +         * because it's a synchronous interface.  We probably want a
> +         * bdrv_co_is_allocated(). */
> +        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
> +        if (ret < 0) {
> +            goto out;
> +        }

By the way, this lacks shared base image support (that is, the copy should
only be performed if the cluster is not already present in the destination
chain). It could be added later.
Stefan Hajnoczi Oct. 20, 2011, 5:40 p.m. UTC | #3
On Tue, Oct 18, 2011 at 7:00 AM, Marcelo Tosatti <mtosatti@redhat.com> wrote:
> On Mon, Oct 17, 2011 at 04:47:31PM +0100, Stefan Hajnoczi wrote:
>> Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
>> ---
>>  block.c      |   69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>>  trace-events |    1 +
>>  2 files changed, 70 insertions(+), 0 deletions(-)
>>
>> diff --git a/block.c b/block.c
>> index 0c22741..2aec6b4 100644
>> --- a/block.c
>> +++ b/block.c
>> @@ -1409,6 +1409,55 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
>>      return 0;
>>  }
>>
>> +static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
>> +        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
>> +{
>> +    void *bounce_buffer;
>> +    struct iovec iov;
>> +    QEMUIOVector bounce_qiov;
>> +    int64_t cluster_sector_num;
>> +    int cluster_nb_sectors;
>> +    size_t skip_bytes;
>> +    int ret;
>> +
>> +    /* Cover entire cluster so no additional backing file I/O is required when
>> +     * allocating cluster in the image file.
>> +     */
>> +    round_to_clusters(bs, sector_num, nb_sectors,
>> +                      &cluster_sector_num, &cluster_nb_sectors);
>> +
>> +    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors,
>> +                                cluster_sector_num, cluster_nb_sectors);
>> +
>> +    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
>> +    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
>> +    qemu_iovec_init_external(&bounce_qiov, &iov, 1);
>> +
>> +    ret = bs->drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
>> +                                 &bounce_qiov);
>> +    if (ret < 0) {
>> +        goto err;
>> +    }
>> +
>> +    ret = bs->drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
>> +                                  &bounce_qiov);
>> +    if (ret < 0) {
>> +        /* It might be okay to ignore write errors for guest requests.  If this
>> +         * is a deliberate copy-on-read then we don't want to ignore the error.
>> +         * Simply report it in all cases.
>> +         */
>> +        goto err;
>> +    }
>> +
>> +    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
>> +    qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
>> +                           nb_sectors * BDRV_SECTOR_SIZE);
>> +
>> +err:
>> +    qemu_vfree(bounce_buffer);
>> +    return ret;
>> +}
>> +
>>  /*
>>   * Handle a read request in coroutine context
>>   */
>> @@ -1431,7 +1480,27 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
>>      }
>>
>>      req = tracked_request_add(bs, sector_num, nb_sectors, false);
>
> Should the tracked request include the cluster rounding info?

When checking A and B for overlap, only one of them needs to be
rounded in order for overlap detection to be correct.  Therefore we
can store the original request [start, length) in tracked_requests and
only round the new request.

Stefan
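An editorial sketch of the overlap check Stefan describes (the
tracked-request type, field, and list names here are assumptions, not taken
from the series):

    /* Tracked requests keep their original, unrounded bounds. */
    static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                         int64_t sector_num, int nb_sectors)
    {
        /* [sector_num, sector_num + nb_sectors), exclusive end */
        return !(sector_num >= req->sector_num + req->nb_sectors ||
                 sector_num + nb_sectors <= req->sector_num);
    }

    /* Only the new request is rounded to cluster boundaries before it is
     * checked against every tracked (unrounded) request:
     *
     *     round_to_clusters(bs, sector_num, nb_sectors,
     *                       &cluster_sector_num, &cluster_nb_sectors);
     *     QLIST_FOREACH(req, &bs->tracked_requests, list) {
     *         if (tracked_request_overlaps(req, cluster_sector_num,
     *                                      cluster_nb_sectors)) {
     *             ... wait for req to complete, then re-check ...
     *         }
     *     }
     */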
Kevin Wolf Nov. 3, 2011, 2:30 p.m. UTC | #4
On 17.10.2011 17:47, Stefan Hajnoczi wrote:
> Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
> ---
>  block.c      |   69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  trace-events |    1 +
>  2 files changed, 70 insertions(+), 0 deletions(-)
> 
> diff --git a/block.c b/block.c
> index 0c22741..2aec6b4 100644
> --- a/block.c
> +++ b/block.c
> @@ -1409,6 +1409,55 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
>      return 0;
>  }
>  
> +static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
> +        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
> +{
> +    void *bounce_buffer;

I think the bounce buffer deserves a comment. It may not be obvious to
everyone why it's needed.
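As an illustration only, such a comment could say something along these
lines (wording is mine, not from the series):

    /* A bounce buffer is needed because the request has been rounded out
     * to cluster boundaries: more data is read from the backing file than
     * the guest asked for, so it cannot go straight into the caller's
     * qiov.  The whole cluster is staged in a private, properly aligned
     * buffer, written back to the image file, and only the originally
     * requested sub-range is then copied into the guest's qiov.
     */
    void *bounce_buffer;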

> +    struct iovec iov;
> +    QEMUIOVector bounce_qiov;
> +    int64_t cluster_sector_num;
> +    int cluster_nb_sectors;
> +    size_t skip_bytes;
> +    int ret;
> +
> +    /* Cover entire cluster so no additional backing file I/O is required when
> +     * allocating cluster in the image file.
> +     */
> +    round_to_clusters(bs, sector_num, nb_sectors,
> +                      &cluster_sector_num, &cluster_nb_sectors);
> +
> +    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors,
> +                                cluster_sector_num, cluster_nb_sectors);
> +
> +    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
> +    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
> +    qemu_iovec_init_external(&bounce_qiov, &iov, 1);
> +
> +    ret = bs->drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
> +                                 &bounce_qiov);
> +    if (ret < 0) {
> +        goto err;
> +    }
> +
> +    ret = bs->drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
> +                                  &bounce_qiov);
> +    if (ret < 0) {
> +        /* It might be okay to ignore write errors for guest requests.  If this
> +         * is a deliberate copy-on-read then we don't want to ignore the error.
> +         * Simply report it in all cases.
> +         */
> +        goto err;
> +    }
> +
> +    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
> +    qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
> +                           nb_sectors * BDRV_SECTOR_SIZE);
> +
> +err:
> +    qemu_vfree(bounce_buffer);
> +    return ret;
> +}
> +
>  /*
>   * Handle a read request in coroutine context
>   */
> @@ -1431,7 +1480,27 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
>      }
>  
>      req = tracked_request_add(bs, sector_num, nb_sectors, false);
> +
> +    if (bs->copy_on_read) {
> +        int pnum;
> +
> +        /* TODO it is not safe to call bdrv_is_allocated() in coroutine context
> +         * because it's a synchronous interface.  We probably want a
> +         * bdrv_co_is_allocated(). */
> +        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);

I think I already said it in a reply to another patch, but for the
record: We need bdrv_co_is_allocated() before this can be merged.

Kevin
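For context, the interface being asked for might look roughly like this.
This is a sketch only; the .bdrv_co_is_allocated driver callback named here
is an assumption, not necessarily how it was eventually implemented:

    /* Coroutine-safe variant: unlike the synchronous bdrv_is_allocated()
     * wrapper it must not spin a nested event loop, because the caller is
     * already running inside a coroutine.
     */
    static int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
    {
        if (!bs->drv->bdrv_co_is_allocated) {
            /* Without driver support, report everything as allocated so
             * that copy-on-read degrades to a plain read. */
            *pnum = nb_sectors;
            return 1;
        }
        return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors,
                                             pnum);
    }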

Patch

diff --git a/block.c b/block.c
index 0c22741..2aec6b4 100644
--- a/block.c
+++ b/block.c
@@ -1409,6 +1409,55 @@  int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
     return 0;
 }
 
+static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+{
+    void *bounce_buffer;
+    struct iovec iov;
+    QEMUIOVector bounce_qiov;
+    int64_t cluster_sector_num;
+    int cluster_nb_sectors;
+    size_t skip_bytes;
+    int ret;
+
+    /* Cover entire cluster so no additional backing file I/O is required when
+     * allocating cluster in the image file.
+     */
+    round_to_clusters(bs, sector_num, nb_sectors,
+                      &cluster_sector_num, &cluster_nb_sectors);
+
+    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors,
+                                cluster_sector_num, cluster_nb_sectors);
+
+    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
+    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
+    qemu_iovec_init_external(&bounce_qiov, &iov, 1);
+
+    ret = bs->drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
+                                 &bounce_qiov);
+    if (ret < 0) {
+        goto err;
+    }
+
+    ret = bs->drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
+                                  &bounce_qiov);
+    if (ret < 0) {
+        /* It might be okay to ignore write errors for guest requests.  If this
+         * is a deliberate copy-on-read then we don't want to ignore the error.
+         * Simply report it in all cases.
+         */
+        goto err;
+    }
+
+    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
+    qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
+                           nb_sectors * BDRV_SECTOR_SIZE);
+
+err:
+    qemu_vfree(bounce_buffer);
+    return ret;
+}
+
 /*
  * Handle a read request in coroutine context
  */
@@ -1431,7 +1480,27 @@  static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
     }
 
     req = tracked_request_add(bs, sector_num, nb_sectors, false);
+
+    if (bs->copy_on_read) {
+        int pnum;
+
+        /* TODO it is not safe to call bdrv_is_allocated() in coroutine context
+         * because it's a synchronous interface.  We probably want a
+         * bdrv_co_is_allocated(). */
+        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
+        if (ret < 0) {
+            goto out;
+        }
+
+        if (!ret || pnum != nb_sectors) {
+            ret = bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, qiov);
+            goto out;
+        }
+    }
+
     ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
+
+out:
     tracked_request_remove(req);
     return ret;
 }
diff --git a/trace-events b/trace-events
index 63d8c8e..7e52ed6 100644
--- a/trace-events
+++ b/trace-events
@@ -68,6 +68,7 @@  bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d"
 bdrv_co_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
 bdrv_co_writev(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
 bdrv_co_io_em(void *bs, int64_t sector_num, int nb_sectors, int is_write, void *acb) "bs %p sector_num %"PRId64" nb_sectors %d is_write %d acb %p"
+bdrv_co_copy_on_readv(void *bs, int64_t sector_num, int nb_sectors, int64_t cluster_sector_num, int cluster_nb_sectors) "bs %p sector_num %"PRId64" nb_sectors %d cluster_sector_num %"PRId64" cluster_nb_sectors %d"
 
 # hw/virtio-blk.c
 virtio_blk_req_complete(void *req, int status) "req %p status %d"