
[v7,07/33] block/block-copy: introduce block_copy_set_copy_opts()

Message ID 20210804093813.20688-8-vsementsov@virtuozzo.com
State New
Series block: publish backup-top filter

Commit Message

Vladimir Sementsov-Ogievskiy Aug. 4, 2021, 9:37 a.m. UTC
We'll need the ability to set the compress and use_copy_range options
after the state has been initialized. So make the corresponding part of
block_copy_state_new() separate and public.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 include/block/block-copy.h |  2 ++
 block/block-copy.c         | 66 +++++++++++++++++++++-----------------
 2 files changed, 39 insertions(+), 29 deletions(-)
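
For context, a rough sketch (not part of the patch, and not compilable outside
the QEMU tree) of the call pattern this split is meant to enable; the
example_setup() wrapper and its error handling are invented for illustration,
while block_copy_state_new(), block_copy_set_copy_opts() and their parameters
come from the declarations in the patch below:

    #include "block/block-copy.h"

    /*
     * Hypothetical caller (illustration only): create the state once with
     * initial options, then retune use_copy_range/compress later, before
     * the next copy request is issued.
     */
    static BlockCopyState *example_setup(BdrvChild *source, BdrvChild *target,
                                         int64_t cluster_size, Error **errp)
    {
        BlockCopyState *s = block_copy_state_new(source, target, cluster_size,
                                                 true  /* use_copy_range */,
                                                 false /* compress */, errp);
        if (!s) {
            return NULL;   /* errp has been set by block_copy_state_new() */
        }

        /* Later, e.g. when job options change, but before any copy request: */
        block_copy_set_copy_opts(s, false /* use_copy_range */,
                                 true  /* compress */);
        return s;
    }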

Comments

Hanna Czenczek Aug. 10, 2021, 2:55 p.m. UTC | #1
On 04.08.21 11:37, Vladimir Sementsov-Ogievskiy wrote:
> We'll need the ability to set the compress and use_copy_range options
> after the state has been initialized. So make the corresponding part of
> block_copy_state_new() separate and public.
>
> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> ---
>   include/block/block-copy.h |  2 ++
>   block/block-copy.c         | 66 +++++++++++++++++++++-----------------
>   2 files changed, 39 insertions(+), 29 deletions(-)
>
> diff --git a/include/block/block-copy.h b/include/block/block-copy.h
> index 734389d32a..f0ba7bc828 100644
> --- a/include/block/block-copy.h
> +++ b/include/block/block-copy.h
> @@ -28,6 +28,8 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
>                                        int64_t cluster_size, bool use_copy_range,
>                                        bool compress, Error **errp);
>   
> +void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
> +                              bool compress);
>   void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
>   
>   void block_copy_state_free(BlockCopyState *s);
> diff --git a/block/block-copy.c b/block/block-copy.c
> index 58b4345a5a..84c29fb233 100644
> --- a/block/block-copy.c
> +++ b/block/block-copy.c
> @@ -315,21 +315,11 @@ static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
>                                        target->bs->bl.max_transfer));
>   }
>   
> -BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
> -                                     int64_t cluster_size, bool use_copy_range,
> -                                     bool compress, Error **errp)
> +/* Function should be called prior to any actual copy request */

Given this is something the caller should know, I’d prefer putting this 
into the block-copy.h header.

However, I realize we have a wild mix of this in qemu and often do put 
such information into the C source file, so...
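
A sketch of the header-comment alternative suggested here (not part of this
patch; the exact wording is invented), applied to the declaration in
include/block/block-copy.h:

    /*
     * Must be called before any actual copy request is started; it only
     * (re)configures how future requests will copy data.
     */
    void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
                                  bool compress);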

> +void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
> +                              bool compress)
>   {
> -    BlockCopyState *s;
> -    BdrvDirtyBitmap *copy_bitmap;
>       bool is_fleecing;
> -
> -    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
> -                                           errp);
> -    if (!copy_bitmap) {
> -        return NULL;
> -    }
> -    bdrv_disable_dirty_bitmap(copy_bitmap);
> -
>       /*
>        * If source is in backing chain of target assume that target is going to be
>        * used for "image fleecing", i.e. it should represent a kind of snapshot of
> @@ -344,24 +334,12 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
>        * For more information see commit f8d59dfb40bb and test
>        * tests/qemu-iotests/222
>        */
> -    is_fleecing = bdrv_chain_contains(target->bs, source->bs);
> +    is_fleecing = bdrv_chain_contains(s->target->bs, s->source->bs);
>   
> -    s = g_new(BlockCopyState, 1);
> -    *s = (BlockCopyState) {
> -        .source = source,
> -        .target = target,
> -        .copy_bitmap = copy_bitmap,
> -        .cluster_size = cluster_size,
> -        .len = bdrv_dirty_bitmap_size(copy_bitmap),
> -        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
> -            (compress ? BDRV_REQ_WRITE_COMPRESSED : 0),
> -        .mem = shres_create(BLOCK_COPY_MAX_MEM),
> -        .max_transfer = QEMU_ALIGN_DOWN(
> -                                    block_copy_max_transfer(source, target),
> -                                    cluster_size),
> -    };
> +    s->write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
> +        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

Shouldn’t we keep the is_fleecing check in block_copy_state_new()? We 
must perform it at least once, but there is no benefit in repeating it 
on every block_copy_set_copy_opts() call, right?

Hanna
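
One way that suggestion could look (a sketch only, not code from this series;
the fleecing_write_flags field is invented): block_copy_state_new() evaluates
is_fleecing once and caches the resulting flag, so block_copy_set_copy_opts()
never walks the backing chain again:

    void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
                                  bool compress)
    {
        /* fleecing_write_flags is BDRV_REQ_SERIALISING or 0, set once at
         * state creation, so the backing chain is not re-checked here */
        s->write_flags = s->fleecing_write_flags |
                         (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

        /* Method selection based on use_copy_range stays as in the patch. */
    }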
Vladimir Sementsov-Ogievskiy Aug. 23, 2021, 12:05 p.m. UTC | #2
10.08.2021 17:55, Hanna Reitz wrote:
> On 04.08.21 11:37, Vladimir Sementsov-Ogievskiy wrote:
>> We'll need the ability to set the compress and use_copy_range options
>> after the state has been initialized. So make the corresponding part of
>> block_copy_state_new() separate and public.
>>
>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>> ---
>>   include/block/block-copy.h |  2 ++
>>   block/block-copy.c         | 66 +++++++++++++++++++++-----------------
>>   2 files changed, 39 insertions(+), 29 deletions(-)
>>
>> diff --git a/include/block/block-copy.h b/include/block/block-copy.h
>> index 734389d32a..f0ba7bc828 100644
>> --- a/include/block/block-copy.h
>> +++ b/include/block/block-copy.h
>> @@ -28,6 +28,8 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
>>                                        int64_t cluster_size, bool use_copy_range,
>>                                        bool compress, Error **errp);
>> +void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
>> +                              bool compress);
>>   void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
>>   void block_copy_state_free(BlockCopyState *s);
>> diff --git a/block/block-copy.c b/block/block-copy.c
>> index 58b4345a5a..84c29fb233 100644
>> --- a/block/block-copy.c
>> +++ b/block/block-copy.c
>> @@ -315,21 +315,11 @@ static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
>>                                        target->bs->bl.max_transfer));
>>   }
>> -BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
>> -                                     int64_t cluster_size, bool use_copy_range,
>> -                                     bool compress, Error **errp)
>> +/* Function should be called prior to any actual copy request */
> 
> Given this is something the caller should know, I’d prefer putting this into the block-copy.h header.
> 
> However, I realize we have a wild mix of this in qemu and often do put such information into the C source file, so...
> 
>> +void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
>> +                              bool compress)
>>   {
>> -    BlockCopyState *s;
>> -    BdrvDirtyBitmap *copy_bitmap;
>>       bool is_fleecing;
>> -
>> -    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
>> -                                           errp);
>> -    if (!copy_bitmap) {
>> -        return NULL;
>> -    }
>> -    bdrv_disable_dirty_bitmap(copy_bitmap);
>> -
>>       /*
>>        * If source is in backing chain of target assume that target is going to be
>>        * used for "image fleecing", i.e. it should represent a kind of snapshot of
>> @@ -344,24 +334,12 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
>>        * For more information see commit f8d59dfb40bb and test
>>        * tests/qemu-iotests/222
>>        */
>> -    is_fleecing = bdrv_chain_contains(target->bs, source->bs);
>> +    is_fleecing = bdrv_chain_contains(s->target->bs, s->source->bs);
>> -    s = g_new(BlockCopyState, 1);
>> -    *s = (BlockCopyState) {
>> -        .source = source,
>> -        .target = target,
>> -        .copy_bitmap = copy_bitmap,
>> -        .cluster_size = cluster_size,
>> -        .len = bdrv_dirty_bitmap_size(copy_bitmap),
>> -        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
>> -            (compress ? BDRV_REQ_WRITE_COMPRESSED : 0),
>> -        .mem = shres_create(BLOCK_COPY_MAX_MEM),
>> -        .max_transfer = QEMU_ALIGN_DOWN(
>> -                                    block_copy_max_transfer(source, target),
>> -                                    cluster_size),
>> -    };
>> +    s->write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
>> +        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
> 
> Shouldn’t we keep the is_fleecing check in block_copy_state_new()? We must perform it at least once, but there is no benefit in repeating it on every block_copy_set_copy_opts() call, right?
> 

I think it may help if the user calls block_copy_set_copy_opts() after a graph change.
Vladimir Sementsov-Ogievskiy Aug. 23, 2021, 5:40 p.m. UTC | #3
23.08.2021 15:05, Vladimir Sementsov-Ogievskiy wrote:
> 10.08.2021 17:55, Hanna Reitz wrote:
>> On 04.08.21 11:37, Vladimir Sementsov-Ogievskiy wrote:
>>> We'll need the ability to set the compress and use_copy_range options
>>> after the state has been initialized. So make the corresponding part of
>>> block_copy_state_new() separate and public.
>>>
>>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>>> ---
>>>   include/block/block-copy.h |  2 ++
>>>   block/block-copy.c         | 66 +++++++++++++++++++++-----------------
>>>   2 files changed, 39 insertions(+), 29 deletions(-)
>>>
>>> diff --git a/include/block/block-copy.h b/include/block/block-copy.h
>>> index 734389d32a..f0ba7bc828 100644
>>> --- a/include/block/block-copy.h
>>> +++ b/include/block/block-copy.h
>>> @@ -28,6 +28,8 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
>>>                                        int64_t cluster_size, bool use_copy_range,
>>>                                        bool compress, Error **errp);
>>> +void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
>>> +                              bool compress);
>>>   void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
>>>   void block_copy_state_free(BlockCopyState *s);
>>> diff --git a/block/block-copy.c b/block/block-copy.c
>>> index 58b4345a5a..84c29fb233 100644
>>> --- a/block/block-copy.c
>>> +++ b/block/block-copy.c
>>> @@ -315,21 +315,11 @@ static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
>>>                                        target->bs->bl.max_transfer));
>>>   }
>>> -BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
>>> -                                     int64_t cluster_size, bool use_copy_range,
>>> -                                     bool compress, Error **errp)
>>> +/* Function should be called prior to any actual copy request */
>>
>> Given this is something the caller should know, I’d prefer putting this into the block-copy.h header.
>>
>> However, I realize we have a wild mix of this in qemu and often do put such information into the C source file, so...
>>
>>> +void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
>>> +                              bool compress)
>>>   {
>>> -    BlockCopyState *s;
>>> -    BdrvDirtyBitmap *copy_bitmap;
>>>       bool is_fleecing;
>>> -
>>> -    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
>>> -                                           errp);
>>> -    if (!copy_bitmap) {
>>> -        return NULL;
>>> -    }
>>> -    bdrv_disable_dirty_bitmap(copy_bitmap);
>>> -
>>>       /*
>>>        * If source is in backing chain of target assume that target is going to be
>>>        * used for "image fleecing", i.e. it should represent a kind of snapshot of
>>> @@ -344,24 +334,12 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
>>>        * For more information see commit f8d59dfb40bb and test
>>>        * tests/qemu-iotests/222
>>>        */
>>> -    is_fleecing = bdrv_chain_contains(target->bs, source->bs);
>>> +    is_fleecing = bdrv_chain_contains(s->target->bs, s->source->bs);
>>> -    s = g_new(BlockCopyState, 1);
>>> -    *s = (BlockCopyState) {
>>> -        .source = source,
>>> -        .target = target,
>>> -        .copy_bitmap = copy_bitmap,
>>> -        .cluster_size = cluster_size,
>>> -        .len = bdrv_dirty_bitmap_size(copy_bitmap),
>>> -        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
>>> -            (compress ? BDRV_REQ_WRITE_COMPRESSED : 0),
>>> -        .mem = shres_create(BLOCK_COPY_MAX_MEM),
>>> -        .max_transfer = QEMU_ALIGN_DOWN(
>>> -                                    block_copy_max_transfer(source, target),
>>> -                                    cluster_size),
>>> -    };
>>> +    s->write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
>>> +        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
>>
>> Shouldn’t we keep the is_fleecing check in block_copy_state_new()? We must perform it at least once, but there is no benefit in repeating it on every block_copy_set_copy_opts() call, right?
>>
> 
> I think it may help if the user calls block_copy_set_copy_opts() after a graph change.
> 
> 

On the other hand, nobody actually calls this function after a generic graph change, and the intended usage is to start backup when the source is already a backing child of the target. I'll change it, at least so the code is more obvious and doesn't raise questions.

Patch

diff --git a/include/block/block-copy.h b/include/block/block-copy.h
index 734389d32a..f0ba7bc828 100644
--- a/include/block/block-copy.h
+++ b/include/block/block-copy.h
@@ -28,6 +28,8 @@  BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                      int64_t cluster_size, bool use_copy_range,
                                      bool compress, Error **errp);
 
+void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
+                              bool compress);
 void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
 
 void block_copy_state_free(BlockCopyState *s);
diff --git a/block/block-copy.c b/block/block-copy.c
index 58b4345a5a..84c29fb233 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -315,21 +315,11 @@  static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
                                      target->bs->bl.max_transfer));
 }
 
-BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
-                                     int64_t cluster_size, bool use_copy_range,
-                                     bool compress, Error **errp)
+/* Function should be called prior to any actual copy request */
+void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
+                              bool compress)
 {
-    BlockCopyState *s;
-    BdrvDirtyBitmap *copy_bitmap;
     bool is_fleecing;
-
-    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
-                                           errp);
-    if (!copy_bitmap) {
-        return NULL;
-    }
-    bdrv_disable_dirty_bitmap(copy_bitmap);
-
     /*
      * If source is in backing chain of target assume that target is going to be
      * used for "image fleecing", i.e. it should represent a kind of snapshot of
@@ -344,24 +334,12 @@  BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
      * For more information see commit f8d59dfb40bb and test
      * tests/qemu-iotests/222
      */
-    is_fleecing = bdrv_chain_contains(target->bs, source->bs);
+    is_fleecing = bdrv_chain_contains(s->target->bs, s->source->bs);
 
-    s = g_new(BlockCopyState, 1);
-    *s = (BlockCopyState) {
-        .source = source,
-        .target = target,
-        .copy_bitmap = copy_bitmap,
-        .cluster_size = cluster_size,
-        .len = bdrv_dirty_bitmap_size(copy_bitmap),
-        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
-            (compress ? BDRV_REQ_WRITE_COMPRESSED : 0),
-        .mem = shres_create(BLOCK_COPY_MAX_MEM),
-        .max_transfer = QEMU_ALIGN_DOWN(
-                                    block_copy_max_transfer(source, target),
-                                    cluster_size),
-    };
+    s->write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
+        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
 
-    if (s->max_transfer < cluster_size) {
+    if (s->max_transfer < s->cluster_size) {
         /*
          * copy_range does not respect max_transfer. We don't want to bother
          * with requests smaller than block-copy cluster size, so fallback to
@@ -379,6 +357,36 @@  BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
          */
         s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
     }
+}
+
+BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
+                                     int64_t cluster_size, bool use_copy_range,
+                                     bool compress, Error **errp)
+{
+    BlockCopyState *s;
+    BdrvDirtyBitmap *copy_bitmap;
+
+    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
+                                           errp);
+    if (!copy_bitmap) {
+        return NULL;
+    }
+    bdrv_disable_dirty_bitmap(copy_bitmap);
+
+    s = g_new(BlockCopyState, 1);
+    *s = (BlockCopyState) {
+        .source = source,
+        .target = target,
+        .copy_bitmap = copy_bitmap,
+        .cluster_size = cluster_size,
+        .len = bdrv_dirty_bitmap_size(copy_bitmap),
+        .mem = shres_create(BLOCK_COPY_MAX_MEM),
+        .max_transfer = QEMU_ALIGN_DOWN(
+                                    block_copy_max_transfer(source, target),
+                                    cluster_size),
+    };
+
+    block_copy_set_copy_opts(s, use_copy_range, compress);
 
     ratelimit_init(&s->rate_limit);
     qemu_co_mutex_init(&s->lock);