
[v9,02/13] block/backup: split shareable copying part from backup_do_cow

Message ID: 20190826161312.489398-3-vsementsov@virtuozzo.com
State: New
Series: backup-top filter driver for backup

Commit Message

Vladimir Sementsov-Ogievskiy Aug. 26, 2019, 4:13 p.m. UTC
Split out the copying logic, which will be shared with the backup-top filter.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 block/backup.c | 47 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 15 deletions(-)
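
At a glance, the resulting split looks roughly like this. The sketch below
is paraphrased from the diff at the bottom of this page; tracing calls and
the body of the copy loop are elided, so treat it as an overview rather
than the exact posted code:

static int coroutine_fn backup_do_copy(BackupBlockJob *job,
                                       int64_t offset, uint64_t bytes,
                                       bool *error_is_read,
                                       bool is_write_notifier)
{
    int ret = 0;
    int64_t start = offset, end = bytes + offset; /* bytes */

    /* The shareable part now expects cluster-aligned boundaries. */
    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
    assert(QEMU_IS_ALIGNED(end, job->cluster_size));

    /* ... copy loop moved unchanged from the old backup_do_cow ... */

    return ret;
}

static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    CowRequest cow_request;
    int ret;
    int64_t start, end; /* bytes */

    /* Job-specific locking, alignment and request serialization stay
     * in this wrapper. */
    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    ret = backup_do_copy(job, start, end - start, error_is_read,
                         is_write_notifier);

    cow_request_end(&cow_request);

    /* Tracing and the original function's tail (flush_rwlock unlock)
     * are elided here. */
    return ret;
}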

Comments

Max Reitz Aug. 28, 2019, 2:22 p.m. UTC | #1
On 26.08.19 18:13, Vladimir Sementsov-Ogievskiy wrote:
> Split out the copying logic, which will be shared with the backup-top filter.
> 
> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> ---
>  block/backup.c | 47 ++++++++++++++++++++++++++++++++---------------
>  1 file changed, 32 insertions(+), 15 deletions(-)
> 
> diff --git a/block/backup.c b/block/backup.c
> index 33b144305f..13a1d80157 100644
> --- a/block/backup.c
> +++ b/block/backup.c
> @@ -248,26 +248,18 @@ static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
>      return ret;
>  }
>  
> -static int coroutine_fn backup_do_cow(BackupBlockJob *job,
> -                                      int64_t offset, uint64_t bytes,
> -                                      bool *error_is_read,
> -                                      bool is_write_notifier)
> +static int coroutine_fn backup_do_copy(BackupBlockJob *job,
> +                                       int64_t offset, uint64_t bytes,
> +                                       bool *error_is_read,
> +                                       bool is_write_notifier)
>  {
> -    CowRequest cow_request;
>      int ret = 0;
> -    int64_t start, end; /* bytes */
> +    int64_t start = offset, end = bytes + offset; /* bytes */

Maybe just rename the “offset” parameter to “start”, replace the “bytes”
parameter by an “end” parameter, and drop this line?

Max

>      void *bounce_buffer = NULL;
>      int64_t status_bytes;
>  
> -    qemu_co_rwlock_rdlock(&job->flush_rwlock);
> -
> -    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
> -    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);
> -
> -    trace_backup_do_cow_enter(job, start, offset, bytes);
> -
> -    wait_for_overlapping_requests(job, start, end);
> -    cow_request_begin(&cow_request, job, start, end);
> +    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
> +    assert(QEMU_IS_ALIGNED(end, job->cluster_size));
>  
>      while (start < end) {
>          int64_t dirty_end;
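
Concretely, the suggestion above would turn the helper into something like
the following; this is a sketch of the proposed alternative, not code from
the posted series:

static int coroutine_fn backup_do_copy(BackupBlockJob *job,
                                       int64_t start, int64_t end,
                                       bool *error_is_read,
                                       bool is_write_notifier)
{
    int ret = 0;
    void *bounce_buffer = NULL;
    int64_t status_bytes;

    /* The caller passes cluster-aligned boundaries directly, so the
     * local start/end aliases from the posted patch disappear. */
    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
    assert(QEMU_IS_ALIGNED(end, job->cluster_size));

    /* ... copy loop unchanged ... */

    return ret;
}

The wrapper would then call backup_do_copy(job, start, end, ...) instead
of backup_do_copy(job, start, end - start, ...).
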
Vladimir Sementsov-Ogievskiy Aug. 28, 2019, 2:27 p.m. UTC | #2
28.08.2019 17:22, Max Reitz wrote:
> On 26.08.19 18:13, Vladimir Sementsov-Ogievskiy wrote:
>> Split out the copying logic, which will be shared with the backup-top filter.
>>
>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>> ---
>>   block/backup.c | 47 ++++++++++++++++++++++++++++++++---------------
>>   1 file changed, 32 insertions(+), 15 deletions(-)
>>
>> diff --git a/block/backup.c b/block/backup.c
>> index 33b144305f..13a1d80157 100644
>> --- a/block/backup.c
>> +++ b/block/backup.c
>> @@ -248,26 +248,18 @@ static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
>>       return ret;
>>   }
>>   
>> -static int coroutine_fn backup_do_cow(BackupBlockJob *job,
>> -                                      int64_t offset, uint64_t bytes,
>> -                                      bool *error_is_read,
>> -                                      bool is_write_notifier)
>> +static int coroutine_fn backup_do_copy(BackupBlockJob *job,
>> +                                       int64_t offset, uint64_t bytes,
>> +                                       bool *error_is_read,
>> +                                       bool is_write_notifier)
>>   {
>> -    CowRequest cow_request;
>>       int ret = 0;
>> -    int64_t start, end; /* bytes */
>> +    int64_t start = offset, end = bytes + offset; /* bytes */
> 
> Maybe just rename the “offset” parameter to “start”, replace the “bytes”
> parameter by an “end” parameter, and drop this line?
> 

I really want the final block_copy to have the offset+bytes interface
that is more common in the block layer. So it would be better to
refactor the function itself a bit, but I'd prefer to do that as a
follow-up and keep this patch simpler.
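
For context, the offset+bytes convention referred to here is the one used
by most block-layer functions: a request is described by its starting
offset and its length in bytes rather than by start/end boundaries. A
follow-up refactor could then give the shareable helper a prototype along
these lines; the names below are only illustrative and are not taken from
this series:

int coroutine_fn block_copy(BlockCopyState *s, int64_t offset,
                            int64_t bytes, bool *error_is_read);

That is also why the offset/bytes parameter names are kept in this patch
and the internal start/end arithmetic is left for a later refactor.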

> 
>>       void *bounce_buffer = NULL;
>>       int64_t status_bytes;
>>   
>> -    qemu_co_rwlock_rdlock(&job->flush_rwlock);
>> -
>> -    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
>> -    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);
>> -
>> -    trace_backup_do_cow_enter(job, start, offset, bytes);
>> -
>> -    wait_for_overlapping_requests(job, start, end);
>> -    cow_request_begin(&cow_request, job, start, end);
>> +    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
>> +    assert(QEMU_IS_ALIGNED(end, job->cluster_size));
>>   
>>       while (start < end) {
>>           int64_t dirty_end;
>
Max Reitz Aug. 28, 2019, 2:32 p.m. UTC | #3
On 28.08.19 16:27, Vladimir Sementsov-Ogievskiy wrote:
> 28.08.2019 17:22, Max Reitz wrote:
>> On 26.08.19 18:13, Vladimir Sementsov-Ogievskiy wrote:
>>> Split out the copying logic, which will be shared with the backup-top filter.
>>>
>>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>>> ---
>>>   block/backup.c | 47 ++++++++++++++++++++++++++++++++---------------
>>>   1 file changed, 32 insertions(+), 15 deletions(-)
>>>
>>> diff --git a/block/backup.c b/block/backup.c
>>> index 33b144305f..13a1d80157 100644
>>> --- a/block/backup.c
>>> +++ b/block/backup.c
>>> @@ -248,26 +248,18 @@ static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
>>>       return ret;
>>>   }
>>>   
>>> -static int coroutine_fn backup_do_cow(BackupBlockJob *job,
>>> -                                      int64_t offset, uint64_t bytes,
>>> -                                      bool *error_is_read,
>>> -                                      bool is_write_notifier)
>>> +static int coroutine_fn backup_do_copy(BackupBlockJob *job,
>>> +                                       int64_t offset, uint64_t bytes,
>>> +                                       bool *error_is_read,
>>> +                                       bool is_write_notifier)
>>>   {
>>> -    CowRequest cow_request;
>>>       int ret = 0;
>>> -    int64_t start, end; /* bytes */
>>> +    int64_t start = offset, end = bytes + offset; /* bytes */
>>
>> Maybe just rename the “offset” parameter to “start”, replace the “bytes”
>> parameter by an “end” parameter, and drop this line?
>>
> 
> I really want the final block_copy to have the offset+bytes interface
> that is more common in the block layer. So it would be better to
> refactor the function itself a bit, but I'd prefer to do that as a
> follow-up and keep this patch simpler.

OK, but s/offset/start/ should still be possible.

Max

Patch

diff --git a/block/backup.c b/block/backup.c
index 33b144305f..13a1d80157 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -248,26 +248,18 @@  static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
     return ret;
 }
 
-static int coroutine_fn backup_do_cow(BackupBlockJob *job,
-                                      int64_t offset, uint64_t bytes,
-                                      bool *error_is_read,
-                                      bool is_write_notifier)
+static int coroutine_fn backup_do_copy(BackupBlockJob *job,
+                                       int64_t offset, uint64_t bytes,
+                                       bool *error_is_read,
+                                       bool is_write_notifier)
 {
-    CowRequest cow_request;
     int ret = 0;
-    int64_t start, end; /* bytes */
+    int64_t start = offset, end = bytes + offset; /* bytes */
     void *bounce_buffer = NULL;
     int64_t status_bytes;
 
-    qemu_co_rwlock_rdlock(&job->flush_rwlock);
-
-    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
-    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);
-
-    trace_backup_do_cow_enter(job, start, offset, bytes);
-
-    wait_for_overlapping_requests(job, start, end);
-    cow_request_begin(&cow_request, job, start, end);
+    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
+    assert(QEMU_IS_ALIGNED(end, job->cluster_size));
 
     while (start < end) {
         int64_t dirty_end;
@@ -326,6 +318,31 @@  static int coroutine_fn backup_do_cow(BackupBlockJob *job,
         qemu_vfree(bounce_buffer);
     }
 
+    return ret;
+}
+
+static int coroutine_fn backup_do_cow(BackupBlockJob *job,
+                                      int64_t offset, uint64_t bytes,
+                                      bool *error_is_read,
+                                      bool is_write_notifier)
+{
+    CowRequest cow_request;
+    int ret = 0;
+    int64_t start, end; /* bytes */
+
+    qemu_co_rwlock_rdlock(&job->flush_rwlock);
+
+    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
+    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);
+
+    trace_backup_do_cow_enter(job, start, offset, bytes);
+
+    wait_for_overlapping_requests(job, start, end);
+    cow_request_begin(&cow_request, job, start, end);
+
+    ret = backup_do_copy(job, start, end - start, error_is_read,
+                         is_write_notifier);
+
     cow_request_end(&cow_request);
 
     trace_backup_do_cow_return(job, offset, bytes, ret);