diff mbox series

[v3,05/10] block/dirty-bitmap: switch _next_dirty_area and _next_zero to int64_t

Message ID 20191219100348.24827-6-vsementsov@virtuozzo.com
State New
Headers show
Series [v3,01/10] hbitmap: assert that we don't create bitmap larger than INT64_MAX | expand

Commit Message

Vladimir Sementsov-Ogievskiy Dec. 19, 2019, 10:03 a.m. UTC
We are going to introduce bdrv_dirty_bitmap_next_dirty so that the same
variable may be used to store its return value and to be its parameter,
so it should be int64_t.

Similarly, we are going to refactor hbitmap_next_dirty_area to use
hbitmap_next_dirty together with hbitmap_next_zero, therefore we want
hbitmap_next_zero parameter type to be int64_t too.

So, for convenience update all parameters of *_next_zero and
*_next_dirty_area to be int64_t.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 include/block/dirty-bitmap.h |  6 +++---
 include/qemu/hbitmap.h       |  7 +++----
 block/dirty-bitmap.c         |  6 +++---
 nbd/server.c                 |  2 +-
 tests/test-hbitmap.c         | 32 ++++++++++++++++----------------
 util/hbitmap.c               | 13 ++++++++-----
 6 files changed, 34 insertions(+), 32 deletions(-)

Comments

Max Reitz Jan. 20, 2020, 11:59 a.m. UTC | #1
On 19.12.19 11:03, Vladimir Sementsov-Ogievskiy wrote:
> We are going to introduce bdrv_dirty_bitmap_next_dirty so that same
> variable may be used to store its return value and to be its parameter,
> so it would int64_t.
> 
> Similarly, we are going to refactor hbitmap_next_dirty_area to use
> hbitmap_next_dirty together with hbitmap_next_zero, therefore we want
> hbitmap_next_zero parameter type to be int64_t too.
> 
> So, for convenience update all parameters of *_next_zero and
> *_next_dirty_area to be int64_t.
> 
> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> ---
>  include/block/dirty-bitmap.h |  6 +++---
>  include/qemu/hbitmap.h       |  7 +++----
>  block/dirty-bitmap.c         |  6 +++---
>  nbd/server.c                 |  2 +-
>  tests/test-hbitmap.c         | 32 ++++++++++++++++----------------
>  util/hbitmap.c               | 13 ++++++++-----
>  6 files changed, 34 insertions(+), 32 deletions(-)

[...]

> diff --git a/util/hbitmap.c b/util/hbitmap.c
> index b6d4b99a06..df22f06be6 100644
> --- a/util/hbitmap.c
> +++ b/util/hbitmap.c
> @@ -193,7 +193,7 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
>      }
>  }
>  
> -int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
> +int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
>  {
>      size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
>      unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
> @@ -202,6 +202,8 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
>      uint64_t end_bit, sz;
>      int64_t res;
>  
> +    assert(start >= 0 && count >= 0);
> +
>      if (start >= hb->orig_size || count == 0) {
>          return -1;
>      }
As far as I can see, NBD just passes NBDRequest.from (which is a
uint64_t) to this function (on NBD_CMD_BLOCK_STATUS).  Would this allow
a malicious client to send a value > INT64_MAX, thus provoking an
overflow and killing the server with this new assertion?

On second thought, we have this problem already everywhere in
nbd_handle_request().  I don’t see it or its caller ever checking
whether the received values are in bounds, it just passes them to all
kind of block layer functions that sometimes even just accept plain
ints.  Well, I suppose all other functions just error out, so it
probably isn’t an actual problem in practice so far...

Max
Vladimir Sementsov-Ogievskiy Jan. 20, 2020, 12:28 p.m. UTC | #2
20.01.2020 14:59, Max Reitz wrote:
> On 19.12.19 11:03, Vladimir Sementsov-Ogievskiy wrote:
>> We are going to introduce bdrv_dirty_bitmap_next_dirty so that same
>> variable may be used to store its return value and to be its parameter,
>> so it would int64_t.
>>
>> Similarly, we are going to refactor hbitmap_next_dirty_area to use
>> hbitmap_next_dirty together with hbitmap_next_zero, therefore we want
>> hbitmap_next_zero parameter type to be int64_t too.
>>
>> So, for convenience update all parameters of *_next_zero and
>> *_next_dirty_area to be int64_t.
>>
>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>> ---
>>   include/block/dirty-bitmap.h |  6 +++---
>>   include/qemu/hbitmap.h       |  7 +++----
>>   block/dirty-bitmap.c         |  6 +++---
>>   nbd/server.c                 |  2 +-
>>   tests/test-hbitmap.c         | 32 ++++++++++++++++----------------
>>   util/hbitmap.c               | 13 ++++++++-----
>>   6 files changed, 34 insertions(+), 32 deletions(-)
> 
> [...]
> 
>> diff --git a/util/hbitmap.c b/util/hbitmap.c
>> index b6d4b99a06..df22f06be6 100644
>> --- a/util/hbitmap.c
>> +++ b/util/hbitmap.c
>> @@ -193,7 +193,7 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
>>       }
>>   }
>>   
>> -int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
>> +int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
>>   {
>>       size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
>>       unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
>> @@ -202,6 +202,8 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
>>       uint64_t end_bit, sz;
>>       int64_t res;
>>   
>> +    assert(start >= 0 && count >= 0);
>> +
>>       if (start >= hb->orig_size || count == 0) {
>>           return -1;
>>       }
> As far as I can see, NBD just passes NBDRequest.from (which is a
> uint64_t) to this function (on NBD_CMD_BLOCK_STATUS).  Would this allow
> a malicious client to send a value > INT64_MAX, thus provoking an
> overflow and killing the server with this new assertion?


in nbd_co_receive_request() we have


     if (request->from > client->exp->size ||
         request->len > client->exp->size - request->from) {


So, we check that from is <= exp->size, and exp->size can't be greater than INT64_MAX,
as it is derived from bdrv_getlength, which returns int64_t.



Interesting: should we be more strict in the server?

--- a/nbd/server.c
+++ b/nbd/server.c
@@ -2178,7 +2178,7 @@ static int nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
          error_setg(errp, "Export is read-only");
          return -EROFS;
      }
-    if (request->from > client->exp->size ||
+    if (request->from >= client->exp->size ||
          request->len > client->exp->size - request->from) {
          error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu32
                     ", Size: %" PRIu64, request->from, request->len,

Or is it intentional? Looking through NBD spec I found only

    client MUST NOT use a length ... or which, when added to offset, would exceed the export size.

So, formally the pair offset=<export size>, len=0 is valid...

> 
> On second thought, we have this problem already everywhere in
> nbd_handle_request().  I don’t see it or its caller ever checking
> whether the received values are in bounds, it just passes them to all
> kind of block layer functions that sometimes even just accept plain
> ints.  Well, I suppose all other functions just error out, so it
> probably isn’t an actual problem in practice so far...
> 
> Max
>
Max Reitz Jan. 20, 2020, 12:53 p.m. UTC | #3
On 20.01.20 13:28, Vladimir Sementsov-Ogievskiy wrote:
> 20.01.2020 14:59, Max Reitz wrote:
>> On 19.12.19 11:03, Vladimir Sementsov-Ogievskiy wrote:
>>> We are going to introduce bdrv_dirty_bitmap_next_dirty so that same
>>> variable may be used to store its return value and to be its parameter,
>>> so it would int64_t.
>>>
>>> Similarly, we are going to refactor hbitmap_next_dirty_area to use
>>> hbitmap_next_dirty together with hbitmap_next_zero, therefore we want
>>> hbitmap_next_zero parameter type to be int64_t too.
>>>
>>> So, for convenience update all parameters of *_next_zero and
>>> *_next_dirty_area to be int64_t.
>>>
>>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>>> ---
>>>   include/block/dirty-bitmap.h |  6 +++---
>>>   include/qemu/hbitmap.h       |  7 +++----
>>>   block/dirty-bitmap.c         |  6 +++---
>>>   nbd/server.c                 |  2 +-
>>>   tests/test-hbitmap.c         | 32 ++++++++++++++++----------------
>>>   util/hbitmap.c               | 13 ++++++++-----
>>>   6 files changed, 34 insertions(+), 32 deletions(-)
>>
>> [...]
>>
>>> diff --git a/util/hbitmap.c b/util/hbitmap.c
>>> index b6d4b99a06..df22f06be6 100644
>>> --- a/util/hbitmap.c
>>> +++ b/util/hbitmap.c
>>> @@ -193,7 +193,7 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
>>>       }
>>>   }
>>>   
>>> -int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
>>> +int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
>>>   {
>>>       size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
>>>       unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
>>> @@ -202,6 +202,8 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
>>>       uint64_t end_bit, sz;
>>>       int64_t res;
>>>   
>>> +    assert(start >= 0 && count >= 0);
>>> +
>>>       if (start >= hb->orig_size || count == 0) {
>>>           return -1;
>>>       }
>> As far as I can see, NBD just passes NBDRequest.from (which is a
>> uint64_t) to this function (on NBD_CMD_BLOCK_STATUS).  Would this allow
>> a malicious client to send a value > INT64_MAX, thus provoking an
>> overflow and killing the server with this new assertion?
> 
> 
> in nbd_co_receive_request() we have
> 
> 
>      if (request->from > client->exp->size ||
>          request->len > client->exp->size - request->from) {
> 
> 
> So, we check that from is <= exp->size. and exp->size cant be greater than INT64_MAX,
> as it derived from bdrv_getlength, which returns int64_t.

Ah, OK, so I just overlooked that.

> Interesting, should we be more strict in server:?
> 
> --- a/nbd/server.c
> +++ b/nbd/server.c
> @@ -2178,7 +2178,7 @@ static int nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
>           error_setg(errp, "Export is read-only");
>           return -EROFS;
>       }
> -    if (request->from > client->exp->size ||
> +    if (request->from >= client->exp->size ||
>           request->len > client->exp->size - request->from) {
>           error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu32
>                      ", Size: %" PRIu64, request->from, request->len,
> 
> Or is it intentional? Looking through NBD spec I found only
> 
>     client MUST NOT use a length ... or which, when added to offset, would exceed the export size.
> 
> So, formally pair offset=<export size>, len=0 is valid...

Sounds valid, yes.

In any case:

Reviewed-by: Max Reitz <mreitz@redhat.com>
Eric Blake Jan. 20, 2020, 7:56 p.m. UTC | #4
On 1/20/20 6:28 AM, Vladimir Sementsov-Ogievskiy wrote:

>> As far as I can see, NBD just passes NBDRequest.from (which is a
>> uint64_t) to this function (on NBD_CMD_BLOCK_STATUS).  Would this allow
>> a malicious client to send a value > INT64_MAX, thus provoking an
>> overflow and killing the server with this new assertion?
> 
> 
> in nbd_co_receive_request() we have
> 
> 
>       if (request->from > client->exp->size ||
>           request->len > client->exp->size - request->from) {
> 
> 
> So, we check that from is <= exp->size. and exp->size cant be greater than INT64_MAX,
> as it derived from bdrv_getlength, which returns int64_t.
> 
> 
> 
> Interesting, should we be more strict in server:?

I think we're okay based on the existing bounds checks.

> 
> --- a/nbd/server.c
> +++ b/nbd/server.c
> @@ -2178,7 +2178,7 @@ static int nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
>            error_setg(errp, "Export is read-only");
>            return -EROFS;
>        }
> -    if (request->from > client->exp->size ||
> +    if (request->from >= client->exp->size ||
>            request->len > client->exp->size - request->from) {
>            error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu32
>                       ", Size: %" PRIu64, request->from, request->len,
> 
> Or is it intentional? Looking through NBD spec I found only
> 
>      client MUST NOT use a length ... or which, when added to offset, would exceed the export size.
> 
> So, formally pair offset=<export size>, len=0 is valid...

Except that the spec also says that len=0 is generally unspecified 
behavior (whether it is a no-op, or means special handling, or whatever 
else, is up to the server, but clients shouldn't be sending it - thus a 
server that rejects it instead of handling it as a no-op is no worse for 
the wear).

> 
>>
>> On second thought, we have this problem already everywhere in
>> nbd_handle_request().  I don’t see it or its caller ever checking
>> whether the received values are in bounds, it just passes them to all
>> kind of block layer functions that sometimes even just accept plain
>> ints.  Well, I suppose all other functions just error out, so it
>> probably isn’t an actual problem in practice so far...
>>
>> Max
>>
> 
>
diff mbox series

Patch

diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
index e2b20ecab9..27c72cc56a 100644
--- a/include/block/dirty-bitmap.h
+++ b/include/block/dirty-bitmap.h
@@ -105,10 +105,10 @@  for (bitmap = bdrv_dirty_bitmap_first(bs); bitmap; \
      bitmap = bdrv_dirty_bitmap_next(bitmap))
 
 char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
-int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset,
-                                    uint64_t bytes);
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset,
+                                    int64_t bytes);
 bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
-                                       uint64_t *offset, uint64_t *bytes);
+                                       int64_t *offset, int64_t *bytes);
 BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
                                                   Error **errp);
 
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index df922d8517..b6e85f3d5d 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -304,10 +304,10 @@  void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
  * @hb: The HBitmap to operate on
  * @start: The bit to start from.
  * @count: Number of bits to proceed. If @start+@count > bitmap size, the whole
- * bitmap is looked through. You can use UINT64_MAX as @count to search up to
+ * bitmap is looked through. You can use INT64_MAX as @count to search up to
  * the bitmap end.
  */
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count);
+int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count);
 
 /* hbitmap_next_dirty_area:
  * @hb: The HBitmap to operate on
@@ -322,8 +322,7 @@  int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count);
  * @offset and @bytes appropriately. Otherwise returns false and leaves @offset
  * and @bytes unchanged.
  */
-bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
-                             uint64_t *count);
+bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t *start, int64_t *count);
 
 /**
  * hbitmap_iter_next:
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index 7039e82520..af9f5411a6 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -860,14 +860,14 @@  char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp)
     return hbitmap_sha256(bitmap->bitmap, errp);
 }
 
-int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset,
-                                    uint64_t bytes)
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset,
+                                    int64_t bytes)
 {
     return hbitmap_next_zero(bitmap->bitmap, offset, bytes);
 }
 
 bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
-                                       uint64_t *offset, uint64_t *bytes)
+                                       int64_t *offset, int64_t *bytes)
 {
     return hbitmap_next_dirty_area(bitmap->bitmap, offset, bytes);
 }
diff --git a/nbd/server.c b/nbd/server.c
index 24ebc1a805..a4b348eb32 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -2055,7 +2055,7 @@  static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset,
         bool next_dirty = !dirty;
 
         if (dirty) {
-            end = bdrv_dirty_bitmap_next_zero(bitmap, begin, UINT64_MAX);
+            end = bdrv_dirty_bitmap_next_zero(bitmap, begin, INT64_MAX);
         } else {
             bdrv_set_dirty_iter(it, begin);
             end = bdrv_dirty_iter_next(it);
diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c
index aeaa0b3f22..0e1e5c64dd 100644
--- a/tests/test-hbitmap.c
+++ b/tests/test-hbitmap.c
@@ -837,7 +837,7 @@  static void test_hbitmap_next_zero_check_range(TestHBitmapData *data,
 
 static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start)
 {
-    test_hbitmap_next_zero_check_range(data, start, UINT64_MAX);
+    test_hbitmap_next_zero_check_range(data, start, INT64_MAX);
 }
 
 static void test_hbitmap_next_zero_do(TestHBitmapData *data, int granularity)
@@ -905,11 +905,11 @@  static void test_hbitmap_next_zero_after_truncate(TestHBitmapData *data,
 }
 
 static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data,
-                                               uint64_t offset,
-                                               uint64_t count)
+                                               int64_t offset,
+                                               int64_t count)
 {
-    uint64_t off1, off2;
-    uint64_t len1 = 0, len2;
+    int64_t off1, off2;
+    int64_t len1 = 0, len2;
     bool ret1, ret2;
     int64_t end;
 
@@ -945,24 +945,24 @@  static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data,
                                             int granularity)
 {
     hbitmap_test_init(data, L3, granularity);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
     test_hbitmap_next_dirty_area_check(data, 0, 1);
     test_hbitmap_next_dirty_area_check(data, L3 - 1, 1);
 
     hbitmap_set(data->hb, L2, 1);
     test_hbitmap_next_dirty_area_check(data, 0, 1);
     test_hbitmap_next_dirty_area_check(data, 0, L2);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
-    test_hbitmap_next_dirty_area_check(data, L2 - 1, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, L2 - 1, INT64_MAX);
     test_hbitmap_next_dirty_area_check(data, L2 - 1, 1);
     test_hbitmap_next_dirty_area_check(data, L2 - 1, 2);
     test_hbitmap_next_dirty_area_check(data, L2 - 1, 3);
-    test_hbitmap_next_dirty_area_check(data, L2, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, L2, INT64_MAX);
     test_hbitmap_next_dirty_area_check(data, L2, 1);
     test_hbitmap_next_dirty_area_check(data, L2 + 1, 1);
 
     hbitmap_set(data->hb, L2 + 5, L1);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
     test_hbitmap_next_dirty_area_check(data, L2 - 2, 8);
     test_hbitmap_next_dirty_area_check(data, L2 + 1, 5);
     test_hbitmap_next_dirty_area_check(data, L2 + 1, 3);
@@ -974,16 +974,16 @@  static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data,
     test_hbitmap_next_dirty_area_check(data, L2 + 1, 0);
 
     hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
-    test_hbitmap_next_dirty_area_check(data, L2, UINT64_MAX);
-    test_hbitmap_next_dirty_area_check(data, L2 + 1, UINT64_MAX);
-    test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1 - 1, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, L2, INT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, L2 + 1, INT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1 - 1, INT64_MAX);
     test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1, 5);
     test_hbitmap_next_dirty_area_check(data, L2 * 2 - L1, L1 + 1);
     test_hbitmap_next_dirty_area_check(data, L2 * 2, L2);
 
     hbitmap_set(data->hb, 0, L3);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
 }
 
 static void test_hbitmap_next_dirty_area_0(TestHBitmapData *data,
@@ -1010,7 +1010,7 @@  static void test_hbitmap_next_dirty_area_after_truncate(TestHBitmapData *data,
     hbitmap_test_init(data, L1, 0);
     hbitmap_test_truncate_impl(data, L1 * 2);
     hbitmap_set(data->hb, L1 + 1, 1);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
 }
 
 int main(int argc, char **argv)
diff --git a/util/hbitmap.c b/util/hbitmap.c
index b6d4b99a06..df22f06be6 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -193,7 +193,7 @@  void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
     }
 }
 
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
+int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
 {
     size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
     unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
@@ -202,6 +202,8 @@  int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
     uint64_t end_bit, sz;
     int64_t res;
 
+    assert(start >= 0 && count >= 0);
+
     if (start >= hb->orig_size || count == 0) {
         return -1;
     }
@@ -244,14 +246,15 @@  int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
     return res;
 }
 
-bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
-                             uint64_t *count)
+bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t *start, int64_t *count)
 {
     HBitmapIter hbi;
     int64_t firt_dirty_off, area_end;
     uint32_t granularity = 1UL << hb->granularity;
     uint64_t end;
 
+    assert(*start >= 0 && *count >= 0);
+
     if (*start >= hb->orig_size || *count == 0) {
         return false;
     }
@@ -834,8 +837,8 @@  bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
  */
 static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
 {
-    uint64_t offset = 0;
-    uint64_t count = src->orig_size;
+    int64_t offset = 0;
+    int64_t count = src->orig_size;
 
     while (hbitmap_next_dirty_area(src, &offset, &count)) {
         hbitmap_set(dst, offset, count);