
[06/13] HBitmap: Introduce "meta" bitmap to track bit changes

Message ID 1451903234-32529-7-git-send-email-famz@redhat.com
State New

Commit Message

Fam Zheng Jan. 4, 2016, 10:27 a.m. UTC
Upon each bit toggle, the corresponding bit in the meta bitmap will be
set.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 include/qemu/hbitmap.h |  8 +++++++
 util/hbitmap.c         | 61 +++++++++++++++++++++++++++++++++++++-------------
 2 files changed, 54 insertions(+), 15 deletions(-)
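
To illustrate the intended use of the new API, a hypothetical caller sketch
(not part of the patch; names and sizes are made up):

    HBitmap *bitmap = hbitmap_alloc(1 << 20, 0);       /* 1M bits, granularity 0  */
    HBitmap *meta   = hbitmap_create_meta(bitmap, 16);  /* 1 meta bit per 16 bits  */

    hbitmap_set(bitmap, 100, 10);   /* bits 100..109 toggle 0 -> 1 ...             */
    hbitmap_reset(bitmap, 104, 2);  /* ... and 104..105 toggle back to 0           */

    /* Both calls changed bits, so the meta bits covering those ranges are
     * now set and can later be scanned to find out which chunks changed. */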

Comments

John Snow Jan. 6, 2016, 12:09 a.m. UTC | #1
On 01/04/2016 05:27 AM, Fam Zheng wrote:
> Upon each bit toggle, the corresponding bit in the meta bitmap will be
> set.
> 
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  include/qemu/hbitmap.h |  8 +++++++
>  util/hbitmap.c         | 61 +++++++++++++++++++++++++++++++++++++-------------
>  2 files changed, 54 insertions(+), 15 deletions(-)
> 
> diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
> index bb94a00..ed672e7 100644
> --- a/include/qemu/hbitmap.h
> +++ b/include/qemu/hbitmap.h
> @@ -181,6 +181,14 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
>   */
>  unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
>  
> +/* hbitmap_create_meta
> + * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
> + *
> + * @hb: The HBitmap to operate on.
> + * @chunk_size: How many bits in @hb does one bit in the meta track.
> + */
> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size);
> +
>  /**
>   * hbitmap_iter_next:
>   * @hbi: HBitmapIter to operate on.
> diff --git a/util/hbitmap.c b/util/hbitmap.c
> index 50b888f..55d3182 100644
> --- a/util/hbitmap.c
> +++ b/util/hbitmap.c
> @@ -81,6 +81,9 @@ struct HBitmap {
>       */
>      int granularity;
>  
> +    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
> +    HBitmap *meta;
> +
>      /* A number of progressively less coarse bitmaps (i.e. level 0 is the
>       * coarsest).  Each bit in level N represents a word in level N+1 that
>       * has a set bit, except the last level where each bit represents the
> @@ -212,25 +215,27 @@ static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
>  }
>  
>  /* Setting starts at the last layer and propagates up if an element
> - * changes from zero to non-zero.
> + * changes.
>   */

Isn't this comment wrong anyway? hb_set_elem does not propagate upward
by itself.

>  static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
>  {
>      unsigned long mask;
> -    bool changed;
> +    unsigned long old;
>  
>      assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
>      assert(start <= last);
>  
>      mask = 2UL << (last & (BITS_PER_LONG - 1));
>      mask -= 1UL << (start & (BITS_PER_LONG - 1));
> -    changed = (*elem == 0);
> +    old = *elem;
>      *elem |= mask;
> -    return changed;
> +    return old != *elem;
>  }
>  
> -/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
> -static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
> + * Returns true if at least one bit is changed. */
> +static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
> +                           uint64_t last)
>  {
>      size_t pos = start >> BITS_PER_LEVEL;
>      size_t lastpos = last >> BITS_PER_LEVEL;
> @@ -259,22 +264,27 @@ static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last
>      if (level > 0 && changed) {
>          hb_set_between(hb, level - 1, pos, lastpos);
>      }
> +    return changed;
>  }
>  
>  void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
>  {
>      /* Compute range in the last layer.  */
> +    uint64_t first, n;
>      uint64_t last = start + count - 1;
>  
>      trace_hbitmap_set(hb, start, count,
>                        start >> hb->granularity, last >> hb->granularity);
>  
> -    start >>= hb->granularity;
> +    first = start >> hb->granularity;
>      last >>= hb->granularity;
> -    count = last - start + 1;
> +    n = last - first + 1;
>  
> -    hb->count += count - hb_count_between(hb, start, last);
> -    hb_set_between(hb, HBITMAP_LEVELS - 1, start, last);
> +    hb->count += n - hb_count_between(hb, first, last);
> +    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
> +        hb->meta) {
> +        hbitmap_set(hb->meta, start, count);
> +    }
>  }
>  
>  /* Resetting works the other way round: propagate up if the new
> @@ -295,8 +305,10 @@ static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t l
>      return blanked;
>  }
>  
> -/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
> -static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
> + * Returns true if at least one bit is changed. */
> +static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
> +                             uint64_t last)
>  {
>      size_t pos = start >> BITS_PER_LEVEL;
>      size_t lastpos = last >> BITS_PER_LEVEL;
> @@ -339,21 +351,28 @@ static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t la
>      if (level > 0 && changed) {
>          hb_reset_between(hb, level - 1, pos, lastpos);
>      }
> +
> +    return changed;
> +
>  }
>  
>  void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
>  {
>      /* Compute range in the last layer.  */
> +    uint64_t first;
>      uint64_t last = start + count - 1;
>  
>      trace_hbitmap_reset(hb, start, count,
>                          start >> hb->granularity, last >> hb->granularity);
>  
> -    start >>= hb->granularity;
> +    first = start >> hb->granularity;
>      last >>= hb->granularity;
>  
> -    hb->count -= hb_count_between(hb, start, last);
> -    hb_reset_between(hb, HBITMAP_LEVELS - 1, start, last);
> +    hb->count -= hb_count_between(hb, first, last);
> +    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
> +        hb->meta) {
> +        hbitmap_set(hb->meta, start, count);
> +    }
>  }
>  
>  void hbitmap_reset_all(HBitmap *hb)
> @@ -384,6 +403,9 @@ void hbitmap_free(HBitmap *hb)
>      for (i = HBITMAP_LEVELS; i-- > 0; ) {
>          g_free(hb->levels[i]);
>      }
> +    if (hb->meta) {
> +        hbitmap_free(hb->meta);
> +    }
>      g_free(hb);
>  }
>  
> @@ -493,3 +515,12 @@ bool hbitmap_merge(HBitmap *a, const HBitmap *b)
>  
>      return true;
>  }
> +
> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
> +{
> +    assert(!(chunk_size & (chunk_size - 1)));
> +    assert(!hb->meta);
> +    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
> +                             hb->granularity + ctz32(chunk_size));
> +    return hb->meta;
> +}
> 

I am a little skeptical of returning handles to internal state, but it's
the easiest way to re-use all of the existing HBitmap infrastructure to
iterate over the meta bitmap, so I guess this is fine.
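
For instance, something like this should let a consumer walk the dirty
chunks with the existing iterator API (untested sketch; 'meta' is the handle
returned by hbitmap_create_meta(), process_chunk() is a stand-in for the
caller's work, and I'm assuming the usual hbitmap_iter_next() that returns
the next set position or -1):

    HBitmapIter hbi;
    int64_t pos;

    hbitmap_iter_init(&hbi, meta, 0);
    while ((pos = hbitmap_iter_next(&hbi)) >= 0) {
        /* 'pos' is the start of a chunk whose tracked bits changed */
        process_chunk(pos);
    }
    hbitmap_reset_all(meta);    /* start tracking changes afresh */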

Should we also add an hbitmap_destroy_meta for when we're done with it?
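
Something along these lines would probably do (hypothetical sketch, name
made up):

    void hbitmap_free_meta(HBitmap *hb)
    {
        assert(hb->meta);
        hbitmap_free(hb->meta);
        hb->meta = NULL;
    }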


Regardless;

Reviewed-by: John Snow <jsnow@redhat.com>

Vladimir Sementsov-Ogievskiy Jan. 11, 2016, 3:40 p.m. UTC | #2
On 04.01.2016 13:27, Fam Zheng wrote:
> Upon each bit toggle, the corresponding bit in the meta bitmap will be
> set.
>
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>   include/qemu/hbitmap.h |  8 +++++++
>   util/hbitmap.c         | 61 +++++++++++++++++++++++++++++++++++++-------------
>   2 files changed, 54 insertions(+), 15 deletions(-)
>
> diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
> index bb94a00..ed672e7 100644
> --- a/include/qemu/hbitmap.h
> +++ b/include/qemu/hbitmap.h
> @@ -181,6 +181,14 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
>    */
>   unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
>   
> +/* hbitmap_create_meta
> + * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
> + *
> + * @hb: The HBitmap to operate on.
> + * @chunk_size: How many bits in @hb does one bit in the meta track.
> + */
> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size);
> +
>   /**
>    * hbitmap_iter_next:
>    * @hbi: HBitmapIter to operate on.
> diff --git a/util/hbitmap.c b/util/hbitmap.c
> index 50b888f..55d3182 100644
> --- a/util/hbitmap.c
> +++ b/util/hbitmap.c
> @@ -81,6 +81,9 @@ struct HBitmap {
>        */
>       int granularity;
>   
> +    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
> +    HBitmap *meta;
> +
>       /* A number of progressively less coarse bitmaps (i.e. level 0 is the
>        * coarsest).  Each bit in level N represents a word in level N+1 that
>        * has a set bit, except the last level where each bit represents the
> @@ -212,25 +215,27 @@ static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
>   }
>   
>   /* Setting starts at the last layer and propagates up if an element
> - * changes from zero to non-zero.
> + * changes.
>    */
>   static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
>   {
>       unsigned long mask;
> -    bool changed;
> +    unsigned long old;
>   
>       assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
>       assert(start <= last);
>   
>       mask = 2UL << (last & (BITS_PER_LONG - 1));
>       mask -= 1UL << (start & (BITS_PER_LONG - 1));
> -    changed = (*elem == 0);
> +    old = *elem;
>       *elem |= mask;
> -    return changed;
> +    return old != *elem;
>   }
>   
> -/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
> -static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
> + * Returns true if at least one bit is changed. */
> +static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
> +                           uint64_t last)
>   {
>       size_t pos = start >> BITS_PER_LEVEL;
>       size_t lastpos = last >> BITS_PER_LEVEL;
> @@ -259,22 +264,27 @@ static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last
>       if (level > 0 && changed) {
>           hb_set_between(hb, level - 1, pos, lastpos);
>       }
> +    return changed;
>   }
>   
>   void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
>   {
>       /* Compute range in the last layer.  */
> +    uint64_t first, n;
>       uint64_t last = start + count - 1;
>   
>       trace_hbitmap_set(hb, start, count,
>                         start >> hb->granularity, last >> hb->granularity);
>   
> -    start >>= hb->granularity;
> +    first = start >> hb->granularity;
>       last >>= hb->granularity;
> -    count = last - start + 1;
> +    n = last - first + 1;
>   
> -    hb->count += count - hb_count_between(hb, start, last);
> -    hb_set_between(hb, HBITMAP_LEVELS - 1, start, last);
> +    hb->count += n - hb_count_between(hb, first, last);
> +    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
> +        hb->meta) {

I don't know what the optimizer thinks about it, but definitely

+    if (hb->meta &&
+        hb_set_between(hb, HBITMAP_LEVELS - 1, first, last))

should work faster in the common case, when hb->meta == NULL.


> +        hbitmap_set(hb->meta, start, count);
> +    }
>   }
>   
>   /* Resetting works the other way round: propagate up if the new
> @@ -295,8 +305,10 @@ static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t l
>       return blanked;
>   }
>   
> -/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
> -static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
> + * Returns true if at least one bit is changed. */
> +static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
> +                             uint64_t last)
>   {
>       size_t pos = start >> BITS_PER_LEVEL;
>       size_t lastpos = last >> BITS_PER_LEVEL;
> @@ -339,21 +351,28 @@ static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t la
>       if (level > 0 && changed) {
>           hb_reset_between(hb, level - 1, pos, lastpos);
>       }
> +
> +    return changed;
> +
>   }
>   
>   void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
>   {
>       /* Compute range in the last layer.  */
> +    uint64_t first;
>       uint64_t last = start + count - 1;
>   
>       trace_hbitmap_reset(hb, start, count,
>                           start >> hb->granularity, last >> hb->granularity);
>   
> -    start >>= hb->granularity;
> +    first = start >> hb->granularity;
>       last >>= hb->granularity;
>   
> -    hb->count -= hb_count_between(hb, start, last);
> -    hb_reset_between(hb, HBITMAP_LEVELS - 1, start, last);
> +    hb->count -= hb_count_between(hb, first, last);
> +    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
> +        hb->meta) {

and here

> +        hbitmap_set(hb->meta, start, count);
> +    }
>   }
>   
>   void hbitmap_reset_all(HBitmap *hb)
> @@ -384,6 +403,9 @@ void hbitmap_free(HBitmap *hb)
>       for (i = HBITMAP_LEVELS; i-- > 0; ) {
>           g_free(hb->levels[i]);
>       }
> +    if (hb->meta) {
> +        hbitmap_free(hb->meta);
> +    }

hmm, not obvious to me... why not "the one who creates must then destroy"?

>       g_free(hb);
>   }
>   
> @@ -493,3 +515,12 @@ bool hbitmap_merge(HBitmap *a, const HBitmap *b)
>   
>       return true;
>   }
> +
> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
> +{
> +    assert(!(chunk_size & (chunk_size - 1)));
> +    assert(!hb->meta);
> +    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
> +                             hb->granularity + ctz32(chunk_size));
> +    return hb->meta;
> +}

John Snow Jan. 11, 2016, 6:56 p.m. UTC | #3
On 01/11/2016 10:40 AM, Vladimir Sementsov-Ogievskiy wrote:
> On 04.01.2016 13:27, Fam Zheng wrote:
>> Upon each bit toggle, the corresponding bit in the meta bitmap will be
>> set.
>>
>> Signed-off-by: Fam Zheng <famz@redhat.com>
>> ---
>>   include/qemu/hbitmap.h |  8 +++++++
>>   util/hbitmap.c         | 61
>> +++++++++++++++++++++++++++++++++++++-------------
>>   2 files changed, 54 insertions(+), 15 deletions(-)
>>
>> diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
>> index bb94a00..ed672e7 100644
>> --- a/include/qemu/hbitmap.h
>> +++ b/include/qemu/hbitmap.h
>> @@ -181,6 +181,14 @@ void hbitmap_iter_init(HBitmapIter *hbi, const
>> HBitmap *hb, uint64_t first);
>>    */
>>   unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
>>   +/* hbitmap_create_meta
>> + * Create a "meta" hbitmap to track dirtiness of the bits in this
>> HBitmap.
>> + *
>> + * @hb: The HBitmap to operate on.
>> + * @chunk_size: How many bits in @hb does one bit in the meta track.
>> + */
>> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size);
>> +
>>   /**
>>    * hbitmap_iter_next:
>>    * @hbi: HBitmapIter to operate on.
>> diff --git a/util/hbitmap.c b/util/hbitmap.c
>> index 50b888f..55d3182 100644
>> --- a/util/hbitmap.c
>> +++ b/util/hbitmap.c
>> @@ -81,6 +81,9 @@ struct HBitmap {
>>        */
>>       int granularity;
>>   +    /* A meta dirty bitmap to track the dirtiness of bits in this
>> HBitmap. */
>> +    HBitmap *meta;
>> +
>>       /* A number of progressively less coarse bitmaps (i.e. level 0
>> is the
>>        * coarsest).  Each bit in level N represents a word in level
>> N+1 that
>>        * has a set bit, except the last level where each bit
>> represents the
>> @@ -212,25 +215,27 @@ static uint64_t hb_count_between(HBitmap *hb,
>> uint64_t start, uint64_t last)
>>   }
>>     /* Setting starts at the last layer and propagates up if an element
>> - * changes from zero to non-zero.
>> + * changes.
>>    */
>>   static inline bool hb_set_elem(unsigned long *elem, uint64_t start,
>> uint64_t last)
>>   {
>>       unsigned long mask;
>> -    bool changed;
>> +    unsigned long old;
>>         assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
>>       assert(start <= last);
>>         mask = 2UL << (last & (BITS_PER_LONG - 1));
>>       mask -= 1UL << (start & (BITS_PER_LONG - 1));
>> -    changed = (*elem == 0);
>> +    old = *elem;
>>       *elem |= mask;
>> -    return changed;
>> +    return old != *elem;
>>   }
>>   -/* The recursive workhorse (the depth is limited to
>> HBITMAP_LEVELS)... */
>> -static void hb_set_between(HBitmap *hb, int level, uint64_t start,
>> uint64_t last)
>> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
>> + * Returns true if at least one bit is changed. */
>> +static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
>> +                           uint64_t last)
>>   {
>>       size_t pos = start >> BITS_PER_LEVEL;
>>       size_t lastpos = last >> BITS_PER_LEVEL;
>> @@ -259,22 +264,27 @@ static void hb_set_between(HBitmap *hb, int
>> level, uint64_t start, uint64_t last
>>       if (level > 0 && changed) {
>>           hb_set_between(hb, level - 1, pos, lastpos);
>>       }
>> +    return changed;
>>   }
>>     void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
>>   {
>>       /* Compute range in the last layer.  */
>> +    uint64_t first, n;
>>       uint64_t last = start + count - 1;
>>         trace_hbitmap_set(hb, start, count,
>>                         start >> hb->granularity, last >>
>> hb->granularity);
>>   -    start >>= hb->granularity;
>> +    first = start >> hb->granularity;
>>       last >>= hb->granularity;
>> -    count = last - start + 1;
>> +    n = last - first + 1;
>>   -    hb->count += count - hb_count_between(hb, start, last);
>> -    hb_set_between(hb, HBITMAP_LEVELS - 1, start, last);
>> +    hb->count += n - hb_count_between(hb, first, last);
>> +    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
>> +        hb->meta) {
> 
> I don't know what the optimizer thinks about it, but definitely
> 
> +    if (hb->meta &&
> +        hb_set_between(hb, HBITMAP_LEVELS - 1, first, last))
> 
> should work faster in the common case, when hb->meta == NULL.
> 
> 

The hb_set_between() call comes first to ensure it always happens.

>> +        hbitmap_set(hb->meta, start, count);
>> +    }
>>   }
>>     /* Resetting works the other way round: propagate up if the new
>> @@ -295,8 +305,10 @@ static inline bool hb_reset_elem(unsigned long
>> *elem, uint64_t start, uint64_t l
>>       return blanked;
>>   }
>>   -/* The recursive workhorse (the depth is limited to
>> HBITMAP_LEVELS)... */
>> -static void hb_reset_between(HBitmap *hb, int level, uint64_t start,
>> uint64_t last)
>> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
>> + * Returns true if at least one bit is changed. */
>> +static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
>> +                             uint64_t last)
>>   {
>>       size_t pos = start >> BITS_PER_LEVEL;
>>       size_t lastpos = last >> BITS_PER_LEVEL;
>> @@ -339,21 +351,28 @@ static void hb_reset_between(HBitmap *hb, int
>> level, uint64_t start, uint64_t la
>>       if (level > 0 && changed) {
>>           hb_reset_between(hb, level - 1, pos, lastpos);
>>       }
>> +
>> +    return changed;
>> +
>>   }
>>     void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
>>   {
>>       /* Compute range in the last layer.  */
>> +    uint64_t first;
>>       uint64_t last = start + count - 1;
>>         trace_hbitmap_reset(hb, start, count,
>>                           start >> hb->granularity, last >>
>> hb->granularity);
>>   -    start >>= hb->granularity;
>> +    first = start >> hb->granularity;
>>       last >>= hb->granularity;
>>   -    hb->count -= hb_count_between(hb, start, last);
>> -    hb_reset_between(hb, HBITMAP_LEVELS - 1, start, last);
>> +    hb->count -= hb_count_between(hb, first, last);
>> +    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
>> +        hb->meta) {
> 
> and here
> 
>> +        hbitmap_set(hb->meta, start, count);
>> +    }
>>   }
>>     void hbitmap_reset_all(HBitmap *hb)
>> @@ -384,6 +403,9 @@ void hbitmap_free(HBitmap *hb)
>>       for (i = HBITMAP_LEVELS; i-- > 0; ) {
>>           g_free(hb->levels[i]);
>>       }
>> +    if (hb->meta) {
>> +        hbitmap_free(hb->meta);
>> +    }
> 
> hmm, not obvious to me... why not "the one who creates must then destroy"?
> 
>>       g_free(hb);
>>   }
>>   @@ -493,3 +515,12 @@ bool hbitmap_merge(HBitmap *a, const HBitmap *b)
>>         return true;
>>   }
>> +
>> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
>> +{
>> +    assert(!(chunk_size & (chunk_size - 1)));
>> +    assert(!hb->meta);
>> +    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
>> +                             hb->granularity + ctz32(chunk_size));
>> +    return hb->meta;
>> +}
> 
>

Vladimir Sementsov-Ogievskiy Jan. 12, 2016, 8:25 a.m. UTC | #4
On 11.01.2016 21:56, John Snow wrote:
>
> On 01/11/2016 10:40 AM, Vladimir Sementsov-Ogievskiy wrote:
>> On 04.01.2016 13:27, Fam Zheng wrote:
>>> Upon each bit toggle, the corresponding bit in the meta bitmap will be
>>> set.
>>>
>>> Signed-off-by: Fam Zheng <famz@redhat.com>
>>> ---
>>>    include/qemu/hbitmap.h |  8 +++++++
>>>    util/hbitmap.c         | 61
>>> +++++++++++++++++++++++++++++++++++++-------------
>>>    2 files changed, 54 insertions(+), 15 deletions(-)
>>>
>>> diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
>>> index bb94a00..ed672e7 100644
>>> --- a/include/qemu/hbitmap.h
>>> +++ b/include/qemu/hbitmap.h
>>> @@ -181,6 +181,14 @@ void hbitmap_iter_init(HBitmapIter *hbi, const
>>> HBitmap *hb, uint64_t first);
>>>     */
>>>    unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
>>>    +/* hbitmap_create_meta
>>> + * Create a "meta" hbitmap to track dirtiness of the bits in this
>>> HBitmap.
>>> + *
>>> + * @hb: The HBitmap to operate on.
>>> + * @chunk_size: How many bits in @hb does one bit in the meta track.
>>> + */
>>> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size);
>>> +
>>>    /**
>>>     * hbitmap_iter_next:
>>>     * @hbi: HBitmapIter to operate on.
>>> diff --git a/util/hbitmap.c b/util/hbitmap.c
>>> index 50b888f..55d3182 100644
>>> --- a/util/hbitmap.c
>>> +++ b/util/hbitmap.c
>>> @@ -81,6 +81,9 @@ struct HBitmap {
>>>         */
>>>        int granularity;
>>>    +    /* A meta dirty bitmap to track the dirtiness of bits in this
>>> HBitmap. */
>>> +    HBitmap *meta;
>>> +
>>>        /* A number of progressively less coarse bitmaps (i.e. level 0
>>> is the
>>>         * coarsest).  Each bit in level N represents a word in level
>>> N+1 that
>>>         * has a set bit, except the last level where each bit
>>> represents the
>>> @@ -212,25 +215,27 @@ static uint64_t hb_count_between(HBitmap *hb,
>>> uint64_t start, uint64_t last)
>>>    }
>>>      /* Setting starts at the last layer and propagates up if an element
>>> - * changes from zero to non-zero.
>>> + * changes.
>>>     */
>>>    static inline bool hb_set_elem(unsigned long *elem, uint64_t start,
>>> uint64_t last)
>>>    {
>>>        unsigned long mask;
>>> -    bool changed;
>>> +    unsigned long old;
>>>          assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
>>>        assert(start <= last);
>>>          mask = 2UL << (last & (BITS_PER_LONG - 1));
>>>        mask -= 1UL << (start & (BITS_PER_LONG - 1));
>>> -    changed = (*elem == 0);
>>> +    old = *elem;
>>>        *elem |= mask;
>>> -    return changed;
>>> +    return old != *elem;
>>>    }
>>>    -/* The recursive workhorse (the depth is limited to
>>> HBITMAP_LEVELS)... */
>>> -static void hb_set_between(HBitmap *hb, int level, uint64_t start,
>>> uint64_t last)
>>> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
>>> + * Returns true if at least one bit is changed. */
>>> +static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
>>> +                           uint64_t last)
>>>    {
>>>        size_t pos = start >> BITS_PER_LEVEL;
>>>        size_t lastpos = last >> BITS_PER_LEVEL;
>>> @@ -259,22 +264,27 @@ static void hb_set_between(HBitmap *hb, int
>>> level, uint64_t start, uint64_t last
>>>        if (level > 0 && changed) {
>>>            hb_set_between(hb, level - 1, pos, lastpos);
>>>        }
>>> +    return changed;
>>>    }
>>>      void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
>>>    {
>>>        /* Compute range in the last layer.  */
>>> +    uint64_t first, n;
>>>        uint64_t last = start + count - 1;
>>>          trace_hbitmap_set(hb, start, count,
>>>                          start >> hb->granularity, last >>
>>> hb->granularity);
>>>    -    start >>= hb->granularity;
>>> +    first = start >> hb->granularity;
>>>        last >>= hb->granularity;
>>> -    count = last - start + 1;
>>> +    n = last - first + 1;
>>>    -    hb->count += count - hb_count_between(hb, start, last);
>>> -    hb_set_between(hb, HBITMAP_LEVELS - 1, start, last);
>>> +    hb->count += n - hb_count_between(hb, first, last);
>>> +    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
>>> +        hb->meta) {
>> I don't know what the optimizer thinks about it, but definitely
>>
>> +    if (hb->meta &&
>> +        hb_set_between(hb, HBITMAP_LEVELS - 1, first, last))
>>
>> should work faster in the common case, when hb->meta == NULL.
>>
>>
> The hb_set_between() call comes first to ensure it always happens.

oh, right. IMHO it would then be better to add bool changed =
hb_set_between(), and then if (changed && hb->meta), but it's up to you.
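
That is, something like this (untested):

    bool changed;
    ...
    changed = hb_set_between(hb, HBITMAP_LEVELS - 1, first, last);
    if (changed && hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }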

>
>>> +        hbitmap_set(hb->meta, start, count);
>>> +    }
>>>    }
>>>      /* Resetting works the other way round: propagate up if the new
>>> @@ -295,8 +305,10 @@ static inline bool hb_reset_elem(unsigned long
>>> *elem, uint64_t start, uint64_t l
>>>        return blanked;
>>>    }
>>>    -/* The recursive workhorse (the depth is limited to
>>> HBITMAP_LEVELS)... */
>>> -static void hb_reset_between(HBitmap *hb, int level, uint64_t start,
>>> uint64_t last)
>>> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
>>> + * Returns true if at least one bit is changed. */
>>> +static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
>>> +                             uint64_t last)
>>>    {
>>>        size_t pos = start >> BITS_PER_LEVEL;
>>>        size_t lastpos = last >> BITS_PER_LEVEL;
>>> @@ -339,21 +351,28 @@ static void hb_reset_between(HBitmap *hb, int
>>> level, uint64_t start, uint64_t la
>>>        if (level > 0 && changed) {
>>>            hb_reset_between(hb, level - 1, pos, lastpos);
>>>        }
>>> +
>>> +    return changed;
>>> +
>>>    }
>>>      void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
>>>    {
>>>        /* Compute range in the last layer.  */
>>> +    uint64_t first;
>>>        uint64_t last = start + count - 1;
>>>          trace_hbitmap_reset(hb, start, count,
>>>                            start >> hb->granularity, last >>
>>> hb->granularity);
>>>    -    start >>= hb->granularity;
>>> +    first = start >> hb->granularity;
>>>        last >>= hb->granularity;
>>>    -    hb->count -= hb_count_between(hb, start, last);
>>> -    hb_reset_between(hb, HBITMAP_LEVELS - 1, start, last);
>>> +    hb->count -= hb_count_between(hb, first, last);
>>> +    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
>>> +        hb->meta) {
>> and here
>>
>>> +        hbitmap_set(hb->meta, start, count);
>>> +    }
>>>    }
>>>      void hbitmap_reset_all(HBitmap *hb)
>>> @@ -384,6 +403,9 @@ void hbitmap_free(HBitmap *hb)
>>>        for (i = HBITMAP_LEVELS; i-- > 0; ) {
>>>            g_free(hb->levels[i]);
>>>        }
>>> +    if (hb->meta) {
>>> +        hbitmap_free(hb->meta);
>>> +    }
>> hmm, not obvious to me... why not "the one who creates must then destroy"?
>>
>>>        g_free(hb);
>>>    }
>>>    @@ -493,3 +515,12 @@ bool hbitmap_merge(HBitmap *a, const HBitmap *b)
>>>          return true;
>>>    }
>>> +
>>> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
>>> +{
>>> +    assert(!(chunk_size & (chunk_size - 1)));
>>> +    assert(!hb->meta);
>>> +    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
>>> +                             hb->granularity + ctz32(chunk_size));
>>> +    return hb->meta;
>>> +}
>>

Patch

diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index bb94a00..ed672e7 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -181,6 +181,14 @@  void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
  */
 unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
 
+/* hbitmap_create_meta
+ * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
+ *
+ * @hb: The HBitmap to operate on.
+ * @chunk_size: How many bits in @hb does one bit in the meta track.
+ */
+HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size);
+
 /**
  * hbitmap_iter_next:
  * @hbi: HBitmapIter to operate on.
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 50b888f..55d3182 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -81,6 +81,9 @@  struct HBitmap {
      */
     int granularity;
 
+    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
+    HBitmap *meta;
+
     /* A number of progressively less coarse bitmaps (i.e. level 0 is the
      * coarsest).  Each bit in level N represents a word in level N+1 that
      * has a set bit, except the last level where each bit represents the
@@ -212,25 +215,27 @@  static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
 }
 
 /* Setting starts at the last layer and propagates up if an element
- * changes from zero to non-zero.
+ * changes.
  */
 static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
 {
     unsigned long mask;
-    bool changed;
+    unsigned long old;
 
     assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
     assert(start <= last);
 
     mask = 2UL << (last & (BITS_PER_LONG - 1));
     mask -= 1UL << (start & (BITS_PER_LONG - 1));
-    changed = (*elem == 0);
+    old = *elem;
     *elem |= mask;
-    return changed;
+    return old != *elem;
 }
 
-/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
-static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
+/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
+ * Returns true if at least one bit is changed. */
+static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
+                           uint64_t last)
 {
     size_t pos = start >> BITS_PER_LEVEL;
     size_t lastpos = last >> BITS_PER_LEVEL;
@@ -259,22 +264,27 @@  static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last
     if (level > 0 && changed) {
         hb_set_between(hb, level - 1, pos, lastpos);
     }
+    return changed;
 }
 
 void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
 {
     /* Compute range in the last layer.  */
+    uint64_t first, n;
     uint64_t last = start + count - 1;
 
     trace_hbitmap_set(hb, start, count,
                       start >> hb->granularity, last >> hb->granularity);
 
-    start >>= hb->granularity;
+    first = start >> hb->granularity;
     last >>= hb->granularity;
-    count = last - start + 1;
+    n = last - first + 1;
 
-    hb->count += count - hb_count_between(hb, start, last);
-    hb_set_between(hb, HBITMAP_LEVELS - 1, start, last);
+    hb->count += n - hb_count_between(hb, first, last);
+    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
+        hb->meta) {
+        hbitmap_set(hb->meta, start, count);
+    }
 }
 
 /* Resetting works the other way round: propagate up if the new
@@ -295,8 +305,10 @@  static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t l
     return blanked;
 }
 
-/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
-static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
+/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
+ * Returns true if at least one bit is changed. */
+static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
+                             uint64_t last)
 {
     size_t pos = start >> BITS_PER_LEVEL;
     size_t lastpos = last >> BITS_PER_LEVEL;
@@ -339,21 +351,28 @@  static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t la
     if (level > 0 && changed) {
         hb_reset_between(hb, level - 1, pos, lastpos);
     }
+
+    return changed;
+
 }
 
 void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
 {
     /* Compute range in the last layer.  */
+    uint64_t first;
     uint64_t last = start + count - 1;
 
     trace_hbitmap_reset(hb, start, count,
                         start >> hb->granularity, last >> hb->granularity);
 
-    start >>= hb->granularity;
+    first = start >> hb->granularity;
     last >>= hb->granularity;
 
-    hb->count -= hb_count_between(hb, start, last);
-    hb_reset_between(hb, HBITMAP_LEVELS - 1, start, last);
+    hb->count -= hb_count_between(hb, first, last);
+    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
+        hb->meta) {
+        hbitmap_set(hb->meta, start, count);
+    }
 }
 
 void hbitmap_reset_all(HBitmap *hb)
@@ -384,6 +403,9 @@  void hbitmap_free(HBitmap *hb)
     for (i = HBITMAP_LEVELS; i-- > 0; ) {
         g_free(hb->levels[i]);
     }
+    if (hb->meta) {
+        hbitmap_free(hb->meta);
+    }
     g_free(hb);
 }
 
@@ -493,3 +515,12 @@  bool hbitmap_merge(HBitmap *a, const HBitmap *b)
 
     return true;
 }
+
+HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
+{
+    assert(!(chunk_size & (chunk_size - 1)));
+    assert(!hb->meta);
+    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
+                             hb->granularity + ctz32(chunk_size));
+    return hb->meta;
+}