
[RESEND,v2,08/18] ram/COLO: Record the dirty pages that SVM received

Message ID 1492850128-17472-9-git-send-email-zhang.zhanghailiang@huawei.com
State: New

Commit Message

Zhanghailiang April 22, 2017, 8:35 a.m. UTC
We record the addresses of the dirty pages received; this will help
when flushing the pages cached in the SVM.

The trick here is that we record the dirty pages by re-using the
migration dirty bitmap. In a later patch, we will start dirty logging
for the SVM, just as migration does. This way we can record the dirty
pages caused by both the PVM and the SVM, and flush only those dirty
pages from the RAM cache while doing a checkpoint.

Cc: Juan Quintela <quintela@redhat.com>
Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
---
 migration/ram.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
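
For context, the consumer of this bitmap is the checkpoint-time flush.
Below is a rough sketch (not part of this patch) of how the recorded
bits could be consumed; the helper name colo_flush_ram_cache() is an
assumption here, since the real flush function only arrives later in
this series.

/*
 * Sketch only, not part of this patch: flush the pages recorded in the
 * bitmap from the RAM cache into the SVM's RAM at checkpoint time.
 */
static void colo_flush_ram_cache(void)
{
    unsigned long *bitmap = atomic_rcu_read(&ram_state.ram_bitmap)->bmap;
    RAMBlock *block;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* Global page indices covered by this block, matching the
         * (ram_addr + offset) >> TARGET_PAGE_BITS indexing below. */
        unsigned long first = memory_region_get_ram_addr(block->mr)
                              >> TARGET_PAGE_BITS;
        unsigned long last = first
                             + (block->used_length >> TARGET_PAGE_BITS);
        unsigned long page;

        for (page = find_next_bit(bitmap, last, first); page < last;
             page = find_next_bit(bitmap, last, page + 1)) {
            ram_addr_t offset = (ram_addr_t)(page - first)
                                << TARGET_PAGE_BITS;

            /* Copy the cached (received) page into the SVM's RAM and
             * reset the bookkeeping for the next checkpoint round. */
            memcpy(block->host + offset, block->colo_cache + offset,
                   TARGET_PAGE_SIZE);
            clear_bit(page, bitmap);
            ram_state.migration_dirty_pages--;
        }
    }
    rcu_read_unlock();
}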

Comments

Juan Quintela April 24, 2017, 6:29 p.m. UTC | #1
zhanghailiang <zhang.zhanghailiang@huawei.com> wrote:
> We record the addresses of the dirty pages received; this will help
> when flushing the pages cached in the SVM.
>
> The trick here is that we record the dirty pages by re-using the
> migration dirty bitmap. In a later patch, we will start dirty logging
> for the SVM, just as migration does. This way we can record the dirty
> pages caused by both the PVM and the SVM, and flush only those dirty
> pages from the RAM cache while doing a checkpoint.
>
> [...]

You can see my split bitmap patches; I am splitting the dirty bitmap
per block. I think it shouldn't make your life more difficult, but
please take a look.
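
(For reference, with a per-block bitmap the recording in
colo_cache_from_block_offset() would look roughly like the sketch
below; 'block->bmap' as the field name is an assumption taken from
that split-bitmap series, with bit indices relative to the block
instead of global:)

    /* Sketch, assuming the split-bitmap layout: one bitmap per
     * RAMBlock, indexed by the page offset within the block. */
    unsigned long page = offset >> TARGET_PAGE_BITS;

    if (!test_and_set_bit(page, block->bmap)) {
        ram_state.migration_dirty_pages++;
    }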

I am wondering if it is faster/easier to use the page_cache.c that
xbzrle uses to store the dirty pages instead of copying the whole
RAMBlocks, but I don't really know.
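
(A very rough sketch of that idea: it reuses the page_cache.h helpers
that the xbzrle sender path calls (cache_insert(), cache_is_cached(),
get_cached_data()); the cache_init() signature differs between QEMU
versions, and num_pages, addr, page_data, checkpoint_age and host are
placeholders:)

    /* Sketch only: store just the pages received from the PVM in a
     * page cache, as xbzrle's sender side does, instead of keeping a
     * full copy of every RAMBlock. */
    PageCache *colo_page_cache = cache_init(num_pages, TARGET_PAGE_SIZE);

    /* On receiving a page for ram address 'addr' from the PVM: */
    cache_insert(colo_page_cache, addr, page_data, checkpoint_age);

    /* At checkpoint time, flush only what the cache actually holds: */
    if (cache_is_cached(colo_page_cache, addr, checkpoint_age)) {
        memcpy(host, get_cached_data(colo_page_cache, addr),
               TARGET_PAGE_SIZE);
    }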


Thanks, Juan.
Zhanghailiang April 25, 2017, 11:19 a.m. UTC | #2
On 2017/4/25 2:29, Juan Quintela wrote:
> zhanghailiang <zhang.zhanghailiang@huawei.com> wrote:
>> [...]
> You can see my split bitmap patches; I am splitting the dirty bitmap
> per block. I think it shouldn't make your life more difficult, but
> please take a look.

OK, I'll look at it.

> I am wondering if it is faster/easier to use the page_cache.c that
> xbzrle uses to store the dirty pages instead of copying the whole
> RAMBlocks, but I don't really know.

Hmm, yes, it takes a long time (depending on the VM's memory size) to
back up the whole VM's memory into the cache. We could reduce that time
by backing up pages one by one as they are loaded during the first live
migration round, because we know at the beginning of the first
migration stage whether the user has enabled COLO.
I'd like to send that optimization later in another series...
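
(A rough sketch of that optimization; the helper name
colo_backup_loaded_page() and the colo_enabled_from_start flag are
hypothetical:)

/* Hypothetical helper, called from the load path for each page once it
 * has been written into block->host: fill the COLO cache incrementally
 * instead of one big memcpy per RAMBlock in colo_init_ram_cache(). */
static inline void colo_backup_loaded_page(RAMBlock *block,
                                           ram_addr_t offset)
{
    /* colo_enabled_from_start stands in for however we learn that
     * COLO was requested for this migration. */
    if (colo_enabled_from_start) {
        memcpy(block->colo_cache + offset, block->host + offset,
               TARGET_PAGE_SIZE);
    }
}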

Thanks,
Hailiang


Patch

diff --git a/migration/ram.c b/migration/ram.c
index 05d1b06..0653a24 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2268,6 +2268,9 @@ static inline void *host_from_ram_block_offset(RAMBlock *block,
 static inline void *colo_cache_from_block_offset(RAMBlock *block,
                                                  ram_addr_t offset)
 {
+    unsigned long *bitmap;
+    long k;
+
     if (!offset_in_ramblock(block, offset)) {
         return NULL;
     }
@@ -2276,6 +2279,17 @@ static inline void *colo_cache_from_block_offset(RAMBlock *block,
                      __func__, block->idstr);
         return NULL;
     }
+
+    k = (memory_region_get_ram_addr(block->mr) + offset) >> TARGET_PAGE_BITS;
+    bitmap = atomic_rcu_read(&ram_state.ram_bitmap)->bmap;
+    /*
+     * During a COLO checkpoint, we need the bitmap of these migrated
+     * pages. It helps us decide which pages in the RAM cache should
+     * be flushed into the SVM's RAM later.
+     */
+    if (!test_and_set_bit(k, bitmap)) {
+        ram_state.migration_dirty_pages++;
+    }
     return block->colo_cache + offset;
 }
 
@@ -2752,6 +2766,15 @@ int colo_init_ram_cache(void)
         memcpy(block->colo_cache, block->host, block->used_length);
     }
     rcu_read_unlock();
+    /*
+     * Record the dirty pages sent by the PVM; we use this dirty bitmap
+     * to decide which pages in the cache should be flushed into the
+     * SVM's RAM. Here we re-use the same name 'ram_bitmap' as migration.
+     */
+    ram_state.ram_bitmap = g_new0(RAMBitmap, 1);
+    ram_state.ram_bitmap->bmap = bitmap_new(last_ram_page());
+    ram_state.migration_dirty_pages = 0;
+
     return 0;
 
 out_locked:
@@ -2770,6 +2793,12 @@ out_locked:
 void colo_release_ram_cache(void)
 {
     RAMBlock *block;
+    RAMBitmap *bitmap = ram_state.ram_bitmap;
+
+    atomic_rcu_set(&ram_state.ram_bitmap, NULL);
+    if (bitmap) {
+        call_rcu(bitmap, migration_bitmap_free, rcu);
+    }
 
     rcu_read_lock();
     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {