Patchwork [2/4] ram: remove xbzrle last_stage optimization

login
register
mail settings
Submitter Juan Quintela
Date Jan. 18, 2013, 11:53 a.m.
Message ID <1358510033-17268-3-git-send-email-quintela@redhat.com>
Download mbox | patch
Permalink /patch/213580/
State New
Headers show

Comments

Juan Quintela - Jan. 18, 2013, 11:53 a.m.
We need to remove it to be able to return from complete to iterative
phases of migration.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 arch_init.c | 24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)
Orit Wasserman - Jan. 21, 2013, 10:11 a.m.
Juan,
Why not add a migration_is_last_stage (similar to migration_is_xbzrle) function and leave the optimization in place?

Regards,
Orit
On 01/18/2013 01:53 PM, Juan Quintela wrote:
> We need to remove it to be able to return from complete to iterative
> phases of migration.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  arch_init.c | 24 +++++++++---------------
>  1 file changed, 9 insertions(+), 15 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index 2792b76..9f7d44d 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -286,16 +286,14 @@ static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
> 
>  static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
>                              ram_addr_t current_addr, RAMBlock *block,
> -                            ram_addr_t offset, int cont, bool last_stage)
> +                            ram_addr_t offset, int cont)
>  {
>      int encoded_len = 0, bytes_sent = -1;
>      uint8_t *prev_cached_page;
> 
>      if (!cache_is_cached(XBZRLE.cache, current_addr)) {
> -        if (!last_stage) {
> -            cache_insert(XBZRLE.cache, current_addr,
> -                         g_memdup(current_data, TARGET_PAGE_SIZE));
> -        }
> +        cache_insert(XBZRLE.cache, current_addr,
> +                     g_memdup(current_data, TARGET_PAGE_SIZE));
>          acct_info.xbzrle_cache_miss++;
>          return -1;
>      }
> @@ -321,9 +319,7 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
>      }
> 
>      /* we need to update the data in the cache, in order to get the same data */
> -    if (!last_stage) {
> -        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
> -    }
> +    memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
> 
>      /* Send XBZRLE based compressed page */
>      bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
> @@ -426,7 +422,7 @@ static void migration_bitmap_sync(void)
>   *           0 means no dirty pages
>   */
> 
> -static int ram_save_block(QEMUFile *f, bool last_stage)
> +static int ram_save_block(QEMUFile *f)
>  {
>      RAMBlock *block = last_seen_block;
>      ram_addr_t offset = last_offset;
> @@ -470,10 +466,8 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
>              } else if (migrate_use_xbzrle()) {
>                  current_addr = block->offset + offset;
>                  bytes_sent = save_xbzrle_page(f, p, current_addr, block,
> -                                              offset, cont, last_stage);
> -                if (!last_stage) {
> -                    p = get_cached_data(XBZRLE.cache, current_addr);
> -                }
> +                                              offset, cont);
> +                p = get_cached_data(XBZRLE.cache, current_addr);
>              }
> 
>              /* XBZRLE overflow or normal page */
> @@ -621,7 +615,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque, uint64_t free_space)
>      i = 0;
>      /* We need space for at least one page and end of section marker */
>      while (free_space > MAX_PAGE_SIZE + 8) {
> -        int bytes_sent = ram_save_block(f, false);
> +        int bytes_sent = ram_save_block(f);
>          /* no more blocks to sent */
>          if (bytes_sent == 0) {
>              break;
> @@ -665,7 +659,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
>      while (true) {
>          int bytes_sent;
> 
> -        bytes_sent = ram_save_block(f, true);
> +        bytes_sent = ram_save_block(f);
>          /* no more blocks to sent */
>          if (bytes_sent == 0) {
>              break;
>

Patch

diff --git a/arch_init.c b/arch_init.c
index 2792b76..9f7d44d 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -286,16 +286,14 @@  static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,

 static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                             ram_addr_t current_addr, RAMBlock *block,
-                            ram_addr_t offset, int cont, bool last_stage)
+                            ram_addr_t offset, int cont)
 {
     int encoded_len = 0, bytes_sent = -1;
     uint8_t *prev_cached_page;

     if (!cache_is_cached(XBZRLE.cache, current_addr)) {
-        if (!last_stage) {
-            cache_insert(XBZRLE.cache, current_addr,
-                         g_memdup(current_data, TARGET_PAGE_SIZE));
-        }
+        cache_insert(XBZRLE.cache, current_addr,
+                     g_memdup(current_data, TARGET_PAGE_SIZE));
         acct_info.xbzrle_cache_miss++;
         return -1;
     }
@@ -321,9 +319,7 @@  static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
     }

     /* we need to update the data in the cache, in order to get the same data */
-    if (!last_stage) {
-        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
-    }
+    memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);

     /* Send XBZRLE based compressed page */
     bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
@@ -426,7 +422,7 @@  static void migration_bitmap_sync(void)
  *           0 means no dirty pages
  */

-static int ram_save_block(QEMUFile *f, bool last_stage)
+static int ram_save_block(QEMUFile *f)
 {
     RAMBlock *block = last_seen_block;
     ram_addr_t offset = last_offset;
@@ -470,10 +466,8 @@  static int ram_save_block(QEMUFile *f, bool last_stage)
             } else if (migrate_use_xbzrle()) {
                 current_addr = block->offset + offset;
                 bytes_sent = save_xbzrle_page(f, p, current_addr, block,
-                                              offset, cont, last_stage);
-                if (!last_stage) {
-                    p = get_cached_data(XBZRLE.cache, current_addr);
-                }
+                                              offset, cont);
+                p = get_cached_data(XBZRLE.cache, current_addr);
             }

             /* XBZRLE overflow or normal page */
@@ -621,7 +615,7 @@  static int ram_save_iterate(QEMUFile *f, void *opaque, uint64_t free_space)
     i = 0;
     /* We need space for at least one page and end of section marker */
     while (free_space > MAX_PAGE_SIZE + 8) {
-        int bytes_sent = ram_save_block(f, false);
+        int bytes_sent = ram_save_block(f);
         /* no more blocks to sent */
         if (bytes_sent == 0) {
             break;
@@ -665,7 +659,7 @@  static int ram_save_complete(QEMUFile *f, void *opaque)
     while (true) {
         int bytes_sent;

-        bytes_sent = ram_save_block(f, true);
+        bytes_sent = ram_save_block(f);
         /* no more blocks to sent */
         if (bytes_sent == 0) {
             break;