[v4,07/13] migration: Split the function ram_save_page

Message ID 1422875149-13198-8-git-send-email-liang.z.li@intel.com
State New

Commit Message

Li, Liang Z Feb. 2, 2015, 11:05 a.m. UTC
Split the function ram_save_page for code reuse.

Signed-off-by: Liang Li <liang.z.li@intel.com>
Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
---
 arch_init.c | 102 +++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 56 insertions(+), 46 deletions(-)
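
For context, the reuse the commit message refers to: a helper like this lets
another page-saving path (presumably added later in this series) share the
RDMA, zero-page and XBZRLE handling. Below is a minimal hypothetical sketch of
such a caller; the name ram_save_compressed_page and its body are illustrative
assumptions, not part of this patch. Note that passing NULL for send_async is
why the helper guards that pointer:

    static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
                                        ram_addr_t offset, bool last_stage)
    {
        MemoryRegion *mr = block->mr;
        uint8_t *p = memory_region_get_ram_ptr(mr) + offset;
        int bytes_sent;

        /* Reuse the RDMA/zero-page/XBZRLE fast paths; this caller never
         * sends the raw page asynchronously, so send_async is NULL. */
        bytes_sent = save_zero_and_xbzrle_page(f, &p, block, offset,
                                               last_stage, NULL);
        if (bytes_sent == -1) {
            /* Page not handled above: compress and send it (elided). */
        }
        return bytes_sent;
    }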

Comments

Dr. David Alan Gilbert Feb. 6, 2015, 11:01 a.m. UTC | #1
* Liang Li (liang.z.li@intel.com) wrote:
> Split the function ram_save_page for code reuse.

That's better, but I still think there is an XBZRLE problem; see below.

> Signed-off-by: Liang Li <liang.z.li@intel.com>
> Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
> ---
>  arch_init.c | 102 +++++++++++++++++++++++++++++++++---------------------------
>  1 file changed, 56 insertions(+), 46 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index 500f299..eae082b 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -595,6 +595,58 @@ static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
>      }
>  }
>  
> +static int save_zero_and_xbzrle_page(QEMUFile *f, uint8_t **current_data,
> +                                     RAMBlock *block, ram_addr_t offset,
> +                                     bool last_stage, bool *send_async)
> +{
> +    int bytes_sent = -1;
> +    int cont, ret;
> +    ram_addr_t current_addr;
> +
> +    cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
> +
> +    /* When in doubt, send the page as a normal page */
> +    ret = ram_control_save_page(f, block->offset,
> +                                offset, TARGET_PAGE_SIZE, &bytes_sent);
> +
> +    XBZRLE_cache_lock();
> +
> +    current_addr = block->offset + offset;
> +    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
> +        if (ret != RAM_SAVE_CONTROL_DELAYED) {
> +            if (bytes_sent > 0) {
> +                acct_info.norm_pages++;
> +            } else if (bytes_sent == 0) {
> +                acct_info.dup_pages++;
> +            }
> +        }
> +    } else if (is_zero_range(*current_data, TARGET_PAGE_SIZE)) {
> +        acct_info.dup_pages++;
> +        bytes_sent = save_block_hdr(f, block, offset, cont,
> +                                    RAM_SAVE_FLAG_COMPRESS);
> +        qemu_put_byte(f, 0);
> +        bytes_sent++;
> +        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
> +         * page would be stale
> +         */
> +        xbzrle_cache_zero_page(current_addr);
> +    } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
> +        bytes_sent = save_xbzrle_page(f, current_data, current_addr, block,
> +                                      offset, cont, last_stage);
> +        if (!last_stage) {
> +            /* Can't send this cached data async, since the cache page
> +             * might get updated before it gets to the wire
> +             */
> +            if (send_async != NULL) {
> +                *send_async = false;
> +            }
> +        }
> +    }
> +
> +    XBZRLE_cache_unlock();

I think this unlock comes too soon: when save_xbzrle_page updates current_data
to point at a page in the cache, that cache data is still in use at this
point, so we must make sure the cache cannot be resized until after the
qemu_put_buffer below.  The lock therefore has to be held until after that
(see the sketch after this message).

Dave

> +    return bytes_sent;
> +}
>  
>  /* Needs iothread lock! */
>  /* Fix me: there are too many global variables used in migration process. */
> @@ -685,60 +737,20 @@ static void migration_bitmap_sync(void)
>   *
>   * Returns: Number of bytes written.
>   */
> -static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
> +static int ram_save_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
>                           bool last_stage)
>  {
>      int bytes_sent;
>      int cont;
> -    ram_addr_t current_addr;
>      MemoryRegion *mr = block->mr;
>      uint8_t *p;
> -    int ret;
>      bool send_async = true;
>  
> -    cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
> -
>      p = memory_region_get_ram_ptr(mr) + offset;
> -
> -    /* In doubt sent page as normal */
> -    bytes_sent = -1;
> -    ret = ram_control_save_page(f, block->offset,
> -                           offset, TARGET_PAGE_SIZE, &bytes_sent);
> -
> -    XBZRLE_cache_lock();
> -
> -    current_addr = block->offset + offset;
> -    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
> -        if (ret != RAM_SAVE_CONTROL_DELAYED) {
> -            if (bytes_sent > 0) {
> -                acct_info.norm_pages++;
> -            } else if (bytes_sent == 0) {
> -                acct_info.dup_pages++;
> -            }
> -        }
> -    } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
> -        acct_info.dup_pages++;
> -        bytes_sent = save_block_hdr(f, block, offset, cont,
> -                                    RAM_SAVE_FLAG_COMPRESS);
> -        qemu_put_byte(f, 0);
> -        bytes_sent++;
> -        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
> -         * page would be stale
> -         */
> -        xbzrle_cache_zero_page(current_addr);
> -    } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
> -        bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
> -                                      offset, cont, last_stage);
> -        if (!last_stage) {
> -            /* Can't send this cached data async, since the cache page
> -             * might get updated before it gets to the wire
> -             */
> -            send_async = false;
> -        }
> -    }
> -
> -    /* XBZRLE overflow or normal page */
> +    bytes_sent = save_zero_and_xbzrle_page(f, &p, block, offset,
> +                                           last_stage, &send_async);
>      if (bytes_sent == -1) {
> +        cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
>          bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
>          if (send_async) {
>              qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
> @@ -749,8 +761,6 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
>          acct_info.norm_pages++;
>      }
>  
> -    XBZRLE_cache_unlock();
> -
>      return bytes_sent;
>  }
>  
> -- 
> 1.9.1
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
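
To make the locking concern above concrete, here is a minimal sketch of the
suggested fix (an illustration only, not the actual follow-up revision): the
lock/unlock pair is hoisted out of save_zero_and_xbzrle_page into
ram_save_page, so XBZRLE_cache_lock() stays held across the qemu_put_buffer
calls that may read a cache page through p. This assumes
save_zero_and_xbzrle_page itself is changed to neither take nor drop the lock:

    static int ram_save_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             bool last_stage)
    {
        int bytes_sent;
        int cont;
        MemoryRegion *mr = block->mr;
        uint8_t *p;
        bool send_async = true;

        p = memory_region_get_ram_ptr(mr) + offset;

        /* Hold the lock for the whole send: save_xbzrle_page() may repoint
         * p at a page owned by the XBZRLE cache, and a concurrent cache
         * resize could free that page before it reaches the wire. */
        XBZRLE_cache_lock();

        bytes_sent = save_zero_and_xbzrle_page(f, &p, block, offset,
                                               last_stage, &send_async);
        if (bytes_sent == -1) {
            cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
            bytes_sent = save_block_hdr(f, block, offset, cont,
                                        RAM_SAVE_FLAG_PAGE);
            if (send_async) {
                qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
            } else {
                /* The XBZRLE-cached case forces this synchronous path, so
                 * the page is copied before the lock is dropped. */
                qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
            }
            bytes_sent += TARGET_PAGE_SIZE;
            acct_info.norm_pages++;
        }

        /* Only now is it safe to let the cache be resized again. */
        XBZRLE_cache_unlock();

        return bytes_sent;
    }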