[COLO-Frame,v8,12/34] COLO RAM: Load PVM's dirty pages into SVM's RAM cache temporarily

Message ID 1438159544-6224-13-git-send-email-zhang.zhanghailiang@huawei.com
State New

Commit Message

Zhanghailiang July 29, 2015, 8:45 a.m. UTC
The ram cache is initially identical to the SVM/PVM's memory.

At every checkpoint, we cache the PVM's dirty RAM into the RAM cache on
the slave side (so that the RAM cache is always identical to the PVM's
memory at each checkpoint). The cached RAM is flushed into the SVM only
after we have received all of the PVM's vmstate (RAM/device state).
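
The flush step itself is still a TODO in this patch; the following is a
minimal sketch of what it could look like, not part of this patch. The
helper name colo_flush_ram_cache is hypothetical, and it assumes
block->offset matches the ram_addr base used to index migration_bitmap
(as block->mr->ram_addr does in the hunks below):

/* Hypothetical sketch: copy every page marked in migration_bitmap from
 * the ram cache into the SVM's real RAM, clearing the bitmap as we go.
 */
static void colo_flush_ram_cache(void)
{
    RAMBlock *block;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        unsigned long first = block->offset >> TARGET_PAGE_BITS;
        unsigned long last = first + (block->used_length >> TARGET_PAGE_BITS);
        unsigned long i = first;

        while ((i = find_next_bit(migration_bitmap, last, i)) < last) {
            ram_addr_t off = (ram_addr_t)(i - first) << TARGET_PAGE_BITS;

            clear_bit(i, migration_bitmap);
            migration_dirty_pages--;
            /* Overwrite the SVM page with the cached PVM page */
            memcpy(block->host + off, block->host_cache + off,
                   TARGET_PAGE_SIZE);
            i++;
        }
    }
    rcu_read_unlock();
}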

Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Hongyang <yanghy@cn.fujitsu.com>
Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Signed-off-by: Gonglei <arei.gonglei@huawei.com>
---
 include/exec/cpu-all.h   |  1 +
 include/migration/colo.h |  3 ++
 migration/colo.c         | 32 ++++++++++++++---
 migration/ram.c          | 93 ++++++++++++++++++++++++++++++++++++++++++++++--
 4 files changed, 123 insertions(+), 6 deletions(-)

Patch

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index ea6a9a6..f8964c2 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -281,6 +281,7 @@  struct RAMBlock {
     struct rcu_head rcu;
     struct MemoryRegion *mr;
     uint8_t *host;
+    uint8_t *host_cache; /* For COLO: cache of the VM's RAM */
     ram_addr_t offset;
     ram_addr_t used_length;
     ram_addr_t max_length;
diff --git a/include/migration/colo.h b/include/migration/colo.h
index 2559f90..3b1eff9 100644
--- a/include/migration/colo.h
+++ b/include/migration/colo.h
@@ -30,4 +30,7 @@  bool migration_incoming_enable_colo(void);
 void migration_incoming_exit_colo(void);
 void *colo_process_incoming_checkpoints(void *opaque);
 bool migration_incoming_in_colo_state(void);
+/* ram cache */
+int create_and_init_ram_cache(void);
+void release_ram_cache(void);
 #endif
diff --git a/migration/colo.c b/migration/colo.c
index a77f23b..871e816 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -343,11 +343,23 @@  void *colo_process_incoming_checkpoints(void *opaque)
         error_report("Can't open incoming channel!");
         goto out;
     }
+
+    if (create_and_init_ram_cache() < 0) {
+        error_report("Failed to initialize ram cache");
+        goto out;
+    }
+
     ret = colo_ctl_put(ctl, COLO_CHECPOINT_READY);
     if (ret < 0) {
         goto out;
     }
-    /* TODO: in COLO mode, Secondary is runing, so start the vm */
+
+    qemu_mutex_lock_iothread();
+    /* in COLO mode, the slave is running, so start the vm */
+    vm_start();
+    qemu_mutex_unlock_iothread();
+    trace_colo_vm_state_change("stop", "run");
+
     while (mis->state == MIGRATION_STATUS_COLO) {
         int request = 0;
         int ret = colo_wait_handle_cmd(f, &request);
@@ -360,7 +372,12 @@  void *colo_process_incoming_checkpoints(void *opaque)
             }
         }
 
-        /* TODO: suspend guest */
+        /* suspend guest */
+        qemu_mutex_lock_iothread();
+        vm_stop_force_state(RUN_STATE_COLO);
+        qemu_mutex_unlock_iothread();
+        trace_colo_vm_state_change("run", "stop");
+
         ret = colo_ctl_put(ctl, COLO_CHECKPOINT_SUSPENDED);
         if (ret < 0) {
             goto out;
@@ -371,22 +388,29 @@  void *colo_process_incoming_checkpoints(void *opaque)
             goto out;
         }
 
-        /* TODO: read migration data into colo buffer */
+        /* TODO: Load VM state */
 
         ret = colo_ctl_put(ctl, COLO_CHECKPOINT_RECEIVED);
         if (ret < 0) {
             goto out;
         }
 
-        /* TODO: load vm state */
+        /* TODO: flush vm state */
 
         ret = colo_ctl_put(ctl, COLO_CHECKPOINT_LOADED);
         if (ret < 0) {
             goto out;
         }
+
+        /* resume guest */
+        qemu_mutex_lock_iothread();
+        vm_start();
+        qemu_mutex_unlock_iothread();
+        trace_colo_vm_state_change("stop", "run");
 }
 
 out:
+    release_ram_cache();
     if (ctl) {
         qemu_fclose(ctl);
     }
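
Note: the two TODOs in the loop above correspond to the receive and
flush steps described in the commit message. Assuming later patches in
the series wire them up roughly as follows (qemu_loadvm_state() is the
existing loadvm entry point; colo_flush_ram_cache() is the hypothetical
helper sketched earlier), the loop body would read:

        /* With ram_cache_enable set, host_from_stream_offset()
         * directs incoming RAM pages into block->host_cache rather
         * than block->host. */
        ret = qemu_loadvm_state(f);
        if (ret < 0) {
            goto out;
        }

        /* Only once the complete checkpoint has arrived is it safe
         * to copy the cached pages into the SVM's real RAM. */
        colo_flush_ram_cache();
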
diff --git a/migration/ram.c b/migration/ram.c
index 68980be..0897ecc 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -225,6 +225,7 @@  static ram_addr_t last_offset;
 static unsigned long *migration_bitmap;
 static QemuMutex migration_bitmap_mutex;
 static uint64_t migration_dirty_pages;
+static bool ram_cache_enable;
 static uint32_t last_version;
 static bool ram_bulk_stage;
 
@@ -1353,6 +1354,8 @@  static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
     return 0;
 }
 
+static void *memory_region_get_ram_cache_ptr(MemoryRegion *mr, RAMBlock *block);
+
 /* Must be called from within a rcu critical section.
  * Returns a pointer from within the RCU-protected ram_list.
  */
@@ -1370,7 +1373,20 @@  static inline void *host_from_stream_offset(QEMUFile *f,
             return NULL;
         }
 
-        return memory_region_get_ram_ptr(block->mr) + offset;
+        if (ram_cache_enable) {
+            /*
+             * During a COLO checkpoint, we need a bitmap of the migrated
+             * pages. It helps us decide which pages in the ram cache
+             * should be flushed into the VM's RAM later.
+             */
+            long k = (block->mr->ram_addr + offset) >> TARGET_PAGE_BITS;
+            if (!test_and_set_bit(k, migration_bitmap)) {
+                migration_dirty_pages++;
+            }
+            return memory_region_get_ram_cache_ptr(block->mr, block) + offset;
+        } else {
+            return memory_region_get_ram_ptr(block->mr) + offset;
+        }
     }
 
     len = qemu_get_byte(f);
@@ -1380,7 +1396,16 @@  static inline void *host_from_stream_offset(QEMUFile *f,
     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
         if (!strncmp(id, block->idstr, sizeof(id)) &&
             block->max_length > offset) {
-            return memory_region_get_ram_ptr(block->mr) + offset;
+            if (ram_cache_enable) {
+                long k = (block->mr->ram_addr + offset) >> TARGET_PAGE_BITS;
+                if (!test_and_set_bit(k, migration_bitmap)) {
+                    migration_dirty_pages++;
+                }
+                return memory_region_get_ram_cache_ptr(block->mr, block)
+                       + offset;
+            } else {
+                return memory_region_get_ram_ptr(block->mr) + offset;
+            }
         }
     }
 
@@ -1631,6 +1656,70 @@  static int ram_load(QEMUFile *f, void *opaque, int version_id)
     return ret;
 }
 
+/*
+ * colo cache: this is for the secondary VM. We cache the whole memory
+ * of the secondary VM; this function is called after the initial migration.
+ */
+int create_and_init_ram_cache(void)
+{
+    RAMBlock *block;
+
+    rcu_read_lock();
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        block->host_cache = qemu_anon_ram_alloc(block->used_length, NULL);
+        if (!block->host_cache) {
+            goto out_locked;
+        }
+        memcpy(block->host_cache, block->host, block->used_length);
+    }
+    rcu_read_unlock();
+    ram_cache_enable = true;
+    return 0;
+
+out_locked:
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        if (block->host_cache) {
+            qemu_anon_ram_free(block->host_cache, block->used_length);
+            block->host_cache = NULL;
+        }
+    }
+
+    rcu_read_unlock();
+    return -1;
+}
+
+void release_ram_cache(void)
+{
+    RAMBlock *block;
+
+    ram_cache_enable = false;
+
+    rcu_read_lock();
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        if (block->host_cache) {
+            qemu_anon_ram_free(block->host_cache, block->used_length);
+            block->host_cache = NULL;
+        }
+    }
+    rcu_read_unlock();
+}
+
+static void *memory_region_get_ram_cache_ptr(MemoryRegion *mr, RAMBlock *block)
+{
+    if (mr->alias) {
+        return memory_region_get_ram_cache_ptr(mr->alias, block) +
+               mr->alias_offset;
+    }
+
+    assert(mr->terminates);
+
+    ram_addr_t addr = mr->ram_addr & TARGET_PAGE_MASK;
+
+    assert(addr - block->offset < block->used_length);
+
+    return block->host_cache + (addr - block->offset);
+}
+
 static SaveVMHandlers savevm_ram_handlers = {
     .save_live_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,