@@ -1154,6 +1154,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
int flags = 0, ret = 0;
static uint64_t seq_iter;
+ bool need_flush = false;
seq_iter++;
@@ -1221,6 +1222,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
break;
}
+
+ need_flush = true;
ch = qemu_get_byte(f);
ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
break;
@@ -1231,6 +1234,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
break;
}
+
+ need_flush = true;
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
break;
case RAM_SAVE_FLAG_XBZRLE:
@@ -1246,6 +1251,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
break;
}
+ need_flush = true;
break;
case RAM_SAVE_FLAG_EOS:
/* normal exit */
@@ -1265,6 +1271,11 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
rcu_read_unlock();
+
+ if (!ret && ram_cache_enable && need_flush) {
+ DPRINTF("Flush ram_cache\n");
+ colo_flush_ram_cache();
+ }
DPRINTF("Completed load of VM with exit code %d seq iteration "
"%" PRIu64 "\n", ret, seq_iter);
return ret;
@@ -1332,6 +1343,87 @@ static void *memory_region_get_ram_cache_ptr(MemoryRegion *mr, RAMBlock *block)
return block->host_cache + (addr - block->offset);
}
+/* fix me: should this helper function be merged with
+ * migration_bitmap_find_and_reset_dirty ?
+ */
+/*
+ * Find the first page at or after @start (an offset within @mr) whose
+ * DIRTY_MEMORY_MIGRATION bit is set in the global ram_list dirty bitmap,
+ * clear that bit, and return the page's offset relative to the start of
+ * the region.  If no dirty bit is found, the returned offset is greater
+ * than or equal to the page-aligned region size, which callers treat as
+ * "no more dirty pages in this region".
+ */
+static inline
+ram_addr_t host_bitmap_find_and_reset_dirty(MemoryRegion *mr,
+ ram_addr_t start)
+{
+ /* Bitmap index of the region's first page; all indices below are
+  * absolute positions in the global dirty bitmap. */
+ unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
+ unsigned long nr = base + (start >> TARGET_PAGE_BITS);
+ uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
+ /* One past the region's last page — the search limit. */
+ unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
+
+ unsigned long next;
+
+ next = find_next_bit(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION],
+ size, nr);
+ if (next < size) {
+ clear_bit(next, ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ }
+ /* Convert back from absolute bitmap index to a region-relative
+  * byte offset (== mr_size when nothing was found). */
+ return (next - base) << TARGET_PAGE_BITS;
+}
+
+/*
+ * Flush the content of the RAM cache into the SVM's memory.
+ * Only pages dirtied by the PVM, the SVM, or both are flushed.
+ *
+ * Walks every RAMBlock with two cursors that advance in lock-step:
+ *   ca — next page dirtied on the migration (PVM/cache) side, taken from
+ *        migration_bitmap_find_and_reset_dirty();
+ *   ha — next page dirtied on the host (SVM) side, taken from
+ *        host_bitmap_find_and_reset_dirty().
+ * Each dirty page is copied from the block's RAM cache into its real
+ * host memory; a page dirty on both sides is copied only once (the
+ * ha == ca case sets neither got_ha alone nor a second copy for ca,
+ * because got_ha is suppressed when ha == ca).
+ */
+void colo_flush_ram_cache(void)
+{
+ RAMBlock *block = NULL;
+ void *dst_host;
+ void *src_host;
+ ram_addr_t ca = 0, ha = 0;
+ bool got_ca = 0, got_ha = 0;
+ /* NOTE(review): host_dirty/both_dirty are tallied below but never read
+  * in this function — presumably intended for statistics; confirm or
+  * remove in a follow-up. */
+ int64_t host_dirty = 0, both_dirty = 0;
+
+ /* Pull the latest dirty state from KVM/TCG into ram_list before
+  * scanning the bitmaps. */
+ address_space_sync_dirty_bitmap(&address_space_memory);
+ rcu_read_lock();
+ block = QLIST_FIRST_RCU(&ram_list.blocks);
+ while (true) {
+ /* Advance the lagging cursor only (ca <= ha / ha <= ca), so the
+  * two cursors stay within one page-find of each other. */
+ if (ca < block->used_length && ca <= ha) {
+ ca = migration_bitmap_find_and_reset_dirty(block->mr, ca);
+ if (ca < block->used_length) {
+ got_ca = 1;
+ }
+ }
+ if (ha < block->used_length && ha <= ca) {
+ ha = host_bitmap_find_and_reset_dirty(block->mr, ha);
+ /* ha == ca means both sides dirtied the same page; let the
+  * got_ca path do the single copy. */
+ if (ha < block->used_length && ha != ca) {
+ got_ha = 1;
+ }
+ host_dirty += (ha < block->used_length ? 1 : 0);
+ both_dirty += (ha < block->used_length && ha == ca ? 1 : 0);
+ }
+ if (ca >= block->used_length && ha >= block->used_length) {
+ /* Both bitmaps exhausted for this block: reset the cursors and
+  * move on to the next RAMBlock (or finish). */
+ ca = 0;
+ ha = 0;
+ block = QLIST_NEXT_RCU(block, next);
+ if (!block) {
+ break;
+ }
+ } else {
+ /* Copy each found dirty page from the RAM cache into the SVM's
+  * actual memory. */
+ if (got_ha) {
+ got_ha = 0;
+ dst_host = memory_region_get_ram_ptr(block->mr) + ha;
+ src_host = memory_region_get_ram_cache_ptr(block->mr, block)
+ + ha;
+ memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+ }
+ if (got_ca) {
+ got_ca = 0;
+ dst_host = memory_region_get_ram_ptr(block->mr) + ca;
+ src_host = memory_region_get_ram_cache_ptr(block->mr, block)
+ + ca;
+ memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+ }
+ }
+ }
+ rcu_read_unlock();
+ /* Every migration-dirty bit must have been consumed by the walk. */
+ assert(migration_dirty_pages == 0);
+}
+
static SaveVMHandlers savevm_ram_handlers = {
.save_live_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
@@ -37,5 +37,6 @@ void *colo_process_incoming_checkpoints(void *opaque);
bool loadvm_in_colo_state(void);
/* ram cache */
void create_and_init_ram_cache(void);
+void colo_flush_ram_cache(void);
void release_ram_cache(void);
#endif
@@ -409,7 +409,6 @@ void *colo_process_incoming_checkpoints(void *opaque)
}
DPRINTF("Finish load all vm state to cache\n");
qemu_mutex_unlock_iothread();
- /* TODO: flush vm state */
ret = colo_ctl_put(ctl, COLO_CHECKPOINT_LOADED);
if (ret < 0) {