
[PATCHv2,5/6] migration: stable ram block ordering

Message ID 0bc452a2f9bfd1af031dbe3b5f8793e0b05c52d9.1290613959.git.mst@redhat.com
State New

Commit Message

Michael S. Tsirkin Nov. 24, 2010, 3:53 p.m. UTC
This makes the RAM block ordering under migration stable: blocks are ordered by offset.
This is especially useful for migration to exec, e.g. when debugging.
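
The reordering being avoided comes from qemu_get_ram_ptr(): on a hit it moves the
block it finds to the head of ram_list.blocks so that repeated lookups of the same
block stay cheap, and any such lookup made while migration is walking the list
changes the block order. Below is a self-contained sketch of that move-to-front
pattern, not the real exec.c code: it uses the BSD <sys/queue.h> macros (which
QEMU's QLIST_* macros are modeled on) and a made-up blk struct in place of
RAMBlock.

    #include <stdio.h>
    #include <sys/queue.h>

    /* Made-up stand-in for RAMBlock, just enough for the demo. */
    struct blk {
        unsigned long offset;
        unsigned long length;
        LIST_ENTRY(blk) next;
    };

    static LIST_HEAD(, blk) blocks;

    /* Lookup with move-to-front: fast when the same block is hit
     * repeatedly, but every hit reorders the list. */
    static struct blk *lookup(unsigned long addr)
    {
        struct blk *b;

        LIST_FOREACH(b, &blocks, next) {
            if (addr - b->offset < b->length) {
                LIST_REMOVE(b, next);
                LIST_INSERT_HEAD(&blocks, b, next);
                return b;
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct blk a = { 0x0, 0x1000 }, b = { 0x1000, 0x1000 };
        struct blk *it;

        LIST_INIT(&blocks);
        LIST_INSERT_HEAD(&blocks, &a, next);
        LIST_INSERT_HEAD(&blocks, &b, next);  /* list order: b, a */
        lookup(0x10);                         /* hit on a; order becomes a, b */
        LIST_FOREACH(it, &blocks, next) {
            printf("block at offset 0x%lx\n", it->offset);
        }
        return 0;
    }

sort_ram_list() below makes the starting order deterministic, and the new
qemu_safe_ram_ptr() keeps it that way on the paths that run while migration
iterates over the list.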

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Tested-by: Jason Wang <jasowang@redhat.com>
---
 arch_init.c  |   35 +++++++++++++++++++++++++++++++++++
 cpu-common.h |    3 +++
 exec.c       |   24 ++++++++++++++++++++++--
 kvm-all.c    |    2 +-
 4 files changed, 61 insertions(+), 3 deletions(-)

Patch

diff --git a/arch_init.c b/arch_init.c
index 4486925..e32e289 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -23,6 +23,7 @@ 
  */
 #include <stdint.h>
 #include <stdarg.h>
+#include <stdlib.h>
 #ifndef _WIN32
 #include <sys/types.h>
 #include <sys/mman.h>
@@ -212,6 +213,39 @@  uint64_t ram_bytes_total(void)
     return total;
 }
 
+static int block_compar(const void *a, const void *b)
+{
+    RAMBlock * const *ablock = a;
+    RAMBlock * const *bblock = b;
+    if ((*ablock)->offset < (*bblock)->offset) {
+        return -1;
+    } else if ((*ablock)->offset > (*bblock)->offset) {
+        return 1;
+    }
+    return 0;
+}
+
+static void sort_ram_list(void)
+{
+    RAMBlock *block, *nblock, **blocks;
+    int n;
+    n = 0;
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        ++n;
+    }
+    blocks = qemu_malloc(n * sizeof *blocks);
+    n = 0;
+    QLIST_FOREACH_SAFE(block, &ram_list.blocks, next, nblock) {
+        blocks[n++] = block;
+        QLIST_REMOVE(block, next);
+    }
+    qsort(blocks, n, sizeof *blocks, block_compar);
+    while (--n >= 0) {
+        QLIST_INSERT_HEAD(&ram_list.blocks, blocks[n], next);
+    }
+    qemu_free(blocks);
+}
+
 int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
 {
     ram_addr_t addr;
@@ -234,6 +268,7 @@  int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         bytes_transferred = 0;
         last_block = NULL;
         last_offset = 0;
+        sort_ram_list();
 
         /* Make sure all dirty bits are set */
         QLIST_FOREACH(block, &ram_list.blocks, next) {
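
Two details of sort_ram_list() are worth spelling out. First, qsort() hands the
comparator pointers to the array elements, and the elements here are themselves
RAMBlock pointers, which is why block_compar() works with RAMBlock * const *.
Second, reinserting from the last index down to 0 with QLIST_INSERT_HEAD leaves
the list in ascending offset order, because each insertion lands in front of the
already-inserted, larger offsets. A standalone illustration of the comparator
pattern, with a made-up item struct in place of RAMBlock:

    #include <stdio.h>
    #include <stdlib.h>

    /* Made-up element type; RAMBlock plays this role in the patch. */
    struct item {
        unsigned long offset;
    };

    /* qsort() passes pointers to the array elements.  The elements are
     * struct item *, so a and b are really struct item **. */
    static int item_compar(const void *a, const void *b)
    {
        struct item * const *ia = a;
        struct item * const *ib = b;

        if ((*ia)->offset < (*ib)->offset) {
            return -1;
        } else if ((*ia)->offset > (*ib)->offset) {
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct item x = { 0x3000 }, y = { 0x1000 }, z = { 0x2000 };
        struct item *arr[] = { &x, &y, &z };
        int i;

        qsort(arr, 3, sizeof arr[0], item_compar);
        for (i = 0; i < 3; i++) {
            printf("0x%lx\n", arr[i]->offset);  /* 0x1000 0x2000 0x3000 */
        }
        return 0;
    }

Comparing with explicit <, > rather than returning the difference of the two
offsets also avoids truncating a ram_addr_t difference to int.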
diff --git a/cpu-common.h b/cpu-common.h
index a543b5d..bb6b137 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -46,6 +46,9 @@  ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
 void qemu_ram_free(ram_addr_t addr);
 /* This should only be used for ram local to a device.  */
 void *qemu_get_ram_ptr(ram_addr_t addr);
+/* Same but slower, to use for migration, where the order of
+ * RAMBlocks must not change. */
+void *qemu_safe_ram_ptr(ram_addr_t addr);
 /* This should not be used by devices.  */
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
diff --git a/exec.c b/exec.c
index db9ff55..6c8f635 100644
--- a/exec.c
+++ b/exec.c
@@ -2030,10 +2030,10 @@  void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
 
     /* we modify the TLB cache so that the dirty bit will be set again
        when accessing the range */
-    start1 = (unsigned long)qemu_get_ram_ptr(start);
+    start1 = (unsigned long)qemu_safe_ram_ptr(start);
     /* Chek that we don't span multiple blocks - this breaks the
        address comparisons below.  */
-    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
+    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
             != (end - 1) - start) {
         abort();
     }
@@ -2858,6 +2858,7 @@  ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
     new_block->length = size;
 
     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
+    fprintf(stderr, "alloc ram %s len 0x%x\n", new_block->idstr, (int)new_block->length);
 
     ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                        last_ram_offset() >> TARGET_PAGE_BITS);
@@ -2931,6 +2932,25 @@  void *qemu_get_ram_ptr(ram_addr_t addr)
     return NULL;
 }
 
+/* Return a host pointer to ram allocated with qemu_ram_alloc.
+ * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
+ */
+void *qemu_safe_ram_ptr(ram_addr_t addr)
+{
+    RAMBlock *block;
+
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        if (addr - block->offset < block->length) {
+            return block->host + (addr - block->offset);
+        }
+    }
+
+    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+    abort();
+
+    return NULL;
+}
+
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
 {
     RAMBlock *block;
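
The bounds test in qemu_safe_ram_ptr(), addr - block->offset < block->length,
covers both ends of the block with a single comparison: ram_addr_t is unsigned,
so when addr is below block->offset the subtraction wraps around to a huge value
and the test fails. A minimal standalone version of the idiom, with uint64_t
standing in for ram_addr_t:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* True iff addr lies in [base, base + len).  Relies on unsigned
     * wraparound: for addr < base, addr - base becomes huge and the
     * comparison fails, so no separate lower-bound check is needed. */
    static bool in_range(uint64_t addr, uint64_t base, uint64_t len)
    {
        return addr - base < len;
    }

    int main(void)
    {
        assert(in_range(0x1000, 0x1000, 0x100));   /* first byte of the block */
        assert(in_range(0x10ff, 0x1000, 0x100));   /* last byte of the block */
        assert(!in_range(0x1100, 0x1000, 0x100));  /* one past the end */
        assert(!in_range(0x0fff, 0x1000, 0x100));  /* below the block: wraps */
        return 0;
    }

The same test is used by qemu_get_ram_ptr(); the safe variant differs only in
skipping the move-to-front step described earlier, which is also why the
cpu-common.h comment calls it slower.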
diff --git a/kvm-all.c b/kvm-all.c
index 37b99c7..cae24bb 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -162,7 +162,7 @@  static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
     mem.slot = slot->slot;
     mem.guest_phys_addr = slot->start_addr;
     mem.memory_size = slot->memory_size;
-    mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
+    mem.userspace_addr = (unsigned long)qemu_safe_ram_ptr(slot->phys_offset);
     mem.flags = slot->flags;
     if (s->migration_log) {
         mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;