Patchwork Re: [PATCH] qemu-kvm: Speed up of the dirty-bitmap-traveling

login
register
mail settings
Submitter OHMURA Kei
Date Feb. 9, 2010, 9:55 a.m.
Message ID <4B71311C.8090108@lab.ntt.co.jp>
Download mbox | patch
Permalink /patch/44893/
State New
Headers show

Comments

OHMURA Kei - Feb. 9, 2010, 9:55 a.m.
Thank you for your comments.  We have updated the code to reflect your
comments.  This is a patch for upstream.

Signed-off-by: OHMURA Kei <ohmura.kei@lab.ntt.co.jp>
---
 kvm-all.c |   54 +++++++++++++++++++++++++++++++++++++++---------------
 1 files changed, 39 insertions(+), 15 deletions(-)

Patch

diff --git a/kvm-all.c b/kvm-all.c
index 6c0fd37..603307c 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -287,9 +287,43 @@  int kvm_set_migration_log(int enable)
     return 0;
 }
 
-static int test_le_bit(unsigned long nr, unsigned char *addr)
+/* get kvm's dirty pages bitmap and update qemu's */
+static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
+                                         unsigned char *bitmap,
+                                         unsigned long offset,
+                                         unsigned long mem_size)
 {
-    return (addr[nr >> 3] >> (nr & 7)) & 1;
+    unsigned int i, j, k, start, end;
+    unsigned char c;
+    unsigned long page_number, addr, addr1;
+    ram_addr_t ram_addr;
+    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + TARGET_LONG_BITS - 1) /
+        TARGET_LONG_BITS;
+    unsigned long *bitmap_ul = (unsigned long *)bitmap;
+    
+    /*
+     * bitmap-traveling is faster than memory-traveling (for addr...)
+     * especially when most of the memory is not dirty.
+     */
+    for (i = 0; i < len; i++) {
+        if (bitmap_ul[i] != 0) {
+            start = i * TARGET_LONG_SIZE;
+            end = (i + 1) * TARGET_LONG_SIZE;
+            for (j = start; j < end; j++) {
+                c = bitmap[j];
+                while (c > 0) {
+                    k = ffsl(c) - 1;
+                    c &= ~(1u << k);
+                    page_number = j * 8 + k;
+                    addr1 = page_number * TARGET_PAGE_SIZE;
+                    addr = offset + addr1;
+                    ram_addr = cpu_get_physical_page_desc(addr);
+                    cpu_physical_memory_set_dirty(ram_addr);
+                }
+            }
+        }
+    }
+    return 0;
 }
 
 /**
@@ -305,8 +339,6 @@  int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
 {
     KVMState *s = kvm_state;
     unsigned long size, allocated_size = 0;
-    target_phys_addr_t phys_addr;
-    ram_addr_t addr;
     KVMDirtyLog d;
     KVMSlot *mem;
     int ret = 0;
@@ -335,17 +367,9 @@  int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
             break;
         }
 
-        for (phys_addr = mem->start_addr, addr = mem->phys_offset;
-             phys_addr < mem->start_addr + mem->memory_size;
-             phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
-            unsigned char *bitmap = (unsigned char *)d.dirty_bitmap;
-            unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;
-
-            if (test_le_bit(nr, bitmap)) {
-                cpu_physical_memory_set_dirty(addr);
-            }
-        }
-        start_addr = phys_addr;
+        kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap,
+                                      mem->start_addr, mem->memory_size);
+        start_addr = mem->start_addr + mem->memory_size;
     }
     qemu_free(d.dirty_bitmap);