kvm: Fix dirty tracking with large kernel page size

Message ID 1332133047-7672-1-git-send-email-david@gibson.dropbear.id.au
State New

Commit Message

David Gibson March 19, 2012, 4:57 a.m. UTC
If the kernel page size is larger than TARGET_PAGE_SIZE, which
happens for example on ppc64 with kernels compiled for 64K pages,
the dirty tracking doesn't work: each bit in KVM's dirty log covers
one host page, but the code treated it as one TARGET_PAGE_SIZE page,
so it computed wrong addresses and marked too little memory dirty.

Cc: Avi Kivity <avi@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
 kvm-all.c |    8 +++++---
 1 files changed, 5 insertions(+), 3 deletions(-)
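
To see the arithmetic: on a ppc64 host with 64K kernel pages and a 4K
TARGET_PAGE_SIZE, each bit in KVM's dirty log covers getpagesize() /
TARGET_PAGE_SIZE = 16 target pages. Below is a minimal standalone sketch
of the corrected bit-to-address mapping; the constants are illustrative
stand-ins for the real QEMU/host definitions, not values from the patch.

    #include <stdio.h>

    /* Illustrative stand-ins for the QEMU/host definitions. */
    #define TARGET_PAGE_SIZE 4096UL   /* guest-visible page size */
    #define HOST_PAGE_SIZE   65536UL  /* getpagesize() on a 64K-page kernel */
    #define HOST_LONG_BITS   64

    int main(void)
    {
        unsigned long hpratio = HOST_PAGE_SIZE / TARGET_PAGE_SIZE; /* 16 */
        unsigned int i = 0, j = 3;    /* bit 3 of bitmap word 0 */

        /* Each dirty-log bit names one host page, so scale to target pages. */
        unsigned long page_number = (i * HOST_LONG_BITS + j) * hpratio;
        unsigned long addr = page_number * TARGET_PAGE_SIZE;

        printf("bit %u dirties 0x%lx..0x%lx (%lu target pages)\n",
               j, addr, addr + TARGET_PAGE_SIZE * hpratio - 1, hpratio);
        return 0;
    }

With hpratio = 16, bit 3 maps to target page 48 and dirties 0x30000
through 0x3ffff; the unpatched code would have marked only 0x3000..0x3fff.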

Comments

Andreas Färber March 19, 2012, 10:52 a.m. UTC | #1
On 19.03.2012 05:57, David Gibson wrote:
> If the kernel page size is larger than TARGET_PAGE_SIZE, which
> happens for example on ppc64 with kernels compiled for 64K pages,
> the dirty tracking doesn't work: each bit in KVM's dirty log covers
> one host page, but the code treated it as one TARGET_PAGE_SIZE page,
> so it computed wrong addresses and marked too little memory dirty.
> 
> Cc: Avi Kivity <avi@redhat.com>
> Cc: Marcelo Tosatti <mtosatti@redhat.com>
> 
> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> ---
>  kvm-all.c |    8 +++++---
>  1 files changed, 5 insertions(+), 3 deletions(-)
> 
> diff --git a/kvm-all.c b/kvm-all.c
> index ba2cee1..47adc97 100644
> --- a/kvm-all.c
> +++ b/kvm-all.c
> @@ -350,10 +350,11 @@ static int kvm_set_migration_log(int enable)
>  static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
>                                           unsigned long *bitmap)
>  {
> -    unsigned int i, j;
> +  unsigned int i, j;

Unintentional change?

Andreas

>      unsigned long page_number, c;
>      target_phys_addr_t addr, addr1;
>      unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
> +    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
>  
>      /*
>       * bitmap-traveling is faster than memory-traveling (for addr...)
> @@ -365,10 +366,11 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
>              do {
>                  j = ffsl(c) - 1;
>                  c &= ~(1ul << j);
> -                page_number = i * HOST_LONG_BITS + j;
> +                page_number = (i * HOST_LONG_BITS + j) * hpratio;
>                  addr1 = page_number * TARGET_PAGE_SIZE;
>                  addr = section->offset_within_region + addr1;
> -                memory_region_set_dirty(section->mr, addr, TARGET_PAGE_SIZE);
> +                memory_region_set_dirty(section->mr, addr,
> +                                        TARGET_PAGE_SIZE * hpratio);
>              } while (c != 0);
>          }
>      }

Patch

diff --git a/kvm-all.c b/kvm-all.c
index ba2cee1..47adc97 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -350,10 +350,11 @@ static int kvm_set_migration_log(int enable)
 static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                          unsigned long *bitmap)
 {
-    unsigned int i, j;
+  unsigned int i, j;
     unsigned long page_number, c;
     target_phys_addr_t addr, addr1;
     unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
+    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
 
     /*
      * bitmap-traveling is faster than memory-traveling (for addr...)
@@ -365,10 +366,11 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
             do {
                 j = ffsl(c) - 1;
                 c &= ~(1ul << j);
-                page_number = i * HOST_LONG_BITS + j;
+                page_number = (i * HOST_LONG_BITS + j) * hpratio;
                 addr1 = page_number * TARGET_PAGE_SIZE;
                 addr = section->offset_within_region + addr1;
-                memory_region_set_dirty(section->mr, addr, TARGET_PAGE_SIZE);
+                memory_region_set_dirty(section->mr, addr,
+                                        TARGET_PAGE_SIZE * hpratio);
             } while (c != 0);
         }
     }
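
The loop in kvm_get_dirty_pages_log_range() walks the bitmap with the
find-first-set idiom: clear the lowest set bit of each word until the
word is empty, so only dirty pages are visited. A self-contained sketch
of that traversal, using the GCC builtin __builtin_ffsl in place of
QEMU's ffsl() (the bitmap contents here are made up for illustration):

    #include <stdio.h>

    #define HOST_LONG_BITS 64

    /* Visit every set bit without scanning the clear ones: find the
     * lowest set bit, report it, clear it, repeat until the word is 0. */
    static void walk_dirty_bits(const unsigned long *bitmap, unsigned int len)
    {
        for (unsigned int i = 0; i < len; i++) {
            unsigned long c = bitmap[i];
            while (c != 0) {
                int j = __builtin_ffsl(c) - 1;   /* index of lowest set bit */
                c &= ~(1ul << j);
                printf("word %u, bit %d -> host page %u\n",
                       i, j, i * HOST_LONG_BITS + j);
            }
        }
    }

    int main(void)
    {
        unsigned long bitmap[2] = { 0x5ul, 1ul << 63 };  /* pages 0, 2, 127 */
        walk_dirty_bits(bitmap, 2);
        return 0;
    }

Each reported bit index is then multiplied by hpratio as in the patch,
so a single host-page bit dirties the whole run of target pages it covers.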