Patchwork [09/13] Only TCG needs TLB handling

login
register
mail settings
Submitter Juan Quintela
Date June 29, 2012, 4:44 p.m.
Message ID <9313b688dde620cb9b2f8a52792e00ad9e22f9be.1340987905.git.quintela@redhat.com>
Download mbox | patch
Permalink /patch/168228/
State New
Headers show

Comments

Juan Quintela - June 29, 2012, 4:44 p.m.
Refactor the code that is only needed for tcg to a static function.
Call that only when tcg is enabled.  We can't refactor to a dummy
function in the kvm case, as qemu can be compiled at the same time
with tcg and kvm.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 exec.c |   31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)
Blue Swirl - June 29, 2012, 8:29 p.m.
On Fri, Jun 29, 2012 at 4:44 PM, Juan Quintela <quintela@redhat.com> wrote:
> Refactor the code that is only needed for tcg to an static function.
> Call that only when tcg is enabled.  We can't refactor to a dummy
> function in the kvm case, as qemu can be compiled at the same time
> with tcg and kvm.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  exec.c |   31 +++++++++++++++++++++----------
>  1 file changed, 21 insertions(+), 10 deletions(-)
>
> diff --git a/exec.c b/exec.c
> index 8244d54..a68b65c 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -1824,11 +1824,29 @@ void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
>             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
>  }
>
> +static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
> +                                      uintptr_t length)
> +{
> +    uintptr_t start1;
> +
> +    /* we modify the TLB cache so that the dirty bit will be set again
> +       when accessing the range */
> +    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
> +    /* Check that we don't span multiple blocks - this breaks the
> +       address comparisons below.  */
> +    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
> +            != (end - 1) - start) {
> +        abort();
> +    }
> +    cpu_tlb_reset_dirty_all(start1, length);
> +
> +}

Please move this to cputlb.c. Maybe it can be merged with
cpu_tlb_reset_dirty_all() if there are no other callers.

> +
>  /* Note: start and end must be within the same ram block.  */
>  void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
>                                      int dirty_flags)
>  {
> -    uintptr_t length, start1;
> +    uintptr_t length;
>
>     start &= TARGET_PAGE_MASK;
>     end = TARGET_PAGE_ALIGN(end);
> @@ -1838,16 +1856,9 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
>         return;
>     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
>
> -    /* we modify the TLB cache so that the dirty bit will be set again
> -       when accessing the range */
> -    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
> -    /* Check that we don't span multiple blocks - this breaks the
> -       address comparisons below.  */
> -    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
> -            != (end - 1) - start) {
> -        abort();
> +    if (tcg_enabled()) {
> +        tlb_reset_dirty_range_all(start, end, length);
>     }
> -    cpu_tlb_reset_dirty_all(start1, length);
>  }
>
>  int cpu_physical_memory_set_dirty_tracking(int enable)
> --
> 1.7.10.4
>
>

Patch

diff --git a/exec.c b/exec.c
index 8244d54..a68b65c 100644
--- a/exec.c
+++ b/exec.c
@@ -1824,11 +1824,29 @@  void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
 }

+static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
+                                      uintptr_t length)
+{
+    uintptr_t start1;
+
+    /* we modify the TLB cache so that the dirty bit will be set again
+       when accessing the range */
+    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
+    /* Check that we don't span multiple blocks - this breaks the
+       address comparisons below.  */
+    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
+            != (end - 1) - start) {
+        abort();
+    }
+    cpu_tlb_reset_dirty_all(start1, length);
+
+}
+
 /* Note: start and end must be within the same ram block.  */
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags)
 {
-    uintptr_t length, start1;
+    uintptr_t length;

     start &= TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);
@@ -1838,16 +1856,9 @@  void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
         return;
     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

-    /* we modify the TLB cache so that the dirty bit will be set again
-       when accessing the range */
-    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
-    /* Check that we don't span multiple blocks - this breaks the
-       address comparisons below.  */
-    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
-            != (end - 1) - start) {
-        abort();
+    if (tcg_enabled()) {
+        tlb_reset_dirty_range_all(start, end, length);
     }
-    cpu_tlb_reset_dirty_all(start1, length);
 }

 int cpu_physical_memory_set_dirty_tracking(int enable)