
[05/10] cputlb: Move env->vtlb_index to env->tlb_d.vindex

Message ID 20181023070253.6407-7-richard.henderson@linaro.org
State New
Series cputlb: track dirty tlbs and general cleanup

Commit Message

Richard Henderson Oct. 23, 2018, 7:02 a.m. UTC
The rest of the tlb victim cache is per-tlb;
the next-use index should be as well.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-defs.h | 5 +++--
 accel/tcg/cputlb.c      | 5 ++---
 2 files changed, 5 insertions(+), 5 deletions(-)
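
For context, here is a minimal, self-contained sketch of what the change gives: a victim TLB whose round-robin replacement index lives alongside the rest of the per-MMU-index state, so it is reset together with that state on a per-mmu_idx flush. This is not QEMU code; the names (MMUTLB, tlb_flush_one, tlb_set_page) and the sizes are simplified stand-ins for CPUTLBDesc, tlb_flush_one_mmuidx_locked() and tlb_set_page_with_attrs().

/* Per-MMU-index TLB with a small victim cache and a round-robin
 * replacement index, mirroring what tlb_d[mmu_idx].vindex does after
 * this patch.  Simplified sketch, not QEMU code.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NB_MMU_MODES 2
#define TLB_SIZE     8          /* main (direct-mapped) TLB entries */
#define VTLB_SIZE    4          /* victim TLB entries */

typedef struct {
    uint64_t addr;              /* page address; -1 means invalid */
} TLBEntry;

typedef struct {
    TLBEntry tlb[TLB_SIZE];
    TLBEntry vtlb[VTLB_SIZE];
    size_t vindex;              /* next victim slot, wraps modulo VTLB_SIZE */
} MMUTLB;

static MMUTLB tlbs[NB_MMU_MODES];

/* Per-mmu_idx flush: the victim index is reset with the rest of the state. */
static void tlb_flush_one(MMUTLB *t)
{
    memset(t->tlb, -1, sizeof(t->tlb));
    memset(t->vtlb, -1, sizeof(t->vtlb));
    t->vindex = 0;
}

static void tlb_set_page(int mmu_idx, uint64_t addr)
{
    MMUTLB *t = &tlbs[mmu_idx];
    TLBEntry *te = &t->tlb[(addr >> 12) % TLB_SIZE];

    if (te->addr != (uint64_t)-1 && te->addr != addr) {
        /* Evict the old entry into the victim tlb, round-robin. */
        size_t vidx = t->vindex++ % VTLB_SIZE;
        t->vtlb[vidx] = *te;
    }
    te->addr = addr;
}

int main(void)
{
    for (int i = 0; i < NB_MMU_MODES; i++) {
        tlb_flush_one(&tlbs[i]);
    }
    /* Two pages that collide in the same direct-mapped slot of mmu_idx 0. */
    tlb_set_page(0, 0x1000);
    tlb_set_page(0, 0x1000 + (uint64_t)TLB_SIZE * 0x1000);
    printf("mmu 0 vindex = %zu\n", tlbs[0].vindex);   /* 1 */
    printf("mmu 1 vindex = %zu\n", tlbs[1].vindex);   /* still 0, untouched */
    return 0;
}

Because the replacement index is per-MMU-index in this sketch, filling or flushing one mode never perturbs the victim-replacement state of another, which is the point of moving vtlb_index into CPUTLBDesc.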

Comments

Philippe Mathieu-Daudé Oct. 23, 2018, 11:07 a.m. UTC | #1
On 23/10/18 9:02, Richard Henderson wrote:
> The rest of the tlb victim cache is per-tlb;
> the next-use index should be as well.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>


Patch

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index df8ae18d9d..181c0dbfa4 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -150,6 +150,8 @@ typedef struct CPUTLBDesc {
      */
     target_ulong large_page_addr;
     target_ulong large_page_mask;
+    /* The next index to use in the tlb victim table.  */
+    size_t vindex;
 } CPUTLBDesc;
 
 /*
@@ -178,8 +180,7 @@ typedef struct CPUTLBCommon {
     CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];               \
     CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                    \
     CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];                 \
-    size_t tlb_flush_count;                                             \
-    target_ulong vtlb_index;                                            \
+    size_t tlb_flush_count;
 
 #else
 
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 72b0567f70..d3b37ffa85 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -119,6 +119,7 @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
     memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
     env->tlb_d[mmu_idx].large_page_addr = -1;
     env->tlb_d[mmu_idx].large_page_mask = -1;
+    env->tlb_d[mmu_idx].vindex = 0;
 }
 
 /* This is OK because CPU architectures generally permit an
@@ -149,8 +150,6 @@ static void tlb_flush_nocheck(CPUState *cpu)
     qemu_spin_unlock(&env->tlb_c.lock);
 
     cpu_tb_jmp_cache_clear(cpu);
-
-    env->vtlb_index = 0;
 }
 
 static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
@@ -668,7 +667,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
      * different page; otherwise just overwrite the stale data.
      */
     if (!tlb_hit_page_anyprot(te, vaddr_page)) {
-        unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
+        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
         CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
 
         /* Evict the old entry into the victim tlb.  */