
[7/9] cputlb: Partially merge tlb_dyn_init into tlb_init

Message ID: 20200109024907.2730-8-richard.henderson@linaro.org
State: New
Series: cputlb: Various cleanups

Commit Message

Richard Henderson Jan. 9, 2020, 2:49 a.m. UTC
Merge into the only caller, but at the same time split
out tlb_mmu_init to initialize a single tlb entry.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)
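
As context for the new helper: tlb_mmu_init() records the table size only implicitly, in fast->mask. The following standalone sketch (not part of the patch; both constants are assumed values, since they are target-dependent in QEMU) shows how the entry count round-trips through that encoding:

/*
 * Hypothetical, standalone illustration of the size encoding used by
 * tlb_mmu_init(): the table size is stored only as fast->mask.
 * Both constants below are assumptions for this sketch; in QEMU,
 * CPU_TLB_DYN_DEFAULT_BITS and CPU_TLB_ENTRY_BITS are target-dependent.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define CPU_TLB_DYN_DEFAULT_BITS 8   /* assumed: 256 entries by default */
#define CPU_TLB_ENTRY_BITS       5   /* assumed: log2(sizeof(CPUTLBEntry)) */

int main(void)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    /* What tlb_mmu_init() stores: a byte mask over the entry table. */
    size_t mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;

    /* What the fast path can recover: the entry count, from the mask alone. */
    size_t recovered = (mask >> CPU_TLB_ENTRY_BITS) + 1;

    assert(recovered == n_entries);
    printf("n_entries = %zu, mask = 0x%zx\n", n_entries, mask);
    return 0;
}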

Comments

Philippe Mathieu-Daudé Jan. 20, 2020, 9:01 a.m. UTC | #1
On 1/9/20 3:49 AM, Richard Henderson wrote:
> Merge into the only caller, but at the same time split
> out tlb_mmu_init to initialize a single tlb entry.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>

Alex Bennée Jan. 20, 2020, 2:33 p.m. UTC | #2
Richard Henderson <richard.henderson@linaro.org> writes:

> Merge into the only caller, but at the same time split
> out tlb_mmu_init to initialize a single tlb entry.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

Alistair Francis Jan. 20, 2020, 11 p.m. UTC | #3
On Thu, Jan 9, 2020 at 12:52 PM Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Merge into the only caller, but at the same time split
> out tlb_mmu_init to initialize a single tlb entry.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair


Patch

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index e60e501334..c7c34b185b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -97,22 +97,6 @@  static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
     desc->window_max_entries = max_entries;
 }
 
-static void tlb_dyn_init(CPUArchState *env)
-{
-    int i;
-
-    for (i = 0; i < NB_MMU_MODES; i++) {
-        CPUTLBDesc *desc = &env_tlb(env)->d[i];
-        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
-
-        tlb_window_reset(desc, get_clock_realtime(), 0);
-        desc->n_used_entries = 0;
-        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
-        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
-        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
-    }
-}
-
 /**
  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
  * @desc: The CPUTLBDesc portion of the TLB
@@ -247,6 +231,17 @@  static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
     tlb_mmu_flush_locked(desc, fast);
 }
 
+static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
+{
+    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
+
+    tlb_window_reset(desc, now, 0);
+    desc->n_used_entries = 0;
+    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
+    fast->table = g_new(CPUTLBEntry, n_entries);
+    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+}
+
 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
 {
     env_tlb(env)->d[mmu_idx].n_used_entries++;
@@ -260,13 +255,17 @@  static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
 void tlb_init(CPUState *cpu)
 {
     CPUArchState *env = cpu->env_ptr;
+    int64_t now = get_clock_realtime();
+    int i;
 
     qemu_spin_init(&env_tlb(env)->c.lock);
 
     /* Ensure that cpu_reset performs a full flush.  */
     env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
 
-    tlb_dyn_init(env);
+    for (i = 0; i < NB_MMU_MODES; i++) {
+        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
+    }
 }
 
 /* flush_all_helper: run fn across all cpus
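
For readers skimming the hunks, this is the resulting tlb_init() after the patch, reconstructed from the context above (a condensed view, not verbatim source):

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;

    /* Initialize the TLB for each MMU mode with a shared timestamp. */
    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

Hoisting get_clock_realtime() out of the loop reads the clock once instead of once per MMU mode, so every mode starts its resize window at the same instant.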