
[1/6] cputlb: Add functions for flushing TLB for a single MMU index

Message ID 1438950810-28618-2-git-send-email-peter.maydell@linaro.org
State New

Commit Message

Peter Maydell Aug. 7, 2015, 12:33 p.m. UTC
Guest CPU TLB maintenance operations may be sufficiently
specialized to only need to flush TLB entries corresponding
to a particular MMU index. Implement cputlb functions for
this, to avoid the inefficiency of flushing TLB entries
which we don't need to.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 cputlb.c                | 81 +++++++++++++++++++++++++++++++++++++++++++++++++
 include/exec/exec-all.h | 47 ++++++++++++++++++++++++++++
 2 files changed, 128 insertions(+)
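
As a usage illustration (a hedged sketch: the helper name and the
concrete index values are invented for this example and are not part
of the patch), a target's TLB maintenance op could flush only the MMU
indexes it cares about, terminating the argument list with a negative
value:

    /* Hypothetical target helper: flush entries for MMU indexes 0 and 1
     * only, leaving entries for any other indexes intact. The varargs
     * list must end with a negative terminator.
     */
    static void hyp_tlbi_by_idx(CPUState *cs)
    {
        tlb_flush_by_mmuidx(cs, 0, 1, -1);
    }

A negative terminator works because valid MMU indexes are always
non-negative, so it cannot collide with a real index.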

Comments

Peter Maydell Aug. 14, 2015, 10:24 a.m. UTC | #1
On 7 August 2015 at 13:33, Peter Maydell <peter.maydell@linaro.org> wrote:
> Guest CPU TLB maintenance operations may be sufficiently
> specialized to only need to flush TLB entries corresponding
> to a particular MMU index. Implement cputlb functions for
> this, to avoid the inefficiency of flushing TLB entries
> which we don't need to.
>
> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
> ---
>  cputlb.c                | 81 +++++++++++++++++++++++++++++++++++++++++++++++++
>  include/exec/exec-all.h | 47 ++++++++++++++++++++++++++++
>  2 files changed, 128 insertions(+)
>
> diff --git a/cputlb.c b/cputlb.c
> index a506086..a1996ba 100644
> --- a/cputlb.c
> +++ b/cputlb.c
> @@ -69,6 +69,39 @@ void tlb_flush(CPUState *cpu, int flush_global)
>      tlb_flush_count++;
>  }
>
> +static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
> +{
> +    CPUArchState *env = cpu->env_ptr;
> +
> +#if defined(DEBUG_TLB)
> +    printf("tlb_flush_by_mmuidx %d:\n", mmu_idx);
> +#endif

This debug tracing doesn't compile if enabled -- it was written
to go with my initial implementation which took a single
mmu_idx rather than varargs, and I forgot to update it. I'll
send out a v2 shortly.

thanks
-- PMM
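
For context, one possible shape for the fix (a sketch only, not
necessarily what the actual v2 does) is to print each index inside the
varargs loop, where mmu_idx is in scope:

    #if defined(DEBUG_TLB)
        printf("v_tlb_flush_by_mmuidx:");
    #endif
        for (;;) {
            int mmu_idx = va_arg(argp, int);

            if (mmu_idx < 0) {
                break;
            }
    #if defined(DEBUG_TLB)
            printf(" %d", mmu_idx);   /* trace each flushed index */
    #endif
            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    #if defined(DEBUG_TLB)
        printf("\n");
    #endif

Tracing inside the loop also records exactly which indexes were
flushed, which a single print at the top of the function cannot do for
a varargs list.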

Patch

diff --git a/cputlb.c b/cputlb.c
index a506086..a1996ba 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -69,6 +69,39 @@  void tlb_flush(CPUState *cpu, int flush_global)
     tlb_flush_count++;
 }
 
+static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
+{
+    CPUArchState *env = cpu->env_ptr;
+
+#if defined(DEBUG_TLB)
+    printf("tlb_flush_by_mmuidx %d:\n", mmu_idx);
+#endif
+    /* must reset current TB so that interrupts cannot modify the
+       links while we are modifying them */
+    cpu->current_tb = NULL;
+
+    for (;;) {
+        int mmu_idx = va_arg(argp, int);
+
+        if (mmu_idx < 0) {
+            break;
+        }
+
+        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
+        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
+    }
+
+    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+}
+
+void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+{
+    va_list argp;
+    va_start(argp, cpu);
+    v_tlb_flush_by_mmuidx(cpu, argp);
+    va_end(argp);
+}
+
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {
     if (addr == (tlb_entry->addr_read &
@@ -121,6 +154,54 @@  void tlb_flush_page(CPUState *cpu, target_ulong addr)
     tb_flush_jmp_cache(cpu, addr);
 }
 
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
+{
+    CPUArchState *env = cpu->env_ptr;
+    int i, k;
+    va_list argp;
+
+    va_start(argp, addr);
+
+#if defined(DEBUG_TLB)
+    printf("tlb_flush_page_by_mmu_idx: " TARGET_FMT_lx ", %d\n", addr, mmu_idx);
+#endif
+    /* Check if we need to flush due to large pages.  */
+    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
+#if defined(DEBUG_TLB)
+        printf("tlb_flush_page_by_mmu_idx: forced full flush ("
+               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+               env->tlb_flush_addr, env->tlb_flush_mask);
+#endif
+        v_tlb_flush_by_mmuidx(cpu, argp);
+        va_end(argp);
+        return;
+    }
+    /* must reset current TB so that interrupts cannot modify the
+       links while we are modifying them */
+    cpu->current_tb = NULL;
+
+    addr &= TARGET_PAGE_MASK;
+    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+
+    for (;;) {
+        int mmu_idx = va_arg(argp, int);
+
+        if (mmu_idx < 0) {
+            break;
+        }
+
+        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
+
+        /* check whether there are vtlb entries that need to be flushed */
+        for (k = 0; k < CPU_VTLB_SIZE; k++) {
+            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
+        }
+    }
+    va_end(argp);
+
+    tb_flush_jmp_cache(cpu, addr);
+}
+
 /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */
 void tlb_protect_code(ram_addr_t ram_addr)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index a6fce04..4933683 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -96,8 +96,46 @@  bool qemu_in_vcpu_thread(void);
 void cpu_reload_memory_map(CPUState *cpu);
 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
 /* cputlb.c */
+/**
+ * tlb_flush_page:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of the specified CPU, for all
+ * MMU indexes.
+ */
 void tlb_flush_page(CPUState *cpu, target_ulong addr);
+/**
+ * tlb_flush:
+ * @cpu: CPU whose TLB should be flushed
+ * @flush_global: ignored
+ *
+ * Flush the entire TLB for the specified CPU.
+ * The flush_global flag is in theory an indicator of whether the whole
+ * TLB should be flushed, or only those entries not marked global.
+ * In practice QEMU does not implement any global/not global flag for
+ * TLB entries, and the argument is ignored.
+ */
 void tlb_flush(CPUState *cpu, int flush_global);
+/**
+ * tlb_flush_page_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @...: list of MMU indexes to flush, terminated by a negative value
+ *
+ * Flush one page from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
+/**
+ * tlb_flush_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @...: list of MMU indexes to flush, terminated by a negative value
+ *
+ * Flush all entries from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx(CPUState *cpu, ...);
 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   hwaddr paddr, int prot,
                   int mmu_idx, target_ulong size);
@@ -115,6 +153,15 @@  static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
 static inline void tlb_flush(CPUState *cpu, int flush_global)
 {
 }
+
+static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
+                                            target_ulong addr, ...)
+{
+}
+
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+{
+}
 #endif
 
 #define CODE_GEN_ALIGN           16 /* must be >= of the size of a icache line */
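
To round out the header documentation above, a hypothetical caller of
the per-page variant might look like this (the function name is
invented for illustration):

    /* Hypothetical example: invalidate one guest page for MMU index 0
     * only. As documented above, the index list ends with a negative
     * value; entries covering the same page under other indexes are
     * left alone.
     */
    static void hyp_flush_page_idx0(CPUState *cs, target_ulong va)
    {
        tlb_flush_page_by_mmuidx(cs, va, 0, -1);
    }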