[v5,2/3] tcg: Add declarations and templates of extended MMU helpers

Message ID 1349786252-12343-3-git-send-email-yeongkyoon.lee@samsung.com
State New

Commit Message

YeongKyoon Lee Oct. 9, 2012, 12:37 p.m. UTC
Add declarations and templates of extended MMU helpers.
An extended helper takes an additional argument: the host address of the
generated code that accesses guest memory. This differs from the return
address of the helper call site, because the helper call sites are located
at the end of a generated code block.

Signed-off-by: Yeongkyoon Lee <yeongkyoon.lee@samsung.com>
---
 softmmu_defs.h     |   39 +++++++++++++++++++++++++++++++++++++++
 softmmu_header.h   |   15 +++++++++++++++
 softmmu_template.h |   41 +++++++++++++++++++++++++++++++++--------
 3 files changed, 87 insertions(+), 8 deletions(-)

--
1.7.5.4
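
For reference, the shape of the change for the byte load helper, excerpted
from the softmmu_defs.h hunk below. The conventional helper recovers its
return address internally via GETPC(), while the extended one is handed the
host code address explicitly:

    /* Conventional declaration: return address recovered via GETPC(). */
    uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);

    /* Extended declaration: host code address passed explicitly as 'ra'. */
    uint8_t ext_helper_ldb_mmu(CPUArchState *env, target_ulong addr,
                               int mmu_idx, uintptr_t ra);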

Comments

Richard Henderson Oct. 9, 2012, 6:36 p.m. UTC | #1
On 10/09/2012 05:37 AM, Yeongkyoon Lee wrote:
> Add declarations and templates of extended MMU helpers.
> An extended helper takes an additional argument: the host address of the
> generated code that accesses guest memory. This differs from the return
> address of the helper call site, because the helper call sites are located
> at the end of a generated code block.
...
> +#ifndef CONFIG_QEMU_LDST_OPTIMIZATION


My feedback from the last round of review is that a version of the
helper functions that take the return address should *always* be available.

There are existing issues in the target-*/foo_helper.c files where,
if a helper touches memory, we do not necessarily handle any
fault properly.  This is less true of system mode than user mode,
but it's still a problem.

The helper.c files ought to be changed to use these new "ra-enabled"
routines and pass GETPC().  That way a fault from a helper gets
treated *exactly* like it would if it were called from TCG generated code.
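
(For illustration, a minimal sketch of the suggested pattern -- helper_foo
and its arguments are hypothetical; ext_helper_ldl_mmu and GETPC() are as
used in this series. A target helper that touches guest memory forwards its
own return address, so a fault unwinds exactly as it would from TCG
generated code:)

    /* Hypothetical target helper: forward our return address so a TLB
       fault is resolved against the guest instruction that called us. */
    uint32_t helper_foo(CPUArchState *env, target_ulong addr, int mmu_idx)
    {
        return ext_helper_ldl_mmu(env, addr, mmu_idx, GETPC());
    }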

Thus, all this conditionalization should vanish.


r~
YeongKyoon Lee Oct. 10, 2012, 11:04 a.m. UTC | #2
On 10/10/2012 03:36, Richard Henderson wrote:
> On 10/09/2012 05:37 AM, Yeongkyoon Lee wrote:
>> Add declarations and templates of extended MMU helpers.
>> An extended helper takes an additional argument: the host address of the
>> generated code that accesses guest memory. This differs from the return
>> address of the helper call site, because the helper call sites are located
>> at the end of a generated code block.
> ...
>> +#ifndef CONFIG_QEMU_LDST_OPTIMIZATION
>
> My feedback from the last round of review is that a version of the
> helper functions that take the return address should *always* be available.
>
> There are existing issues in the target-*/foo_helper.c files where,
> if a helper touches memory, we do not necessarily handle any
> fault properly.  This is less true of system mode than user mode,
> but it's still a problem.
>
> The helper.c files ought to be changed to use these new "ra-enabled"
> routines and pass GETPC().  That way a fault from a helper gets
> treated *exactly* like it would if it were called from TCG generated code.
>
> Thus, all this conditionalization should vanish.

Do you mean that there are call sites in target-*/foo_helper.c which
call the helpers of softmmu_defs.h?
As far as I know, those helpers are not accessed from anywhere other than
the functions in softmmu_header.h, in which the extra argument is handled.

Anyway, I'll try an approach that avoids helper fragmentation, at the cost
of a slight performance degradation of just one instruction on each fast
path.
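
(One way to read this -- an assumption, not something this series implements:
keep a single set of helpers that always take the return address, and let
the C call sites in softmmu_header.h pass GETPC() themselves, so the only
fast-path cost is materializing one extra argument:)

    /* Hypothetical unified declaration; the helper_* / ext_helper_*
       split conditional on CONFIG_QEMU_LDST_OPTIMIZATION would vanish. */
    uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr,
                            int mmu_idx, uintptr_t retaddr);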

>
>
> r~
>

Patch

diff --git a/softmmu_defs.h b/softmmu_defs.h
index 1f25e33..a93adf0 100644
--- a/softmmu_defs.h
+++ b/softmmu_defs.h
@@ -9,6 +9,7 @@ 
 #ifndef SOFTMMU_DEFS_H
 #define SOFTMMU_DEFS_H

+#ifndef CONFIG_QEMU_LDST_OPTIMIZATION
 uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                     int mmu_idx);
@@ -34,4 +35,42 @@  void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
 uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
                      int mmu_idx);
+#else
+/* Extended versions of MMU helpers for qemu_ld/st optimization.
+   The additional argument is a host code address accessing guest memory */
+uint8_t ext_helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                           uintptr_t ra);
+void ext_helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                        int mmu_idx, uintptr_t ra);
+uint16_t ext_helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t ra);
+void ext_helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                        int mmu_idx, uintptr_t ra);
+uint32_t ext_helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t ra);
+void ext_helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                        int mmu_idx, uintptr_t ra);
+uint64_t ext_helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t ra);
+void ext_helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                        int mmu_idx, uintptr_t ra);
+
+uint8_t ext_helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t ra);
+void ext_helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                         int mmu_idx, uintptr_t ra);
+uint16_t ext_helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                             uintptr_t ra);
+void ext_helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                         int mmu_idx, uintptr_t ra);
+uint32_t ext_helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                             uintptr_t ra);
+void ext_helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                         int mmu_idx, uintptr_t ra);
+uint64_t ext_helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx,
+                             uintptr_t ra);
+void ext_helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                         int mmu_idx, uintptr_t ra);
+#endif  /* CONFIG_QEMU_LDST_OPTIMIZATION */
+
 #endif
diff --git a/softmmu_header.h b/softmmu_header.h
index d8d9c81..d18c8f8 100644
--- a/softmmu_header.h
+++ b/softmmu_header.h
@@ -93,7 +93,12 @@  glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+        res = glue(glue(ext_helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
+                                                           (uintptr_t)NULL);
+#else
         res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+#endif
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
@@ -114,8 +119,13 @@  glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+        res = (DATA_STYPE)glue(glue(ext_helper_ld, SUFFIX),
+                               MMUSUFFIX)(env, addr, mmu_idx, (uintptr_t)NULL);
+#else
         res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
                                MMUSUFFIX)(env, addr, mmu_idx);
+#endif
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
@@ -141,7 +151,12 @@  glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+        glue(glue(ext_helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx,
+                                                     (uintptr_t)NULL);
+#else
         glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+#endif
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         glue(glue(st, SUFFIX), _raw)(hostaddr, v);
diff --git a/softmmu_template.h b/softmmu_template.h
index e2490f0..e40c060 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -54,6 +54,14 @@ 
 #define ADDR_READ addr_read
 #endif

+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+/* An extended MMU helper takes one more argument which is
+   a host address of generated code accessing guest memory */
+#define GET_RET_ADDR() ra
+#else
+#define GET_RET_ADDR() GETPC()
+#endif  /* CONFIG_QEMU_LDST_OPTIMIZATION */
+
 static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                         target_ulong addr,
                                                         int mmu_idx,
@@ -91,9 +99,17 @@  static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
 }

 /* handle all cases except unaligned access which span two pages */
+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+DATA_TYPE
+glue(glue(ext_helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                             target_ulong addr,
+                                             int mmu_idx,
+                                             uintptr_t ra)
+#else
 DATA_TYPE
 glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                          int mmu_idx)
+#endif
 {
     DATA_TYPE res;
     int index;
@@ -111,13 +127,13 @@  glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            retaddr = GETPC();
+            retaddr = GET_RET_ADDR();
             ioaddr = env->iotlb[mmu_idx][index];
             res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
             /* slow unaligned access (it spans two pages or IO) */
         do_unaligned_access:
-            retaddr = GETPC();
+            retaddr = GET_RET_ADDR();
 #ifdef ALIGNED_ONLY
             do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
 #endif
@@ -128,7 +144,7 @@  glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
             uintptr_t addend;
 #ifdef ALIGNED_ONLY
             if ((addr & (DATA_SIZE - 1)) != 0) {
-                retaddr = GETPC();
+                retaddr = GET_RET_ADDR();
                 do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
             }
 #endif
@@ -138,7 +154,7 @@  glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = GETPC();
+        retaddr = GET_RET_ADDR();
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0)
             do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
@@ -240,9 +256,17 @@  static inline void glue(io_write, SUFFIX)(CPUArchState *env,
 #endif /* SHIFT > 2 */
 }

+#ifdef CONFIG_QEMU_LDST_OPTIMIZATION
+void glue(glue(ext_helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                                  target_ulong addr,
+                                                  DATA_TYPE val,
+                                                  int mmu_idx,
+                                                  uintptr_t ra)
+#else
 void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                               target_ulong addr, DATA_TYPE val,
                                               int mmu_idx)
+#endif
 {
     target_phys_addr_t ioaddr;
     target_ulong tlb_addr;
@@ -257,12 +281,12 @@  void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            retaddr = GETPC();
+            retaddr = GET_RET_ADDR();
             ioaddr = env->iotlb[mmu_idx][index];
             glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
-            retaddr = GETPC();
+            retaddr = GET_RET_ADDR();
 #ifdef ALIGNED_ONLY
             do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
 #endif
@@ -273,7 +297,7 @@  void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
             uintptr_t addend;
 #ifdef ALIGNED_ONLY
             if ((addr & (DATA_SIZE - 1)) != 0) {
-                retaddr = GETPC();
+                retaddr = GET_RET_ADDR();
                 do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
             }
 #endif
@@ -283,7 +307,7 @@  void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = GETPC();
+        retaddr = GET_RET_ADDR();
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0)
             do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
@@ -352,3 +376,4 @@  static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
 #undef USUFFIX
 #undef DATA_SIZE
 #undef ADDR_READ
+#undef GET_RET_ADDR
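
(A note on the template mechanism, for readers new to these files:
softmmu_template.h is included once per access size with SHIFT defined, and
the glue() macros paste the name fragments together. As a sketch, for the
data-access instantiation with SHIFT == 2 -- where DATA_TYPE is uint32_t,
SUFFIX is l and MMUSUFFIX is _mmu -- the load template above expands to the
following prototype, with every GET_RET_ADDR() in its body becoming the
explicit 'ra' argument under CONFIG_QEMU_LDST_OPTIMIZATION:)

    /* SHIFT == 2 instantiation of the extended load helper. */
    uint32_t ext_helper_ldl_mmu(CPUArchState *env, target_ulong addr,
                                int mmu_idx, uintptr_t ra);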