Patchwork [for-next,4/8] tcg: Add mmu helpers that take a return address argument

login
register
mail settings
Submitter Richard Henderson
Date Aug. 5, 2013, 6:07 p.m.
Message ID <1375726045-20797-5-git-send-email-rth@twiddle.net>
Download mbox | patch
Permalink /patch/264730/
State New
Headers show

Comments

Richard Henderson - Aug. 5, 2013, 6:07 p.m.
Allow the code that tcg generates to be less obtuse, passing in
the return address directly instead of computing it in the helper.

Maintain the old entry point unchanged as an alternate entry point.

Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 include/exec/softmmu_defs.h     | 46 ++++++++++++++++++++++++++---------------
 include/exec/softmmu_template.h | 42 +++++++++++++++++++++++--------------
 2 files changed, 55 insertions(+), 33 deletions(-)
Aurelien Jarno - Aug. 15, 2013, 3:54 p.m.
On Mon, Aug 05, 2013 at 08:07:21AM -1000, Richard Henderson wrote:
> Allow the code that tcg generates to be less obtuse, passing in
> the return address directly instead of computing it in the helper.
> 
> Maintain the old entry point unchanged as an alternate entry point.
> 
> Signed-off-by: Richard Henderson <rth@twiddle.net>
> ---
>  include/exec/softmmu_defs.h     | 46 ++++++++++++++++++++++++++---------------
>  include/exec/softmmu_template.h | 42 +++++++++++++++++++++++--------------
>  2 files changed, 55 insertions(+), 33 deletions(-)
> 
> diff --git a/include/exec/softmmu_defs.h b/include/exec/softmmu_defs.h
> index 1f25e33..e55e717 100644
> --- a/include/exec/softmmu_defs.h
> +++ b/include/exec/softmmu_defs.h
> @@ -9,29 +9,41 @@
>  #ifndef SOFTMMU_DEFS_H
>  #define SOFTMMU_DEFS_H
>  
> +uint8_t helper_ret_ldb_mmu(CPUArchState *env, target_ulong addr,
> +                           int mmu_idx, uintptr_t retaddr);
> +uint16_t helper_ret_ldw_mmu(CPUArchState *env, target_ulong addr,
> +                            int mmu_idx, uintptr_t retaddr);
> +uint32_t helper_ret_ldl_mmu(CPUArchState *env, target_ulong addr,
> +                            int mmu_idx, uintptr_t retaddr);
> +uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
> +                            int mmu_idx, uintptr_t retaddr);
> +
> +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
> +                        int mmu_idx, uintptr_t retaddr);
> +void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
> +                        int mmu_idx, uintptr_t retaddr);
> +void helper_ret_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
> +                        int mmu_idx, uintptr_t retaddr);
> +void helper_ret_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
> +                        int mmu_idx, uintptr_t retaddr);
> +
>  uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
> -void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
> -                    int mmu_idx);
>  uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
> -void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
> -                    int mmu_idx);
>  uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
> -void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
> -                    int mmu_idx);
>  uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
> -void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
> -                    int mmu_idx);
> +
> +void helper_stb_mmu(CPUArchState *env, target_ulong addr,
> +                    uint8_t val, int mmu_idx);
> +void helper_stw_mmu(CPUArchState *env, target_ulong addr,
> +                    uint16_t val, int mmu_idx);
> +void helper_stl_mmu(CPUArchState *env, target_ulong addr,
> +                    uint32_t val, int mmu_idx);
> +void helper_stq_mmu(CPUArchState *env, target_ulong addr,
> +                    uint64_t val, int mmu_idx);
>  
>  uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
> -void helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
> -int mmu_idx);
>  uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
> -void helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
> -                     int mmu_idx);
>  uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
> -void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
> -                     int mmu_idx);
>  uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
> -void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
> -                     int mmu_idx);
> -#endif
> +
> +#endif /* SOFTMMU_DEFS_H */

Why remove these st*_cmmu versions? There might be a good reason, but
it should be indicated in the patch description.

> diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
> index 8584902..7d8bcb5 100644
> --- a/include/exec/softmmu_template.h
> +++ b/include/exec/softmmu_template.h
> @@ -78,15 +78,18 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
>  }
>  
>  /* handle all cases except unaligned access which span two pages */
> +#ifdef SOFTMMU_CODE_ACCESS
> +static
> +#endif
>  DATA_TYPE
> -glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
> -                                         int mmu_idx)
> +glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
> +                                             target_ulong addr, int mmu_idx,
> +                                             uintptr_t retaddr)
>  {
>      DATA_TYPE res;
>      int index;
>      target_ulong tlb_addr;
>      hwaddr ioaddr;
> -    uintptr_t retaddr;
>  
>      /* test if there is match for unaligned or IO access */
>      /* XXX: could done more in memory macro in a non portable way */
> @@ -98,13 +101,11 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
>              /* IO access */
>              if ((addr & (DATA_SIZE - 1)) != 0)
>                  goto do_unaligned_access;
> -            retaddr = GETPC_EXT();
>              ioaddr = env->iotlb[mmu_idx][index];
>              res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
>          } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
>              /* slow unaligned access (it spans two pages or IO) */
>          do_unaligned_access:
> -            retaddr = GETPC_EXT();
>  #ifdef ALIGNED_ONLY
>              do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
>  #endif
> @@ -115,7 +116,6 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
>              uintptr_t addend;
>  #ifdef ALIGNED_ONLY
>              if ((addr & (DATA_SIZE - 1)) != 0) {
> -                retaddr = GETPC_EXT();
>                  do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
>              }
>  #endif
> @@ -124,8 +124,6 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
>                                                  (addr + addend));
>          }
>      } else {
> -        /* the page is not in the TLB : fill it */
> -        retaddr = GETPC_EXT();
>  #ifdef ALIGNED_ONLY
>          if ((addr & (DATA_SIZE - 1)) != 0)
>              do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
> @@ -136,6 +134,14 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
>      return res;
>  }
>  
> +DATA_TYPE
> +glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
> +                                         int mmu_idx)
> +{
> +    return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
> +                                                        GETPC_EXT());
> +}
> +
>  /* handle all unaligned cases */
>  static DATA_TYPE
>  glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
> @@ -214,13 +220,13 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
>      io_mem_write(mr, physaddr, val, 1 << SHIFT);
>  }
>  
> -void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
> -                                              target_ulong addr, DATA_TYPE val,
> -                                              int mmu_idx)
> +void
> +glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
> +                                             target_ulong addr, DATA_TYPE val,
> +                                             int mmu_idx, uintptr_t retaddr)
>  {
>      hwaddr ioaddr;
>      target_ulong tlb_addr;
> -    uintptr_t retaddr;
>      int index;
>  
>      index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
> @@ -231,12 +237,10 @@ void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
>              /* IO access */
>              if ((addr & (DATA_SIZE - 1)) != 0)
>                  goto do_unaligned_access;
> -            retaddr = GETPC_EXT();
>              ioaddr = env->iotlb[mmu_idx][index];
>              glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
>          } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
>          do_unaligned_access:
> -            retaddr = GETPC_EXT();
>  #ifdef ALIGNED_ONLY
>              do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
>  #endif
> @@ -247,7 +251,6 @@ void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
>              uintptr_t addend;
>  #ifdef ALIGNED_ONLY
>              if ((addr & (DATA_SIZE - 1)) != 0) {
> -                retaddr = GETPC_EXT();
>                  do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
>              }
>  #endif
> @@ -257,7 +260,6 @@ void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
>          }
>      } else {
>          /* the page is not in the TLB : fill it */
> -        retaddr = GETPC_EXT();
>  #ifdef ALIGNED_ONLY
>          if ((addr & (DATA_SIZE - 1)) != 0)
>              do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
> @@ -267,6 +269,14 @@ void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
>      }
>  }
>  
> +void
> +glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
> +                                         DATA_TYPE val, int mmu_idx)
> +{
> +    glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
> +                                                 GETPC_EXT());
> +}
> +
>  /* handles all unaligned cases */
>  static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
>                                                     target_ulong addr,

The remaining looks fine.
Richard Henderson - Aug. 15, 2013, 8:45 p.m.
On 08/15/2013 08:54 AM, Aurelien Jarno wrote:
> Why remove these st*_cmmu versions? There might be a good reason, but
> it should be indicated in the patch description.

Though the prototypes existed, the bodies were never generated.  We already
ifdef out the store functions in the template when it is invoked for reading code.


r~
Aurelien Jarno - Aug. 16, 2013, 8:35 a.m.
On Thu, Aug 15, 2013 at 01:45:31PM -0700, Richard Henderson wrote:
> On 08/15/2013 08:54 AM, Aurelien Jarno wrote:
> > Why remove these st*_cmmu versions? There might be a good reason, but
> > it should be indicated in the patch description.
> 
> Though the prototypes existed, the bodies were never generated.  We already
> ifdef out the store functions in the template when it is invoked for reading code.
> 

Fine then. Could you mention that in the commit message?

Patch

diff --git a/include/exec/softmmu_defs.h b/include/exec/softmmu_defs.h
index 1f25e33..e55e717 100644
--- a/include/exec/softmmu_defs.h
+++ b/include/exec/softmmu_defs.h
@@ -9,29 +9,41 @@ 
 #ifndef SOFTMMU_DEFS_H
 #define SOFTMMU_DEFS_H
 
+uint8_t helper_ret_ldb_mmu(CPUArchState *env, target_ulong addr,
+                           int mmu_idx, uintptr_t retaddr);
+uint16_t helper_ret_ldw_mmu(CPUArchState *env, target_ulong addr,
+                            int mmu_idx, uintptr_t retaddr);
+uint32_t helper_ret_ldl_mmu(CPUArchState *env, target_ulong addr,
+                            int mmu_idx, uintptr_t retaddr);
+uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
+                            int mmu_idx, uintptr_t retaddr);
+
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                        int mmu_idx, uintptr_t retaddr);
+void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                        int mmu_idx, uintptr_t retaddr);
+void helper_ret_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                        int mmu_idx, uintptr_t retaddr);
+void helper_ret_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                        int mmu_idx, uintptr_t retaddr);
+
 uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
-                    int mmu_idx);
 uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                    int mmu_idx);
 uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                    int mmu_idx);
 uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                    int mmu_idx);
+
+void helper_stb_mmu(CPUArchState *env, target_ulong addr,
+                    uint8_t val, int mmu_idx);
+void helper_stw_mmu(CPUArchState *env, target_ulong addr,
+                    uint16_t val, int mmu_idx);
+void helper_stl_mmu(CPUArchState *env, target_ulong addr,
+                    uint32_t val, int mmu_idx);
+void helper_stq_mmu(CPUArchState *env, target_ulong addr,
+                    uint64_t val, int mmu_idx);
 
 uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
-int mmu_idx);
 uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                     int mmu_idx);
 uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                     int mmu_idx);
 uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                     int mmu_idx);
-#endif
+
+#endif /* SOFTMMU_DEFS_H */
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index 8584902..7d8bcb5 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -78,15 +78,18 @@  static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
 }
 
 /* handle all cases except unaligned access which span two pages */
+#ifdef SOFTMMU_CODE_ACCESS
+static
+#endif
 DATA_TYPE
-glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
-                                         int mmu_idx)
+glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                             target_ulong addr, int mmu_idx,
+                                             uintptr_t retaddr)
 {
     DATA_TYPE res;
     int index;
     target_ulong tlb_addr;
     hwaddr ioaddr;
-    uintptr_t retaddr;
 
     /* test if there is match for unaligned or IO access */
     /* XXX: could done more in memory macro in a non portable way */
@@ -98,13 +101,11 @@  glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            retaddr = GETPC_EXT();
             ioaddr = env->iotlb[mmu_idx][index];
             res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
             /* slow unaligned access (it spans two pages or IO) */
         do_unaligned_access:
-            retaddr = GETPC_EXT();
 #ifdef ALIGNED_ONLY
             do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
 #endif
@@ -115,7 +116,6 @@  glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
             uintptr_t addend;
 #ifdef ALIGNED_ONLY
             if ((addr & (DATA_SIZE - 1)) != 0) {
-                retaddr = GETPC_EXT();
                 do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
             }
 #endif
@@ -124,8 +124,6 @@  glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                                 (addr + addend));
         }
     } else {
-        /* the page is not in the TLB : fill it */
-        retaddr = GETPC_EXT();
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0)
             do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
@@ -136,6 +134,14 @@  glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
     return res;
 }
 
+DATA_TYPE
+glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
+                                         int mmu_idx)
+{
+    return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
+                                                        GETPC_EXT());
+}
+
 /* handle all unaligned cases */
 static DATA_TYPE
 glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
@@ -214,13 +220,13 @@  static inline void glue(io_write, SUFFIX)(CPUArchState *env,
     io_mem_write(mr, physaddr, val, 1 << SHIFT);
 }
 
-void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
-                                              target_ulong addr, DATA_TYPE val,
-                                              int mmu_idx)
+void
+glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                             target_ulong addr, DATA_TYPE val,
+                                             int mmu_idx, uintptr_t retaddr)
 {
     hwaddr ioaddr;
     target_ulong tlb_addr;
-    uintptr_t retaddr;
     int index;
 
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -231,12 +237,10 @@  void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            retaddr = GETPC_EXT();
             ioaddr = env->iotlb[mmu_idx][index];
             glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
-            retaddr = GETPC_EXT();
 #ifdef ALIGNED_ONLY
             do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
 #endif
@@ -247,7 +251,6 @@  void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
             uintptr_t addend;
 #ifdef ALIGNED_ONLY
             if ((addr & (DATA_SIZE - 1)) != 0) {
-                retaddr = GETPC_EXT();
                 do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
             }
 #endif
@@ -257,7 +260,6 @@  void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = GETPC_EXT();
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0)
             do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
@@ -267,6 +269,14 @@  void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
     }
 }
 
+void
+glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
+                                         DATA_TYPE val, int mmu_idx)
+{
+    glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
+                                                 GETPC_EXT());
+}
+
 /* handles all unaligned cases */
 static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                    target_ulong addr,