
[RFC,v3,15/49] softmmu: fixing usage of cpu_st/ld* from helpers

Message ID 20140731125452.1600.364.stgit@PASHA-ISP.novsu.ac.ru
State New

Commit Message

Pavel Dovgalyuk July 31, 2014, 12:54 p.m. UTC
MMU helper functions are called from generated code and from other helper
functions. In both cases they try to obtain the function's return address,
which is used to restore the virtual CPU state.

When an MMU helper is called from another helper function
(such as helper_maskmov_xmm) through a cpu_st* function, the return address
points into that helper. As a result, the CPU state cannot be restored in
the case of an MMU fault.

This patch introduces several inline helpers that capture a return address
pointing to the right place.
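
To illustrate the mechanism, here is a small standalone toy program (not
QEMU code; every name in it is invented, with comments mapping the pieces
onto their QEMU counterparts). It shows why capturing GETRA() in an inline
wrapper, as the helper_inline_ld/st functions below do, yields a return
address from the original caller's frame:

    /* Build with: gcc -O2 getra-demo.c && ./a.out */
    #include <stdio.h>
    #include <stdint.h>

    #define GETRA() ((uintptr_t)__builtin_return_address(0))

    /* Out-of-line callee, in the role of helper_st*_mmu before the patch:
     * it captures its own return address, which lies inside some_helper(),
     * not inside some_helper()'s caller. */
    static void __attribute__((noinline)) mmu_store_old(void)
    {
        printf("old path, RA captured in callee : %p\n", (void *)GETRA());
    }

    /* In the role of helper_call_st*_mmu after the patch: the return
     * address is captured by the caller and passed down explicitly. */
    static void __attribute__((noinline)) mmu_store_new(uintptr_t retaddr)
    {
        printf("new path, RA passed from caller : %p\n", (void *)retaddr);
    }

    /* Inline wrapper, in the role of the new helper_inline_st* functions
     * (which the cpu_st* accessors expand to): because it is inlined,
     * GETRA() is evaluated in some_helper()'s frame and therefore yields
     * some_helper()'s return address. */
    static inline __attribute__((always_inline)) void store_wrapper(void)
    {
        mmu_store_old();          /* prints an address inside some_helper() */
        mmu_store_new(GETRA());   /* prints an address inside main() */
    }

    /* In the role of a target helper such as helper_maskmov_xmm. */
    static void __attribute__((noinline)) some_helper(void)
    {
        store_wrapper();
    }

    int main(void)
    {
        some_helper();   /* in QEMU, this call comes from generated code */
        return 0;
    }

In QEMU terms, the address inside main() corresponds to a host PC inside
the translation block, which is the value cpu_restore_state() needs in
order to rebuild the guest state.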

Signed-off-by: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>
---
 include/exec/cpu_ldst_template.h |   31 +++++++++++++++++++++++++++----
 include/exec/exec-all.h          |   27 +++++++++++++++++++++++++++
 softmmu_template.h               |   18 ++++++++++++++++++
 3 files changed, 72 insertions(+), 4 deletions(-)

Comments

Alex Bennée July 31, 2014, 4:07 p.m. UTC | #1
Pavel Dovgalyuk writes:

> MMU helper functions are called from generated code and from other helper
> functions. In both cases they try to obtain the function's return address,
> which is used to restore the virtual CPU state.
>
> When an MMU helper is called from another helper function
> (such as helper_maskmov_xmm) through a cpu_st* function, the return address
> points into that helper. As a result, the CPU state cannot be restored in
> the case of an MMU fault.
>
> This patch introduces several inline helpers that capture a return address
> pointing to the right place.
>
<snip>

OK, I find it fairly hard to follow all the glue magic (not your fault
;-) we have in QEMU. However, wouldn't it be simpler for the helper
preamble code to ensure the subject PC is updated in the CPU
environment?

Can QEMU only rectify the processor state from a TranslationBlock tc address?
Pavel Dovgalyuk Aug. 26, 2014, 8 a.m. UTC | #2
> From: Alex Bennée [mailto:alex.bennee@linaro.org]
> Pavel Dovgalyuk writes:
> 
> > MMU helper functions are called from generated code and from other helper
> > functions. In both cases they try to obtain the function's return address,
> > which is used to restore the virtual CPU state.
> >
> > When an MMU helper is called from another helper function
> > (such as helper_maskmov_xmm) through a cpu_st* function, the return address
> > points into that helper. As a result, the CPU state cannot be restored in
> > the case of an MMU fault.
> >
> > This patch introduces several inline helpers that capture a return address
> > pointing to the right place.
> >
> <snip>
> 
> OK, I find it fairly hard to follow all the glue magic (not your fault
> ;-) we have in QEMU. However, wouldn't it be simpler for the helper
> preamble code to ensure the subject PC is updated in the CPU
> environment?

Then I'll need to rewrite all the helper calls, or change their structure
by adding code that restores the PC.

> Can QEMU only rectify the processor state from a TranslationBlock tc address?

The current guest PC is not known during execution of the TB. When a memory
access exception occurs, the helpers have to derive the guest PC from the
host one. The host PC must point into the translated block, and this patch
eliminates reading a wrong host PC value during such recovery.
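
For reference, a rough sketch of where that return address ends up
(simplified and paraphrased, not the literal target code;
handle_mmu_fault() and raise_mmu_exception() are stand-ins for the
target-specific routines):

    void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
                  uintptr_t retaddr)
    {
        if (handle_mmu_fault(cs, addr, is_write, mmu_idx) != 0) {
            if (retaddr) {
                /* retaddr is the host PC captured with GETPC()/GETRA().
                 * cpu_restore_state() looks up the TB containing this host
                 * address and re-derives the guest PC and CPU state from it;
                 * if retaddr points into a C helper instead of generated
                 * code, that lookup cannot succeed. */
                cpu_restore_state(cs, retaddr);
            }
            raise_mmu_exception(cs);   /* longjmps back to the execution loop */
        }
    }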

Pavel Dovgalyuk

Patch

diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
index 006093a..2658955 100644
--- a/include/exec/cpu_ldst_template.h
+++ b/include/exec/cpu_ldst_template.h
@@ -61,6 +61,17 @@ 
 #define MMUSUFFIX _mmu
 #endif
 
+/* inline helper ld function */
+
+static inline DATA_TYPE
+glue(glue(helper_inline_ld, SUFFIX), MEMSUFFIX)(CPUArchState *env,
+                                                target_ulong addr,
+                                                int mmu_idx)
+{
+    return glue(glue(helper_call_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
+                                                         GETRA());
+}
+
 /* generic load/store macros */
 
 static inline RES_TYPE
@@ -76,7 +87,8 @@  glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+        res = glue(glue(helper_inline_ld, SUFFIX), MEMSUFFIX)(env, addr,
+                                                              mmu_idx);
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
@@ -97,8 +109,8 @@  glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
-                               MMUSUFFIX)(env, addr, mmu_idx);
+        res = (DATA_STYPE)glue(glue(helper_inline_ld, SUFFIX),
+                               MEMSUFFIX)(env, addr, mmu_idx);
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
@@ -109,6 +121,17 @@  glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
 
 #ifndef SOFTMMU_CODE_ACCESS
 
+/* inline helper st function */
+
+static inline void
+glue(glue(helper_inline_st, SUFFIX), MEMSUFFIX)(CPUArchState *env,
+                                                target_ulong addr,
+                                                DATA_TYPE val, int mmu_idx)
+{
+    glue(glue(helper_call_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
+                                                  GETRA());
+}
+
 /* generic store macro */
 
 static inline void
@@ -124,7 +147,7 @@  glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+        glue(glue(helper_inline_st, SUFFIX), MEMSUFFIX)(env, addr, v, mmu_idx);
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         glue(glue(st, SUFFIX), _raw)(hostaddr, v);
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 5e5d86e..528928f 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -344,6 +344,33 @@  bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
 void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr);
 
+uint8_t helper_call_ldb_cmmu(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr);
+uint16_t helper_call_ldw_cmmu(CPUArchState *env, target_ulong addr,
+                              int mmu_idx, uintptr_t retaddr);
+uint32_t helper_call_ldl_cmmu(CPUArchState *env, target_ulong addr,
+                              int mmu_idx, uintptr_t retaddr);
+uint64_t helper_call_ldq_cmmu(CPUArchState *env, target_ulong addr,
+                              int mmu_idx, uintptr_t retaddr);
+
+uint8_t helper_call_ldb_mmu(CPUArchState *env, target_ulong addr,
+                            int mmu_idx, uintptr_t retaddr);
+uint16_t helper_call_ldw_mmu(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr);
+uint32_t helper_call_ldl_mmu(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr);
+uint64_t helper_call_ldq_mmu(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr);
+
+void helper_call_stb_mmu(CPUArchState *env, target_ulong addr,
+                         uint8_t val, int mmu_idx, uintptr_t retaddr);
+void helper_call_stw_mmu(CPUArchState *env, target_ulong addr,
+                         uint16_t val, int mmu_idx, uintptr_t retaddr);
+void helper_call_stl_mmu(CPUArchState *env, target_ulong addr,
+                         uint32_t val, int mmu_idx, uintptr_t retaddr);
+void helper_call_stq_mmu(CPUArchState *env, target_ulong addr,
+                         uint64_t val, int mmu_idx, uintptr_t retaddr);
+
 #endif
 
 #if defined(CONFIG_USER_ONLY)
diff --git a/softmmu_template.h b/softmmu_template.h
index 5a07f99..1053cf3 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -311,6 +311,15 @@  glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
     return helper_te_ld_name (env, addr, mmu_idx, GETRA());
 }
 
+DATA_TYPE
+glue(glue(helper_call_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                              target_ulong addr,
+                                              int mmu_idx,
+                                              uintptr_t retaddr)
+{
+    return helper_te_ld_name(env, addr, mmu_idx, retaddr);
+}
+
 #ifndef SOFTMMU_CODE_ACCESS
 
 /* Provide signed versions of the load routines as well.  We can of course
@@ -505,6 +514,15 @@  glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
     helper_te_st_name(env, addr, val, mmu_idx, GETRA());
 }
 
+void
+glue(glue(helper_call_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                              target_ulong addr,
+                                              DATA_TYPE val, int mmu_idx,
+                                              uintptr_t retaddr)
+{
+    helper_te_st_name(env, addr, val, mmu_idx, retaddr);
+}
+
 #endif /* !defined(SOFTMMU_CODE_ACCESS) */
 
 #undef READ_ACCESS_TYPE