diff mbox

tcg: fix segfault when MO_UNALN is set

Message ID 1432644395-45207-1-git-send-email-yongbok.kim@imgtec.com
State New
Headers show

Commit Message

Yongbok Kim May 26, 2015, 12:46 p.m. UTC
MO_UNALN caused segfaults when it was set: the memop value indexed out of
the bounds of the load/store function pointer arrays in
tcg_out_qemu_{ld,st}_slow_path() or their equivalents.
This patch introduces a new inline function, get_memalign, used to extract
the alignment information from the combined op/idx parameter. get_memop now
returns the value without the alignment bits, avoiding the segfaults.

Signed-off-by: Yongbok Kim <yongbok.kim@imgtec.com>
---
 softmmu_template.h |   24 ++++++++++++------------
 tcg/tcg.h          |   14 +++++++++++++-
 2 files changed, 25 insertions(+), 13 deletions(-)

Comments

Richard Henderson May 26, 2015, 3:49 p.m. UTC | #1
On 05/26/2015 05:46 AM, Yongbok Kim wrote:
> MO_UNALN caused segfaults when it is set, it reached out of boundary of
> load/ store function pointer arrays in tcg_out_qemu_{ld,st}_slow_path()
> or its equivalents.

I'd like to know more about this crash please.  Where does it happen?


r~
Yongbok Kim May 26, 2015, 3:57 p.m. UTC | #2
On 26/05/2015 16:49, Richard Henderson wrote:
> On 05/26/2015 05:46 AM, Yongbok Kim wrote:
>> MO_UNALN caused segfaults when it is set, it reached out of boundary of
>> load/ store function pointer arrays in tcg_out_qemu_{ld,st}_slow_path()
>> or its equivalents.
> 
> I'd like to know more about this crash please.  Where does it happen?
> 
> 
> r~
> 

tcg/i386/tcg-target.c

> static void * const qemu_st_helpers[16] = {
>     [MO_UB]   = helper_ret_stb_mmu,
>     [MO_LEUW] = helper_le_stw_mmu,
>     [MO_LEUL] = helper_le_stl_mmu,
>     [MO_LEQ]  = helper_le_stq_mmu,
>     [MO_BEUW] = helper_be_stw_mmu,
>     [MO_BEUL] = helper_be_stl_mmu,
>     [MO_BEQ]  = helper_be_stq_mmu,
> };

...

> static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
> {

>     TCGMemOp opc = get_memop(oi);

>     /* "Tail call" to the helper, with the return address back inline.  */
>     tcg_out_push(s, retaddr);
>     tcg_out_jmp(s, qemu_st_helpers[opc]);

Here is the crashing point...

Regards,
Yongbok
Richard Henderson May 26, 2015, 4:10 p.m. UTC | #3
On 05/26/2015 08:57 AM, Yongbok Kim wrote:
> On 26/05/2015 16:49, Richard Henderson wrote:
>> On 05/26/2015 05:46 AM, Yongbok Kim wrote:
>>> MO_UNALN caused segfaults when it is set, it reached out of boundary of
>>> load/ store function pointer arrays in tcg_out_qemu_{ld,st}_slow_path()
>>> or its equivalents.
>>
>> I'd like to know more about this crash please.  Where does it happen?
>>
>>
>> r~
>>
> 
> tcg/i386/tcg-target.c
> 
>> static void * const qemu_st_helpers[16] = {
>>     [MO_UB]   = helper_ret_stb_mmu,
>>     [MO_LEUW] = helper_le_stw_mmu,
>>     [MO_LEUL] = helper_le_stl_mmu,
>>     [MO_LEQ]  = helper_le_stq_mmu,
>>     [MO_BEUW] = helper_be_stw_mmu,
>>     [MO_BEUL] = helper_be_stl_mmu,
>>     [MO_BEQ]  = helper_be_stq_mmu,
>> };
> 
> ...
> 
>> static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
>> {
> 
>>     TCGMemOp opc = get_memop(oi);
> 
>>     /* "Tail call" to the helper, with the return address back inline.  */
>>     tcg_out_push(s, retaddr);
>>     tcg_out_jmp(s, qemu_st_helpers[opc]);
> 
> Here is the crashing point...

Ah, I think I'd masked things in there.  But clearly not.

Your patch has the nice property of not having to modify all the backends, but
it has the unfortunate property that make* and get* become asymmetrical.

I'll try to come up with an alternative soon, and we'll see how messy it gets.


r~
diff mbox

Patch

diff --git a/softmmu_template.h b/softmmu_template.h
index 4778473..1a1de4a 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -184,7 +184,7 @@  WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     if ((addr & TARGET_PAGE_MASK)
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            && get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
@@ -218,7 +218,7 @@  WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        if (get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
@@ -237,7 +237,7 @@  WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
 
     /* Handle aligned access or unaligned access in the same page.  */
     if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        && get_memalign(oi) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
     }
@@ -271,7 +271,7 @@  WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     if ((addr & TARGET_PAGE_MASK)
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            && get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
@@ -305,7 +305,7 @@  WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        if (get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
@@ -324,7 +324,7 @@  WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
 
     /* Handle aligned access or unaligned access in the same page.  */
     if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        && get_memalign(oi) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
     }
@@ -399,7 +399,7 @@  void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            && get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
@@ -430,7 +430,7 @@  void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        if (get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
@@ -450,7 +450,7 @@  void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle aligned access or unaligned access in the same page.  */
     if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        && get_memalign(oi) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
@@ -479,7 +479,7 @@  void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            && get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
@@ -510,7 +510,7 @@  void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        if (get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
@@ -530,7 +530,7 @@  void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle aligned access or unaligned access in the same page.  */
     if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        && get_memalign(oi) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 8098f82..849c51a 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -863,7 +863,19 @@  static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
  */
 static inline TCGMemOp get_memop(TCGMemOpIdx oi)
 {
-    return oi >> 4;
+    return (oi >> 4) & ~MO_AMASK;
+}
+
+/**
+ * get_memalign
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory alignment information from the combined value.
+ */
+
+static inline TCGMemOp get_memalign(TCGMemOpIdx oi)
+{
+    return (oi >> 4) & MO_AMASK;
 }
 
 /**