
[v4,11/33] tcg-aarch64: Handle constant operands to add, sub, and compare

Message ID 1379195690-6509-12-git-send-email-rth@twiddle.net
State: New

Commit Message

Richard Henderson Sept. 14, 2013, 9:54 p.m. UTC
Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 tcg/aarch64/tcg-target.c | 103 ++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 80 insertions(+), 23 deletions(-)

Comments

Claudio Fontana Sept. 16, 2013, 9:02 a.m. UTC | #1
On 14.09.2013 23:54, Richard Henderson wrote:
> Signed-off-by: Richard Henderson <rth@twiddle.net>
> ---
>  tcg/aarch64/tcg-target.c | 103 ++++++++++++++++++++++++++++++++++++-----------
>  1 file changed, 80 insertions(+), 23 deletions(-)
> 
> diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
> index 93badfd..59499fd 100644
> --- a/tcg/aarch64/tcg-target.c
> +++ b/tcg/aarch64/tcg-target.c
> @@ -111,6 +111,9 @@ static inline void patch_reloc(uint8_t *code_ptr, int type,
>      }
>  }
>  
> +#define TCG_CT_CONST_IS32 0x100
> +#define TCG_CT_CONST_AIMM 0x200
> +
>  /* parse target specific constraints */
>  static int target_parse_constraint(TCGArgConstraint *ct,
>                                     const char **pct_str)
> @@ -134,6 +137,12 @@ static int target_parse_constraint(TCGArgConstraint *ct,
>          tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3);
>  #endif
>          break;
> +    case 'w': /* The operand should be considered 32-bit.  */
> +        ct->ct |= TCG_CT_CONST_IS32;
> +        break;
> +    case 'A': /* Valid for arithmetic immediate (positive or negative).  */
> +        ct->ct |= TCG_CT_CONST_AIMM;
> +        break;
>      default:
>          return -1;
>      }
> @@ -143,14 +152,25 @@ static int target_parse_constraint(TCGArgConstraint *ct,
>      return 0;
>  }
>  
> -static inline int tcg_target_const_match(tcg_target_long val,
> -                                         const TCGArgConstraint *arg_ct)
> +static inline bool is_aimm(uint64_t val)
> +{
> +    return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0;
> +}
> +
> +static int tcg_target_const_match(tcg_target_long val,
> +                                  const TCGArgConstraint *arg_ct)
>  {
>      int ct = arg_ct->ct;
>  
>      if (ct & TCG_CT_CONST) {
>          return 1;
>      }
> +    if (ct & TCG_CT_CONST_IS32) {
> +        val = (int32_t)val;
> +    }
> +    if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) {
> +        return 1;
> +    }
>  
>      return 0;
>  }
> @@ -558,11 +578,21 @@ static inline void tcg_out_rotl(TCGContext *s, TCGType ext,
>      tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max));
>  }
>  
> -static inline void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg rn,
> -                               TCGReg rm)
> +static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
> +                        tcg_target_long b, bool const_b)
>  {
> -    /* Using CMP alias SUBS wzr, Wn, Wm */
> -    tcg_fmt_Rdnm(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm);
> +    if (const_b) {
> +        /* Using CMP or CMN aliases.  */
> +        AArch64Insn insn = INSN_SUBSI;
> +        if (b < 0) {
> +            insn = INSN_ADDSI;
> +            b = -b;
> +        }
> +        tcg_fmt_Rdn_aimm(s, insn, ext, TCG_REG_XZR, a, b);
> +    } else {
> +        /* Using CMP alias SUBS wzr, Wn, Wm */
> +        tcg_fmt_Rdnm(s, INSN_SUBS, ext, TCG_REG_XZR, a, b);
> +    }
>  }

What about splitting this into tcg_out_cmp_imm and tcg_out_cmp_reg,
or tcg_out_cmp_i and tcg_out_cmp_r?
The function is an 'if else' anyway with no code sharing, and we would avoid sidestepping the TCGReg type check for b in the _r case, as well as the additional const_b param.
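
A minimal sketch of what that split could look like, reusing only the insn names and emitters already in this patch (the _i/_r names are just the suggestion above, not code from the series):

static void tcg_out_cmp_r(TCGContext *s, TCGType ext, TCGReg a, TCGReg b)
{
    /* Using CMP alias SUBS wzr, Wn, Wm */
    tcg_fmt_Rdnm(s, INSN_SUBS, ext, TCG_REG_XZR, a, b);
}

static void tcg_out_cmp_i(TCGContext *s, TCGType ext, TCGReg a,
                          tcg_target_long b)
{
    /* Using CMP or CMN aliases, depending on the sign of b.  */
    AArch64Insn insn = INSN_SUBSI;
    if (b < 0) {
        insn = INSN_ADDSI;
        b = -b;
    }
    tcg_fmt_Rdn_aimm(s, insn, ext, TCG_REG_XZR, a, b);
}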

>  
>  static inline void tcg_out_cset(TCGContext *s, TCGType ext,
> @@ -760,6 +790,17 @@ static inline void tcg_out_uxt(TCGContext *s, int s_bits,
>      tcg_out_ubfm(s, 0, rd, rn, 0, bits);
>  }
>  
> +static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
> +                            TCGReg rn, int aimm)
> +{
> +    AArch64Insn insn = INSN_ADDI;
> +    if (aimm < 0) {
> +        insn = INSN_SUBI;
> +        aimm = -aimm;
> +    }
> +    tcg_fmt_Rdn_aimm(s, insn, ext, rd, rn, aimm);
> +}
> +

Could this be a tcg_out_arith_imm, in the similar way we would do tcg_out_arith? (tcg_out_arith_reg?)

>  static inline void tcg_out_nop(TCGContext *s)
>  {
>      tcg_out32(s, 0xd503201f);
> @@ -896,7 +937,7 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg,
>                   (is_read ? offsetof(CPUTLBEntry, addr_read)
>                    : offsetof(CPUTLBEntry, addr_write)));
>      /* Perform the address comparison. */
> -    tcg_out_cmp(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, TCG_REG_X3);
> +    tcg_out_cmp(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, TCG_REG_X3, 0);
>      *label_ptr = s->code_ptr;
>      /* If not equal, we jump to the slow path. */
>      tcg_out_goto_cond_noaddr(s, TCG_COND_NE);
> @@ -1157,14 +1198,26 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>                       a0, a1, a2);
>          break;
>  
> -    case INDEX_op_add_i64:
>      case INDEX_op_add_i32:
> -        tcg_fmt_Rdnm(s, INSN_ADD, ext, a0, a1, a2);
> +        a2 = (int32_t)a2;
> +        /* FALLTHRU */

/* fall through */ is less ugly.

> +    case INDEX_op_add_i64:
> +        if (c2) {
> +            tcg_out_addsubi(s, ext, a0, a1, a2);
> +        } else {
> +            tcg_fmt_Rdnm(s, INSN_ADD, ext, a0, a1, a2);
> +        }
>          break;
>  
> -    case INDEX_op_sub_i64:
>      case INDEX_op_sub_i32:
> -        tcg_fmt_Rdnm(s, INSN_SUB, ext, a0, a1, a2);
> +        a2 = (int32_t)a2;
> +        /* FALLTHRU */
> +    case INDEX_op_sub_i64:
> +        if (c2) {
> +            tcg_out_addsubi(s, ext, a0, a1, -a2);
> +        } else {
> +            tcg_fmt_Rdnm(s, INSN_SUB, ext, a0, a1, a2);
> +        }
>          break;
>  
>      case INDEX_op_and_i64:
> @@ -1233,15 +1286,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
>          }
>          break;
>  
> -    case INDEX_op_brcond_i64:
>      case INDEX_op_brcond_i32:
> -        tcg_out_cmp(s, ext, a0, a1);
> +        a1 = (int32_t)a1;
> +        /* FALLTHRU */
> +    case INDEX_op_brcond_i64:
> +        tcg_out_cmp(s, ext, a0, a1, const_args[1]);
>          tcg_out_goto_label_cond(s, a2, args[3]);
>          break;
>  
> -    case INDEX_op_setcond_i64:
>      case INDEX_op_setcond_i32:
> -        tcg_out_cmp(s, ext, a1, a2);
> +        a2 = (int32_t)a2;
> +        /* FALLTHRU */
> +    case INDEX_op_setcond_i64:
> +        tcg_out_cmp(s, ext, a1, a2, c2);
>          tcg_out_cset(s, 0, a0, args[3]);
>          break;
>  
> @@ -1362,10 +1419,10 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
>      { INDEX_op_st32_i64, { "r", "r" } },
>      { INDEX_op_st_i64, { "r", "r" } },
>  
> -    { INDEX_op_add_i32, { "r", "r", "r" } },
> -    { INDEX_op_add_i64, { "r", "r", "r" } },
> -    { INDEX_op_sub_i32, { "r", "r", "r" } },
> -    { INDEX_op_sub_i64, { "r", "r", "r" } },
> +    { INDEX_op_add_i32, { "r", "r", "rwA" } },
> +    { INDEX_op_add_i64, { "r", "r", "rA" } },
> +    { INDEX_op_sub_i32, { "r", "r", "rwA" } },
> +    { INDEX_op_sub_i64, { "r", "r", "rA" } },
>      { INDEX_op_mul_i32, { "r", "r", "r" } },
>      { INDEX_op_mul_i64, { "r", "r", "r" } },
>      { INDEX_op_and_i32, { "r", "r", "r" } },
> @@ -1386,10 +1443,10 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
>      { INDEX_op_rotl_i64, { "r", "r", "ri" } },
>      { INDEX_op_rotr_i64, { "r", "r", "ri" } },
>  
> -    { INDEX_op_brcond_i32, { "r", "r" } },
> -    { INDEX_op_setcond_i32, { "r", "r", "r" } },
> -    { INDEX_op_brcond_i64, { "r", "r" } },
> -    { INDEX_op_setcond_i64, { "r", "r", "r" } },
> +    { INDEX_op_brcond_i32, { "r", "rwA" } },
> +    { INDEX_op_brcond_i64, { "r", "rA" } },
> +    { INDEX_op_setcond_i32, { "r", "r", "rwA" } },
> +    { INDEX_op_setcond_i64, { "r", "r", "rA" } },
>  
>      { INDEX_op_qemu_ld8u, { "r", "l" } },
>      { INDEX_op_qemu_ld8s, { "r", "l" } },
>
Richard Henderson Sept. 16, 2013, 3:45 p.m. UTC | #2
On 09/16/2013 02:02 AM, Claudio Fontana wrote:
>> -static inline void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg rn,
>> -                               TCGReg rm)
>> +static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
>> +                        tcg_target_long b, bool const_b)
>>  {
>> -    /* Using CMP alias SUBS wzr, Wn, Wm */
>> -    tcg_fmt_Rdnm(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm);
>> +    if (const_b) {
>> +        /* Using CMP or CMN aliases.  */
>> +        AArch64Insn insn = INSN_SUBSI;
>> +        if (b < 0) {
>> +            insn = INSN_ADDSI;
>> +            b = -b;
>> +        }
>> +        tcg_fmt_Rdn_aimm(s, insn, ext, TCG_REG_XZR, a, b);
>> +    } else {
>> +        /* Using CMP alias SUBS wzr, Wn, Wm */
>> +        tcg_fmt_Rdnm(s, INSN_SUBS, ext, TCG_REG_XZR, a, b);
>> +    }
>>  }
> 
> What about splitting into tcg_out_cmp_imm and tcg_out_cmp_reg? or
> tcg_out_cmp_i and tcg_out_cmp_r. The function is an 'if else' anyway with no
> code sharing, and we would avoid sidestepping the TCGReg type check for b in
> the _r case, as well as the const_b additional param.

This function is called once from tcg_out_tlb_read and three times from
tcg_out_op.  I just thought that since the majority of uses would have to
perform this 'if' anyway, we might as well have it in the subroutine rather
than force all of the callers to replicate it.
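
For comparison, each of those call sites in tcg_out_op would otherwise have to open-code the choice itself, roughly like this (a sketch only, using the _i/_r names suggested above):

    if (const_args[1]) {
        tcg_out_cmp_i(s, ext, a0, a1);
    } else {
        tcg_out_cmp_r(s, ext, a0, a1);
    }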


>> +static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
>> +                            TCGReg rn, int aimm)
>> +{
>> +    AArch64Insn insn = INSN_ADDI;
>> +    if (aimm < 0) {
>> +        insn = INSN_SUBI;
>> +        aimm = -aimm;
>> +    }
>> +    tcg_fmt_Rdn_aimm(s, insn, ext, rd, rn, aimm);
>> +}
>> +
> 
> Could this be a tcg_out_arith_imm, in the similar way we would do
> tcg_out_arith? (tcg_out_arith_reg?)

Which one gets renamed?  You already proposed tcg_fmt_Rdn_aimm be named
tcg_out_arith_imm.  Now you want tcg_out_addsubi renamed to the same name?

I suppose we could merge the two if we add the S bit as a parameter.  Then
we don't have to distinguish between ADDI and ADDSI, and we could share code
with the comparisons above...
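
A rough sketch of that merged form, assuming the S bit is passed as an extra flag (illustrative only, not part of this patch):

static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
                            TCGReg rn, int aimm, bool set_flags)
{
    /* Negative immediates flip ADD <-> SUB; set_flags selects the
       flag-setting (S) variants.  */
    AArch64Insn insn = set_flags ? INSN_ADDSI : INSN_ADDI;
    if (aimm < 0) {
        insn = set_flags ? INSN_SUBSI : INSN_SUBI;
        aimm = -aimm;
    }
    tcg_fmt_Rdn_aimm(s, insn, ext, rd, rn, aimm);
}

The immediate case of tcg_out_cmp above would then reduce to
tcg_out_addsubi(s, ext, TCG_REG_XZR, a, -b, true).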


r~
Claudio Fontana Sept. 17, 2013, 8:49 a.m. UTC | #3
On 16.09.2013 17:45, Richard Henderson wrote:
> On 09/16/2013 02:02 AM, Claudio Fontana wrote:
>>> -static inline void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg rn,
>>> -                               TCGReg rm)
>>> +static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
>>> +                        tcg_target_long b, bool const_b)
>>>  {
>>> -    /* Using CMP alias SUBS wzr, Wn, Wm */
>>> -    tcg_fmt_Rdnm(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm);
>>> +    if (const_b) {
>>> +        /* Using CMP or CMN aliases.  */
>>> +        AArch64Insn insn = INSN_SUBSI;
>>> +        if (b < 0) {
>>> +            insn = INSN_ADDSI;
>>> +            b = -b;
>>> +        }
>>> +        tcg_fmt_Rdn_aimm(s, insn, ext, TCG_REG_XZR, a, b);
>>> +    } else {
>>> +        /* Using CMP alias SUBS wzr, Wn, Wm */
>>> +        tcg_fmt_Rdnm(s, INSN_SUBS, ext, TCG_REG_XZR, a, b);
>>> +    }
>>>  }
>>
>> What about splitting into tcg_out_cmp_imm and tcg_out_cmp_reg? or
>> tcg_out_cmp_i and tcg_out_cmp_r. The function is an 'if else' anyway with no
>> code sharing, and we would avoid sidestepping the TCGReg type check for b in
>> the _r case, as well as the const_b additional param.
> 
> This function is called once from tcg_out_tlb_read and three times from
> tcg_out_opc.  I just thought since the majority of uses would have to perform
> this if then we might as well have it in the subroutine than force all of the
> callers to replicate it.

Ok, that's a good point.
What about keeping tcg_out_cmp and having it do

if (const_b) {
    tcg_out_cmp_i(s, ext, a, b);
} else {
    tcg_out_cmp_r(s, ext, a, b);
}

so that code that wants to call cmp_r or cmp_i directly can do that?
I realize that at the moment it would benefit only tcg_out_tlb_read's use.
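
For that one caller the call would then read (again just using the names above):

    tcg_out_cmp_r(s, TARGET_LONG_BITS == 64, TCG_REG_X0, TCG_REG_X3);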

>>> +static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
>>> +                            TCGReg rn, int aimm)
>>> +{
>>> +    AArch64Insn insn = INSN_ADDI;
>>> +    if (aimm < 0) {
>>> +        insn = INSN_SUBI;
>>> +        aimm = -aimm;
>>> +    }
>>> +    tcg_fmt_Rdn_aimm(s, insn, ext, rd, rn, aimm);
>>> +}
>>> +
>>
>> Could this be a tcg_out_arith_imm, in the similar way we would do
>> tcg_out_arith? (tcg_out_arith_reg?)
> 
> Which one gets renamed?  You already proposed tcg_fmt_Rdn_aimm be named
> tcg_out_arith_imm.  Now you want tcg_out_addsubi renamed to the same name?

This one confused me, possibly because I don't see the reason for addsubi.
> 
> I suppose we could merge the two if we add the S bit as a parameter.  Then
> we don't have to distinguish between ADDI and ADDIS, and we could share code
> with comparisons above...

I am ok with keeping the two separate, distinguishing in principle a subtraction from a comparison.

Claudio