Message ID | 1361346746-8511-21-git-send-email-rth@twiddle.net |
---|---|
State | New |
Headers | show |
Hi All, This patch breaks ARM TCG. Fix coming shortly. On Wed, Feb 20, 2013 at 5:52 PM, Richard Henderson <rth@twiddle.net> wrote: > Use add2 if available, otherwise use 64-bit arithmetic. > > Cc: Peter Maydell <peter.maydell@linaro.org> > Signed-off-by: Richard Henderson <rth@twiddle.net> > --- > target-arm/helper.h | 1 - > target-arm/op_helper.c | 15 --------------- > target-arm/translate.c | 39 ++++++++++++++++++++++++++++++++++----- > 3 files changed, 34 insertions(+), 21 deletions(-) > > diff --git a/target-arm/helper.h b/target-arm/helper.h > index bca5a5b..507bb9c 100644 > --- a/target-arm/helper.h > +++ b/target-arm/helper.h > @@ -140,7 +140,6 @@ DEF_HELPER_2(recpe_u32, i32, i32, env) > DEF_HELPER_2(rsqrte_u32, i32, i32, env) > DEF_HELPER_5(neon_tbl, i32, env, i32, i32, i32, i32) > > -DEF_HELPER_3(adc_cc, i32, env, i32, i32) > DEF_HELPER_3(sbc_cc, i32, env, i32, i32) > > DEF_HELPER_3(shl_cc, i32, env, i32, i32) > diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c > index 99610d7..49fc036 100644 > --- a/target-arm/op_helper.c > +++ b/target-arm/op_helper.c > @@ -315,21 +315,6 @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip) > The only way to do that in TCG is a conditional branch, which clobbers > all our temporaries. For now implement these as helper functions. 
*/ > > -uint32_t HELPER(adc_cc)(CPUARMState *env, uint32_t a, uint32_t b) > -{ > - uint32_t result; > - if (!env->CF) { > - result = a + b; > - env->CF = result < a; > - } else { > - result = a + b + 1; > - env->CF = result <= a; > - } > - env->VF = (a ^ b ^ -1) & (a ^ result); > - env->NF = env->ZF = result; > - return result; > -} > - > uint32_t HELPER(sbc_cc)(CPUARMState *env, uint32_t a, uint32_t b) > { > uint32_t result; > diff --git a/target-arm/translate.c b/target-arm/translate.c > index ca6f0af..493448a 100644 > --- a/target-arm/translate.c > +++ b/target-arm/translate.c > @@ -421,6 +421,34 @@ static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1) > tcg_gen_mov_i32(dest, cpu_NF); > } > > +/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */ > +static void gen_adc_CC(TCGv dest, TCGv t0, TCGv t1) > +{ > + TCGv tmp = tcg_temp_new_i32(); > + if (TCG_TARGET_HAS_add2_i32) { > + tcg_gen_movi_i32(tmp, 0); > + tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp); You discard the intermediary add result stored in NF here and add A to B. You have effectively discarded incoming carry here. Regards, Peter > + tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, cpu_CF, t1, tmp); > + } else { > + TCGv_i64 q0 = tcg_temp_new_i64(); > + TCGv_i64 q1 = tcg_temp_new_i64(); > + tcg_gen_extu_i32_i64(q0, t0); > + tcg_gen_extu_i32_i64(q1, t1); > + tcg_gen_add_i64(q0, q0, q1); > + tcg_gen_extu_i32_i64(q1, cpu_CF); > + tcg_gen_add_i64(q0, q0, q1); > + tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0); > + tcg_temp_free_i64(q0); > + tcg_temp_free_i64(q1); > + } > + tcg_gen_mov_i32(cpu_ZF, cpu_NF); > + tcg_gen_xor_i32(cpu_VF, cpu_NF, t0); > + tcg_gen_xor_i32(tmp, t0, t1); > + tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); > + tcg_temp_free_i32(tmp); > + tcg_gen_mov_i32(dest, cpu_NF); > +} > + > /* dest = T0 - T1. 
Compute C, N, V and Z flags */ > static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1) > { > @@ -7073,7 +7101,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) > break; > case 0x05: > if (set_cc) { > - gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2); > + gen_adc_CC(tmp, tmp, tmp2); > } else { > gen_add_carry(tmp, tmp, tmp2); > } > @@ -7914,7 +7942,7 @@ gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCG > break; > case 10: /* adc */ > if (conds) > - gen_helper_adc_cc(t0, cpu_env, t0, t1); > + gen_adc_CC(t0, t0, t1); > else > gen_adc(t0, t1); > break; > @@ -9232,10 +9260,11 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) > } > break; > case 0x5: /* adc */ > - if (s->condexec_mask) > + if (s->condexec_mask) { > gen_adc(tmp, tmp2); > - else > - gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2); > + } else { > + gen_adc_CC(tmp, tmp, tmp2); > + } > break; > case 0x6: /* sbc */ > if (s->condexec_mask) > -- > 1.8.1.2 > >
diff --git a/target-arm/helper.h b/target-arm/helper.h index bca5a5b..507bb9c 100644 --- a/target-arm/helper.h +++ b/target-arm/helper.h @@ -140,7 +140,6 @@ DEF_HELPER_2(recpe_u32, i32, i32, env) DEF_HELPER_2(rsqrte_u32, i32, i32, env) DEF_HELPER_5(neon_tbl, i32, env, i32, i32, i32, i32) -DEF_HELPER_3(adc_cc, i32, env, i32, i32) DEF_HELPER_3(sbc_cc, i32, env, i32, i32) DEF_HELPER_3(shl_cc, i32, env, i32, i32) diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c index 99610d7..49fc036 100644 --- a/target-arm/op_helper.c +++ b/target-arm/op_helper.c @@ -315,21 +315,6 @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip) The only way to do that in TCG is a conditional branch, which clobbers all our temporaries. For now implement these as helper functions. */ -uint32_t HELPER(adc_cc)(CPUARMState *env, uint32_t a, uint32_t b) -{ - uint32_t result; - if (!env->CF) { - result = a + b; - env->CF = result < a; - } else { - result = a + b + 1; - env->CF = result <= a; - } - env->VF = (a ^ b ^ -1) & (a ^ result); - env->NF = env->ZF = result; - return result; -} - uint32_t HELPER(sbc_cc)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t result; diff --git a/target-arm/translate.c b/target-arm/translate.c index ca6f0af..493448a 100644 --- a/target-arm/translate.c +++ b/target-arm/translate.c @@ -421,6 +421,34 @@ static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1) tcg_gen_mov_i32(dest, cpu_NF); } +/* dest = T0 + T1 + CF. 
Compute C, N, V and Z flags */ +static void gen_adc_CC(TCGv dest, TCGv t0, TCGv t1) +{ + TCGv tmp = tcg_temp_new_i32(); + if (TCG_TARGET_HAS_add2_i32) { + tcg_gen_movi_i32(tmp, 0); + tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp); + tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, cpu_CF, t1, tmp); + } else { + TCGv_i64 q0 = tcg_temp_new_i64(); + TCGv_i64 q1 = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(q0, t0); + tcg_gen_extu_i32_i64(q1, t1); + tcg_gen_add_i64(q0, q0, q1); + tcg_gen_extu_i32_i64(q1, cpu_CF); + tcg_gen_add_i64(q0, q0, q1); + tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0); + tcg_temp_free_i64(q0); + tcg_temp_free_i64(q1); + } + tcg_gen_mov_i32(cpu_ZF, cpu_NF); + tcg_gen_xor_i32(cpu_VF, cpu_NF, t0); + tcg_gen_xor_i32(tmp, t0, t1); + tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); + tcg_temp_free_i32(tmp); + tcg_gen_mov_i32(dest, cpu_NF); +} + /* dest = T0 - T1. Compute C, N, V and Z flags */ static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1) { @@ -7073,7 +7101,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s) break; case 0x05: if (set_cc) { - gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2); + gen_adc_CC(tmp, tmp, tmp2); } else { gen_add_carry(tmp, tmp, tmp2); } @@ -7914,7 +7942,7 @@ gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCG break; case 10: /* adc */ if (conds) - gen_helper_adc_cc(t0, cpu_env, t0, t1); + gen_adc_CC(t0, t0, t1); else gen_adc(t0, t1); break; @@ -9232,10 +9260,11 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s) } break; case 0x5: /* adc */ - if (s->condexec_mask) + if (s->condexec_mask) { gen_adc(tmp, tmp2); - else - gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2); + } else { + gen_adc_CC(tmp, tmp, tmp2); + } break; case 0x6: /* sbc */ if (s->condexec_mask)
Use add2 if available, otherwise use 64-bit arithmetic. Cc: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Richard Henderson <rth@twiddle.net> --- target-arm/helper.h | 1 - target-arm/op_helper.c | 15 --------------- target-arm/translate.c | 39 ++++++++++++++++++++++++++++++++++----- 3 files changed, 34 insertions(+), 21 deletions(-)