[21/23] tcg/tci: Remove dead code for TCG_TARGET_HAS_div2_*

Message ID 20210128082331.196801-22-richard.henderson@linaro.org
State New
Series TCI fixes and cleanups

Commit Message

Richard Henderson Jan. 28, 2021, 8:23 a.m. UTC
We do not simultaneously support div and div2 -- it's one
or the other.  TCI is already using div, so remove div2.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c                | 12 ------------
 tcg/tci/tcg-target.c.inc | 16 ----------------
 2 files changed, 28 deletions(-)
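
[For context, the div/div2 split is decided once per backend in
tcg/tcg-op.c: the frontend emits a div opcode, a div2 opcode, or a
helper call depending on which TCG_TARGET_HAS_* flag the backend
defines, which is why a backend never uses both at once. A
paraphrased sketch of that expansion, from memory rather than a
verbatim copy of the upstream code:

    void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    {
        if (TCG_TARGET_HAS_div_i32) {
            /* Backend implements 32/32 division directly. */
            tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
        } else if (TCG_TARGET_HAS_div2_i32) {
            /* Backend implements x86-style 64/32 division:
               sign-extend the dividend into a high half first. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_sari_i32(t0, arg1, 31);
            tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
            tcg_temp_free_i32(t0);
        } else {
            /* No hardware support declared: call a runtime helper. */
            gen_helper_div_i32(ret, arg1, arg2);
        }
    }
]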

Comments

Alex Bennée Jan. 28, 2021, 3:36 p.m. UTC | #1
Richard Henderson <richard.henderson@linaro.org> writes:

> We do not simultaneously support div and div2 -- it's one
> or the other.  TCI is already using div, so remove div2.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Stefan Weil Jan. 28, 2021, 3:39 p.m. UTC | #2
On 28.01.21 at 09:23, Richard Henderson wrote:

> We do not simultaneously support div and div2 -- it's one
> or the other.  TCI is already using div, so remove div2.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   tcg/tci.c                | 12 ------------
>   tcg/tci/tcg-target.c.inc | 16 ----------------
>   2 files changed, 28 deletions(-)
>
> diff --git a/tcg/tci.c b/tcg/tci.c
> index 2ce67a8fd3..32931ea611 100644
> --- a/tcg/tci.c
> +++ b/tcg/tci.c
> @@ -647,7 +647,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
>               t2 = tci_read_ri32(regs, &tb_ptr);
>               tci_write_reg(regs, t0, t1 * t2);
>               break;
> -#if TCG_TARGET_HAS_div_i32
>           case INDEX_op_div_i32:
>               t0 = *tb_ptr++;
>               t1 = tci_read_ri32(regs, &tb_ptr);
> @@ -672,12 +671,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
>               t2 = tci_read_ri32(regs, &tb_ptr);
>               tci_write_reg(regs, t0, t1 % t2);
>               break;
> -#elif TCG_TARGET_HAS_div2_i32
> -        case INDEX_op_div2_i32:
> -        case INDEX_op_divu2_i32:
> -            TODO();
> -            break;
> -#endif
>           case INDEX_op_and_i32:
>               t0 = *tb_ptr++;
>               t1 = tci_read_ri32(regs, &tb_ptr);
> @@ -903,11 +896,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
>           case INDEX_op_remu_i64:
>               TODO();
>               break;
> -#elif TCG_TARGET_HAS_div2_i64
> -        case INDEX_op_div2_i64:
> -        case INDEX_op_divu2_i64:
> -            TODO();
> -            break;
>   #endif
>           case INDEX_op_and_i64:
>               t0 = *tb_ptr++;
> diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
> index a60fa524a4..842807ff2e 100644
> --- a/tcg/tci/tcg-target.c.inc
> +++ b/tcg/tci/tcg-target.c.inc
> @@ -71,15 +71,10 @@ static const TCGTargetOpDef tcg_target_op_defs[] = {
>       { INDEX_op_add_i32, { R, RI, RI } },
>       { INDEX_op_sub_i32, { R, RI, RI } },
>       { INDEX_op_mul_i32, { R, RI, RI } },
> -#if TCG_TARGET_HAS_div_i32
>       { INDEX_op_div_i32, { R, R, R } },
>       { INDEX_op_divu_i32, { R, R, R } },
>       { INDEX_op_rem_i32, { R, R, R } },
>       { INDEX_op_remu_i32, { R, R, R } },
> -#elif TCG_TARGET_HAS_div2_i32
> -    { INDEX_op_div2_i32, { R, R, "0", "1", R } },
> -    { INDEX_op_divu2_i32, { R, R, "0", "1", R } },
> -#endif
>       /* TODO: Does R, RI, RI result in faster code than R, R, RI?
>          If both operands are constants, we can optimize. */
>       { INDEX_op_and_i32, { R, RI, RI } },
> @@ -156,9 +151,6 @@ static const TCGTargetOpDef tcg_target_op_defs[] = {
>       { INDEX_op_divu_i64, { R, R, R } },
>       { INDEX_op_rem_i64, { R, R, R } },
>       { INDEX_op_remu_i64, { R, R, R } },
> -#elif TCG_TARGET_HAS_div2_i64
> -    { INDEX_op_div2_i64, { R, R, "0", "1", R } },
> -    { INDEX_op_divu2_i64, { R, R, "0", "1", R } },
>   #endif
>       { INDEX_op_and_i64, { R, RI, RI } },
>   #if TCG_TARGET_HAS_andc_i64
> @@ -705,10 +697,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
>       case INDEX_op_remu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
>           TODO();
>           break;
> -    case INDEX_op_div2_i64:     /* Optional (TCG_TARGET_HAS_div2_i64). */
> -    case INDEX_op_divu2_i64:    /* Optional (TCG_TARGET_HAS_div2_i64). */
> -        TODO();
> -        break;
>       case INDEX_op_brcond_i64:
>           tcg_out_r(s, args[0]);
>           tcg_out_ri64(s, const_args[1], args[1]);
> @@ -748,10 +736,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
>           tcg_out_ri32(s, const_args[1], args[1]);
>           tcg_out_ri32(s, const_args[2], args[2]);
>           break;
> -    case INDEX_op_div2_i32:     /* Optional (TCG_TARGET_HAS_div2_i32). */
> -    case INDEX_op_divu2_i32:    /* Optional (TCG_TARGET_HAS_div2_i32). */
> -        TODO();
> -        break;
>   #if TCG_TARGET_REG_BITS == 32
>       case INDEX_op_add2_i32:
>       case INDEX_op_sub2_i32:


One of the ideas behind TCI is that it should ideally support any
subset of TCG opcodes used by an existing TCG backend or a newly
written one.

This only requires copying the TCG_TARGET_HAS... defines.

So this patch should keep the preprocessor conditionals, and the TODO 
statements should be replaced by real code (or by #error for the moment).

Patch 22 should also keep the preprocessor conditionals, see my comment 
there.

Thanks,

Stefan
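
[For concreteness, a condensed sketch of the shape Stefan is
suggesting for the tci.c interpreter loop -- hypothetical, not code
from this series: keep the conditional structure, but make an
unimplemented configuration fail at build time instead of aborting
at runtime.

    #if TCG_TARGET_HAS_div_i32
            case INDEX_op_div_i32:
                /* ... existing div/divu/rem/remu handling ... */
                break;
    #elif TCG_TARGET_HAS_div2_i32
            case INDEX_op_div2_i32:
            case INDEX_op_divu2_i32:
                /* Unimplemented: a configuration that enables div2
                   should fail here at build time rather than hit a
                   runtime TODO() abort. */
    #error "TCI: INDEX_op_div2_* not implemented"
    #endif
]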
Richard Henderson Jan. 28, 2021, 5:56 p.m. UTC | #3
On 1/28/21 5:39 AM, Stefan Weil wrote:
>> -    case INDEX_op_div2_i32:     /* Optional (TCG_TARGET_HAS_div2_i32). */
>> -    case INDEX_op_divu2_i32:    /* Optional (TCG_TARGET_HAS_div2_i32). */
>> -        TODO();
>> -        break;
>>   #if TCG_TARGET_REG_BITS == 32
>>       case INDEX_op_add2_i32:
>>       case INDEX_op_sub2_i32:
> 
> 
> One of the ideas for TCI is that it should ideally support any subset of TCG
> opcodes which is used by an existing TCG backend or a newly written backend.
> 
> This only requires copying the TCG_TARGET_HAS... defines.
> 
> So this patch should keep the preprocessor conditionals, and the TODO
> statements have to be replaced by code (or #error for the moment).


If that's what you are after -- easily configuring different setups -- you
still don't need the ifdefs.  Just change the TCG_TARGET_HAS_* definitions in
the header file.  The ifdefs are not necessary: the guarded cases become
unreachable code if you zero out the HAS flag, but they still compile.
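
[A minimal illustration of that pattern, using a hypothetical flag
value -- the real definitions live in tcg/tci/tcg-target.h:

    #include <stdint.h>

    #define TCG_TARGET_HAS_div_i32 0   /* flip to 1 to enable */

    int32_t emulate_div(int32_t t1, int32_t t2)
    {
        /* The flag is a compile-time constant, so the compiler
           folds this branch away when it is 0; the body is still
           parsed and type-checked, just never emitted. */
        if (TCG_TARGET_HAS_div_i32) {
            return t1 / t2;
        }
        return 0;   /* opcode never generated in this configuration */
    }
]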

As for div2, there is no code for it in tci.c, and therefore there should be
no code for it in tcg-target.c.inc.  Those bits of code are a pair and must
be added together.


r~
Patch

diff --git a/tcg/tci.c b/tcg/tci.c
index 2ce67a8fd3..32931ea611 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -647,7 +647,6 @@  uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t2 = tci_read_ri32(regs, &tb_ptr);
             tci_write_reg(regs, t0, t1 * t2);
             break;
-#if TCG_TARGET_HAS_div_i32
         case INDEX_op_div_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(regs, &tb_ptr);
@@ -672,12 +671,6 @@  uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t2 = tci_read_ri32(regs, &tb_ptr);
             tci_write_reg(regs, t0, t1 % t2);
             break;
-#elif TCG_TARGET_HAS_div2_i32
-        case INDEX_op_div2_i32:
-        case INDEX_op_divu2_i32:
-            TODO();
-            break;
-#endif
         case INDEX_op_and_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(regs, &tb_ptr);
@@ -903,11 +896,6 @@  uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         case INDEX_op_remu_i64:
             TODO();
             break;
-#elif TCG_TARGET_HAS_div2_i64
-        case INDEX_op_div2_i64:
-        case INDEX_op_divu2_i64:
-            TODO();
-            break;
 #endif
         case INDEX_op_and_i64:
             t0 = *tb_ptr++;
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index a60fa524a4..842807ff2e 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -71,15 +71,10 @@  static const TCGTargetOpDef tcg_target_op_defs[] = {
     { INDEX_op_add_i32, { R, RI, RI } },
     { INDEX_op_sub_i32, { R, RI, RI } },
     { INDEX_op_mul_i32, { R, RI, RI } },
-#if TCG_TARGET_HAS_div_i32
     { INDEX_op_div_i32, { R, R, R } },
     { INDEX_op_divu_i32, { R, R, R } },
     { INDEX_op_rem_i32, { R, R, R } },
     { INDEX_op_remu_i32, { R, R, R } },
-#elif TCG_TARGET_HAS_div2_i32
-    { INDEX_op_div2_i32, { R, R, "0", "1", R } },
-    { INDEX_op_divu2_i32, { R, R, "0", "1", R } },
-#endif
     /* TODO: Does R, RI, RI result in faster code than R, R, RI?
        If both operands are constants, we can optimize. */
     { INDEX_op_and_i32, { R, RI, RI } },
@@ -156,9 +151,6 @@  static const TCGTargetOpDef tcg_target_op_defs[] = {
     { INDEX_op_divu_i64, { R, R, R } },
     { INDEX_op_rem_i64, { R, R, R } },
     { INDEX_op_remu_i64, { R, R, R } },
-#elif TCG_TARGET_HAS_div2_i64
-    { INDEX_op_div2_i64, { R, R, "0", "1", R } },
-    { INDEX_op_divu2_i64, { R, R, "0", "1", R } },
 #endif
     { INDEX_op_and_i64, { R, RI, RI } },
 #if TCG_TARGET_HAS_andc_i64
@@ -705,10 +697,6 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
     case INDEX_op_remu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
         TODO();
         break;
-    case INDEX_op_div2_i64:     /* Optional (TCG_TARGET_HAS_div2_i64). */
-    case INDEX_op_divu2_i64:    /* Optional (TCG_TARGET_HAS_div2_i64). */
-        TODO();
-        break;
     case INDEX_op_brcond_i64:
         tcg_out_r(s, args[0]);
         tcg_out_ri64(s, const_args[1], args[1]);
@@ -748,10 +736,6 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         tcg_out_ri32(s, const_args[1], args[1]);
         tcg_out_ri32(s, const_args[2], args[2]);
         break;
-    case INDEX_op_div2_i32:     /* Optional (TCG_TARGET_HAS_div2_i32). */
-    case INDEX_op_divu2_i32:    /* Optional (TCG_TARGET_HAS_div2_i32). */
-        TODO();
-        break;
 #if TCG_TARGET_REG_BITS == 32
     case INDEX_op_add2_i32:
     case INDEX_op_sub2_i32: