
[v2,5/6] Do constant folding for shift operations.

Message ID 1307616344-27161-6-git-send-email-batuzovk@ispras.ru
State New

Commit Message

Kirill Batuzov June 9, 2011, 10:45 a.m. UTC
Perform constant folding for the SHR, SHL, SAR, ROTR and ROTL operations.

Signed-off-by: Kirill Batuzov <batuzovk@ispras.ru>
---
 tcg/optimize.c |   86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 86 insertions(+), 0 deletions(-)

Comments

Richard Henderson June 10, 2011, 6:02 p.m. UTC | #1
On 06/09/2011 03:45 AM, Kirill Batuzov wrote:
> +    case INDEX_op_shl_i32:
> +#if TCG_TARGET_REG_BITS == 64
> +        y &= 0xffffffff;
> +    case INDEX_op_shl_i64:
> +#endif
> +        return x << y;
> +
> +    case INDEX_op_shr_i32:
> +#if TCG_TARGET_REG_BITS == 64
> +        x &= 0xffffffff;
> +        y &= 0xffffffff;
> +    case INDEX_op_shr_i64:
> +#endif
> +        /* Assuming TCGArg to be unsigned */
> +        return x >> y;

Don't assume when you've got a uint64_t type readily available.
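
For illustration, a version with the casts made explicit might look like
this (a sketch only, assuming the shift count is already in range; not the
code that was eventually committed):

    case INDEX_op_shr_i32:
        /* The cast both truncates to 32 bits and makes the
           logical (unsigned) shift explicit.  */
        return (uint32_t)x >> (uint32_t)y;

    #if TCG_TARGET_REG_BITS == 64
    case INDEX_op_shr_i64:
        return (uint64_t)x >> (uint64_t)y;
    #endif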

> +    case INDEX_op_sar_i32:
> +#if TCG_TARGET_REG_BITS == 64
> +        x &= 0xffffffff;
> +        y &= 0xffffffff;
> +#endif
> +        return (int32_t)x >> (int32_t)y;

Masks are redundant with the casts.
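
That is, the 32-bit case could be reduced to the casts alone, along these
lines (sketch):

    case INDEX_op_sar_i32:
        /* The int32_t casts already discard the upper bits,
           so no explicit masking is needed.  */
        return (int32_t)x >> (int32_t)y;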

> +    case INDEX_op_rotr_i32:
> +#if TCG_TARGET_REG_BITS == 64
> +        x &= 0xffffffff;
> +        y &= 0xffffffff;
> +#endif
> +        x = (x << (32 - y)) | (x >> y);

Have you looked to see if this gets recognized as a rotate
by the compiler?  I suspect that it will if you use a cast
to uint32_t here, but not if it is left as a 64-bit TCGArg.
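
For reference, the usual branch-free form that compilers do recognize as a
rotate works on a uint32_t value with a masked count; a hypothetical rewrite
might be:

    case INDEX_op_rotr_i32:
        {
            uint32_t v = (uint32_t)x;
            unsigned r = (uint32_t)y & 31;
            /* Rotate right; masking the count also keeps both shift
               amounts in range when the rotate count is zero.  */
            return (v >> r) | (v << (-r & 31));
        }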

> +#if TCG_TARGET_REG_BITS == 64
> +    case INDEX_op_rotl_i64:
> +        x = (x << y) | (x >> (64 - y));
> +        return x;
> +#endif

Likewise it's probably best to cast to uint64_t here.
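
Similarly, a uint64_t version with a masked count might look like this
(again only a sketch):

    #if TCG_TARGET_REG_BITS == 64
    case INDEX_op_rotl_i64:
        {
            uint64_t v = (uint64_t)x;
            unsigned r = y & 63;
            /* Rotate left in uint64_t; the masked count keeps the
               shifts well defined for a zero rotate as well.  */
            return (v << r) | (v >> (-r & 63));
        }
    #endif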


r~

Patch

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 0bd8c78..653f399 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -95,6 +95,11 @@  static int op_bits(int op)
     case INDEX_op_and_i32:
     case INDEX_op_or_i32:
     case INDEX_op_xor_i32:
+    case INDEX_op_shl_i32:
+    case INDEX_op_shr_i32:
+    case INDEX_op_sar_i32:
+    case INDEX_op_rotl_i32:
+    case INDEX_op_rotr_i32:
         return 32;
 #if TCG_TARGET_REG_BITS == 64
     case INDEX_op_mov_i64:
@@ -104,6 +109,11 @@  static int op_bits(int op)
     case INDEX_op_and_i64:
     case INDEX_op_or_i64:
     case INDEX_op_xor_i64:
+    case INDEX_op_shl_i64:
+    case INDEX_op_shr_i64:
+    case INDEX_op_sar_i64:
+    case INDEX_op_rotl_i64:
+    case INDEX_op_rotr_i64:
         return 64;
 #endif
     default:
@@ -177,6 +187,62 @@  static TCGArg do_constant_folding_2(int op, TCGArg x, TCGArg y)
 #endif
         return x ^ y;
 
+    case INDEX_op_shl_i32:
+#if TCG_TARGET_REG_BITS == 64
+        y &= 0xffffffff;
+    case INDEX_op_shl_i64:
+#endif
+        return x << y;
+
+    case INDEX_op_shr_i32:
+#if TCG_TARGET_REG_BITS == 64
+        x &= 0xffffffff;
+        y &= 0xffffffff;
+    case INDEX_op_shr_i64:
+#endif
+        /* Assuming TCGArg to be unsigned */
+        return x >> y;
+
+    case INDEX_op_sar_i32:
+#if TCG_TARGET_REG_BITS == 64
+        x &= 0xffffffff;
+        y &= 0xffffffff;
+#endif
+        return (int32_t)x >> (int32_t)y;
+
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_sar_i64:
+        return (int64_t)x >> (int64_t)y;
+#endif
+
+    case INDEX_op_rotr_i32:
+#if TCG_TARGET_REG_BITS == 64
+        x &= 0xffffffff;
+        y &= 0xffffffff;
+#endif
+        x = (x << (32 - y)) | (x >> y);
+        return x;
+
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_rotr_i64:
+        x = (x << (64 - y)) | (x >> y);
+        return x;
+#endif
+
+    case INDEX_op_rotl_i32:
+#if TCG_TARGET_REG_BITS == 64
+        x &= 0xffffffff;
+        y &= 0xffffffff;
+#endif
+        x = (x << y) | (x >> (32 - y));
+        return x;
+
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_rotl_i64:
+        x = (x << y) | (x >> (64 - y));
+        return x;
+#endif
+
     default:
         fprintf(stderr,
                 "Unrecognized operation %d in do_constant_folding.\n", op);
@@ -246,8 +312,18 @@  static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
             }
             /* Fallthrough */
         case INDEX_op_sub_i32:
+        case INDEX_op_shl_i32:
+        case INDEX_op_shr_i32:
+        case INDEX_op_sar_i32:
+        case INDEX_op_rotl_i32:
+        case INDEX_op_rotr_i32:
 #if TCG_TARGET_REG_BITS == 64
         case INDEX_op_sub_i64:
+        case INDEX_op_shl_i64:
+        case INDEX_op_shr_i64:
+        case INDEX_op_sar_i64:
+        case INDEX_op_rotl_i64:
+        case INDEX_op_rotr_i64:
 #endif
             if (temps[args[1]].state == TCG_TEMP_CONST) {
                 /* Proceed with possible constant folding. */
@@ -377,6 +453,11 @@  static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
         case INDEX_op_add_i32:
         case INDEX_op_sub_i32:
         case INDEX_op_mul_i32:
+        case INDEX_op_shl_i32:
+        case INDEX_op_shr_i32:
+        case INDEX_op_sar_i32:
+        case INDEX_op_rotl_i32:
+        case INDEX_op_rotr_i32:
 #if TCG_TARGET_REG_BITS == 64
         case INDEX_op_and_i64:
         case INDEX_op_or_i64:
@@ -384,6 +465,11 @@  static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
         case INDEX_op_add_i64:
         case INDEX_op_sub_i64:
         case INDEX_op_mul_i64:
+        case INDEX_op_shl_i64:
+        case INDEX_op_shr_i64:
+        case INDEX_op_sar_i64:
+        case INDEX_op_rotl_i64:
+        case INDEX_op_rotr_i64:
 #endif
             if (temps[args[1]].state == TCG_TEMP_CONST
                 && temps[args[2]].state == TCG_TEMP_CONST) {