[3/3] tcg: Mask shift counts to avoid undefined behaviour

Message ID 1395157702-23108-4-git-send-email-rth@twiddle.net
State New

Commit Message

Richard Henderson March 18, 2014, 3:48 p.m. UTC
TCG now requires unspecified behaviour rather than a potential crash, so
bring the C shifts within the letter of the law.

Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 tci.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
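
The change addresses a mismatch between TCG semantics and C semantics: TCG only leaves the result of an over-wide shift unspecified, but in C, shifting a 32-bit value by 32 or more (or a 64-bit value by 64 or more) is undefined behaviour. Masking the count with width - 1 keeps the C expression defined while still producing some value, which is all TCG requires. A minimal standalone sketch of the idiom (shl32_masked is a hypothetical helper for illustration only, not part of this patch):

    #include <inttypes.h>
    #include <stdio.h>

    /* Shifting a uint32_t by 32 or more is undefined behaviour in C.
     * Masking the count with 31 keeps the shift well defined. */
    static uint32_t shl32_masked(uint32_t v, uint32_t count)
    {
        return v << (count & 31);
    }

    int main(void)
    {
        /* A raw shift by 33 would be undefined; with the mask it shifts by 1. */
        printf("%" PRIu32 "\n", shl32_masked(UINT32_C(1), 33));   /* prints 2 */
        return 0;
    }

The 64-bit cases below follow the same pattern with a 63 mask.
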
Patch

diff --git a/tci.c b/tci.c
index 0202ed9..6523ab8 100644
--- a/tci.c
+++ b/tci.c
@@ -669,32 +669,32 @@  uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, t1 << t2);
+            tci_write_reg32(t0, t1 << (t2 & 31));
             break;
         case INDEX_op_shr_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, t1 >> t2);
+            tci_write_reg32(t0, t1 >> (t2 & 31));
             break;
         case INDEX_op_sar_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, ((int32_t)t1 >> t2));
+            tci_write_reg32(t0, ((int32_t)t1 >> (t2 & 31)));
             break;
 #if TCG_TARGET_HAS_rot_i32
         case INDEX_op_rotl_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, rol32(t1, t2));
+            tci_write_reg32(t0, rol32(t1, t2 & 31));
             break;
         case INDEX_op_rotr_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, ror32(t1, t2));
+            tci_write_reg32(t0, ror32(t1, t2 & 31));
             break;
 #endif
 #if TCG_TARGET_HAS_deposit_i32
@@ -936,32 +936,32 @@  uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(&tb_ptr);
             t2 = tci_read_ri64(&tb_ptr);
-            tci_write_reg64(t0, t1 << t2);
+            tci_write_reg64(t0, t1 << (t2 & 63));
             break;
         case INDEX_op_shr_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(&tb_ptr);
             t2 = tci_read_ri64(&tb_ptr);
-            tci_write_reg64(t0, t1 >> t2);
+            tci_write_reg64(t0, t1 >> (t2 & 63));
             break;
         case INDEX_op_sar_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(&tb_ptr);
             t2 = tci_read_ri64(&tb_ptr);
-            tci_write_reg64(t0, ((int64_t)t1 >> t2));
+            tci_write_reg64(t0, ((int64_t)t1 >> (t2 & 63)));
             break;
 #if TCG_TARGET_HAS_rot_i64
         case INDEX_op_rotl_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(&tb_ptr);
             t2 = tci_read_ri64(&tb_ptr);
-            tci_write_reg64(t0, rol64(t1, t2));
+            tci_write_reg64(t0, rol64(t1, t2 & 63));
             break;
         case INDEX_op_rotr_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(&tb_ptr);
             t2 = tci_read_ri64(&tb_ptr);
-            tci_write_reg64(t0, ror64(t1, t2));
+            tci_write_reg64(t0, ror64(t1, t2 & 63));
             break;
 #endif
 #if TCG_TARGET_HAS_deposit_i64