[27/31] tcg: Add TCG_CALL_RET_BY_VEC

Message ID: 20221020115242.2301066-28-richard.henderson@linaro.org
State: New
Series: tcg: Support for Int128 with helpers

Commit Message

Richard Henderson Oct. 20, 2022, 11:52 a.m. UTC
This will be used by _WIN64 to return i128.  Not yet used,
because allocation is not yet enabled.  Since this is for
exactly one backend, go ahead and make the few changes needed.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-internal.h        |  1 +
 tcg/tcg.c                 | 17 +++++++++++++++++
 tcg/i386/tcg-target.c.inc | 16 +++++++++++++++-
 3 files changed, 33 insertions(+), 1 deletion(-)
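For background on the commit message above: under the Win64 calling convention as implemented by GCC and clang, a 16-byte integer return value comes back in XMM0 rather than in a register pair or through a hidden pointer, which is the case TCG_CALL_RET_BY_VEC is added to describe.  A minimal sketch, assuming a mingw-w64 style toolchain (illustration only, not part of the patch):

    /* Compile with a Win64-targeting compiler, e.g. x86_64-w64-mingw32-gcc.
     * The 128-bit result is returned in XMM0, matching the ABI case that
     * TCG_CALL_RET_BY_VEC models for helpers returning Int128. */
    __int128 add128(__int128 a, __int128 b)
    {
        return a + b;   /* caller reads the result from XMM0 */
    }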

Patch

diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
index 1fe7bd7d5d..44ef51ca30 100644
--- a/tcg/tcg-internal.h
+++ b/tcg/tcg-internal.h
@@ -38,6 +38,7 @@  typedef enum {
     TCG_CALL_RET_NORMAL,         /* by registers */
     TCG_CALL_RET_NORMAL_4,       /* for i128, by 4 registers */
     TCG_CALL_RET_BY_REF,         /* for i128, by reference as first arg */
+    TCG_CALL_RET_BY_VEC,         /* for i128, by vector register */
 } TCGCallReturnKind;
 
 typedef enum {
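As the commit message notes, nothing selects the new kind yet because i128 allocation is not yet enabled.  For orientation, a sketch of how the i386 backend is expected to opt in; the TCG_TARGET_CALL_RET_I128 selector comes from later patches in this series, and its exact placement in tcg/i386/tcg-target.h is an assumption here, not part of this patch:

    /* Sketch only: _WIN64 returns i128 in a vector register; other hosts
     * keep their existing return convention.  TCG_TARGET_CALL_RET_I128 is
     * assumed from later patches in the series. */
    #if defined(_WIN64)
    # define TCG_TARGET_CALL_RET_I128   TCG_CALL_RET_BY_VEC
    #endif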
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 47f1f906fd..2c7eece27f 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -768,6 +768,8 @@  static void init_call_layout(TCGHelperInfo *info)
                 cum.reg_slot = 1;
             }
             break;
+        case TCG_CALL_RET_BY_VEC:
+            break;
         default:
             g_assert_not_reached();
         }
@@ -4683,6 +4685,21 @@  static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
         }
         break;
 
+    case TCG_CALL_RET_BY_VEC:
+        {
+            TCGTemp *ts = arg_temp(op->args[0]);
+
+            tcg_debug_assert(ts->type == TCG_TYPE_I128);
+            if (!ts->mem_allocated) {
+                temp_allocate_frame(s, ts);
+            }
+            tcg_out_st(s, TCG_TYPE_V128,
+                       tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
+                       ts->mem_base->reg, ts->mem_offset);
+            ts->val_type = TEMP_VAL_MEM;
+        }
+        break;
+
     default:
         g_assert_not_reached();
     }
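To make the new reg-alloc case above concrete: the 128-bit return value arrives in a vector register, is written with a single aligned 16-byte store into the temp's frame slot, and the temp is then marked as living in memory.  A rough plain-C analogue under SSE2, written here only as an illustration (not QEMU code):

    #include <emmintrin.h>   /* SSE2 intrinsics, always available on _WIN64 */
    #include <stdint.h>

    /* Hypothetical stand-in for the 16-byte aligned frame slot that
     * temp_allocate_frame() provides for a TCG_TYPE_I128 temp. */
    typedef struct {
        _Alignas(16) uint64_t lo;
        uint64_t hi;
    } I128Slot;

    static inline void spill_ret_by_vec(__m128i ret, I128Slot *slot)
    {
        /* Aligned store, mirroring the MOVDQA that tcg_out_st() emits for
         * TCG_TYPE_V128; it faults if the slot is not 16-byte aligned. */
        _mm_store_si128((__m128i *)slot, ret);
    }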
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 82c8491152..3b4b66c224 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -120,6 +120,13 @@  static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
             return TCG_REG_EDX;
         }
         break;
+#ifdef _WIN64
+    case TCG_CALL_RET_BY_VEC:
+        if (slot == 0) {
+            return TCG_REG_XMM0;
+        }
+        break;
+#endif
     default:
         break;
     }
@@ -1194,9 +1201,16 @@  static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
          * The gvec infrastructure is asserting that v128 vector loads
          * and stores use a 16-byte aligned offset.  Validate that the
          * final pointer is aligned by using an insn that will SIGSEGV.
+         *
+         * This specific instance is also used by TCG_CALL_RET_BY_VEC,
+         * for _WIN64, which must have SSE2 but may not have AVX.
          */
         tcg_debug_assert(arg >= 16);
-        tcg_out_vex_modrm_offset(s, OPC_MOVDQA_WxVx, arg, 0, arg1, arg2);
+        if (have_avx1) {
+            tcg_out_vex_modrm_offset(s, OPC_MOVDQA_WxVx, arg, 0, arg1, arg2);
+        } else {
+            tcg_out_modrm_offset(s, OPC_MOVDQA_WxVx, arg, arg1, arg2);
+        }
         break;
     case TCG_TYPE_V256:
         /*