@@ -2,6 +2,7 @@
* Tiny Code Generator for QEMU
*
* Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
+ * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -22,84 +23,1234 @@
* THE SOFTWARE.
*/
+/* #define DEBUG_S390_TCG */
+
+#ifdef DEBUG_S390_TCG
+#define dprintf(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define dprintf(fmt, ...) \
+ do { } while (0)
+#endif
+
static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ /* XXX many insns can't be used with R0, so we better avoid it for now */
+ /* TCG_REG_R0 */
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
};
static const int tcg_target_call_iarg_regs[] = {
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
};
static const int tcg_target_call_oarg_regs[] = {
+ TCG_REG_R2,
+ TCG_REG_R3,
+};
+
+/* signed/unsigned is handled by using COMPARE and COMPARE LOGICAL,
+ respectively */
+static const uint8_t tcg_cond_to_s390_cond[10] = {
+ [TCG_COND_EQ] = 8,
+ [TCG_COND_LT] = 4,
+ [TCG_COND_LTU] = 4,
+ [TCG_COND_LE] = 8 | 4,
+ [TCG_COND_LEU] = 8 | 4,
+ [TCG_COND_GT] = 2,
+ [TCG_COND_GTU] = 2,
+ [TCG_COND_GE] = 8 | 2,
+ [TCG_COND_GEU] = 8 | 2,
+ [TCG_COND_NE] = 4 | 2 | 1,
+};
+
+#ifdef CONFIG_SOFTMMU
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
};
+#endif
+
+static uint8_t *tb_ret_addr;
static void patch_reloc(uint8_t *code_ptr, int type,
- tcg_target_long value, tcg_target_long addend)
+ tcg_target_long value, tcg_target_long addend)
{
- tcg_abort();
+ uint32_t *code_ptr_32 = (uint32_t*)code_ptr;
+ tcg_target_long code_ptr_tlong = (tcg_target_long)code_ptr;
+
+ switch (type) {
+ case R_390_PC32DBL:
+ *code_ptr_32 = (value - (code_ptr_tlong + addend)) >> 1;
+ break;
+ default:
+ tcg_abort();
+ break;
+ }
}
-static inline int tcg_target_get_call_iarg_regs_count(int flags)
+static int tcg_target_get_call_iarg_regs_count(int flags)
{
- tcg_abort();
- return 0;
+ return sizeof(tcg_target_call_iarg_regs) / sizeof(int);
+}
+
+static void constraint_softmmu(TCGArgConstraint *ct, const char c)
+{
+#ifdef CONFIG_SOFTMMU
+ switch (c) {
+ case 'S': /* qemu_st constraint */
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
+ /* fall through */
+ case 'L': /* qemu_ld constraint */
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
+ break;
+ }
+#endif
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
- tcg_abort();
+ const char *ct_str;
+
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffff);
+ ct_str = *pct_str;
+
+ switch (ct_str[0]) {
+ case 'L': /* qemu_ld constraint */
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
+ constraint_softmmu(ct, 'L');
+ break;
+ case 'S': /* qemu_st constraint */
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
+ constraint_softmmu(ct, 'S');
+ break;
+ case 'R': /* not R0 */
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ break;
+ case 'I':
+ ct->ct &= ~TCG_CT_REG;
+ ct->ct |= TCG_CT_CONST_S16;
+ break;
+ default:
+ break;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+
return 0;
}
/* Test if a constant matches the constraint. */
-static inline int tcg_target_const_match(tcg_target_long val,
- const TCGArgConstraint *arg_ct)
+static int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
{
- tcg_abort();
+ int ct = arg_ct->ct;
+
+ if ((ct & TCG_CT_CONST) ||
+ ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) ||
+ ((ct & TCG_CT_CONST_U12) && val == (val & 0xfff))) {
+ return 1;
+ }
+
return 0;
}
+/* emit load/store (and then some) instructions (E3 prefix) */
+static void tcg_out_e3(TCGContext* s, int op, int r1, int r2, int disp)
+{
+ tcg_out16(s, 0xe300 | (r1 << 4));
+ tcg_out32(s, op | (r2 << 28) | ((disp & 0xfff) << 16) | ((disp >> 12) << 8));
+}
+
+/* emit 64-bit register/register insns (B9 prefix) */
+static void tcg_out_b9(TCGContext* s, int op, int r1, int r2)
+{
+ tcg_out32(s, 0xb9000000 | (op << 16) | (r1 << 4) | r2);
+}
+
+/* emit (mostly) 32-bit register/register insns */
+static void tcg_out_rr(TCGContext* s, int op, int r1, int r2)
+{
+ tcg_out16(s, (op << 8) | (r1 << 4) | r2);
+}
+
+static void tcg_out_a7(TCGContext *s, int op, int r1, int16_t i2)
+{
+ tcg_out32(s, 0xa7000000UL | (r1 << 20) | (op << 16) | ((uint16_t)i2));
+}
+
+/* emit 64-bit shifts (EB prefix) */
+static void tcg_out_sh64(TCGContext* s, int op, int r0, int r1, int r2, int imm)
+{
+ tcg_out16(s, 0xeb00 | (r0 << 4) | r1);
+ tcg_out32(s, op | (r2 << 28) | ((imm & 0xfff) << 16) | ((imm >> 12) << 8));
+}
+
+/* emit 32-bit shifts */
+static void tcg_out_sh32(TCGContext* s, int op, int r0, int r1, int imm)
+{
+ tcg_out32(s, 0x80000000 | (op << 24) | (r0 << 20) | (r1 << 12) | imm);
+}
+
+/* branch to relative address (long) */
+static void tcg_out_brasl(TCGContext* s, int r, tcg_target_long raddr)
+{
+ tcg_out16(s, 0xc005 | (r << 4));
+ tcg_out32(s, raddr >> 1);
+}
+
+/* store 8/16/32 bits */
+static void tcg_out_store(TCGContext* s, int op, int r0, int r1, int off)
+{
+ tcg_out32(s, (op << 24) | (r0 << 20) | (r1 << 12) | off);
+}
+
/* load a register with an immediate value */
-static inline void tcg_out_movi(TCGContext *s, TCGType type,
- int ret, tcg_target_long arg)
+static void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
{
- tcg_abort();
+ if (arg >= -0x8000 && arg < 0x8000) { /* signed immediate load */
+ /* lghi %rret, arg */
+ tcg_out32(s, S390_INS_LGHI | (ret << 20) | (arg & 0xffff));
+ } else if (!(arg & 0xffffffffffff0000UL)) {
+ /* llill %rret, arg */
+ tcg_out32(s, S390_INS_LLILL | (ret << 20) | arg);
+ } else if (!(arg & 0xffffffff00000000UL) || type == TCG_TYPE_I32) {
+ /* llill %rret, arg */
+ tcg_out32(s, S390_INS_LLILL | (ret << 20) | (arg & 0xffff));
+ /* iilh %rret, arg */
+ tcg_out32(s, S390_INS_IILH | (ret << 20) | ((arg & 0xffffffff) >> 16));
+ } else {
+ /* branch over constant and store its address in R13 */
+ tcg_out_brasl(s, TCG_REG_R13, 14);
+ /* 64-bit constant */
+ tcg_out32(s,arg >> 32);
+ tcg_out32(s,arg);
+ /* load constant to ret */
+ tcg_out_e3(s, E3_LG, ret, TCG_REG_R13, 0);
+ }
}
/* load data without address translation or endianness conversion */
-static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
- int arg1, tcg_target_long arg2)
+static void tcg_out_ld(TCGContext *s, TCGType type, int arg,
+ int arg1, tcg_target_long arg2)
{
- tcg_abort();
+ int op;
+
+ dprintf("tcg_out_ld type %d arg %d arg1 %d arg2 %ld\n",
+ type, arg, arg1, arg2);
+
+ op = (type == TCG_TYPE_I32) ? E3_LLGF : E3_LG;
+
+ if (arg2 < -0x80000 || arg2 > 0x7ffff) {
+ /* load the displacement */
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, arg2);
+ /* add the address */
+ tcg_out_b9(s, B9_AGR, TCG_REG_R13, arg1);
+ /* load the data */
+ tcg_out_e3(s, op, arg, TCG_REG_R13, 0);
+ } else {
+ /* load the data */
+ tcg_out_e3(s, op, arg, arg1, arg2);
+ }
}
-static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
- int arg1, tcg_target_long arg2)
+#if defined(CONFIG_SOFTMMU)
+static void tcg_prepare_qemu_ldst(TCGContext* s, int data_reg, int addr_reg,
+ int mem_index, int opc,
+ uint16_t **label2_ptr_p, int is_store)
{
- tcg_abort();
+ int arg0 = TCG_REG_R2;
+ int arg1 = TCG_REG_R3;
+ int arg2 = TCG_REG_R4;
+ int s_bits;
+ uint16_t *label1_ptr;
+
+ if (is_store) {
+ s_bits = opc;
+ } else {
+ s_bits = opc & 3;
+ }
+
+#if TARGET_LONG_BITS == 32
+ tcg_out_b9(s, B9_LLGFR, arg1, addr_reg);
+ tcg_out_b9(s, B9_LLGFR, arg0, addr_reg);
+#else
+ tcg_out_b9(s, B9_LGR, arg1, addr_reg);
+ tcg_out_b9(s, B9_LGR, arg0, addr_reg);
+#endif
+
+ tcg_out_sh64(s, SH64_SRLG, arg1, addr_reg, SH64_REG_NONE,
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+ tcg_out_b9(s, B9_NGR, arg0, TCG_REG_R13);
+
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+ tcg_out_b9(s, B9_NGR, arg1, TCG_REG_R13);
+
+ if (is_store) {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ offsetof(CPUState, tlb_table[mem_index][0].addr_write));
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ offsetof(CPUState, tlb_table[mem_index][0].addr_read));
+ }
+ tcg_out_b9(s, B9_AGR, arg1, TCG_REG_R13);
+
+ tcg_out_b9(s, B9_AGR, arg1, TCG_AREG0);
+
+ tcg_out_e3(s, E3_CG, arg0, arg1, 0);
+
+ label1_ptr = (uint16_t*)s->code_ptr;
+
+ /* je label1 (offset will be patched in later) */
+ tcg_out32(s, 0xa7840000);
+
+ /* call load/store helper */
+#if TARGET_LONG_BITS == 32
+ tcg_out_b9(s, B9_LLGFR, arg0, addr_reg);
+#else
+ tcg_out_b9(s, B9_LGR, arg0, addr_reg);
+#endif
+
+ if (is_store) {
+ tcg_out_b9(s, B9_LGR, arg1, data_reg);
+ tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ (tcg_target_ulong)qemu_st_helpers[s_bits]);
+ tcg_out_rr(s, RR_BASR, TCG_REG_R14, TCG_REG_R13);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ (tcg_target_ulong)qemu_ld_helpers[s_bits]);
+ tcg_out_rr(s, RR_BASR, TCG_REG_R14, TCG_REG_R13);
+
+ /* sign extension */
+ switch (opc) {
+ case LD_INT8:
+ tcg_out_sh64(s, SH64_SLLG, data_reg, arg0, SH64_REG_NONE, 56);
+ tcg_out_sh64(s, SH64_SRAG, data_reg, data_reg, SH64_REG_NONE, 56);
+ break;
+ case LD_INT16:
+ tcg_out_sh64(s, SH64_SLLG, data_reg, arg0, SH64_REG_NONE, 48);
+ tcg_out_sh64(s, SH64_SRAG, data_reg, data_reg, SH64_REG_NONE, 48);
+ break;
+ case LD_INT32:
+ tcg_out_b9(s, B9_LGFR, data_reg, arg0);
+ break;
+ default:
+ /* unsigned -> just copy */
+ tcg_out_b9(s, B9_LGR, data_reg, arg0);
+ break;
+ }
+ }
+
+ /* jump to label2 (end) */
+ *label2_ptr_p = (uint16_t*)s->code_ptr;
+
+ /* bras %r13, label2 */
+ tcg_out32(s, 0xa7d50000);
+
+ /* this is label1, patch branch */
+ *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
+ (unsigned long)label1_ptr) >> 1;
+
+ if (is_store) {
+ tcg_out_e3(s, E3_LG, arg1, arg1, offsetof(CPUTLBEntry, addend) -
+ offsetof(CPUTLBEntry, addr_write));
+ } else {
+ tcg_out_e3(s, E3_LG, arg1, arg1, offsetof(CPUTLBEntry, addend) -
+ offsetof(CPUTLBEntry, addr_read));
+ }
+
+#if TARGET_LONG_BITS == 32
+ /* zero upper 32 bits */
+ tcg_out_b9(s, B9_LLGFR, arg0, addr_reg);
+#else
+ /* just copy */
+ tcg_out_b9(s, B9_LGR, arg0, addr_reg);
+#endif
+ tcg_out_b9(s, B9_AGR, arg0, arg1);
}
-static inline void tcg_out_op(TCGContext *s, int opc,
- const TCGArg *args, const int *const_args)
+static void tcg_finish_qemu_ldst(TCGContext* s, uint16_t *label2_ptr)
{
- tcg_abort();
+ /* patch branch */
+ *(label2_ptr + 1) = ((unsigned long)s->code_ptr -
+ (unsigned long)label2_ptr) >> 1;
+}
+
+#else /* CONFIG_SOFTMMU */
+
+static void tcg_prepare_qemu_ldst(TCGContext* s, int data_reg, int addr_reg,
+                                  int mem_index, int opc,
+                                  uint16_t **label2_ptr_p, int is_store)
+{
+    /* user mode, no address translation: make addr available in R2 (arg0) */
+    tcg_out_b9(s, B9_LGR, TCG_REG_R2, addr_reg);
+}
+
+static void tcg_finish_qemu_ldst(TCGContext* s, uint16_t *label2_ptr)
+{
+}
+
+#endif /* CONFIG_SOFTMMU */
+
+/* load data with address translation (if applicable)
+ and endianness conversion */
+static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
+{
+ int addr_reg, data_reg, mem_index, s_bits;
+ int arg0 = TCG_REG_R2;
+ uint16_t *label2_ptr;
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+
+ s_bits = opc & 3;
+
+ dprintf("tcg_out_qemu_ld opc %d data_reg %d addr_reg %d mem_index %d "
+ "s_bits %d\n", opc, data_reg, addr_reg, mem_index, s_bits);
+
+ tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
+ opc, &label2_ptr, 0);
+
+ switch (opc) {
+ case LD_UINT8:
+ tcg_out_e3(s, E3_LLGC, data_reg, arg0, 0);
+ break;
+ case LD_INT8:
+ tcg_out_e3(s, E3_LGB, data_reg, arg0, 0);
+ break;
+ case LD_UINT16:
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_out_e3(s, E3_LLGH, data_reg, arg0, 0);
+#else
+ /* swapped unsigned halfword load with upper bits zeroed */
+ tcg_out_e3(s, E3_LRVH, data_reg, arg0, 0);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, 0xffffL);
+        tcg_out_b9(s, B9_NGR, data_reg, TCG_REG_R13);
+#endif
+ break;
+ case LD_INT16:
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_out_e3(s, E3_LGH, data_reg, arg0, 0);
+#else
+ /* swapped sign-extended halfword load */
+ tcg_out_e3(s, E3_LRVH, data_reg, arg0, 0);
+ tcg_out_sh64(s, SH64_SLLG, data_reg, data_reg, SH64_REG_NONE, 48);
+ tcg_out_sh64(s, SH64_SRAG, data_reg, data_reg, SH64_REG_NONE, 48);
+#endif
+ break;
+ case LD_UINT32:
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_out_e3(s, E3_LLGF, data_reg, arg0, 0);
+#else
+ /* swapped unsigned int load with upper bits zeroed */
+ tcg_out_e3(s, E3_LRV, data_reg, arg0, 0);
+ tcg_out_b9(s, B9_LLGFR, data_reg, data_reg);
+#endif
+ break;
+ case LD_INT32:
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_out_e3(s, E3_LGF, data_reg, arg0, 0);
+#else
+ /* swapped sign-extended int load */
+ tcg_out_e3(s, E3_LRV, data_reg, arg0, 0);
+ tcg_out_b9(s, B9_LGFR, data_reg, data_reg);
+#endif
+ break;
+ case LD_UINT64:
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_out_e3(s, E3_LG, data_reg, arg0, 0);
+#else
+ tcg_out_e3(s, E3_LRVG, data_reg, arg0, 0);
+#endif
+ break;
+ default:
+ tcg_abort();
+ }
+
+ tcg_finish_qemu_ldst(s, label2_ptr);
+}
+
+static void tcg_out_qemu_st(TCGContext* s, const TCGArg* args, int opc)
+{
+ int addr_reg, data_reg, mem_index, s_bits;
+ uint16_t *label2_ptr;
+ int arg0 = TCG_REG_R2;
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+
+ s_bits = opc;
+
+ dprintf("tcg_out_qemu_st opc %d data_reg %d addr_reg %d mem_index %d "
+ "s_bits %d\n", opc, data_reg, addr_reg, mem_index, s_bits);
+
+ tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
+ opc, &label2_ptr, 1);
+
+ switch (opc) {
+ case LD_UINT8:
+ tcg_out_store(s, ST_STC, data_reg, arg0, 0);
+ break;
+ case LD_UINT16:
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_out_store(s, ST_STH, data_reg, arg0, 0);
+#else
+ tcg_out_e3(s, E3_STRVH, data_reg, arg0, 0);
+#endif
+ break;
+ case LD_UINT32:
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_out_store(s, ST_ST, data_reg, arg0, 0);
+#else
+ tcg_out_e3(s, E3_STRV, data_reg, arg0, 0);
+#endif
+ break;
+ case LD_UINT64:
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_out_e3(s, E3_STG, data_reg, arg0, 0);
+#else
+ tcg_out_e3(s, E3_STRVG, data_reg, arg0, 0);
+#endif
+ break;
+ default:
+ tcg_abort();
+ }
+
+ tcg_finish_qemu_ldst(s, label2_ptr);
+}
+
+static void tcg_out_st(TCGContext *s, TCGType type, int arg,
+ int arg1, tcg_target_long arg2)
+{
+ dprintf("tcg_out_st arg 0x%x arg1 0x%x arg2 0x%lx\n", arg, arg1, arg2);
+
+ if (type == TCG_TYPE_I32) {
+        if (((unsigned long)arg2) > 0xfff) { /* ST: 12-bit unsigned disp only */
+            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, arg2);
+            tcg_out_b9(s, B9_AGR, TCG_REG_R13, arg1);
+            tcg_out_store(s, ST_ST, arg, TCG_REG_R13, 0);
+ } else {
+ tcg_out_store(s, ST_ST, arg, arg1, arg2);
+ }
+ }
+ else {
+ if (((long)arg2) < -0x80000 || ((long)arg2) > 0x7ffff) {
+ tcg_abort();
+ }
+ tcg_out_e3(s, E3_STG, arg, arg1, arg2);
+ }
}
+static void tcg_out_op(TCGContext *s, int opc,
+ const TCGArg *args, const int *const_args)
+{
+ TCGLabel* l;
+ int op;
+ int op2;
+
+ dprintf("0x%x\n", INDEX_op_divu_i32);
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ dprintf("op 0x%x exit_tb 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ /* return value */
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, (unsigned long)tb_ret_addr);
+ /* br %r13 */
+ tcg_out16(s, S390_INS_BR | TCG_REG_R13);
+ break;
+
+ case INDEX_op_goto_tb:
+ dprintf("op 0x%x goto_tb 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ if (s->tb_jmp_offset) {
+ tcg_abort();
+ } else {
+ tcg_target_long off = ((tcg_target_long)(s->tb_next + args[0]) -
+ (tcg_target_long)s->code_ptr) >> 1;
+ if (off > -0x80000000L && off < 0x7fffffffL) {
+ /* load address relative to PC */
+ /* larl %r13, off */
+ tcg_out16(s, S390_INS_LARL | (TCG_REG_R13 << 4));
+ tcg_out32(s, off);
+ } else {
+ /* too far for larl */
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+ (tcg_target_long)(s->tb_next + args[0]));
+ }
+ /* load address stored at s->tb_next + args[0] */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R13, TCG_REG_R13, 0);
+ /* and go there */
+ tcg_out_rr(s, RR_BASR, TCG_REG_R13, TCG_REG_R13);
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+
+ case INDEX_op_call:
+ dprintf("op 0x%x call 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ if (const_args[0]) {
+ tcg_target_long off;
+
+ /* FIXME: + 4? Where did that come from? */
+ off = (args[0] - (tcg_target_long)s->code_ptr + 4) >> 1;
+ if (off > -0x80000000 && off < 0x7fffffff) {
+ /* relative call */
+ tcg_out_brasl(s, TCG_REG_R14, off << 1);
+ /* XXX untested */
+ tcg_abort();
+ } else {
+ /* too far for a relative call, load full address */
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, args[0]);
+ tcg_out_rr(s, RR_BASR, TCG_REG_R14, TCG_REG_R13);
+ }
+ } else {
+ /* call function in register args[0] */
+ tcg_out_rr(s, RR_BASR, TCG_REG_R14, args[0]);
+ }
+ break;
+
+ case INDEX_op_jmp:
+ dprintf("op 0x%x jmp 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ /* XXX */
+ tcg_abort();
+ break;
+
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ dprintf("op 0x%x ld8u_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ if ((long)args[2] > -0x80000 && (long)args[2] < 0x7ffff) {
+ tcg_out_e3(s, E3_LLGC, args[0], args[1], args[2]);
+ } else {
+ /* XXX displacement too large, have to calculate address manually */
+ tcg_abort();
+ }
+ break;
+
+ case INDEX_op_ld8s_i32:
+ dprintf("op 0x%x ld8s_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ /* XXX */
+ tcg_abort();
+ break;
+
+ case INDEX_op_ld16u_i32:
+ dprintf("op 0x%x ld16u_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ if ((long)args[2] > -0x80000 && (long)args[2] < 0x7ffff) {
+ tcg_out_e3(s, E3_LLGH, args[0], args[1], args[2]);
+ } else {
+ /* XXX displacement too large, have to calculate address manually */
+ tcg_abort();
+ }
+ break;
+
+ case INDEX_op_ld16s_i32:
+ dprintf("op 0x%x ld16s_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ /* XXX */
+ tcg_abort();
+ break;
+
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld32u_i64:
+ tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
+ break;
+
+ case INDEX_op_ld32s_i64:
+ if (args[2] < -0x80000 || args[2] > 0x7ffff) {
+ /* load the displacement */
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, args[2]);
+ /* add the address */
+ tcg_out_b9(s, B9_AGR, TCG_REG_R13, args[1]);
+ /* load the data (sign-extended) */
+ tcg_out_e3(s, E3_LGF, args[0], TCG_REG_R13, 0);
+ } else {
+ /* load the data (sign-extended) */
+ tcg_out_e3(s, E3_LGF, args[0], args[1], args[2]);
+ }
+ break;
+
+ case INDEX_op_ld_i64:
+ tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
+ break;
+
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ dprintf("op 0x%x st8_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+        if (((long)args[2]) >= 0 && ((long)args[2]) < 0x1000) { /* STC: 12-bit unsigned disp */
+ tcg_out_store(s, ST_STC, args[0], args[1], args[2]);
+ } else if (((long)args[2]) >= -0x80000 && ((long)args[2]) < 0x80000) {
+ /* FIXME: requires long displacement facility */
+ tcg_out_e3(s, E3_STCY, args[0], args[1], args[2]);
+ tcg_abort();
+ } else {
+ tcg_abort();
+ }
+ break;
+
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ dprintf("op 0x%x st16_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+        if (((long)args[2]) >= 0 && ((long)args[2]) < 0x1000) { /* STH: 12-bit unsigned disp */
+ tcg_out_store(s, ST_STH, args[0], args[1], args[2]);
+ } else if (((long)args[2]) >= -0x80000 && ((long)args[2]) < 0x80000) {
+ /* FIXME: requires long displacement facility */
+ tcg_out_e3(s, E3_STHY, args[0], args[1], args[2]);
+ tcg_abort();
+ } else {
+ tcg_abort();
+ }
+ break;
+
+ case INDEX_op_st_i32:
+ case INDEX_op_st32_i64:
+ tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
+ break;
+
+ case INDEX_op_st_i64:
+ tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
+ break;
+
+ case INDEX_op_mov_i32:
+ dprintf("op 0x%x mov_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ /* XXX */
+ tcg_abort();
+ break;
+
+ case INDEX_op_movi_i32:
+ dprintf("op 0x%x movi_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ /* XXX */
+ tcg_abort();
+ break;
+
+ case INDEX_op_add_i32:
+ if (const_args[2]) {
+ if (args[0] == args[1]) {
+ tcg_out_a7(s, A7_AHI, args[1], args[2]);
+ } else {
+ tcg_out_rr(s, RR_LR, args[0], args[1]);
+ tcg_out_a7(s, A7_AHI, args[0], args[2]);
+ }
+ } else if (args[0] == args[1]) {
+ tcg_out_rr(s, RR_AR, args[1], args[2]);
+ } else if (args[0] == args[2]) {
+ tcg_out_rr(s, RR_AR, args[0], args[1]);
+ } else {
+ tcg_out_rr(s, RR_LR, args[0], args[1]);
+ tcg_out_rr(s, RR_AR, args[0], args[2]);
+ }
+ break;
+
+ case INDEX_op_sub_i32:
+ dprintf("op 0x%x sub_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ if (args[0] == args[1]) {
+ /* sr %ra0/1, %ra2 */
+ tcg_out_rr(s, RR_SR, args[1], args[2]);
+ } else if (args[0] == args[2]) {
+ /* lr %r13, %raa0/2 */
+ tcg_out_rr(s, RR_LR, TCG_REG_R13, args[2]);
+ /* lr %ra0/2, %ra1 */
+ tcg_out_rr(s, RR_LR, args[0], args[1]);
+ /* sr %ra0/2, %r13 */
+ tcg_out_rr(s, RR_SR, args[0], TCG_REG_R13);
+ } else {
+ /* lr %ra0, %ra1 */
+ tcg_out_rr(s, RR_LR, args[0], args[1]);
+ /* sr %ra0, %ra2 */
+ tcg_out_rr(s, RR_SR, args[0], args[2]);
+ }
+ break;
+
+ case INDEX_op_sub_i64:
+ dprintf("op 0x%x sub_i64 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ if (args[0] == args[1]) {
+ /* sgr %ra0/1, %ra2 */
+ tcg_out_b9(s, B9_SGR, args[1], args[2]);
+ } else if (args[0] == args[2]) {
+ /* lgr %r13, %raa0/2 */
+ tcg_out_b9(s, B9_LGR, TCG_REG_R13, args[2]);
+ /* lgr %ra0/2, %ra1 */
+ tcg_out_b9(s, B9_LGR, args[0], args[1]);
+ /* sgr %ra0/2, %r13 */
+ tcg_out_b9(s, B9_SGR, args[0], TCG_REG_R13);
+ } else {
+ /* lgr %ra0, %ra1 */
+ tcg_out_b9(s, B9_LGR, args[0], args[1]);
+ /* sgr %ra0, %ra2 */
+ tcg_out_b9(s, B9_SGR, args[0], args[2]);
+ }
+ break;
+
+ case INDEX_op_add_i64:
+ dprintf("op 0x%x add_i64 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ if (args[0] == args[1]) {
+ tcg_out_b9(s, B9_AGR, args[1], args[2]);
+ } else if (args[0] == args[2]) {
+ tcg_out_b9(s, B9_AGR, args[0], args[1]);
+ } else {
+ tcg_out_b9(s, B9_LGR, args[0], args[1]);
+ tcg_out_b9(s, B9_AGR, args[0], args[2]);
+ }
+ break;
+
+ case INDEX_op_and_i32:
+ op = RR_NR;
+do_logic_i32:
+ if (args[0] == args[1]) {
+ /* xr %ra0/1, %ra2 */
+ tcg_out_rr(s, op, args[1], args[2]);
+ } else if (args[0] == args[2]) {
+ /* xr %ra0/2, %ra1 */
+ tcg_out_rr(s, op, args[0], args[1]);
+ } else {
+ /* lr %ra0, %ra1 */
+ tcg_out_rr(s, RR_LR, args[0], args[1]);
+ /* xr %ra0, %ra2 */
+ tcg_out_rr(s, op, args[0], args[2]);
+ }
+ break;
+
+ case INDEX_op_or_i32:
+ op = RR_OR;
+ goto do_logic_i32;
+
+ case INDEX_op_xor_i32:
+ op = RR_XR;
+ goto do_logic_i32;
+
+ case INDEX_op_and_i64:
+ dprintf("op 0x%x and_i64 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ op = B9_NGR;
+do_logic_i64:
+ if (args[0] == args[1]) {
+ tcg_out_b9(s, op, args[0], args[2]);
+ } else if (args[0] == args[2]) {
+ tcg_out_b9(s, op, args[0], args[1]);
+ } else {
+ tcg_out_b9(s, B9_LGR, args[0], args[1]);
+ tcg_out_b9(s, op, args[0], args[2]);
+ }
+ break;
+
+ case INDEX_op_or_i64:
+ op = B9_OGR;
+ goto do_logic_i64;
+
+ case INDEX_op_xor_i64:
+ op = B9_XGR;
+ goto do_logic_i64;
+
+    case INDEX_op_neg_i32:
+        dprintf("op 0x%x neg_i32 0x%lx 0x%lx 0x%lx\n",
+                opc, args[0], args[1], args[2]);
+        /* FIXME: optimize args[0] != args[1] case */
+        tcg_out_rr(s, RR_LR, TCG_REG_R13, args[1]);
+        /* lghi %ra0, 0 */
+        tcg_out32(s, S390_INS_LGHI | (args[0] << 20));
+        tcg_out_rr(s, RR_SR, args[0], TCG_REG_R13);
+        break;
+
+    case INDEX_op_neg_i64:
+        dprintf("op 0x%x neg_i64 0x%lx 0x%lx 0x%lx\n",
+                opc, args[0], args[1], args[2]);
+        /* FIXME: optimize args[0] != args[1] case */
+        tcg_out_b9(s, B9_LGR, TCG_REG_R13, args[1]);
+        /* lghi %ra0, 0 */
+        tcg_out32(s, S390_INS_LGHI | (args[0] << 20));
+        tcg_out_b9(s, B9_SGR, args[0], TCG_REG_R13);
+        break;
+
+ case INDEX_op_mul_i32:
+ dprintf("op 0x%x mul_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ if (args[0] == args[1])
+ /* msr %ra0/1, %ra2 */
+ tcg_out32(s, S390_INS_MSR | (args[0] << 4) | args[2]);
+ else if (args[0] == args[2])
+ /* msr %ra0/2, %ra1 */
+ tcg_out32(s, S390_INS_MSR | (args[0] << 4) | args[1]);
+ else {
+ tcg_out_rr(s, RR_LR, args[0], args[1]);
+ /* msr %ra0, %ra2 */
+ tcg_out32(s, S390_INS_MSR | (args[0] << 4) | args[2]);
+ }
+ break;
+
+ case INDEX_op_mul_i64:
+ dprintf("op 0x%x mul_i64 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ if (args[0] == args[1]) {
+ tcg_out_b9(s, B9_MSGR, args[0], args[2]);
+ } else if (args[0] == args[2]) {
+ tcg_out_b9(s, B9_MSGR, args[0], args[1]);
+ } else {
+ /* XXX */
+ tcg_abort();
+ }
+ break;
+
+ case INDEX_op_divu_i32:
+ case INDEX_op_remu_i32:
+ dprintf("op 0x%x div/remu_i32 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R12, 0);
+ tcg_out_rr(s, RR_LR, TCG_REG_R13, args[1]);
+ tcg_out_b9(s, B9_DLR, TCG_REG_R12, args[2]);
+ if (opc == INDEX_op_divu_i32) {
+ tcg_out_rr(s, RR_LR, args[0], TCG_REG_R13); /* quotient */
+ } else {
+ tcg_out_rr(s, RR_LR, args[0], TCG_REG_R12); /* remainder */
+ }
+ break;
+
+ case INDEX_op_shl_i32:
+ op = SH32_SLL;
+ op2 = SH64_SLLG;
+ do_shift32:
+ if (const_args[2]) {
+ if (args[0] == args[1]) {
+ tcg_out_sh32(s, op, args[0], SH32_REG_NONE, args[2]);
+ } else {
+ tcg_out_rr(s, RR_LR, args[0], args[1]);
+ tcg_out_sh32(s, op, args[0], SH32_REG_NONE, args[2]);
+ }
+ } else {
+ if (args[0] == args[1]) {
+ tcg_out_sh32(s, op, args[0], args[2], 0);
+ } else {
+ tcg_out_sh64(s, op2, args[0], args[1], args[2], 0);
+ }
+ }
+ break;
+
+ case INDEX_op_shr_i32:
+ op = SH32_SRL;
+ op2 = SH64_SRLG;
+ goto do_shift32;
+
+ case INDEX_op_sar_i32:
+ op = SH32_SRA;
+ op2 = SH64_SRAG;
+ goto do_shift32;
+
+ case INDEX_op_shl_i64:
+ op = SH64_SLLG;
+ do_shift64:
+ if (const_args[2]) {
+ tcg_out_sh64(s, op, args[0], args[1], SH64_REG_NONE, args[2]);
+ } else {
+ tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
+ }
+ break;
+
+ case INDEX_op_shr_i64:
+ op = SH64_SRLG;
+ goto do_shift64;
+
+ case INDEX_op_sar_i64:
+ op = SH64_SRAG;
+ goto do_shift64;
+
+ case INDEX_op_br:
+ dprintf("op 0x%x br 0x%lx 0x%lx 0x%lx\n",
+ opc, args[0], args[1], args[2]);
+ l = &s->labels[args[0]];
+ if (l->has_value) {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, l->u.value);
+ } else {
+ /* larl %r13, ... */
+ tcg_out16(s, S390_INS_LARL | (TCG_REG_R13 << 4));
+ tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, args[0], -2);
+ s->code_ptr += 4;
+ }
+ tcg_out_rr(s, RR_BASR, TCG_REG_R13, TCG_REG_R13);
+ break;
+
+ case INDEX_op_brcond_i64:
+ dprintf("op 0x%x brcond_i64 0x%lx 0x%lx (c %d) 0x%lx\n",
+ opc, args[0], args[1], const_args[1], args[2]);
+ if (args[2] > TCG_COND_GT) {
+ /* unsigned */
+ /* clgr %ra0, %ra1 */
+ tcg_out_b9(s, B9_CLGR, args[0], args[1]);
+ } else {
+ /* signed */
+ /* cgr %ra0, %ra1 */
+ tcg_out_b9(s, B9_CGR, args[0], args[1]);
+ }
+ goto do_brcond;
+
+ case INDEX_op_brcond_i32:
+ dprintf("op 0x%x brcond_i32 0x%lx 0x%lx (c %d) 0x%lx\n",
+ opc, args[0], args[1], const_args[1], args[2]);
+ if (args[2] > TCG_COND_GT) {
+ /* unsigned */
+ /* clr %ra0, %ra1 */
+ tcg_out_rr(s, RR_CLR, args[0], args[1]);
+ } else {
+ /* signed */
+ /* cr %ra0, %ra1 */
+ tcg_out_rr(s, RR_CR, args[0], args[1]);
+ }
+ do_brcond:
+ l = &s->labels[args[3]];
+ if (l->has_value) {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, l->u.value);
+ } else {
+ /* larl %r13, ... */
+ tcg_out16(s, S390_INS_LARL | (TCG_REG_R13 << 4));
+ tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, args[3], -2);
+ s->code_ptr += 4;
+ }
+ /* bcr cond, %r13 */
+ tcg_out16(s, S390_INS_BCR | TCG_REG_R13 |
+ (tcg_cond_to_s390_cond[args[2]] << 4));
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, args, LD_UINT8);
+ break;
+
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, args, LD_INT8);
+ break;
+
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, args, LD_UINT16);
+ break;
+
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, args, LD_INT16);
+ break;
+
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, args, LD_UINT32);
+ break;
+
+ case INDEX_op_qemu_ld32s:
+ tcg_out_qemu_ld(s, args, LD_INT32);
+ break;
+
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld(s, args, LD_UINT64);
+ break;
+
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, args, LD_UINT8);
+ break;
+
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, args, LD_UINT16);
+ break;
+
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, args, LD_UINT32);
+ break;
+
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st(s, args, LD_UINT64);
+ break;
+
+ default:
+ fprintf(stderr,"unimplemented opc 0x%x\n",opc);
+ tcg_abort();
+ }
+}
+
+ static const TCGTargetOpDef s390_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "r", "rI" } },
+ { INDEX_op_sub_i32, { "r", "r", "r" } },
+ { INDEX_op_mul_i32, { "r", "r", "r" } },
+
+ { INDEX_op_div_i32, { "r", "r", "r" } },
+ { INDEX_op_divu_i32, { "r", "r", "r" } },
+ { INDEX_op_rem_i32, { "r", "r", "r" } },
+ { INDEX_op_remu_i32, { "r", "r", "r" } },
+
+ { INDEX_op_and_i32, { "r", "r", "r" } },
+ { INDEX_op_or_i32, { "r", "r", "r" } },
+ { INDEX_op_xor_i32, { "r", "r", "r" } },
+ { INDEX_op_neg_i32, { "r", "r" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "Ri" } },
+ { INDEX_op_shr_i32, { "r", "r", "Ri" } },
+ { INDEX_op_sar_i32, { "r", "r", "Ri" } },
+
+ { INDEX_op_brcond_i32, { "r", "r" } },
+
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld32s, { "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "S", "S" } },
+ { INDEX_op_qemu_st16, { "S", "S" } },
+ { INDEX_op_qemu_st32, { "S", "S" } },
+
+#if defined(__s390x__)
+ { INDEX_op_mov_i64, { "r", "r" } },
+ { INDEX_op_movi_i64, { "r" } },
+
+ { INDEX_op_ld8u_i64, { "r", "r" } },
+ { INDEX_op_ld8s_i64, { "r", "r" } },
+ { INDEX_op_ld16u_i64, { "r", "r" } },
+ { INDEX_op_ld16s_i64, { "r", "r" } },
+ { INDEX_op_ld32u_i64, { "r", "r" } },
+ { INDEX_op_ld32s_i64, { "r", "r" } },
+ { INDEX_op_ld_i64, { "r", "r" } },
+
+ { INDEX_op_st8_i64, { "r", "r" } },
+ { INDEX_op_st16_i64, { "r", "r" } },
+ { INDEX_op_st32_i64, { "r", "r" } },
+ { INDEX_op_st_i64, { "r", "r" } },
+
+ { INDEX_op_qemu_ld64, { "L", "L" } },
+ { INDEX_op_qemu_st64, { "S", "S" } },
+
+ { INDEX_op_add_i64, { "r", "r", "r" } },
+ { INDEX_op_mul_i64, { "r", "r", "r" } },
+ { INDEX_op_sub_i64, { "r", "r", "r" } },
+
+ { INDEX_op_and_i64, { "r", "r", "r" } },
+ { INDEX_op_or_i64, { "r", "r", "r" } },
+ { INDEX_op_xor_i64, { "r", "r", "r" } },
+ { INDEX_op_neg_i64, { "r", "r" } },
+
+ { INDEX_op_shl_i64, { "r", "r", "Ri" } },
+ { INDEX_op_shr_i64, { "r", "r", "Ri" } },
+ { INDEX_op_sar_i64, { "r", "r", "Ri" } },
+
+ { INDEX_op_brcond_i64, { "r", "r" } },
+#endif
+
+ { -1 },
+ };
+
+ /* Backend initialization: advertise the allocatable register set,
+ mark the registers that calls out of generated code may destroy,
+ reserve the backend's scratch registers, and register the
+ per-opcode constraint table. */
void tcg_target_init(TCGContext *s)
{
- /* gets called with KVM */
+ /* fail safe */
+ /* NOTE(review): presumably the softmmu fast path shifts by
+ CPU_TLB_ENTRY_BITS, so the TLB entry size must be an exact power
+ of two -- abort at startup rather than miscompile. Confirm
+ against the qemu_ld/qemu_st emitters. */
+ if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) {
+ tcg_abort();
+ }
+
+ /* all 16 general registers may hold both 32-bit and 64-bit values */
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
+ /* r0-r5 and r14 are treated as call-clobbered; r6-r13 and r15 are
+ preserved by the prologue's stmg/lmg pair below */
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_R0) |
+ (1 << TCG_REG_R1) |
+ (1 << TCG_REG_R2) |
+ (1 << TCG_REG_R3) |
+ (1 << TCG_REG_R4) |
+ (1 << TCG_REG_R5) |
+ (1 << TCG_REG_R14)); /* link register */
+
+ tcg_regset_clear(s->reserved_regs);
+ /* frequently used as a temporary */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);
+ /* another temporary */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12);
+
+ tcg_add_target_add_op_defs(s390_op_defs);
}
+ /* Emit the host-code prologue/epilogue pair that brackets all
+ generated code. Entry: save the callee-saved registers, carve
+ out a stack frame, and branch into the TB. NOTE(review):
+ presumably the TB address arrives in %r2 (first argument
+ register) -- confirm against the caller. Exit: restore the
+ registers and return to the host via %r14. */
void tcg_target_qemu_prologue(TCGContext *s)
{
- /* gets called with KVM */
+ /* stmg %r6,%r15,48(%r15) (save registers) */
+ tcg_out16(s, 0xeb6f);
+ tcg_out32(s, 0xf0300024);
+
+ /* aghi %r15,-160 (stack frame) */
+ tcg_out32(s, 0xa7fbff60);
+
+ /* br %r2 (go to TB) */
+ tcg_out16(s, S390_INS_BR | TCG_REG_R2);
+
+ /* everything below is the epilogue; TBs come back through
+ tb_ret_addr when they exit */
+ tb_ret_addr = s->code_ptr;
+
+ /* lmg %r6,%r15,208(%r15) (restore registers)
+ 208 = 48 + 160: the save area written above, as addressed from
+ the lowered %r15 */
+ tcg_out16(s, 0xeb6f);
+ tcg_out32(s, 0xf0d00004);
+
+ /* br %r14 (return) */
+ tcg_out16(s, S390_INS_BR | TCG_REG_R14);
}
+ /* Register-to-register move: emits "lgr ret,arg". NOTE(review):
+ LGR copies all 64 bits; presumably this is also used for 32-bit
+ moves, where copying the high half is assumed harmless --
+ confirm against the i32 op emitters. */
-static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
+static void tcg_out_mov(TCGContext *s, int ret, int arg)
{
- tcg_abort();
+ tcg_out_b9(s, B9_LGR, ret, arg);
}
+ /* Add an immediate to a register -- still an unimplemented stub:
+ translation aborts if this is ever reached. */
-static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
tcg_abort();
}
@@ -26,6 +26,10 @@
#define TCG_TARGET_REG_BITS 64
#define TCG_TARGET_WORDS_BIGENDIAN
+#define TCG_TARGET_HAS_div_i32
+#define TCG_TARGET_HAS_neg_i32
+#define TCG_TARGET_HAS_neg_i64
+
enum {
TCG_REG_R0 = 0,
TCG_REG_R1,
@@ -47,9 +51,89 @@ enum {
#define TCG_TARGET_NB_REGS 16
/* used for function call generation */
-#define TCG_REG_CALL_STACK TCG_REG_R15
-#define TCG_TARGET_STACK_ALIGN 8
-#define TCG_TARGET_CALL_STACK_OFFSET 0
+#define TCG_REG_CALL_STACK TCG_REG_R15
+#define TCG_TARGET_STACK_ALIGN 8
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+
+/* operand-constraint flags: constant fits in a signed 16-bit /
+   unsigned 12-bit immediate field */
+#define TCG_CT_CONST_S16 0x100
+#define TCG_CT_CONST_U12 0x200
+
+/* function codes for instructions built on the 0xe3 opcode prefix
+   (long-displacement loads/stores; NOTE(review): RXY format per the
+   z/Architecture PoP -- verify each against the emitters) */
+#define E3_LG 0x04
+#define E3_LRVG 0x0f
+#define E3_LGF 0x14
+#define E3_LGH 0x15
+#define E3_LLGF 0x16
+#define E3_LRV 0x1e
+#define E3_LRVH 0x1f
+#define E3_CG 0x20
+#define E3_STG 0x24
+#define E3_STRVG 0x2f
+#define E3_STRV 0x3e
+#define E3_STRVH 0x3f
+#define E3_STHY 0x70
+#define E3_STCY 0x72
+#define E3_LGB 0x77
+#define E3_LLGC 0x90
+#define E3_LLGH 0x91
+
+/* function codes for the 0xb9 opcode prefix (64-bit and logical
+   register-register operations) */
+#define B9_LGR 0x04
+#define B9_AGR 0x08
+#define B9_SGR 0x09
+#define B9_MSGR 0x0c
+#define B9_LGFR 0x14
+#define B9_LLGFR 0x16
+#define B9_CGR 0x20
+#define B9_CLGR 0x21
+#define B9_NGR 0x80
+#define B9_OGR 0x81
+#define B9_XGR 0x82
+#define B9_DLGR 0x87
+#define B9_DLR 0x97
+
+/* two-byte register-register opcodes (32-bit operations) */
+#define RR_BASR 0x0d
+#define RR_NR 0x14
+#define RR_CLR 0x15
+#define RR_OR 0x16
+#define RR_XR 0x17
+#define RR_LR 0x18
+#define RR_CR 0x19
+#define RR_AR 0x1a
+#define RR_SR 0x1b
+
+/* operation codes for the 0xa7 opcode prefix (16-bit signed
+   immediates). NOTE(review): the z/Architecture mnemonic for 0xb is
+   AGHI, not "AHGI" -- the macro name looks transposed; consider
+   renaming together with its users. */
+#define A7_AHI 0xa
+#define A7_AHGI 0xb
+
+/* 64-bit shift operation codes; the "REG_NONE" value selects a pure
+   immediate shift amount */
+#define SH64_REG_NONE 0x00 /* use immediate only (not R0!) */
+#define SH64_SRAG 0x0a
+#define SH64_SRLG 0x0c
+#define SH64_SLLG 0x0d
+
+/* 32-bit shift operation codes */
+#define SH32_REG_NONE 0x00 /* use immediate only (not R0!) */
+#define SH32_SRL 0x08
+#define SH32_SLL 0x09
+#define SH32_SRA 0x0a
+
+/* store opcodes: halfword, byte, word */
+#define ST_STH 0x40
+#define ST_STC 0x42
+#define ST_ST 0x50
+
+/* load size/sign codes handed to the load emitters; OR in LD_SIGNED
+   to request sign extension */
+#define LD_SIGNED 0x04
+#define LD_UINT8 0x00
+#define LD_INT8 (LD_UINT8 | LD_SIGNED)
+#define LD_UINT16 0x01
+#define LD_INT16 (LD_UINT16 | LD_SIGNED)
+#define LD_UINT32 0x02
+#define LD_INT32 (LD_UINT32 | LD_SIGNED)
+#define LD_UINT64 0x03
+#define LD_INT64 (LD_UINT64 | LD_SIGNED)
+
+/* complete instruction templates: register/immediate fields are ORed
+   into the low bits by the emitters (e.g. S390_INS_BR | TCG_REG_R14) */
+#define S390_INS_BCR 0x0700
+#define S390_INS_BR (S390_INS_BCR | 0x00f0)
+#define S390_INS_IILH 0xa5020000
+#define S390_INS_LLILL 0xa50f0000
+#define S390_INS_LGHI 0xa7090000
+#define S390_INS_MSR 0xb2520000
+#define S390_INS_LARL 0xc000
enum {
/* Note: must be synced with dyngen-exec.h */
We already have stubs for a TCG target on S390, but we were missing the code that would actually generate instructions. So I took Uli's patch, cleaned it up, and am presenting it to you again :-). I hope I caught all of the odd coding-style and cosmetic issues, but if you still spot one, feel free to nag about it. Signed-off-by: Alexander Graf <agraf@suse.de> CC: Uli Hecht <uli@suse.de> --- tcg/s390/tcg-target.c | 1203 +++++++++++++++++++++++++++++++++++++++++++++++- tcg/s390/tcg-target.h | 90 ++++- 2 files changed, 1264 insertions(+), 29 deletions(-)