@@ -134,13 +134,19 @@ elif check_define _ARCH_PPC ; then
else
cpu="ppc"
fi
+elif check_define __SH4__ ; then
+ if check_define __BIG_ENDIAN__ ; then
+ cpu="sh4eb"
+ else
+ cpu="sh4"
+ fi
else
cpu=`uname -m`
fi
target_list=""
case "$cpu" in
- alpha|cris|ia64|m68k|microblaze|mips|mips64|ppc|ppc64|sparc64)
+ alpha|cris|ia64|m68k|microblaze|mips|mips64|ppc|ppc64|sh4|sh4eb|sparc64)
cpu="$cpu"
;;
i386|i486|i586|i686|i86pc|BePC)
@@ -1878,6 +1884,9 @@ case "$cpu" in
armv4b|armv4l)
ARCH=arm
;;
+ sh4|sh4eb)
+ ARCH=sh4
+ ;;
*)
echo "Unsupported CPU = $cpu"
exit 1
@@ -1190,6 +1190,21 @@ int cpu_signal_handler(int host_signum,
&uc->uc_sigmask, puc);
}
+#elif defined(__SH4__)
+
+int cpu_signal_handler(int host_signum, void *pinfo,
+ void *puc)
+{
+ siginfo_t *info = pinfo;
+ struct ucontext *uc = puc;
+ greg_t pc = uc->uc_mcontext.pc;
+ int is_write;
+
+ /* XXX: compute is_write */
+ is_write = 0;
+ return handle_cpu_signal(pc, (unsigned long)info->si_addr,
+ is_write, &uc->uc_sigmask, puc);
+}
#else
#error host CPU specific signal handler needed
@@ -106,6 +106,10 @@ extern int printf(const char *, ...);
#define AREG0 "r7"
#define AREG1 "r4"
#define AREG2 "r5"
+#elif defined(__SH4__)
+#define AREG0 "r11"
+#define AREG1 "r12"
+#define AREG2 "r13"
#else
#error unsupported CPU
#endif
@@ -114,7 +114,7 @@ static inline int tlb_set_page(CPUState
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif
-#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__)
+#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__) || defined(__SH4__)
#define USE_DIRECT_JUMP
#endif
@@ -189,6 +189,13 @@ extern int code_gen_max_blocks;
#if defined(_ARCH_PPC)
extern void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
+#elif defined(__SH4__)
+static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
+{
+ /* patch the branch destination */
+ *(uint32_t *)jmp_addr = addr;
+ /* FIXME: need to handle caches */
+}
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
@@ -0,0 +1,864 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ * Copyright (c) 2009 Magnus Damm
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#if (defined(TARGET_WORDS_BIGENDIAN) && !defined(HOST_WORDS_BIGENDIAN)) || \
+ (!defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN))
+static const int swap_endian = 1;
+#else
+static const int swap_endian = 0;
+#endif
+
+#ifndef CONFIG_USER_ONLY
+#define GUEST_BASE 0
+#endif
+
+#if TARGET_LONG_BITS != 32
+#error Only 32-bit targets supported at this point!
+#endif
+
+static uint8_t *tb_ret_addr;
+
+#ifndef NDEBUG
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "r0",
+ "r1",
+ "r2",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "r9",
+ "r10",
+ "r11",
+ "r12",
+ "r13",
+ "r14",
+ "r15",
+};
+#endif
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+};
+
+static const int tcg_target_call_iarg_regs[] = {
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+};
+
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_R0,
+ TCG_REG_R1,
+};
+
+static const int tcg_target_callee_save_regs[] = {
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+};
+
+#define OPC_ADD 0x300c
+#define OPC_AND 0x2009
+#define OPC_MULL 0x000f
+#define OPC_NEG 0x600b
+#define OPC_NOP 0x0009
+#define OPC_OR 0x200b
+#define OPC_RTS 0x000b
+#define OPC_SHAD 0x400c
+#define OPC_SHLD 0x400d
+#define OPC_SUB 0x3008
+#define OPC_XOR 0x200a
+
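+/* All SH-4 instructions are 16 bits wide. The helpers below assemble
+ * them: register "n" goes in bits 11:8, register "m" in bits 7:4, and
+ * "d"/"i" are displacement/immediate fields in the low bits. */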
+#define OPC_MN(opc, m, n) ((opc) | ((n) << 8) | ((m) << 4))
+#define OPC_MDN(opc, m, d, n) ((opc) | ((n) << 8) | ((m) << 4) | (d))
+#define OPC_N(opc, n) ((opc) | ((n) << 8))
+#define OPC_NI(opc, n, i) ((opc) | ((n) << 8) | (i))
+#define OPC_D(opc, d) ((opc) | (d))
+
+#define ADD(m, n) OPC_MN(OPC_ADD, m, n) /* ADD Rm,Rn */
+#define AND(m, n) OPC_MN(OPC_AND, m, n) /* AND Rm,Rn */
+#define BF(d) OPC_D(0x8b00, d) /* BF disp [relative] */
+#define BT(d) OPC_D(0x8900, d) /* BT disp [relative] */
+#define BRA(d) OPC_D(0xa000, d) /* BRA disp [relative] */
+#define CMPEQ(m, n) OPC_MN(0x3000, m, n) /* CMP/EQ Rm,Rn */
+#define CMPGE(m, n) OPC_MN(0x3003, m, n) /* CMP/GE Rm,Rn */
+#define CMPGT(m, n) OPC_MN(0x3007, m, n) /* CMP/GT Rm,Rn */
+#define CMPHI(m, n) OPC_MN(0x3006, m, n) /* CMP/HI Rm,Rn */
+#define CMPHS(m, n) OPC_MN(0x3002, m, n) /* CMP/HS Rm,Rn */
+#define EXTSB(m, n) OPC_MN(0x600e, m, n) /* EXTS.B Rm,Rn [sign extend] */
+#define EXTSW(m, n) OPC_MN(0x600f, m, n) /* EXTS.W Rm,Rn [sign extend] */
+#define EXTUB(m, n) OPC_MN(0x600c, m, n) /* EXTU.B Rm,Rn [zero extend] */
+#define EXTUW(m, n) OPC_MN(0x600d, m, n) /* EXTU.W Rm,Rn [zero extend] */
+#define JMP(n) OPC_N(0x402b, n) /* JMP @Rn */
+#define JSR(n) OPC_N(0x400b, n) /* JSR @Rn */
+#define LDSMPR(n) OPC_N(0x4026, n) /* LDS.L @Rm+,PR */
+#define MOV(m, n) OPC_MN(0x6003, m, n) /* MOV Rm,Rn */
+#define MOVI(i, n) OPC_NI(0xe000, n, i) /* MOV #imm, Rn */
+#define MOVWI(d, n) OPC_NI(0x9000, n, d) /* MOV.W @(disp,PC),Rn */
+#define MOVLI(d, n) OPC_NI(0xd000, n, d) /* MOV.L @(disp,PC),Rn */
+#define MOVBS(m, n) OPC_MN(0x2000, m, n) /* MOV.B Rm,@Rn */
+#define MOVWS(m, n) OPC_MN(0x2001, m, n) /* MOV.W Rm,@Rn */
+#define MOVLS(m, n) OPC_MN(0x2002, m, n) /* MOV.L Rm,@Rn */
+#define MOVBL(m, n) OPC_MN(0x6000, m, n) /* MOV.B @Rm,Rn [sign extend] */
+#define MOVWL(m, n) OPC_MN(0x6001, m, n) /* MOV.W @Rm,Rn [sign extend] */
+#define MOVLL(m, n) OPC_MN(0x6002, m, n) /* MOV.L @Rm,Rn */
+#define MOVLM(m, n) OPC_MN(0x2006, m, n) /* MOV.L Rm,@-Rn */
+#define MOVLP(m, n) OPC_MN(0x6006, m, n) /* MOV.L @Rm+,Rn */
+#define MOVLS4(m, d, n) OPC_MDN(0x1000, m, d, n) /* MOV.L Rm,@(disp,Rn) */
+#define MOVLL4(m, d, n) OPC_MDN(0x5000, m, d, n) /* MOV.L @(disp,Rm),Rn */
+#define MULL(m, n) OPC_MN(0x0007, m, n) /* MUL.L Rm,Rn */
+#define NEG(m, n) OPC_MN(OPC_NEG, m, n) /* NEG Rm,Rn */
+#define OR(m, n) OPC_MN(OPC_OR, m, n) /* OR Rm,Rn */
+#define SHAD(m, n) OPC_MN(OPC_SHAD, m, n) /* SHAD Rm,Rn */
+#define SHLD(m, n) OPC_MN(OPC_SHLD, m, n) /* SHLD Rm,Rn */
+#define SUB(m, n) OPC_MN(OPC_SUB, m, n) /* SUB Rm,Rn */
+#define SWAPB(m, n) OPC_MN(0x6008, m, n) /* SWAP.B Rm,Rn */
+#define SWAPW(m, n) OPC_MN(0x6009, m, n) /* SWAP.W Rm,Rn */
+#define STS_MACL(n) OPC_N(0x001a, n) /* STS MACL,Rn */
+#define STSMPR(n) OPC_N(0x4022, n) /* STS.L PR,@-Rn */
+#define XOR(m, n) OPC_MN(OPC_XOR, m, n) /* XOR Rm,Rn */
+
+static void __mov(TCGContext *s, int ret, int arg)
+{
+ tcg_out16(s, MOV(arg, ret));
+}
+
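+/* PC-relative MOV.L literals must be 32-bit aligned. Returns 1 when a
+ * 16-bit padding instruction is needed to reach that alignment. */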
+static int __need_pc_align(TCGContext *s)
+{
+ unsigned long pc = (unsigned long)s->code_ptr;
+
+ if (pc & 0x01)
+ tcg_abort();
+
+ if (pc & 0x02)
+ return 1;
+
+ return 0;
+}
+
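+/* Load an arbitrary 32-bit constant: emit a PC-relative MOV.L that
+ * fetches an inline literal, branch over the literal with opc1 in the
+ * delay slot, and emit opc2 (if any) after the literal. The literal's
+ * address is returned so callers can relocate it later. */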
+static uint8_t *__movi32(TCGContext *s, int ret, tcg_target_long arg,
+ unsigned int opc1, unsigned int opc2)
+{
+ uint8_t *reloc_pos;
+ int needs_align = __need_pc_align(s);
+
+ tcg_out16(s, MOVLI(1, ret)); /* load the literal emitted below */
+ tcg_out16(s, BRA(3 - needs_align)); /* skip over the literal */
+ tcg_out16(s, opc1); /* delay slot */
+ if (!needs_align)
+ tcg_out16(s, MOV(0, 0)); /* never reached; keeps the literal at the fixed displacement */
+
+ reloc_pos = s->code_ptr;
+ tcg_out32(s, arg); /* Must be 32-bit aligned */
+
+ if (opc2 != OPC_NOP)
+ tcg_out16(s, opc2);
+
+ return reloc_pos;
+}
+
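+/* Size-optimized constant load: signed 8-bit immediates use MOV #imm,
+ * unsigned 8-bit values add an EXTU.B, signed 16-bit values use a
+ * PC-relative MOV.W, and everything else becomes a 32-bit inline
+ * literal. opc1 is emitted right after the load (in a branch delay
+ * slot where one exists) and opc2 after that. */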
+static void __movi(TCGContext *s, int ret, tcg_target_long arg,
+ unsigned int opc1, unsigned int opc2)
+{
+ do {
+ if (arg == (int8_t) arg) {
+ tcg_out16(s, MOVI(arg & 0xff, ret));
+ if (opc1 != OPC_NOP)
+ tcg_out16(s, opc1);
+ break;
+ }
+
+ if (arg == (uint8_t) arg) {
+ tcg_out16(s, MOVI(arg & 0xff, ret));
+ tcg_out16(s, EXTUB(ret, ret));
+ if (opc1 != OPC_NOP)
+ tcg_out16(s, opc1);
+ break;
+ }
+
+ if (arg == (int16_t) arg) {
+ tcg_out16(s, MOVWI(1, ret));
+ tcg_out16(s, BRA(1));
+ tcg_out16(s, opc1); /* delay slot */
+ tcg_out16(s, arg);
+ break;
+ }
+
+ __movi32(s, ret, arg, opc1, opc2);
+ opc2 = OPC_NOP;
+ } while (0);
+
+ if (opc2 != OPC_NOP)
+ tcg_out16(s, opc2);
+}
+
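+/* Load size bits from @(offset,arg1) into ret, extending sub-word
+ * values. Aligned 32-bit loads with a displacement of at most 60 use
+ * the short MOV.L @(disp,Rm),Rn form; other offsets are computed into
+ * the R14 scratch register first. */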
+static void __ld(TCGContext *s, int size, int is_signed,
+ int ret, int arg1, int offset)
+{
+ unsigned int opc = MOV(0, 0);
+ unsigned int tmp;
+
+ if (size == 32 && offset > 0 && offset <= 60 && !(offset & 3)) {
+ tcg_out16(s, MOVLL4(arg1, (offset >> 2), ret));
+ return;
+ }
+
+ if (offset)
+ tmp = TCG_REG_R14;
+ else
+ tmp = arg1;
+
+ switch (size) {
+ case 8:
+ opc = MOVBL(tmp, ret);
+ break;
+ case 16:
+ opc = MOVWL(tmp, ret);
+ break;
+ case 32:
+ opc = MOVLL(tmp, ret);
+ break;
+ default:
+ fprintf(stderr, "unsupported __ld size\n");
+ tcg_abort();
+ }
+
+ if (offset)
+ __movi(s, TCG_REG_R14, offset, ADD(arg1, TCG_REG_R14), opc);
+ else
+ tcg_out16(s, opc);
+
+ if (!is_signed) {
+ if (size == 8)
+ tcg_out16(s, EXTUB(ret, ret));
+ if (size == 16)
+ tcg_out16(s, EXTUW(ret, ret));
+ }
+}
+
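+/* Cross-endian load: load normally, then reverse the bytes with
+ * SWAP.B/SWAP.W and re-extend 16-bit values with the requested
+ * signedness. */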
+static void __ld_swap(TCGContext *s, int size, int is_signed,
+ int ret, int arg1, int offset)
+{
+ __ld(s, size, 1, ret, arg1, offset);
+
+ if (size == 16) {
+ tcg_out16(s, SWAPB(ret, ret));
+ if (is_signed)
+ tcg_out16(s, EXTSW(ret, ret));
+ else
+ tcg_out16(s, EXTUW(ret, ret));
+ }
+ if (size == 32) {
+ tcg_out16(s, SWAPB(ret, ret));
+ tcg_out16(s, SWAPW(ret, ret));
+ tcg_out16(s, SWAPB(ret, ret));
+ }
+}
+
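+/* Store size bits of arg1 to @(offset,arg). Mirrors __ld: the short
+ * MOV.L Rm,@(disp,Rn) form when possible, R14 as scratch otherwise. */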
+static void __st(TCGContext *s, int size, int arg, int arg1, int offset)
+{
+ unsigned int opc = MOV(0, 0);
+ unsigned int tmp;
+
+ if (size == 32 && offset > 0 && !(offset & 3) && offset <= 60) {
+ tcg_out16(s, MOVLS4(arg1, (offset >> 2), arg));
+ return;
+ }
+
+ if (offset)
+ tmp = TCG_REG_R14;
+ else
+ tmp = arg;
+
+ switch (size) {
+ case 8:
+ opc = MOVBS(arg1, tmp);
+ break;
+ case 16:
+ opc = MOVWS(arg1, tmp);
+ break;
+ case 32:
+ opc = MOVLS(arg1, tmp);
+ break;
+ default:
+ fprintf(stderr, "unsupported __st size\n");
+ tcg_abort();
+ }
+
+ if (offset)
+ __movi(s, TCG_REG_R14, offset, ADD(arg, TCG_REG_R14), opc);
+ else
+ tcg_out16(s, opc);
+}
+
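+/* Cross-endian store: byte-swap the value, store it, and (in the
+ * offset case, where the swap is done in place) swap back so the
+ * source register is left unchanged. */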
+static void __st_swap(TCGContext *s, int size, int arg, int arg1, int offset)
+{
+ if (offset == 0) {
+ if (size == 16) {
+ tcg_out16(s, SWAPB(arg1, TCG_REG_R14));
+ tcg_out16(s, MOVWS(TCG_REG_R14, arg));
+ }
+ if (size == 32) {
+ tcg_out16(s, SWAPB(arg1, TCG_REG_R14));
+ tcg_out16(s, SWAPW(TCG_REG_R14, TCG_REG_R14));
+ tcg_out16(s, SWAPB(TCG_REG_R14, TCG_REG_R14));
+ tcg_out16(s, MOVLS(TCG_REG_R14, arg));
+ }
+ } else {
+ if (size == 16 || size == 32)
+ tcg_out16(s, SWAPB(arg1, arg1));
+
+ if (size == 32) {
+ tcg_out16(s, SWAPW(arg1, arg1));
+ tcg_out16(s, SWAPB(arg1, arg1));
+ }
+
+ __st(s, size, arg, arg1, offset);
+
+ if (size == 32) {
+ tcg_out16(s, SWAPB(arg1, arg1));
+ tcg_out16(s, SWAPW(arg1, arg1));
+ }
+ if (size == 16 || size == 32)
+ tcg_out16(s, SWAPB(arg1, arg1));
+ }
+}
+
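+/* Two-operand ALU op: ret = arg1 OP arg2. Works on ret in place when
+ * ret == arg1; otherwise arg1 is copied first, which keeps the operand
+ * order right for non-commutative ops (SUB, SHLD, SHAD). */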
+static void __alu(TCGContext *s, int ret, unsigned int opc, int arg1,
+ tcg_target_long arg2, int const_arg2)
+{
+ int tmp = TCG_REG_R14;
+
+ if (const_arg2) {
+ if (ret == arg1)
+ __movi(s, tmp, arg2, OPC_MN(opc, tmp, ret), OPC_NOP);
+ else
+ __movi(s, tmp, arg2, MOV(arg1, ret), OPC_MN(opc, tmp, ret));
+ } else {
+ if (ret == arg1)
+ tcg_out16(s, OPC_MN(opc, arg2, ret));
+ else {
+ tcg_out16(s, MOV(arg1, tmp));
+ tcg_out16(s, OPC_MN(opc, arg2, tmp));
+ tcg_out16(s, MOV(tmp, ret));
+ }
+ }
+}
+
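+/* Right shifts: SHLD/SHAD shift left for positive counts and right
+ * for negative ones, so shift right by negating the shift amount. */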
+static void __shr(TCGContext *s, int ret, unsigned int opc,
+ int arg1, tcg_target_long arg2, int const_arg2)
+{
+ if (const_arg2)
+ __alu(s, ret, opc, arg1, -arg2, 1);
+ else {
+ if (ret == arg1) {
+ tcg_out16(s, NEG(arg2, TCG_REG_R14));
+ tcg_out16(s, OPC_MN(opc, TCG_REG_R14, ret));
+ } else {
+ tcg_out16(s, NEG(arg2, TCG_REG_R14));
+ tcg_out16(s, MOV(arg1, ret));
+ tcg_out16(s, OPC_MN(opc, TCG_REG_R14, ret));
+ }
+ }
+}
+
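+/* 32-bit multiply: MUL.L leaves the low 32 bits of the product in
+ * MACL, which STS then copies into ret. */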
+static void __mul(TCGContext *s, int ret, int arg1,
+ tcg_target_long arg2, int const_arg2)
+{
+ int tmp = TCG_REG_R14;
+
+ if (const_arg2)
+ __movi(s, tmp, arg2, OPC_MN(OPC_MULL, arg1, tmp), STS_MACL(ret));
+ else {
+ tcg_out16(s, OPC_MN(OPC_MULL, arg2, arg1));
+ tcg_out16(s, STS_MACL(ret));
+ }
+}
+
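+/* SH-4 compares set the T bit, and only the "true" sense of each
+ * condition exists (EQ/GT/GE/HI/HS). Each inverse condition reuses
+ * the compare of its counterpart and branches on !T instead; see
+ * __cmp_inv and the BT/BF choice in __brcond. */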
+static unsigned int __cmp_opc(TCGContext *s, int cond, int arg1, int arg2)
+{
+ unsigned int opc = MOV(0, 0);
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ opc = CMPEQ(arg1, arg2);
+ break;
+ case TCG_COND_GT:
+ case TCG_COND_LE:
+ opc = CMPGT(arg2, arg1); /* T = (arg1 > arg2), signed */
+ break;
+ case TCG_COND_GE:
+ case TCG_COND_LT:
+ opc = CMPGE(arg2, arg1); /* T = (arg1 >= arg2), signed */
+ break;
+ case TCG_COND_GTU:
+ case TCG_COND_LEU:
+ opc = CMPHI(arg2, arg1); /* T = (arg1 > arg2), unsigned */
+ break;
+ case TCG_COND_GEU:
+ case TCG_COND_LTU:
+ opc = CMPHS(arg2, arg1); /* T = (arg1 >= arg2), unsigned */
+ break;
+ }
+
+ return opc;
+}
+
+static unsigned int __cmp_inv(TCGContext *s, int cond)
+{
+ switch (cond) {
+ case TCG_COND_NE:
+ case TCG_COND_LT:
+ case TCG_COND_LE:
+ case TCG_COND_LTU:
+ case TCG_COND_LEU:
+ return 1;
+ }
+
+ return 0;
+}
+
+static void __jmp_reg(TCGContext *s, int arg)
+{
+ tcg_out16(s, JMP(arg));
+ tcg_out16(s, OPC_NOP); /* delay slot */
+}
+
+static void __jmp_imm(TCGContext *s, tcg_target_long arg)
+{
+ __movi(s, TCG_REG_R14, arg, OPC_NOP, OPC_NOP);
+ __jmp_reg(s, TCG_REG_R14);
+}
+
+static void __jmp(TCGContext *s, tcg_target_long arg, int const_arg)
+{
+ if (const_arg)
+ __jmp_imm(s, arg);
+ else
+ __jmp_reg(s, arg);
+}
+
+static uint8_t *__jmp_imm32(TCGContext *s, unsigned int opc1, unsigned int opc2)
+{
+ uint8_t *reloc_slot;
+
+ reloc_slot = __movi32(s, TCG_REG_R14, 0, opc1, OPC_NOP);
+
+ if (opc2 != OPC_NOP)
+ tcg_out16(s, opc2);
+
+ __jmp_reg(s, TCG_REG_R14);
+ return reloc_slot;
+}
+
+static void __jmp_index(TCGContext *s, unsigned int opc1,
+ unsigned int opc2, int index)
+{
+ tcg_out_reloc(s, __jmp_imm32(s, opc1, opc2), 0, index, 0);
+}
+
+static void __brcond(TCGContext *s, int arg0, int arg1, int cond, int index)
+{
+ unsigned int opc1 = __cmp_opc(s, cond, arg0, arg1);
+ unsigned int opc2 = __cmp_inv(s, cond) ? BT(1) : BF(1);
+
+ __jmp_index(s, opc1, opc2, index);
+}
+
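+/* Call a helper: PR holds the generated code's own return address, so
+ * it is pushed before and popped after the JSR. */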
+static void __jsr(TCGContext *s, tcg_target_long arg, int const_arg)
+{
+ if (const_arg) {
+ __movi(s, TCG_REG_R14, arg, STSMPR(TCG_REG_R15), OPC_NOP);
+ arg = TCG_REG_R14;
+ } else
+ tcg_out16(s, STSMPR(TCG_REG_R15));
+
+ tcg_out16(s, JSR(arg));
+ tcg_out16(s, OPC_NOP); /* delay slot */
+ tcg_out16(s, LDSMPR(TCG_REG_R15));
+}
+
+static void __qemu_ld(TCGContext *s, const TCGArg *args,
+ int size, int is_signed)
+{
+ if (size == 8 || !swap_endian)
+ __ld(s, size, is_signed, args[0], args[1], GUEST_BASE);
+ else
+ __ld_swap(s, size, is_signed, args[0], args[1], GUEST_BASE);
+}
+
+static void __qemu_st(TCGContext *s, const TCGArg *args, int size)
+{
+ /* args[0] is the data register, args[1] holds the guest address */
+ if (size == 8 || !swap_endian)
+ __st(s, size, args[1], args[0], GUEST_BASE);
+ else
+ __st_swap(s, size, args[1], args[0], GUEST_BASE);
+}
+
+static void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ __mov(s, ret, arg);
+}
+
+static void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+ __movi(s, ret, arg, OPC_NOP, OPC_NOP);
+}
+
+static void tcg_out_ld(TCGContext *s, TCGType type, int ret, int arg1,
+ tcg_target_long arg2)
+{
+ __ld(s, 32, 0, ret, arg1, arg2);
+}
+
+static void tcg_out_st(TCGContext *s, TCGType type, int arg, int arg1,
+ tcg_target_long arg2)
+{
+ __st(s, 32, arg1, arg, arg2);
+}
+
+static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ __alu(s, reg, OPC_ADD, reg, val, 1);
+}
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ *(uint32_t *)code_ptr = value;
+}
+
+/* maximum number of register used for input function arguments */
+static int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ return ARRAY_SIZE(tcg_target_call_iarg_regs);
+}
+
+/* parse target specific constraints */
+static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'r':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0x0000ffff);
+ break;
+ /* qemu_ld/st address constraint */
+ case 'L':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0x0000ffff);
+ break;
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ return 0;
+}
+
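+/* The prologue pushes the callee-saved registers and reserves stack
+ * space for helper-call arguments; tcg_qemu_tb_exec(tb_ptr) passes the
+ * TB pointer in r4, which is jumped to once setup is done. The
+ * epilogue at tb_ret_addr unwinds this and returns with the exit_tb
+ * value left in r0. */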
+void tcg_target_qemu_prologue(TCGContext *s)
+{
+ int i, frame_size, push_size, stack_addend;
+
+ /* TB prologue */
+ /* save all callee saved registers */
+ for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++)
+ tcg_out16(s, MOVLM(tcg_target_callee_save_regs[i], TCG_REG_R15));
+
+ /* reserve some stack space */
+ push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
+ frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
+ frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
+ ~(TCG_TARGET_STACK_ALIGN - 1);
+ stack_addend = frame_size - push_size;
+ tcg_out_addi(s, TCG_REG_R15, -stack_addend);
+
+ __jmp_reg(s, TCG_REG_R4); /* tb_ptr in R4 from tcg_qemu_tb_exec(tb_ptr) */
+
+ /* TB epilogue */
+ tb_ret_addr = s->code_ptr;
+ tcg_out_addi(s, TCG_REG_R15, stack_addend);
+
+ for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--)
+ tcg_out16(s, MOVLP(TCG_REG_R15, tcg_target_callee_save_regs[i]));
+
+ tcg_out16(s, OPC_RTS);
+ tcg_out16(s, OPC_NOP); /* delay slot */
+}
+
+static void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
+ const int *const_args)
+{
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ __movi(s, TCG_REG_R0, args[0], OPC_NOP, OPC_NOP);
+ __jmp_imm(s, (tcg_target_long) tb_ret_addr);
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) { /* direct jump method */
+ uint8_t *imm32_addr = __jmp_imm32(s, OPC_NOP, OPC_NOP);
+ s->tb_jmp_offset[args[0]] = imm32_addr - s->code_buf;
+ } else {
+ fprintf(stderr, "unsupported indirect goto_tb\n");
+ tcg_abort();
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_br:
+ __jmp_index(s, OPC_NOP, OPC_NOP, args[0]);
+ break;
+ case INDEX_op_call:
+ __jsr(s, args[0], const_args[0]);
+ break;
+ case INDEX_op_jmp:
+ __jmp(s, args[0], const_args[0]);
+ break;
+ case INDEX_op_mov_i32:
+ __mov(s, args[0], args[1]);
+ break;
+ case INDEX_op_movi_i32:
+ __movi(s, args[0], args[1], OPC_NOP, OPC_NOP);
+ break;
+ case INDEX_op_ld8u_i32:
+ __ld(s, 8, 0, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld8s_i32:
+ __ld(s, 8, 1, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16u_i32:
+ __ld(s, 16, 0, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16s_i32:
+ __ld(s, 16, 1, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld_i32:
+ __ld(s, 32, 0, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st8_i32:
+ __st(s, 8, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st16_i32:
+ __st(s, 16, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st_i32:
+ __st(s, 32, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_add_i32:
+ __alu(s, args[0], OPC_ADD, args[1], args[2], const_args[2]);
+ break;
+ case INDEX_op_sub_i32:
+ __alu(s, args[0], OPC_SUB, args[1], args[2], 0);
+ break;
+ case INDEX_op_and_i32:
+ __alu(s, args[0], OPC_AND, args[1], args[2], const_args[2]);
+ break;
+ case INDEX_op_or_i32:
+ __alu(s, args[0], OPC_OR, args[1], args[2], const_args[2]);
+ break;
+ case INDEX_op_xor_i32:
+ __alu(s, args[0], OPC_XOR, args[1], args[2], const_args[2]);
+ break;
+ case INDEX_op_mul_i32:
+ __mul(s, args[0], args[1], args[2], const_args[2]);
+ break;
+ case INDEX_op_shl_i32:
+ __alu(s, args[0], OPC_SHLD, args[1], args[2], const_args[2]);
+ break;
+ case INDEX_op_shr_i32:
+ __shr(s, args[0], OPC_SHLD, args[1], args[2], const_args[2]);
+ break;
+ case INDEX_op_sar_i32:
+ __shr(s, args[0], OPC_SHAD, args[1], args[2], const_args[2]);
+ break;
+ case INDEX_op_mulu2_i32:
+ fprintf(stderr, "unimplemented mulu2\n");
+ tcg_abort();
+ break;
+ case INDEX_op_div2_i32:
+ fprintf(stderr, "unimplemented div2\n");
+ tcg_abort();
+ break;
+ case INDEX_op_divu2_i32:
+ fprintf(stderr, "unimplemented divu2\n");
+ tcg_abort();
+ break;
+ case INDEX_op_brcond_i32:
+ __brcond(s, args[0], args[1], args[2], args[3]);
+ break;
+ case INDEX_op_qemu_ld8u:
+ __qemu_ld(s, args, 8, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ __qemu_ld(s, args, 8, 1);
+ break;
+ case INDEX_op_qemu_ld16u:
+ __qemu_ld(s, args, 16, 0);
+ break;
+ case INDEX_op_qemu_ld16s:
+ __qemu_ld(s, args, 16, 1);
+ break;
+ case INDEX_op_qemu_ld32u:
+ __qemu_ld(s, args, 32, 0);
+ break;
+ case INDEX_op_qemu_ld64:
+ __qemu_ld(s, args, 64, 0);
+ break;
+ case INDEX_op_qemu_st8:
+ __qemu_st(s, args, 8);
+ break;
+ case INDEX_op_qemu_st16:
+ __qemu_st(s, args, 16);
+ break;
+ case INDEX_op_qemu_st32:
+ __qemu_st(s, args, 32);
+ break;
+ case INDEX_op_qemu_st64:
+ __qemu_st(s, args, 64);
+ break;
+
+ default:
+ tcg_dump_ops(s, stderr);
+ tcg_abort();
+ }
+}
+
+static const TCGTargetOpDef sh4_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ { INDEX_op_sub_i32, { "r", "r", "r" } },
+ { INDEX_op_and_i32, { "r", "r", "ri" } },
+ { INDEX_op_or_i32, { "r", "r", "ri" } },
+ { INDEX_op_xor_i32, { "r", "r", "ri" } },
+ { INDEX_op_add_i32, { "r", "r", "ri" } },
+ { INDEX_op_mul_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "ri" } },
+ { INDEX_op_shr_i32, { "r", "r", "ri" } },
+ { INDEX_op_sar_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_brcond_i32, { "r", "r" } },
+
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld32s, { "r", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "L", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L", "L" } },
+
+ { -1 },
+};
+
+void tcg_target_init(TCGContext *s)
+{
+ /* fail safe */
+ if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
+ tcg_abort();
+
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0x0000ffff);
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0, 0x000000ff); /* r0-r7 are caller-saved in the SH-4 ABI */
+
+ tcg_regset_clear(s->reserved_regs);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14); /* Scratch */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R15); /* Stack pointer */
+
+ tcg_add_target_add_op_defs(sh4_op_defs);
+}
@@ -0,0 +1,65 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ * Copyright (c) 2009 Magnus Damm
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_SH4 1
+
+#define TCG_TARGET_REG_BITS 32
+#ifdef __BIG_ENDIAN__
+#define TCG_TARGET_WORDS_BIGENDIAN
+#endif
+#define TCG_TARGET_NB_REGS 16
+
+enum {
+ TCG_REG_R0 = 0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+};
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_R15
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+
+#define TCG_AREG0 TCG_REG_R11
+#define TCG_AREG1 TCG_REG_R12
+#define TCG_AREG2 TCG_REG_R13
+
+//#define TCG_TARGET_HAS_GUEST_BASE
+
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
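+ /* XXX: no cache maintenance is done yet, so the I-cache may execute
+ * stale code; see also the FIXME in tb_set_jmp_target1. */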
+}