From patchwork Sat Mar 21 10:29:22 2015
X-Patchwork-Submitter: Chen Gang
X-Patchwork-Id: 452927
From: Chen Gang <xili_gchen_5257@hotmail.com>
Date: Sat, 21 Mar 2015 18:29:22 +0800
To: rth@twiddle.net, Peter Maydell, Andreas Färber, Chris Metcalf,
 Riku Voipio, walt@tilera.com
Cc: qemu-devel
In-Reply-To: <550D455B.8000004@hotmail.com>
References: <550D455B.8000004@hotmail.com>
Subject: [Qemu-devel] [PATCH 12/12 v8] target-tilegx: Generate tcg instructions to execute to 1st system call

Generate the related tcg instructions, so that qemu tilegx can run up to the
1st system call (uname), which is issued from _dl_discover_osversion(), and
then return to __libc_start_main() successfully.
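For reference only (not part of the patch): the guest path described above can
be exercised with a minimal statically linked test program, since glibc's
startup code issues the same uname(2) syscall from _dl_discover_osversion()
before main() runs. The cross-toolchain prefix and the qemu-tilegx binary name
below are assumptions about the local build, not something this patch defines:

    /* hypothetical guest-side test: reaching printf() means the first
     * uname(2) syscall and the return to __libc_start_main() worked */
    #include <stdio.h>
    #include <sys/utsname.h>

    int main(void)
    {
        struct utsname u;

        if (uname(&u) != 0) {
            return 1;
        }
        printf("%s %s\n", u.sysname, u.release);
        return 0;
    }

    $ tilegx-linux-gnu-gcc -static -o uname-test uname-test.c
    $ qemu-tilegx -d in_asm ./uname-test

Running with "-d in_asm" prints the qemu_log_mask(CPU_LOG_TB_IN_ASM, ...)
output added below, so the decoded bundles can be checked while translating.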
Signed-off-by: Chen Gang
---
 target-tilegx/translate.c | 543 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 543 insertions(+)

diff --git a/target-tilegx/translate.c b/target-tilegx/translate.c
index dada275..8274d8b 100644
--- a/target-tilegx/translate.c
+++ b/target-tilegx/translate.c
@@ -73,6 +73,32 @@ typedef struct DisasContext {
 
 #include "exec/gen-icount.h"
 
+static TCGv load_zero(DisasContext *dc)
+{
+    if (TCGV_IS_UNUSED_I64(dc->zero)) {
+        dc->zero = tcg_const_i64(0);
+    }
+    return dc->zero;
+}
+
+static TCGv load_gr(DisasContext *dc, uint8_t reg)
+{
+    if (likely(reg < TILEGX_R_COUNT)) {
+        return cpu_regs[reg];
+    } else if (reg != TILEGX_R_ZERO) {
+        dc->exception = TILEGX_EXCP_REG_UNSUPPORTED;
+    }
+    return load_zero(dc);
+}
+
+static TCGv dest_gr(DisasContext *dc, uint8_t rdst)
+{
+    DisasContextTemp *tmp = dc->tmp_regcur;
+    tmp->idx = rdst;
+    tmp->val = tcg_temp_new_i64();
+    return tmp->val;
+}
+
 static void gen_exception(DisasContext *dc, int num)
 {
     TCGv_i32 tmp = tcg_const_i32(num);
@@ -81,9 +107,402 @@ static void gen_exception(DisasContext *dc, int num)
     tcg_temp_free_i32(tmp);
 }
 
+static void gen_fnop(void)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "(f)nop\n");
+}
+
+static void gen_cmpltui(struct DisasContext *dc,
+                        uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "cmpltui r%d, r%d, %d\n",
+                  rdst, rsrc, imm8);
+    tcg_gen_setcondi_i64(TCG_COND_LTU, dest_gr(dc, rdst), load_gr(dc, rsrc),
+                         (uint64_t)imm8);
+}
+
+static void gen_cmpeqi(struct DisasContext *dc,
+                       uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "cmpeqi r%d, r%d, %d\n", rdst, rsrc, imm8);
+    tcg_gen_setcondi_i64(TCG_COND_EQ, dest_gr(dc, rdst), load_gr(dc, rsrc),
+                         (uint64_t)imm8);
+}
+
+static void gen_cmpne(struct DisasContext *dc,
+                      uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "cmpne r%d, r%d, r%d\n",
+                  rdst, rsrc, rsrcb);
+    tcg_gen_setcond_i64(TCG_COND_NE, dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        load_gr(dc, rsrcb));
+}
+
+static void gen_cmoveqz(struct DisasContext *dc,
+                        uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "cmoveqz r%d, r%d, r%d\n",
+                  rdst, rsrc, rsrcb);
+    tcg_gen_movcond_i64(TCG_COND_EQ, dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        load_zero(dc), load_gr(dc, rsrcb), load_gr(dc, rdst));
+}
+
+static void gen_cmovnez(struct DisasContext *dc,
+                        uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "cmovnez r%d, r%d, r%d\n",
+                  rdst, rsrc, rsrcb);
+    tcg_gen_movcond_i64(TCG_COND_NE, dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        load_zero(dc), load_gr(dc, rsrcb), load_gr(dc, rdst));
+}
+
+static void gen_add(struct DisasContext *dc,
+                    uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "add r%d, r%d, r%d\n",
+                  rdst, rsrc, rsrcb);
+    tcg_gen_add_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), load_gr(dc, rsrcb));
+}
+
+static void gen_addimm(struct DisasContext *dc,
+                       uint8_t rdst, uint8_t rsrc, int64_t imm)
+{
+    tcg_gen_addi_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), imm);
+}
+
+static void gen_addi(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "addi r%d, r%d, %d\n", rdst, rsrc, imm8);
+    gen_addimm(dc, rdst, rsrc, (int64_t)imm8);
+}
+
+static void gen_addli(struct DisasContext *dc,
+                      uint8_t rdst, uint8_t rsrc, int16_t im16)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "addli r%d, r%d, %d\n", rdst, rsrc, im16);
+    gen_addimm(dc, rdst, rsrc, (int64_t)im16);
+}
+
+static void gen_addx(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "addx r%d, r%d, r%d\n", rdst, rsrc, rsrcb);
+    tcg_gen_add_i64(vdst, load_gr(dc, rsrc), load_gr(dc, rsrcb));
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+static void gen_addximm(struct DisasContext *dc,
+                        uint8_t rdst, uint8_t rsrc, int32_t imm)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    tcg_gen_addi_i64(vdst, load_gr(dc, rsrc), imm);
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+static void gen_addxi(struct DisasContext *dc,
+                      uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "addxi r%d, r%d, %d\n", rdst, rsrc, imm8);
+    gen_addximm(dc, rdst, rsrc, (int32_t)imm8);
+}
+
+static void gen_subx(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "subx r%d, r%d, r%d\n", rdst, rsrc, rsrcb);
+    tcg_gen_sub_i64(vdst, load_gr(dc, rsrc), load_gr(dc, rsrcb));
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+/*
+ * The related functional description for bfextu in isa document:
+ *
+ * uint64_t mask = 0;
+ * mask = (-1ULL) ^ ((-1ULL << ((BFEnd - BFStart) & 63)) << 1);
+ * uint64_t rot_src = (((uint64_t) rf[SrcA]) >> BFStart)
+ *                    | (rf[SrcA] << (64 - BFStart));
+ * rf[Dest] = rot_src & mask;
+ */
+static void gen_bfextu(struct DisasContext *dc, uint8_t rdst, uint8_t rsrc,
+                       int8_t start, int8_t end)
+{
+    uint64_t mask = (-1ULL) ^ ((-1ULL << ((end - start) & 63)) << 1);
+    TCGv tmp = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "bfextu r%d, r%d, %d, %d\n",
+                  rdst, rsrc, start, end);
+
+    tcg_gen_rotri_i64(tmp, load_gr(dc, rsrc), start);
+    tcg_gen_andi_i64(tmp, tmp, mask);
+}
+
+static void gen_or(struct DisasContext *dc,
+                   uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "or r%d, r%d, r%d\n", rdst, rsrc, rsrcb);
+    tcg_gen_or_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), load_gr(dc, rsrcb));
+}
+
+static void gen_orimm(struct DisasContext *dc,
+                      uint8_t rdst, uint8_t rsrc, int64_t imm)
+{
+    tcg_gen_ori_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), imm);
+}
+
+static void gen_ori(struct DisasContext *dc,
+                    uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "ori r%d, r%d, %d\n", rdst, rsrc, imm8);
+    gen_orimm(dc, rdst, rsrc, (int64_t)imm8);
+}
+
+static void gen_xor(struct DisasContext *dc,
+                    uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "xor r%d, r%d, r%d\n", rdst, rsrc, rsrcb);
+    tcg_gen_xor_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), load_gr(dc, rsrcb));
+}
+
+static void gen_and(struct DisasContext *dc,
+                    uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "and r%d, r%d, r%d\n", rdst, rsrc, rsrcb);
+    tcg_gen_and_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), load_gr(dc, rsrcb));
+}
+
+static void gen_andimm(struct DisasContext *dc,
+                       uint8_t rdst, uint8_t rsrc, int64_t imm)
+{
+    tcg_gen_andi_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), imm);
+}
+
+static void gen_andi(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "andi r%d, r%d, %d\n", rdst, rsrc, imm8);
+    gen_andimm(dc, rdst, rsrc, (int64_t)imm8);
+}
+
+static void gen_mulx(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "mulx r%d, r%d, r%d\n", rdst, rsrc, rsrcb);
+
+    tcg_gen_mul_i64(vdst, load_gr(dc, rsrc), load_gr(dc, rsrcb));
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+static void gen_shlx(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "shlx r%d, r%d, r%d\n", rdst, rsrc, rsrcb);
+    tcg_gen_andi_i64(vdst, load_gr(dc, rsrcb), 31);
+    tcg_gen_shl_i64(vdst, load_gr(dc, rsrc), vdst);
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+static void gen_shlxi(struct DisasContext *dc,
+                      uint8_t rdst, uint8_t rsrc, uint8_t shamt)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "shlxi r%d, r%d, r%d\n",
+                  rdst, rsrc, shamt);
+    tcg_gen_shli_i64(vdst, load_gr(dc, rsrc), shamt & 31);
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+static void gen_shl3add(struct DisasContext *dc,
+                        uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "shl3add r%d, r%d, r%d\n",
+                  rdst, rsrc, rsrcb);
+    tcg_gen_shli_i64(vdst, load_gr(dc, rsrc), 3);
+    tcg_gen_add_i64(vdst, vdst, load_gr(dc, rsrcb));
+}
+
+static void gen_shl16insli(struct DisasContext *dc,
+                           uint8_t rdst, uint8_t rsrc, uint16_t uimm16)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "shl16insli r%d, r%d, %x\n",
+                  rdst, rsrc, uimm16);
+    tcg_gen_shli_i64(vdst, load_gr(dc, rsrc), 16);
+    tcg_gen_ori_i64(vdst, vdst, uimm16);
+}
+
+static void gen_ld(struct DisasContext *dc, uint8_t rdst, uint8_t rsrc)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "ld r%d, r%d\n", rdst, rsrc);
+    tcg_gen_qemu_ld_i64(dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        MMU_USER_IDX, MO_LEQ);
+}
+
+static void gen_ld1s(struct DisasContext *dc, uint8_t rdst, uint8_t rsrc)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "ld1s r%d, r%d\n", rdst, rsrc);
+    tcg_gen_qemu_ld_i64(dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        MMU_USER_IDX, MO_SB);
+}
+
+static void gen_ld4s(struct DisasContext *dc, uint8_t rdst, uint8_t rsrc)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "ld4s r%d, r%d\n", rdst, rsrc);
+    tcg_gen_qemu_ld_i64(dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        MMU_USER_IDX, MO_LESL);
+}
+
+static void gen_st(struct DisasContext *dc, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "st r%d, r%d\n", rsrc, rsrcb);
+    tcg_gen_qemu_st_i64(load_gr(dc, rsrcb), load_gr(dc, rsrc),
+                        MMU_USER_IDX, MO_LEQ);
+}
+
+static void gen_st4(struct DisasContext *dc, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "st4 r%d, r%d\n", rsrc, rsrcb);
+    tcg_gen_qemu_st_i64(load_gr(dc, rsrcb), load_gr(dc, rsrc),
+                        MMU_USER_IDX, MO_LEUL);
+}
+
+static void gen_lnk(struct DisasContext *dc, uint8_t rdst)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "lnk r%d\n", rdst);
+    tcg_gen_movi_i64(dest_gr(dc, rdst), dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+}
+
+static int gen_beqz(struct DisasContext *dc, uint8_t rsrc, int32_t off)
+{
+    uint64_t pos = dc->pc + (int64_t)off * TILEGX_BUNDLE_SIZE_IN_BYTES;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                  "beqz(t) r%d, %d ([" TARGET_FMT_lx "] %s)\n",
+                  rsrc, off, pos, lookup_symbol(pos));
+
+    dc->jmp.dest = tcg_temp_new_i64();
+    dc->jmp.val1 = tcg_temp_new_i64();
+    dc->jmp.val2 = tcg_temp_new_i64();
+
+    dc->jmp.cond = TCG_COND_EQ;
+    tcg_gen_movi_i64(dc->jmp.dest, pos);
+    tcg_gen_mov_i64(dc->jmp.val1, load_gr(dc, rsrc));
+    tcg_gen_movi_i64(dc->jmp.val2, 0);
+
+    return 0;
+}
+
+static int gen_bnezt(struct DisasContext *dc, uint8_t rsrc, int32_t off)
+{
+    uint64_t pos = dc->pc + (int64_t)off * TILEGX_BUNDLE_SIZE_IN_BYTES;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                  "bnez(t) r%d, %d ([" TARGET_FMT_lx "] %s)\n",
+                  rsrc, off, pos, lookup_symbol(pos));
+
+    dc->jmp.dest = tcg_temp_new_i64();
+    dc->jmp.val1 = tcg_temp_new_i64();
+    dc->jmp.val2 = tcg_temp_new_i64();
+
+    dc->jmp.cond = TCG_COND_NE;
+    tcg_gen_movi_i64(dc->jmp.dest, pos);
+    tcg_gen_mov_i64(dc->jmp.val1, load_gr(dc, rsrc));
+    tcg_gen_movi_i64(dc->jmp.val2, 0);
+
+    return 0;
+}
+
+static int gen_blbc(struct DisasContext *dc, uint8_t rsrc, int32_t off)
+{
+    uint64_t pos = dc->pc + (int64_t)off * TILEGX_BUNDLE_SIZE_IN_BYTES;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                  "blbc r%d, %d ([" TARGET_FMT_lx "] %s)\n",
+                  rsrc, off, pos, lookup_symbol(pos));
+
+    dc->jmp.dest = tcg_temp_new_i64();
+    dc->jmp.val1 = tcg_temp_new_i64();
+    dc->jmp.val2 = tcg_temp_new_i64();
+
+    dc->jmp.cond = TCG_COND_EQ;
+    tcg_gen_movi_i64(dc->jmp.dest, pos);
+    tcg_gen_mov_i64(dc->jmp.val1, load_gr(dc, rsrc));
+    tcg_gen_andi_i64(dc->jmp.val1, dc->jmp.val1, 1ULL);
+    tcg_gen_movi_i64(dc->jmp.val2, 0);
+
+    return 0;
+}
+
+static void gen_jr(struct DisasContext *dc, uint8_t rsrc)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "jr(p) r%d\n", rsrc);
+
+    dc->jmp.dest = tcg_temp_new_i64();
+
+    dc->jmp.cond = TCG_COND_ALWAYS;
+    tcg_gen_mov_i64(dc->jmp.dest, load_gr(dc, rsrc));
+}
+
+static void gen_j(struct DisasContext *dc, int off)
+{
+    uint64_t pos = dc->pc + (int64_t)off * TILEGX_BUNDLE_SIZE_IN_BYTES;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                  "j %d ([" TARGET_FMT_lx "] %s)\n",
+                  off, pos, lookup_symbol(pos));
+
+    dc->jmp.dest = tcg_temp_new_i64();
+
+    dc->jmp.cond = TCG_COND_ALWAYS;
+    tcg_gen_movi_i64(dc->jmp.dest, pos);
+}
+
+static void gen_jal(struct DisasContext *dc, int off)
+{
+    uint64_t pos = dc->pc + (int64_t)off * TILEGX_BUNDLE_SIZE_IN_BYTES;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                  "jal %d ([" TARGET_FMT_lx "] %s)\n",
+                  off, pos, lookup_symbol(pos));
+
+
+    dc->jmp.dest = tcg_temp_new_i64();
+    tcg_gen_movi_i64(dest_gr(dc, TILEGX_R_LR),
+                     dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+
+    dc->jmp.cond = TCG_COND_ALWAYS;
+    tcg_gen_movi_i64(dc->jmp.dest, pos);
+}
+
+static void gen_swint1(struct DisasContext *dc)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "swint1\n");
+
+    tcg_gen_movi_i64(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+    dc->exception = TILEGX_EXCP_SYSCALL;
+}
+
 static void decode_addi_opcode_y0(struct DisasContext *dc,
                                   tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_Y0(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_Y0(bundle);
+    int8_t imm8 = (int8_t)get_Imm8_Y0(bundle);
+
+    gen_addi(dc, rdst, rsrc, imm8);
 }
 
 static void decode_rrr_1_opcode_y0(struct DisasContext *dc,
@@ -95,6 +514,7 @@ static void decode_rrr_1_opcode_y0(struct DisasContext *dc,
         case NOP_UNARY_OPCODE_Y0:
         case FNOP_UNARY_OPCODE_Y0:
             if (!get_SrcA_Y0(bundle) && !get_Dest_Y0(bundle)) {
+                gen_fnop();
                 return;
             }
             break;
@@ -127,8 +547,13 @@ static void decode_rrr_1_opcode_y0(struct DisasContext *dc,
 static void decode_rrr_5_opcode_y0(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_Y0(bundle);
+    uint8_t rsrcb = (uint8_t)get_SrcB_Y0(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_Y0(bundle);
+
     switch (get_RRROpcodeExtension_Y0(bundle)) {
     case OR_RRR_5_OPCODE_Y0:
+        gen_or(dc, rdst, rsrc, rsrcb);
         return;
     case AND_RRR_5_OPCODE_Y0:
     case NOR_RRR_5_OPCODE_Y0:
@@ -144,6 +569,11 @@ static void decode_rrr_5_opcode_y0(struct DisasContext *dc,
 static void decode_addi_opcode_y1(struct DisasContext *dc,
                                   tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_Y1(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_Y1(bundle);
+    int8_t imm8 = (int8_t)get_Imm8_Y1(bundle);
+
+    gen_addi(dc, rdst, rsrc, imm8);
 }
 
 static void decode_rrr_1_opcode_y1(struct DisasContext *dc,
@@ -155,6 +585,7 @@ static void decode_rrr_1_opcode_y1(struct DisasContext *dc,
         case NOP_UNARY_OPCODE_Y1:
         case FNOP_UNARY_OPCODE_Y1:
             if (!get_SrcA_Y1(bundle) && !get_Dest_Y1(bundle)) {
+                gen_fnop();
                 return;
             }
             break;
@@ -182,8 +613,13 @@ static void decode_rrr_1_opcode_y1(struct DisasContext *dc,
 static void decode_rrr_5_opcode_y1(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_Y1(bundle);
+    uint8_t rsrcb = (uint8_t)get_SrcB_Y1(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_Y1(bundle);
+
     switch (get_RRROpcodeExtension_Y1(bundle)) {
     case OR_RRR_5_OPCODE_Y1:
+        gen_or(dc, rdst, rsrc, rsrcb);
         return;
     case AND_RRR_5_OPCODE_Y1:
     case NOR_RRR_5_OPCODE_Y1:
@@ -199,8 +635,12 @@ static void decode_rrr_5_opcode_y1(struct DisasContext *dc,
 static void decode_ldst0_opcode_y2(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
+    uint8_t rsrca = (uint8_t)get_SrcA_Y2(bundle);
+    uint8_t rsrcbdst = (uint8_t)get_SrcBDest_Y2(bundle);
+
     switch (get_Mode(bundle)) {
     case MODE_OPCODE_YA2:
+        gen_ld1s(dc, rsrcbdst, rsrca);
         return;
     case MODE_OPCODE_YB2:
     case MODE_OPCODE_YC2:
@@ -215,8 +655,12 @@ static void decode_ldst0_opcode_y2(struct DisasContext *dc,
 static void decode_ldst1_opcode_y2(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
+    uint8_t rsrca = (uint8_t)get_SrcA_Y2(bundle);
+    uint8_t rsrcbdst = (uint8_t)get_SrcBDest_Y2(bundle);
+
     switch (get_Mode(bundle)) {
     case MODE_OPCODE_YB2:
+        gen_ld4s(dc, rsrcbdst, rsrca);
         return;
     case MODE_OPCODE_YA2:
     case MODE_OPCODE_YC2:
@@ -231,8 +675,12 @@ static void decode_ldst1_opcode_y2(struct DisasContext *dc,
 static void decode_ldst2_opcode_y2(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
+    uint8_t rsrca = (uint8_t)get_SrcA_Y2(bundle);
+    uint8_t rsrcbdst = (uint8_t)get_SrcBDest_Y2(bundle);
+
     switch (get_Mode(bundle)) {
     case MODE_OPCODE_YC2:
+        gen_st4(dc, rsrca, rsrcbdst);
         return;
     case MODE_OPCODE_YA2:
     case MODE_OPCODE_YB2:
@@ -247,10 +695,15 @@ static void decode_ldst2_opcode_y2(struct DisasContext *dc,
 static void decode_ldst3_opcode_y2(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
+    uint8_t rsrca = (uint8_t)get_SrcA_Y2(bundle);
+    uint8_t rsrcbdst = (uint8_t)get_SrcBDest_Y2(bundle);
+
     switch (get_Mode(bundle)) {
     case MODE_OPCODE_YB2:
+        gen_ld(dc, rsrcbdst, rsrca);
         return;
     case MODE_OPCODE_YC2:
+        gen_st(dc, rsrca, rsrcbdst);
         return;
     case MODE_OPCODE_YA2:
     default:
@@ -264,13 +717,24 @@ static void decode_ldst3_opcode_y2(struct DisasContext *dc,
 static void decode_addli_opcode_x0(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
+    int16_t imm16 = (int16_t)get_Imm16_X0(bundle);
+
+    gen_addli(dc, rdst, rsrc, imm16);
 }
 
 static void decode_bf_opcode_x0(struct DisasContext *dc,
                                 tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
+    int8_t start = (int8_t)get_BFStart_X0(bundle);
+    int8_t end = (int8_t)get_BFEnd_X0(bundle);
+
     switch (get_BFOpcodeExtension_X0(bundle)) {
     case BFEXTU_BF_OPCODE_X0:
+        gen_bfextu(dc, rdst, rsrc, start, end);
         return;
     case BFEXTS_BF_OPCODE_X0:
     case BFINS_BF_OPCODE_X0:
@@ -287,16 +751,25 @@ static void decode_bf_opcode_x0(struct DisasContext *dc,
 static void decode_imm8_opcode_x0(struct DisasContext *dc,
                                   tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
+    int8_t imm8 = (int8_t)get_Imm8_X0(bundle);
+
     switch (get_Imm8OpcodeExtension_X0(bundle)) {
     case ADDI_IMM8_OPCODE_X0:
+        gen_addi(dc, rdst, rsrc, imm8);
         return;
     case ADDXI_IMM8_OPCODE_X0:
+        gen_addxi(dc, rdst, rsrc, imm8);
         return;
    case ANDI_IMM8_OPCODE_X0:
+        gen_andi(dc, rdst, rsrc, imm8);
         return;
    case CMPEQI_IMM8_OPCODE_X0:
+        gen_cmpeqi(dc, rdst, rsrc, imm8);
         return;
    case ORI_IMM8_OPCODE_X0:
+        gen_ori(dc, rdst, rsrc, imm8);
         return;
     case CMPLTSI_IMM8_OPCODE_X0:
     case CMPLTUI_IMM8_OPCODE_X0:
@@ -325,32 +798,43 @@ static void decode_rrr_0_opcode_x0(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
     uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+    uint8_t rsrcb = (uint8_t)get_SrcB_X0(bundle);
     uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
 
     switch (get_RRROpcodeExtension_X0(bundle)) {
     case ADD_RRR_0_OPCODE_X0:
+        gen_add(dc, rdst, rsrc, rsrcb);
         return;
     case CMOVEQZ_RRR_0_OPCODE_X0:
+        gen_cmoveqz(dc, rdst, rsrc, rsrcb);
         return;
     case CMOVNEZ_RRR_0_OPCODE_X0:
+        gen_cmovnez(dc, rdst, rsrc, rsrcb);
         return;
     case CMPNE_RRR_0_OPCODE_X0:
+        gen_cmpne(dc, rdst, rsrc, rsrcb);
         return;
     case MULX_RRR_0_OPCODE_X0:
+        gen_mulx(dc, rdst, rsrc, rsrcb);
         return;
     case OR_RRR_0_OPCODE_X0:
+        gen_or(dc, rdst, rsrc, rsrcb);
         return;
     case SHL3ADD_RRR_0_OPCODE_X0:
+        gen_shl3add(dc, rdst, rsrc, rsrcb);
         return;
     case SHLX_RRR_0_OPCODE_X0:
+        gen_shlx(dc, rdst, rsrc, rsrcb);
         return;
     case SUBX_RRR_0_OPCODE_X0:
+        gen_subx(dc, rdst, rsrc, rsrcb);
         return;
     case UNARY_RRR_0_OPCODE_X0:
         switch (get_UnaryOpcodeExtension_X0(bundle)) {
         case FNOP_UNARY_OPCODE_X0:
         case NOP_UNARY_OPCODE_X0:
             if (!rsrc && !rdst) {
+                gen_fnop();
                 return;
             }
             break;
@@ -368,6 +852,7 @@ static void decode_rrr_0_opcode_x0(struct DisasContext *dc,
         }
         break;
     case XOR_RRR_0_OPCODE_X0:
+        gen_xor(dc, rdst, rsrc, rsrcb);
         return;
     case ADDXSC_RRR_0_OPCODE_X0:
     case ADDX_RRR_0_OPCODE_X0:
@@ -533,8 +1018,13 @@ static void decode_rrr_0_opcode_x0(struct DisasContext *dc,
 static void decode_shift_opcode_x0(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
+    uint8_t shamt = (uint8_t)get_ShAmt_X0(bundle);
+
     switch (get_ShiftOpcodeExtension_X0(bundle)) {
     case SHLXI_SHIFT_OPCODE_X0:
+        gen_shlxi(dc, rdst, rsrc, shamt);
         return;
     case ROTLI_SHIFT_OPCODE_X0:
     case SHLI_SHIFT_OPCODE_X0:
@@ -558,24 +1048,40 @@ static void decode_shift_opcode_x0(struct DisasContext *dc,
 static void decode_shl16insli_opcode_x0(struct DisasContext *dc,
                                         tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
+    uint16_t uimm16 = (uint16_t)get_Imm16_X0(bundle);
+
+    gen_shl16insli(dc, rdst, rsrc, uimm16);
 }
 
 static void decode_addli_opcode_x1(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_X1(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X1(bundle);
+    int16_t imm16 = (int16_t)get_Imm16_X1(bundle);
+
+    gen_addli(dc, rdst, rsrc, imm16);
 }
 
 static void decode_branch_opcode_x1(struct DisasContext *dc,
                                     tilegx_bundle_bits bundle)
 {
+    uint8_t src = (uint8_t)get_SrcA_X1(bundle);
+    int32_t off = get_BrOff_X1(bundle);
+
     switch (get_BrType_X1(bundle)) {
     case BEQZT_BRANCH_OPCODE_X1:
     case BEQZ_BRANCH_OPCODE_X1:
+        gen_beqz(dc, src, sign_extend(off, 17));
         return;
     case BNEZT_BRANCH_OPCODE_X1:
     case BNEZ_BRANCH_OPCODE_X1:
+        gen_bnezt(dc, src, sign_extend(off, 17));
         return;
     case BLBC_BRANCH_OPCODE_X1:
+        gen_blbc(dc, src, sign_extend(off, 17));
         return;
     case BGEZT_BRANCH_OPCODE_X1:
     case BGEZ_BRANCH_OPCODE_X1:
@@ -599,14 +1105,22 @@ static void decode_branch_opcode_x1(struct DisasContext *dc,
 static void decode_imm8_opcode_x1(struct DisasContext *dc,
                                   tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_X1(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X1(bundle);
+    int8_t imm8 = (int8_t)get_Imm8_X1(bundle);
+
     switch (get_Imm8OpcodeExtension_X1(bundle)) {
     case ADDI_IMM8_OPCODE_X1:
+        gen_addi(dc, rdst, rsrc, imm8);
         return;
     case ADDXI_IMM8_OPCODE_X1:
+        gen_addxi(dc, rdst, rsrc, imm8);
         return;
     case CMPEQI_IMM8_OPCODE_X1:
+        gen_cmpeqi(dc, rdst, rsrc, imm8);
         return;
     case CMPLTUI_IMM8_OPCODE_X1:
+        gen_cmpltui(dc, rdst, rsrc, imm8);
         return;
     case ANDI_IMM8_OPCODE_X1:
     case CMPLTSI_IMM8_OPCODE_X1:
@@ -660,10 +1174,14 @@ static void decode_imm8_opcode_x1(struct DisasContext *dc,
 static void decode_jump_opcode_x1(struct DisasContext *dc,
                                   tilegx_bundle_bits bundle)
 {
+    int off = sign_extend(get_JumpOff_X1(bundle), 27);
+
     switch (get_JumpOpcodeExtension_X1(bundle)) {
     case JAL_JUMP_OPCODE_X1:
+        gen_jal(dc, off);
         return;
     case J_JUMP_OPCODE_X1:
+        gen_j(dc, off);
         return;
     default:
         break;
@@ -677,28 +1195,37 @@ static void decode_rrr_0_opcode_x1(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
     uint8_t rsrc = (uint8_t)get_SrcA_X1(bundle);
+    uint8_t rsrcb = (uint8_t)get_SrcB_X1(bundle);
     uint8_t rdst = (uint8_t)get_Dest_X1(bundle);
 
     switch (get_RRROpcodeExtension_X1(bundle)) {
     case ADDX_RRR_0_OPCODE_X1:
+        gen_addx(dc, rdst, rsrc, rsrcb);
         return;
     case ADD_RRR_0_OPCODE_X1:
+        gen_add(dc, rdst, rsrc, rsrcb);
         return;
     case AND_RRR_0_OPCODE_X1:
+        gen_and(dc, rdst, rsrc, rsrcb);
         return;
     case OR_RRR_0_OPCODE_X1:
+        gen_or(dc, rdst, rsrc, rsrcb);
         return;
     case CMPNE_RRR_0_OPCODE_X1:
+        gen_cmpne(dc, rdst, rsrc, rsrcb);
         return;
     case SHL3ADD_RRR_0_OPCODE_X1:
+        gen_shl3add(dc, rdst, rsrc, rsrcb);
         return;
     case ST4_RRR_0_OPCODE_X1:
         if (!rdst) {
+            gen_st4(dc, rsrc, rsrcb);
             return;
         }
         break;
     case ST_RRR_0_OPCODE_X1:
         if (!rdst) {
+            gen_st(dc, rsrc, rsrcb);
             return;
         }
         break;
@@ -707,26 +1234,32 @@ static void decode_rrr_0_opcode_x1(struct DisasContext *dc,
         case NOP_UNARY_OPCODE_X1:
         case FNOP_UNARY_OPCODE_X1:
             if (!rdst && !rsrc) {
+                gen_fnop();
                 return;
             }
             break;
         case JRP_UNARY_OPCODE_X1:
         case JR_UNARY_OPCODE_X1:
             if (!rdst) {
+                gen_jr(dc, rsrc);
                 return;
             }
             break;
         case LD4S_UNARY_OPCODE_X1:
+            gen_ld4s(dc, rdst, rsrc);
             return;
         case LD_UNARY_OPCODE_X1:
+            gen_ld(dc, rdst, rsrc);
             return;
         case LNK_UNARY_OPCODE_X1:
             if (!rsrc) {
+                gen_lnk(dc, (uint8_t) get_Dest_X1(bundle));
                 return;
             }
             break;
        case SWINT1_UNARY_OPCODE_X1:
            if (!rsrc && !rdst) {
+                gen_swint1(dc);
                return;
            }
            break;
@@ -873,8 +1406,13 @@ static void decode_rrr_0_opcode_x1(struct DisasContext *dc,
 static void decode_shift_opcode_x1(struct DisasContext *dc,
                                    tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_X1(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X1(bundle);
+    uint8_t shamt = (uint8_t)get_ShAmt_X1(bundle);
+
     switch (get_ShiftOpcodeExtension_X1(bundle)) {
     case SHLXI_SHIFT_OPCODE_X1:
+        gen_shlxi(dc, rdst, rsrc, shamt);
         return;
     case ROTLI_SHIFT_OPCODE_X1:
     case SHLI_SHIFT_OPCODE_X1:
@@ -898,6 +1436,11 @@ static void decode_shift_opcode_x1(struct DisasContext *dc,
 static void decode_shl16insli_opcode_x1(struct DisasContext *dc,
                                         tilegx_bundle_bits bundle)
 {
+    uint8_t rsrc = (uint8_t)get_SrcA_X1(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X1(bundle);
+    uint16_t uimm16 = (uint16_t)get_Imm16_X1(bundle);
+
+    gen_shl16insli(dc, rdst, rsrc, uimm16);
 }
 
 static void decode_y0(struct DisasContext *dc, tilegx_bundle_bits bundle)