From: Richard Henderson
To: qemu-devel@nongnu.org
Cc: aurelien@aurel32.net
Date: Tue, 23 Apr 2013 13:46:47 -0700
Message-Id: <1366750012-25015-16-git-send-email-rth@twiddle.net>
In-Reply-To: <1366750012-25015-1-git-send-email-rth@twiddle.net>
Subject: [Qemu-devel] [PATCH v6 15/20] tcg-arm: Split out tcg_out_tlb_read

Share code between qemu_ld and qemu_st to process the tlb.
Reviewed-by: Aurelien Jarno
Signed-off-by: Richard Henderson
---
 tcg/arm/tcg-target.c | 169 +++++++++++++++++++++------------------------------
 1 file changed, 70 insertions(+), 99 deletions(-)

diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index 92d7cee..a96471c 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -1147,40 +1147,15 @@ static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
     argreg = tcg_out_arg_reg32(s, argreg, arghi);
     return argreg;
 }
-#endif /* SOFTMMU */
 
 #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
 
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
-{
-    int addr_reg, data_reg, data_reg2, bswap;
-#ifdef CONFIG_SOFTMMU
-    int mem_index, s_bits, tlb_offset;
-    TCGReg argreg;
-# if TARGET_LONG_BITS == 64
-    int addr_reg2;
-# endif
-    uint32_t *label_ptr;
-#endif
-
-#ifdef TARGET_WORDS_BIGENDIAN
-    bswap = 1;
-#else
-    bswap = 0;
-#endif
-    data_reg = *args++;
-    if (opc == 3)
-        data_reg2 = *args++;
-    else
-        data_reg2 = 0; /* suppress warning */
-    addr_reg = *args++;
-#ifdef CONFIG_SOFTMMU
-# if TARGET_LONG_BITS == 64
-    addr_reg2 = *args++;
-# endif
-    mem_index = *args;
-    s_bits = opc & 3;
+/* Load and compare a TLB entry, leaving the flags set.  Leaves R0 pointing
+   to the tlb entry.  Clobbers R1 and TMP.  */
+static void tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
+                             int s_bits, int tlb_offset)
+{
 
     /* Should generate something like the following:
      *  shr r8, addr_reg, #TARGET_PAGE_BITS
      *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
@@ -1190,13 +1165,13 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
 #  error
 # endif
     tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
-                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+                    0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
     tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                     TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
     tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                     TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
+
     /* We assume that the offset is contained within 20 bits. */
-    tlb_offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
     assert((tlb_offset & ~0xfffff) == 0);
     if (tlb_offset > 0xfff) {
         tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
@@ -1206,16 +1181,48 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     tcg_out_ld32_12wb(s, COND_AL, TCG_REG_R1, TCG_REG_R0, tlb_offset);
     tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                     TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+
     /* Check alignment. */
-    if (s_bits)
+    if (s_bits) {
         tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
-                        0, addr_reg, (1 << s_bits) - 1);
-# if TARGET_LONG_BITS == 64
-    /* XXX: possibly we could use a block data load in the first access. */
-    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, 4);
-    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
-                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
-# endif
+                        0, addrlo, (1 << s_bits) - 1);
+    }
+
+    if (TARGET_LONG_BITS == 64) {
+        /* XXX: possibly we could use a block data load in the first access. */
+        tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, 4);
+        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
+                        TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0));
+    }
+}
+#endif /* SOFTMMU */
+
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+{
+    TCGReg addr_reg, data_reg, data_reg2;
+    bool bswap;
+#ifdef CONFIG_SOFTMMU
+    int mem_index, s_bits;
+    TCGReg argreg, addr_reg2;
+    uint32_t *label_ptr;
+#endif
+#ifdef TARGET_WORDS_BIGENDIAN
+    bswap = 1;
+#else
+    bswap = 0;
+#endif
+
+    data_reg = *args++;
+    data_reg2 = (opc == 3 ? *args++ : 0);
+    addr_reg = *args++;
+#ifdef CONFIG_SOFTMMU
+    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
+    mem_index = *args;
+    s_bits = opc & 3;
+
+    tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
+                     offsetof(CPUArchState, tlb_table[mem_index][0].addr_read));
+
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                     offsetof(CPUTLBEntry, addend)
                     - offsetof(CPUTLBEntry, addr_read));
@@ -1271,11 +1278,11 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
      */
     argreg = TCG_REG_R0;
     argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
-#if TARGET_LONG_BITS == 64
-    argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
-#else
-    argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
-#endif
+    if (TARGET_LONG_BITS == 64) {
+        argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
+    } else {
+        argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
+    }
     argreg = tcg_out_arg_imm32(s, argreg, mem_index);
     tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);
 
@@ -1302,8 +1309,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
 #else /* !CONFIG_SOFTMMU */
     if (GUEST_BASE) {
         uint32_t offset = GUEST_BASE;
-        int i;
-        int rot;
+        int i, rot;
 
         while (offset) {
             i = ctz32(offset) & ~1;
@@ -1362,68 +1368,33 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
 #endif
 }
 
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
 {
-    int addr_reg, data_reg, data_reg2, bswap;
+    TCGReg addr_reg, data_reg, data_reg2;
+    bool bswap;
 #ifdef CONFIG_SOFTMMU
-    int mem_index, s_bits, tlb_offset;
-    TCGReg argreg;
-# if TARGET_LONG_BITS == 64
-    int addr_reg2;
-# endif
+    int mem_index, s_bits;
+    TCGReg argreg, addr_reg2;
     uint32_t *label_ptr;
 #endif
-
 #ifdef TARGET_WORDS_BIGENDIAN
     bswap = 1;
 #else
     bswap = 0;
 #endif
+
     data_reg = *args++;
-    if (opc == 3)
-        data_reg2 = *args++;
-    else
-        data_reg2 = 0; /* suppress warning */
+    data_reg2 = (opc == 3 ? *args++ : 0);
    addr_reg = *args++;
 #ifdef CONFIG_SOFTMMU
-# if TARGET_LONG_BITS == 64
-    addr_reg2 = *args++;
-# endif
+    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
     mem_index = *args;
     s_bits = opc & 3;
 
-    /* Should generate something like the following:
-     *  shr r8, addr_reg, #TARGET_PAGE_BITS
-     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
-     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
-     */
-    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
-                    TCG_REG_TMP, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
-    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
-                    TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
-    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
-                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
-    /* We assume that the offset is contained within 20 bits. */
-    tlb_offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
-    assert((tlb_offset & ~0xfffff) == 0);
-    if (tlb_offset > 0xfff) {
-        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
-                        0xa00 | (tlb_offset >> 12));
-        tlb_offset &= 0xfff;
-    }
-    tcg_out_ld32_12wb(s, COND_AL, TCG_REG_R1, TCG_REG_R0, tlb_offset);
-    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
-                    TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
-    /* Check alignment. */
-    if (s_bits)
-        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
-                        0, addr_reg, (1 << s_bits) - 1);
-# if TARGET_LONG_BITS == 64
-    /* XXX: possibly we could use a block data load in the first access. */
-    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, 4);
-    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
-                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
-# endif
+    tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
+                     offsetof(CPUArchState,
+                              tlb_table[mem_index][0].addr_write));
+
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                     offsetof(CPUTLBEntry, addend)
                     - offsetof(CPUTLBEntry, addr_write));
@@ -1472,11 +1443,11 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
      */
     argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
-#if TARGET_LONG_BITS == 64
-    argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
-#else
-    argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
-#endif
+    if (TARGET_LONG_BITS == 64) {
+        argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
+    } else {
+        argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
+    }
 
     switch (opc) {
     case 0:
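A side note on the "offset is contained within 20 bits" assumption used by tcg_out_tlb_read above: an ARM data-processing immediate is an 8-bit value rotated right by an even amount, so the high 8 bits of the TLB offset are folded into an ADD whose immediate field is 0xa00 | (tlb_offset >> 12) (rotate by 20, i.e. a left shift by 12), and the remaining low 12 bits fit the displacement of the following load. The program below is a standalone sketch of that arithmetic only; it is not part of the patch, and main() plus the local variable names are purely illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t tlb_offset = 0x12345;          /* any offset below 1 << 20 */

    assert((tlb_offset & ~0xfffffu) == 0);  /* the 20-bit assumption */

    /* High part: the 8-bit immediate (tlb_offset >> 12) rotated right by 20
     * bits, which for a value below 256 is the same as shifting left by 12.
     * This is what the ADD with immediate field 0xa00 | (tlb_offset >> 12)
     * contributes.  */
    uint32_t imm8 = tlb_offset >> 12;
    uint32_t hi = imm8 << 12;

    /* Low part: fits in the 12-bit displacement of the subsequent load.  */
    uint32_t lo = tlb_offset & 0xfffu;

    assert(hi + lo == tlb_offset);
    printf("offset 0x%05x -> add 0x%05x + disp 0x%03x\n",
           (unsigned)tlb_offset, (unsigned)hi, (unsigned)lo);
    return 0;
}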