From patchwork Tue Apr 2 04:23:26 2013
X-Patchwork-Submitter: Richard Henderson
X-Patchwork-Id: 232867
From: Richard Henderson <rth@twiddle.net>
To: qemu-devel@nongnu.org
Cc: av1474@comtv.ru, agraf@suse.de, aurelien@aurel32.net
Date: Mon, 1 Apr 2013 21:23:26 -0700
Message-Id: <1364876610-3933-24-git-send-email-rth@twiddle.net>
X-Mailer: git-send-email 1.8.1.4
In-Reply-To: <1364876610-3933-1-git-send-email-rth@twiddle.net>
References: <1364876610-3933-1-git-send-email-rth@twiddle.net>
Subject: [Qemu-devel] [PATCH v3 23/27] tcg-ppc64: Rewrite setcond

Never use MFCR, as the latency is really high.  Even MFOCRF, at half the
latency of MFCR, isn't as fast as what we can do with carry tricks.  The
ADDIC+SUBFE trick only works for full-register-width operands, since we
need the carry-out from bit 63, so for ppc64 we must extend 32-bit inputs.
Use ISEL if available.
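As a quick illustration of the carry trick mentioned above (not part of the
patch; the helper name below is made up), the ADDIC+SUBFE identity can be
checked in a few lines of C.  CA here stands in for PowerPC's carry bit,
i.e. the carry out of the most-significant bit of the 64-bit addition,
which is why narrower inputs have to be extended first:

  #include <assert.h>
  #include <stdint.h>

  /* Sketch of the setcond(NE, x, 0) sequence:
   *   addic t, x, -1   -- t = x - 1, CA = carry out of the 64-bit add
   *   subfe d, t, x    -- d = ~t + x + CA = CA
   * CA is set exactly when x != 0.  */
  static uint64_t setcond_ne0_model(uint64_t x)
  {
      uint64_t t = x + UINT64_MAX;   /* addic: x + (-1) */
      uint64_t ca = (t < x);         /* carry out of the top bit */
      return ~t + x + ca;            /* subfe result == ca */
  }

  int main(void)
  {
      assert(setcond_ne0_model(0) == 0);
      assert(setcond_ne0_model(1) == 1);
      assert(setcond_ne0_model(UINT64_MAX) == 1);
      return 0;
  }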
Reviewed-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 tcg/ppc64/tcg-target.c | 264 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 181 insertions(+), 83 deletions(-)

diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
index 96a05d9..30954ff 100644
--- a/tcg/ppc64/tcg-target.c
+++ b/tcg/ppc64/tcg-target.c
@@ -45,6 +45,7 @@ static uint8_t *tb_ret_addr;
 #endif
 
 #define HAVE_ISA_2_06  0
+#define HAVE_ISEL      0
 
 #ifdef CONFIG_USE_GUEST_BASE
 #define TCG_GUEST_BASE_REG 30
@@ -389,6 +390,7 @@ static int tcg_target_const_match (tcg_target_long val,
 #define ORC    XO31(412)
 #define EQV    XO31(284)
 #define NAND   XO31(476)
+#define ISEL   XO31( 15)
 
 #define MULLD  XO31(233)
 #define MULHD  XO31( 73)
@@ -443,6 +445,7 @@ static int tcg_target_const_match (tcg_target_long val,
 #define BT(n, c)  (((c)+((n)*4))<<21)
 #define BA(n, c)  (((c)+((n)*4))<<16)
 #define BB(n, c)  (((c)+((n)*4))<<11)
+#define BC_(n, c) (((c)+((n)*4))<<6)
 
 #define BO_COND_TRUE  BO (12)
 #define BO_COND_FALSE BO ( 4)
@@ -468,6 +471,20 @@ static const uint32_t tcg_to_bc[] = {
     [TCG_COND_GTU] = BC | BI (7, CR_GT) | BO_COND_TRUE,
 };
 
+/* The low bit here is set if the RA and RB fields must be inverted.  */
+static const uint32_t tcg_to_isel[] = {
+    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
+    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
+    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
+    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
+    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
+    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
+    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
+    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
+    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
+    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
+};
+
 static inline void tcg_out_mov(TCGContext *s, TCGType type,
                                TCGReg ret, TCGReg arg)
 {
@@ -1124,105 +1141,186 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
     }
 }
 
-static void tcg_out_setcond (TCGContext *s, TCGType type, TCGCond cond,
-                             TCGArg arg0, TCGArg arg1, TCGArg arg2,
-                             int const_arg2)
+static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
+                                TCGReg dst, TCGReg src)
 {
-    int crop, sh, arg;
+    tcg_out32(s, (type == TCG_TYPE_I64 ? CNTLZD : CNTLZW) | RS(dst) | RA(src));
+    tcg_out_shri64(s, dst, dst, type == TCG_TYPE_I64 ? 6 : 5);
+}
 
-    switch (cond) {
-    case TCG_COND_EQ:
-        if (const_arg2) {
-            if (!arg2) {
-                arg = arg1;
-            }
-            else {
-                arg = 0;
-                if ((uint16_t) arg2 == arg2) {
-                    tcg_out32(s, XORI | SAI(arg1, 0, arg2));
-                }
-                else {
-                    tcg_out_movi (s, type, 0, arg2);
-                    tcg_out32 (s, XOR | SAB (arg1, 0, 0));
-                }
-            }
-        }
-        else {
-            arg = 0;
-            tcg_out32 (s, XOR | SAB (arg1, 0, arg2));
-        }
+static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
+{
+    /* X != 0 implies X + -1 generates a carry.  Extra addition
+       trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.  */
+    if (dst != src) {
+        tcg_out32(s, ADDIC | TAI(dst, src, -1));
+        tcg_out32(s, SUBFE | TAB(dst, dst, src));
+    } else {
+        tcg_out32(s, ADDIC | TAI(0, src, -1));
+        tcg_out32(s, SUBFE | TAB(dst, 0, src));
+    }
+}
 
-        if (type == TCG_TYPE_I64) {
-            tcg_out32 (s, CNTLZD | RS (arg) | RA (0));
-            tcg_out_rld (s, RLDICL, arg0, 0, 58, 6);
-        }
-        else {
-            tcg_out32 (s, CNTLZW | RS (arg) | RA (0));
-            tcg_out_rlw(s, RLWINM, arg0, 0, 27, 5, 31);
+static int tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
+                               bool const_arg2)
+{
+    if (const_arg2) {
+        if ((uint32_t)arg2 == arg2) {
+            tcg_out_xori32(s, 0, arg1, arg2);
+        } else {
+            tcg_out_movi(s, TCG_TYPE_I64, 0, arg2);
+            tcg_out32(s, XOR | SAB(arg1, 0, 0));
         }
-        break;
+    } else {
+        tcg_out32(s, XOR | SAB(arg1, 0, arg2));
+    }
+    return 0;
+}
 
-    case TCG_COND_NE:
-        if (const_arg2) {
-            if (!arg2) {
-                arg = arg1;
-            }
-            else {
-                arg = 0;
-                if ((uint16_t) arg2 == arg2) {
-                    tcg_out32(s, XORI | SAI(arg1, 0, arg2));
-                } else {
-                    tcg_out_movi (s, type, 0, arg2);
-                    tcg_out32 (s, XOR | SAB (arg1, 0, 0));
-                }
-            }
+static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
+                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
+                            int const_arg2)
+{
+    bool invert, swap;
+
+    /* Handle common and trivial cases before handling anything else.  */
+    if (arg2 == 0) {
+        switch (cond) {
+        case TCG_COND_EQ:
+            tcg_out_setcond_eq0(s, type, arg0, arg1);
+            return;
+        case TCG_COND_NE:
+            if (type == TCG_TYPE_I32) {
+                tcg_out_ext32u(s, 0, arg1);
+                arg1 = 0;
+            }
+            tcg_out_setcond_ne0(s, arg0, arg1);
+            return;
+        case TCG_COND_GE:
+            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
+            arg1 = arg0;
+            /* FALLTHRU */
+        case TCG_COND_LT:
+            /* Extract the sign bit.  */
+            tcg_out_rld(s, RLDICL, arg0, arg1,
+                        type == TCG_TYPE_I64 ? 1 : 33, 63);
+            return;
+        default:
+            break;
         }
-        else {
-            arg = 0;
-            tcg_out32 (s, XOR | SAB (arg1, 0, arg2));
-        }
+    }
 
-        if (arg == arg1 && arg1 == arg0) {
-            tcg_out32(s, ADDIC | TAI(0, arg, -1));
-            tcg_out32(s, SUBFE | TAB(arg0, 0, arg));
+    /* If we have ISEL, we can implement everything with 3 or 4 insns.
+       All other cases below are also at least 3 insns, so speed up the
+       code generator by not considering them and always using ISEL.  */
+    if (HAVE_ISEL) {
+        int isel, tab;
+
+        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
+
+        isel = tcg_to_isel[cond];
+
+        tcg_out_movi(s, type, arg0, 1);
+        if (isel & 1) {
+            /* arg0 = (bc ? 0 : 1) */
+            tab = TAB(arg0, 0, arg0);
+            isel &= ~1;
+        } else {
+            /* arg0 = (bc ? 1 : 0) */
+            tcg_out_movi(s, type, 0, 0);
+            tab = TAB(arg0, arg0, 0);
         }
-        else {
-            tcg_out32(s, ADDIC | TAI(arg0, arg, -1));
-            tcg_out32(s, SUBFE | TAB(arg0, arg0, arg));
+        tcg_out32(s, isel | tab);
+        return;
+    }
+
+    invert = swap = false;
+    switch (cond) {
+    case TCG_COND_EQ:
+        /* Given that we can ignore the high bits in setcond_eq0, make
+           sure we go through the XORIS+XORI path in setcond_xor.  */
+        if (type == TCG_TYPE_I32) {
+            arg2 = (uint32_t)arg2;
         }
-        break;
+        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
+        tcg_out_setcond_eq0(s, type, arg0, arg1);
+        return;
 
-    case TCG_COND_GT:
-    case TCG_COND_GTU:
-        sh = 30;
-        crop = 0;
-        goto crtest;
+    case TCG_COND_NE:
+        if (type == TCG_TYPE_I32) {
+            tcg_out32(s, EXTSW | RS(arg1) | RA(0));
+            arg1 = 0;
+            if (const_arg2) {
+                arg2 = (int32_t)arg2;
+            } else {
+                tcg_out32(s, EXTSW | RS(arg2) | RA(arg0));
+                arg2 = arg0;
+            }
+        }
+        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
+        tcg_out_setcond_ne0(s, arg0, arg1);
+        return;
 
-    case TCG_COND_LT:
+    /* Reduce the ordered relations to GT.  */
     case TCG_COND_LTU:
-        sh = 29;
-        crop = 0;
-        goto crtest;
-
-    case TCG_COND_GE:
-    case TCG_COND_GEU:
-        sh = 31;
-        crop = CRNOR | BT (7, CR_EQ) | BA (7, CR_LT) | BB (7, CR_LT);
-        goto crtest;
-
-    case TCG_COND_LE:
+    case TCG_COND_LT:
+        swap = true;
+        break;
     case TCG_COND_LEU:
-        sh = 31;
-        crop = CRNOR | BT (7, CR_EQ) | BA (7, CR_GT) | BB (7, CR_GT);
-    crtest:
-        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
-        if (crop) tcg_out32 (s, crop);
-        tcg_out32 (s, MFCR | RT (0));
-        tcg_out_rlw(s, RLWINM, arg0, 0, sh, 31, 31);
+    case TCG_COND_LE:
+        invert = true;
+        break;
+    case TCG_COND_GEU:
+    case TCG_COND_GE:
+        swap = true, invert = true;
+        break;
+    case TCG_COND_GTU:
+    case TCG_COND_GT:
         break;
-
     default:
-        tcg_abort ();
+        tcg_abort();
+    }
+
+    /* In 64-bit mode, carry-out is only generated from the 63-rd bit.
+       Thus 32-bit inputs must be sign-extended.  */
+    if (type == TCG_TYPE_I32) {
+        tcg_out32(s, EXTSW | RS(arg1) | RA(0));
+        arg1 = 0;
+        if (const_arg2) {
+            arg2 = (int32_t)arg2;
+        } else {
+            tcg_out32(s, EXTSW | RS(arg2) | RA(arg0));
+            arg2 = arg0;
+        }
+    }
+    if (swap) {
+        if (const_arg2) {
+            tcg_out_movi(s, TCG_TYPE_I64, 0, arg2);
+            arg2 = arg1;
+            arg1 = 0;
+            const_arg2 = false;
+        } else {
+            int t = arg1;
+            arg1 = arg2;
+            arg2 = t;
+        }
+    }
+
+    /* X > Y implies Y - X generates a carry-out.  This works for both
+       signed and unsigned comparisons.  */
+    if (const_arg2 && (int16_t)arg2 == arg2) {
+        tcg_out32(s, SUBFIC | TAI(0, arg1, arg2));
+    } else {
+        if (const_arg2) {
+            tcg_out_movi(s, type, 0, arg2);
+            arg2 = 0;
+        }
+        tcg_out32(s, SUBFC | TAB(0, arg1, arg2));
+    }
+    tcg_out_movi(s, type, arg0, 0);
+    tcg_out32(s, ADDE | TAB(arg0, arg0, arg0));
+    if (invert) {
+        tcg_out32(s, XORI | SAI(arg0, arg0, 1));
     }
 }
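
For completeness, the eq0 helper's count-leading-zeros trick is just as easy
to model in portable C (again only an illustration, with a made-up function
name): the leading-zero count of a 64-bit value is 64 exactly when the value
is zero, so shifting the count right by 6 yields the 0/1 result directly;
the 32-bit variant uses CNTLZW and a shift by 5 instead.

  #include <assert.h>
  #include <stdint.h>

  /* Model of the sequence emitted by tcg_out_setcond_eq0 for I64:
   *   cntlzd d, x   then   srdi d, d, 6  */
  static uint64_t setcond_eq0_model(uint64_t x)
  {
      unsigned clz = 0;
      while (clz < 64 && !(x & (UINT64_C(1) << (63 - clz)))) {
          clz++;                     /* portable stand-in for cntlzd */
      }
      return clz >> 6;               /* 64 >> 6 == 1, anything less == 0 */
  }

  int main(void)
  {
      assert(setcond_eq0_model(0) == 1);
      assert(setcond_eq0_model(1) == 0);
      assert(setcond_eq0_model(UINT64_MAX) == 0);
      return 0;
  }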