From patchwork Sun Oct 28 06:13:53 2012
From: David Miller <davem@davemloft.net>
Date: Sun, 28 Oct 2012 02:13:53 -0400 (EDT)
Subject: [PATCH 2/2] sparc64: Use pause instruction when available.
Message-Id: <20121028.021353.459456106731235403.davem@davemloft.net>
To: sparclinux@vger.kernel.org
X-Patchwork-Id: 194649
X-Patchwork-Delegate: davem@davemloft.net

In atomic backoff and cpu_relax(), use the pause instruction found on
SPARC-T4 and later.

It makes the cpu strand unselectable for the given number of cycles,
unless an intervening disrupting trap occurs.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/include/asm/backoff.h      | 32 +++++++++++++++++++-------------
 arch/sparc/include/asm/processor_64.h | 13 ++++++++++---
 arch/sparc/kernel/entry.h             |  7 +++++++
 arch/sparc/kernel/setup_64.c          | 21 +++++++++++++++++++++
 arch/sparc/kernel/vmlinux.lds.S       |  5 +++++
 5 files changed, 62 insertions(+), 16 deletions(-)
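(Aside, not part of the patch proper: below is a user-space sketch of
the hwcap-gated idea the patch wires into the kernel.  On T4, PAUSE is
a write of the desired cycle count to %asr27; on older chips the
fallback is the same three-reads-of-%ccr delay this patch replaces at
boot.  The have_pause flag and cpu_pause() helper are invented names,
and the AV_SPARC_PAUSE value is assumed from the sparc uapi hwcap
bits rather than quoted from this mail.  It only builds on sparc64
Linux with glibc's getauxval().)

    /* Sketch only: hwcap-gated PAUSE vs. the pre-T4 delay. */
    #include <stdio.h>
    #include <sys/auxv.h>

    #define AV_SPARC_PAUSE 0x01000000   /* assumed hwcap bit value */

    static int have_pause;              /* invented name */

    static void cpu_pause(void)         /* invented name */
    {
            if (have_pause)
                    /* stall this strand for 128 cycles, or until a
                     * disrupting trap cuts the pause short
                     */
                    __asm__ __volatile__("wr %%g0, 128, %%asr27"
                                         ::: "memory");
            else
                    /* older chips: reads of %ccr as a cheap delay */
                    __asm__ __volatile__("rd %%ccr, %%g0\n\t"
                                         "rd %%ccr, %%g0\n\t"
                                         "rd %%ccr, %%g0"
                                         ::: "memory");
    }

    int main(void)
    {
            int i;

            have_pause = !!(getauxval(AT_HWCAP) & AV_SPARC_PAUSE);
            printf("PAUSE available: %s\n", have_pause ? "yes" : "no");
            for (i = 0; i < 1000; i++)  /* stand-in spin loop */
                    cpu_pause();
            return 0;
    }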
+ "rd %%ccr, %%g0\n\t" \ + ".section .pause_patch,\"ax\"\n\t"\ + ".word 99b\n\t" \ + "wr %%g0, 128, %%asr27\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + ".previous" \ ::: "memory") /* Prefetch support. This is tuned for UltraSPARC-III and later. diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index 0c218e4..51742df 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -59,6 +59,13 @@ struct popc_6insn_patch_entry { extern struct popc_6insn_patch_entry __popc_6insn_patch, __popc_6insn_patch_end; +struct pause_patch_entry { + unsigned int addr; + unsigned int insns[3]; +}; +extern struct pause_patch_entry __pause_patch, + __pause_patch_end; + extern void __init per_cpu_patch(void); extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, struct sun4v_1insn_patch_entry *); diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 0800e71..b45cff4 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -316,6 +316,25 @@ static void __init popc_patch(void) } } +static void __init pause_patch(void) +{ + struct pause_patch_entry *p; + + p = &__pause_patch; + while (p < &__pause_patch_end) { + unsigned long i, addr = p->addr; + + for (i = 0; i < 3; i++) { + *(unsigned int *) (addr + (i * 4)) = p->insns[i]; + wmb(); + __asm__ __volatile__("flush %0" + : : "r" (addr + (i * 4))); + } + + p++; + } +} + #ifdef CONFIG_SMP void __init boot_cpu_id_too_large(int cpu) { @@ -528,6 +547,8 @@ static void __init init_sparc64_elf_hwcap(void) if (sparc64_elf_hwcap & AV_SPARC_POPC) popc_patch(); + if (sparc64_elf_hwcap & AV_SPARC_PAUSE) + pause_patch(); } void __init setup_arch(char **cmdline_p) diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 89c2c29..847f9f7 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -132,6 +132,11 @@ SECTIONS *(.popc_6insn_patch) __popc_6insn_patch_end = .; } + .pause_patch : { + __pause_patch = .; + *(.pause_patch) + __pause_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) . = ALIGN(PAGE_SIZE);