
[RFC,v1,14/16] powerpc/32: implement fast entry for syscalls on non BOOKE

Message ID ba23b45ec5e4a0acb040cacc48949f7bc984b0ee.1549630193.git.christophe.leroy@c-s.fr (mailing list archive)
State Superseded
Headers show
Series powerpc/32: Implement fast syscall entry | expand

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success next/apply_patch Successfully applied
snowpatch_ozlabs/checkpatch fail total: 54 errors, 2 warnings, 3 checks, 179 lines checked

Commit Message

Christophe Leroy Feb. 8, 2019, 12:52 p.m. UTC
This patch implements a fast entry for syscalls.

Syscalls don't have to preserve non-volatile registers, except LR,
so this fast entry lets the volatile registers be clobbered.

As this entry is dedicated to syscalls, it always sets MSR_EE and
warns if MSR_EE was previously off.

It also assumes the call always comes from user mode; system calls
from the kernel are unexpected.

The overall series improves the null_syscall selftest by 12.5% on an
83xx and by 17% on an 8xx.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/kernel/entry_32.S | 42 +++++++++++++++++++++
 arch/powerpc/kernel/head_32.S  |  3 +-
 arch/powerpc/kernel/head_32.h  | 85 ++++++++++++++++++++++++++++++++++++++++--
 arch/powerpc/kernel/head_40x.S |  3 +-
 arch/powerpc/kernel/head_8xx.S |  3 +-
 5 files changed, 126 insertions(+), 10 deletions(-)
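
For context, the null_syscall figure quoted above comes from timing a tight
loop of trivial system calls, so the gain is essentially pure entry/exit
overhead. Below is a minimal, self-contained sketch of that kind of
measurement; it is not the powerpc selftest itself, and the iteration count,
the use of getppid() and the clock source are illustrative assumptions.

/*
 * Rough null-syscall latency measurement: time a loop of a trivial
 * syscall and report the average cost per call.
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	const long iterations = 10000000;
	struct timespec start, end;
	double ns;
	long i;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < iterations; i++)
		getppid();	/* trivial syscall, no real work done */
	clock_gettime(CLOCK_MONOTONIC, &end);

	ns = (end.tv_sec - start.tv_sec) * 1e9 +
	     (end.tv_nsec - start.tv_nsec);
	printf("%.1f ns per syscall\n", ns / iterations);
	return 0;
}

The in-tree benchmark behind the quoted numbers appears to live under
tools/testing/selftests/powerpc/benchmarks/ (null_syscall), run before and
after applying the series.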

Comments

Christophe Leroy Feb. 10, 2019, 6:05 p.m. UTC | #1
On 08/02/2019 at 13:52, Christophe Leroy wrote:
> This patch implements a fast entry for syscalls.
> 
> Syscalls don't have to preserve non-volatile registers, except LR,
> so this fast entry lets the volatile registers be clobbered.
> 
> As this entry is dedicated to syscalls, it always sets MSR_EE and
> warns if MSR_EE was previously off.
> 
> It also assumes the call always comes from user mode; system calls
> from the kernel are unexpected.
> 
> The overall series improves the null_syscall selftest by 12.5% on an
> 83xx and by 17% on an 8xx.
> 
> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>

The following addition is required to avoid a build failure on 40x:
global_dbcr0 is defined in entry_32.S and is now also accessed from
head_40x.S, so the symbol has to be made global.

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 85f1fc88c237..987f0fafc999 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -1247,6 +1247,7 @@ load_dbcr0:

  	.section .bss
  	.align	4
+	.global global_dbcr0
  global_dbcr0:
  	.space	8*NR_CPUS
  	.previous

Christophe

> ---
>   arch/powerpc/kernel/entry_32.S | 42 +++++++++++++++++++++
>   arch/powerpc/kernel/head_32.S  |  3 +-
>   arch/powerpc/kernel/head_32.h  | 85 ++++++++++++++++++++++++++++++++++++++++--
>   arch/powerpc/kernel/head_40x.S |  3 +-
>   arch/powerpc/kernel/head_8xx.S |  3 +-
>   5 files changed, 126 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
> index 1e11528d45ae..137bd2103051 100644
> --- a/arch/powerpc/kernel/entry_32.S
> +++ b/arch/powerpc/kernel/entry_32.S
> @@ -335,6 +335,46 @@ stack_ovf:
>   	SYNC
>   	RFI
>   
> +#ifndef CONFIG_BOOKE	/* to be removed once BOOKE uses fast syscall entry */
> +#ifdef CONFIG_TRACE_IRQFLAGS
> +trace_syscall_entry_irq_off:
> +	/*
> +	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
> +	 * If from user mode there is only one stack frame on the stack, and
> +	 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
> +	 * stack frame to make trace_hardirqs_on happy.
> +	 *
> +	 */
> +	stwu	r1,-32(r1)
> +
> +	/*
> +	 * Syscall shouldn't happen while interrupts are disabled,
> +	 * so let's do a warning here.
> +	 */
> +0:	trap
> +	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
> +	bl	trace_hardirqs_on
> +
> +	addi	r1,r1,32
> +
> +	/* Now enable for real */
> +	LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
> +	mtmsr	r10
> +
> +	REST_GPR(0, r1)
> +	REST_4GPRS(3, r1)
> +	REST_2GPRS(7, r1)
> +	b	DoSyscall
> +#endif /* CONFIG_TRACE_IRQFLAGS */
> +
> +	.globl	transfer_to_syscall
> +transfer_to_syscall:
> +#ifdef CONFIG_TRACE_IRQFLAGS
> +	andi.	r12,r9,MSR_EE
> +	beq-	trace_syscall_entry_irq_off
> +#endif /* CONFIG_TRACE_IRQFLAGS */
> +#endif /* !CONFIG_BOOKE */
> +
>   /*
>    * Handle a system call.
>    */
> @@ -346,9 +386,11 @@ _GLOBAL(DoSyscall)
>   	stw	r3,ORIG_GPR3(r1)
>   	li	r12,0
>   	stw	r12,RESULT(r1)
> +#ifdef CONFIG_BOOKE	/* to be removed once BOOKE uses fast syscall entry */
>   	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
>   	rlwinm	r11,r11,0,4,2
>   	stw	r11,_CCR(r1)
> +#endif
>   
>   #ifdef CONFIG_TRACE_IRQFLAGS
>   	/* Make sure interrupts are enabled */
> diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
> index 3a1df9edf6da..7576e1374a69 100644
> --- a/arch/powerpc/kernel/head_32.S
> +++ b/arch/powerpc/kernel/head_32.S
> @@ -368,8 +368,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
>   	. = 0xc00
>   	DO_KVM  0xc00
>   SystemCall:
> -	EXCEPTION_PROLOG
> -	EXC_XFER_SYS(0xc00, DoSyscall)
> +	SYSCALL_ENTRY	0xc00
>   
>   /* Single step - not used on 601 */
>   	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
> diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
> index 14cb0af2f494..4a692553651f 100644
> --- a/arch/powerpc/kernel/head_32.h
> +++ b/arch/powerpc/kernel/head_32.h
> @@ -73,6 +73,87 @@
>   	SAVE_2GPRS(7, r11)
>   .endm
>   
> +.macro SYSCALL_ENTRY trapno
> +	mfspr	r12,SPRN_SPRG_THREAD
> +	mfcr	r10
> +	lwz	r11,TASK_STACK-THREAD(r12)
> +	mflr	r9
> +	addi	r11,r11,THREAD_SIZE - INT_FRAME_SIZE
> +	rlwinm	r10,r10,0,4,2	/* Clear SO bit in CR */
> +	tophys(r11,r11)
> +	stw	r10,_CCR(r11)		/* save registers */
> +	mfspr	r10,SPRN_SRR0
> +	stw	r9,_LINK(r11)
> +	mfspr	r9,SPRN_SRR1
> +	stw	r1,GPR1(r11)
> +	stw	r1,0(r11)
> +	tovirt(r1,r11)			/* set new kernel sp */
> +	stw	r10,_NIP(r11)
> +#ifdef CONFIG_40x
> +	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
> +#else
> +	LOAD_MSR_KERNEL(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
> +	MTMSRD(r10)			/* (except for mach check in rtas) */
> +#endif
> +	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
> +	stw	r2,GPR2(r11)
> +	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
> +	stw	r9,_MSR(r11)
> +	li	r2, \trapno + 1
> +	stw	r10,8(r11)
> +	stw	r2,_TRAP(r11)
> +	SAVE_GPR(0, r11)
> +	SAVE_4GPRS(3, r11)
> +	SAVE_2GPRS(7, r11)
> +	addi	r11,r1,STACK_FRAME_OVERHEAD
> +	addi	r2,r12,-THREAD
> +	stw	r11,PT_REGS(r12)
> +#if defined(CONFIG_40x)
> +	/* Check to see if the dbcr0 register is set up to debug.  Use the
> +	   internal debug mode bit to do this. */
> +	lwz	r12,THREAD_DBCR0(r12)
> +	andis.	r12,r12,DBCR0_IDM@h
> +#endif
> +	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
> +#if defined(CONFIG_40x)
> +	beq+	3f
> +	/* From user and task is ptraced - load up global dbcr0 */
> +	li	r12,-1			/* clear all pending debug events */
> +	mtspr	SPRN_DBSR,r12
> +	lis	r11,global_dbcr0@ha
> +	tophys(r11,r11)
> +	addi	r11,r11,global_dbcr0@l
> +	lwz	r12,0(r11)
> +	mtspr	SPRN_DBCR0,r12
> +	lwz	r12,4(r11)
> +	addi	r12,r12,-1
> +	stw	r12,4(r11)
> +#endif
> +
> +3:
> +	tovirt(r2, r2)			/* set r2 to current */
> +	lis	r11, transfer_to_syscall@h
> +	ori	r11, r11, transfer_to_syscall@l
> +#ifdef CONFIG_TRACE_IRQFLAGS
> +	/*
> +	 * If MSR is changing we need to keep interrupts disabled at this point
> +	 * otherwise we might risk taking an interrupt before we tell lockdep
> +	 * they are enabled.
> +	 */
> +	LOAD_MSR_KERNEL(r10, MSR_KERNEL)
> +	rlwimi	r10, r9, 0, MSR_EE
> +#else
> +	LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
> +#endif
> +#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
> +	mtspr	SPRN_NRI, r0
> +#endif
> +	mtspr	SPRN_SRR1,r10
> +	mtspr	SPRN_SRR0,r11
> +	SYNC
> +	RFI				/* jump to handler, enable MMU */
> +.endm
> +
>   /*
>    * Note: code which follows this uses cr0.eq (set if from kernel),
>    * r11, r12 (SRR0), and r9 (SRR1).
> @@ -119,8 +200,4 @@
>   	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
>   			  ret_from_except)
>   
> -#define EXC_XFER_SYS(n, hdlr)		\
> -	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL | MSR_EE, transfer_to_handler, \
> -			  ret_from_except)
> -
>   #endif /* __HEAD_32_H__ */
> diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
> index 1afab9190147..f5704488c4fc 100644
> --- a/arch/powerpc/kernel/head_40x.S
> +++ b/arch/powerpc/kernel/head_40x.S
> @@ -348,8 +348,7 @@ _ENTRY(saved_ksp_limit)
>   
>   /* 0x0C00 - System Call Exception */
>   	START_EXCEPTION(0x0C00,	SystemCall)
> -	EXCEPTION_PROLOG
> -	EXC_XFER_SYS(0xc00, DoSyscall)
> +	SYSCALL_ENTRY	0xc00
>   
>   	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD)
>   	EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
> diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
> index 336b9921c0da..362b2150a3a0 100644
> --- a/arch/powerpc/kernel/head_8xx.S
> +++ b/arch/powerpc/kernel/head_8xx.S
> @@ -185,8 +185,7 @@ Alignment:
>   /* System call */
>   	. = 0xc00
>   SystemCall:
> -	EXCEPTION_PROLOG
> -	EXC_XFER_SYS(0xc00, DoSyscall)
> +	SYSCALL_ENTRY	0xc00
>   
>   /* Single step - not used on 601 */
>   	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
> 


Patch

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 1e11528d45ae..137bd2103051 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -335,6 +335,46 @@  stack_ovf:
 	SYNC
 	RFI
 
+#ifndef CONFIG_BOOKE	/* to be removed once BOOKE uses fast syscall entry */
+#ifdef CONFIG_TRACE_IRQFLAGS
+trace_syscall_entry_irq_off:
+	/*
+	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
+	 * If from user mode there is only one stack frame on the stack, and
+	 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
+	 * stack frame to make trace_hardirqs_on happy.
+	 *
+	 */
+	stwu	r1,-32(r1)
+
+	/*
+	 * Syscall shouldn't happen while interrupts are disabled,
+	 * so let's do a warning here.
+	 */
+0:	trap
+	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
+	bl	trace_hardirqs_on
+
+	addi	r1,r1,32
+
+	/* Now enable for real */
+	LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
+	mtmsr	r10
+
+	REST_GPR(0, r1)
+	REST_4GPRS(3, r1)
+	REST_2GPRS(7, r1)
+	b	DoSyscall
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+	.globl	transfer_to_syscall
+transfer_to_syscall:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	andi.	r12,r9,MSR_EE
+	beq-	trace_syscall_entry_irq_off
+#endif /* CONFIG_TRACE_IRQFLAGS */
+#endif /* !CONFIG_BOOKE */
+
 /*
  * Handle a system call.
  */
@@ -346,9 +386,11 @@  _GLOBAL(DoSyscall)
 	stw	r3,ORIG_GPR3(r1)
 	li	r12,0
 	stw	r12,RESULT(r1)
+#ifdef CONFIG_BOOKE	/* to be removed once BOOKE uses fast syscall entry */
 	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
 	rlwinm	r11,r11,0,4,2
 	stw	r11,_CCR(r1)
+#endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 	/* Make sure interrupts are enabled */
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 3a1df9edf6da..7576e1374a69 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -368,8 +368,7 @@  END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
 	. = 0xc00
 	DO_KVM  0xc00
 SystemCall:
-	EXCEPTION_PROLOG
-	EXC_XFER_SYS(0xc00, DoSyscall)
+	SYSCALL_ENTRY	0xc00
 
 /* Single step - not used on 601 */
 	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index 14cb0af2f494..4a692553651f 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -73,6 +73,87 @@ 
 	SAVE_2GPRS(7, r11)
 .endm
 
+.macro SYSCALL_ENTRY trapno
+	mfspr	r12,SPRN_SPRG_THREAD
+	mfcr	r10
+	lwz	r11,TASK_STACK-THREAD(r12)
+	mflr	r9
+	addi	r11,r11,THREAD_SIZE - INT_FRAME_SIZE
+	rlwinm	r10,r10,0,4,2	/* Clear SO bit in CR */
+	tophys(r11,r11)
+	stw	r10,_CCR(r11)		/* save registers */
+	mfspr	r10,SPRN_SRR0
+	stw	r9,_LINK(r11)
+	mfspr	r9,SPRN_SRR1
+	stw	r1,GPR1(r11)
+	stw	r1,0(r11)
+	tovirt(r1,r11)			/* set new kernel sp */
+	stw	r10,_NIP(r11)
+#ifdef CONFIG_40x
+	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
+#else
+	LOAD_MSR_KERNEL(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
+	MTMSRD(r10)			/* (except for mach check in rtas) */
+#endif
+	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+	stw	r2,GPR2(r11)
+	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
+	stw	r9,_MSR(r11)
+	li	r2, \trapno + 1
+	stw	r10,8(r11)
+	stw	r2,_TRAP(r11)
+	SAVE_GPR(0, r11)
+	SAVE_4GPRS(3, r11)
+	SAVE_2GPRS(7, r11)
+	addi	r11,r1,STACK_FRAME_OVERHEAD
+	addi	r2,r12,-THREAD
+	stw	r11,PT_REGS(r12)
+#if defined(CONFIG_40x)
+	/* Check to see if the dbcr0 register is set up to debug.  Use the
+	   internal debug mode bit to do this. */
+	lwz	r12,THREAD_DBCR0(r12)
+	andis.	r12,r12,DBCR0_IDM@h
+#endif
+	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
+#if defined(CONFIG_40x)
+	beq+	3f
+	/* From user and task is ptraced - load up global dbcr0 */
+	li	r12,-1			/* clear all pending debug events */
+	mtspr	SPRN_DBSR,r12
+	lis	r11,global_dbcr0@ha
+	tophys(r11,r11)
+	addi	r11,r11,global_dbcr0@l
+	lwz	r12,0(r11)
+	mtspr	SPRN_DBCR0,r12
+	lwz	r12,4(r11)
+	addi	r12,r12,-1
+	stw	r12,4(r11)
+#endif
+
+3:
+	tovirt(r2, r2)			/* set r2 to current */
+	lis	r11, transfer_to_syscall@h
+	ori	r11, r11, transfer_to_syscall@l
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/*
+	 * If MSR is changing we need to keep interrupts disabled at this point
+	 * otherwise we might risk taking an interrupt before we tell lockdep
+	 * they are enabled.
+	 */
+	LOAD_MSR_KERNEL(r10, MSR_KERNEL)
+	rlwimi	r10, r9, 0, MSR_EE
+#else
+	LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
+#endif
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
+	mtspr	SPRN_NRI, r0
+#endif
+	mtspr	SPRN_SRR1,r10
+	mtspr	SPRN_SRR0,r11
+	SYNC
+	RFI				/* jump to handler, enable MMU */
+.endm
+
 /*
  * Note: code which follows this uses cr0.eq (set if from kernel),
  * r11, r12 (SRR0), and r9 (SRR1).
@@ -119,8 +200,4 @@ 
 	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
 			  ret_from_except)
 
-#define EXC_XFER_SYS(n, hdlr)		\
-	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL | MSR_EE, transfer_to_handler, \
-			  ret_from_except)
-
 #endif /* __HEAD_32_H__ */
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 1afab9190147..f5704488c4fc 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -348,8 +348,7 @@  _ENTRY(saved_ksp_limit)
 
 /* 0x0C00 - System Call Exception */
 	START_EXCEPTION(0x0C00,	SystemCall)
-	EXCEPTION_PROLOG
-	EXC_XFER_SYS(0xc00, DoSyscall)
+	SYSCALL_ENTRY	0xc00
 
 	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD)
 	EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 336b9921c0da..362b2150a3a0 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -185,8 +185,7 @@  Alignment:
 /* System call */
 	. = 0xc00
 SystemCall:
-	EXCEPTION_PROLOG
-	EXC_XFER_SYS(0xc00, DoSyscall)
+	SYSCALL_ENTRY	0xc00
 
 /* Single step - not used on 601 */
 	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)