[3/8] powerpc: Mark functions called inside uaccess blocks w/ 'notrace'

Message ID 20201015150159.28933-4-cmr@codefail.de (mailing list archive)
State Superseded
Series Improve signal performance on PPC64 with KUAP

Checks

Context Check Description
snowpatch_ozlabs/apply_patch warning Failed to apply on branch powerpc/merge (118be7377c97e35c33819bcb3bbbae5a42a4ac43)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch powerpc/next (a2d0230b91f7e23ceb5d8fb6a9799f30517ec33a)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch linus/master (3e4fb4346c781068610d03c12b16c0cfb0fd24a3)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch powerpc/fixes (0460534b532e5518c657c7d6492b9337d975eaa3)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch linux-next (03d992bd2de6c7f2c9bbd4260ff104c0926ce3ff)
snowpatch_ozlabs/apply_patch fail Failed to apply to any branch

Commit Message

Christopher M. Riedl Oct. 15, 2020, 3:01 p.m. UTC
Functions called between user_*_access_begin() and user_*_access_end()
should be either inlined or marked 'notrace' to prevent leaving
userspace access exposed. Mark any such functions relevant to signal
handling so that subsequent patches can call them inside uaccess blocks.

Signed-off-by: Christopher M. Riedl <cmr@codefail.de>
---
 arch/powerpc/kernel/process.c | 20 ++++++++++----------
 arch/powerpc/mm/mem.c         |  4 ++--
 2 files changed, 12 insertions(+), 12 deletions(-)
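
As context for the discussion below, here is a minimal sketch of the kind of uaccess block this
series builds toward (hypothetical code: the helper name, the sigcontext field, and the exact
calls are illustrative rather than taken from the series). Any out-of-line call made between the
begin/end pair, such as flush_fp_to_thread() here, runs with user access enabled, which is why
such helpers must be inlined or marked notrace.

#include <linux/uaccess.h>
#include <linux/sched.h>
#include <asm/switch_to.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>

/* Hypothetical helper, loosely modelled on the ppc64 signal frame setup. */
static int setup_sigframe_sketch(struct sigcontext __user *sc,
				 struct task_struct *tsk)
{
	if (!user_write_access_begin(sc, sizeof(*sc)))
		return -EFAULT;

	/* Runs with user access (KUAP) open -- hence the notrace marking. */
	flush_fp_to_thread(tsk);

	unsafe_put_user(tsk->thread.regs->msr, &sc->gp_regs[PT_MSR], efault);

	user_write_access_end();
	return 0;

efault:
	user_write_access_end();
	return -EFAULT;
}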

Comments

Christoph Hellwig Oct. 16, 2020, 6:56 a.m. UTC | #1
On Thu, Oct 15, 2020 at 10:01:54AM -0500, Christopher M. Riedl wrote:
> Functions called between user_*_access_begin() and user_*_access_end()
> should be either inlined or marked 'notrace' to prevent leaving
> userspace access exposed. Mark any such functions relevant to signal
> handling so that subsequent patches can call them inside uaccess blocks.

I don't think running this much code with uaccess enabled is a good
idea.  Please refactor the code to reduce the critical sections with
uaccess enabled.

Btw, does powerpc already have the objtool validation that we don't
accidentally jump out of unsafe uaccess critical sections?
Christophe Leroy Oct. 16, 2020, 7:02 a.m. UTC | #2
On 15/10/2020 at 17:01, Christopher M. Riedl wrote:
> Functions called between user_*_access_begin() and user_*_access_end()
> should be either inlined or marked 'notrace' to prevent leaving
> userspace access exposed. Mark any such functions relevant to signal
> handling so that subsequent patches can call them inside uaccess blocks.

Is it enough to mark it "notrace"? I see that when I activate KASAN, there are still KASAN calls
in those functions.
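
(For reference: notrace only drops the ftrace entry hook; KASAN instrumentation is controlled by
a separate attribute, so the KASAN calls remain. A simplified paraphrase of the definitions in
kernels of that era, from include/linux/compiler_types.h and include/linux/compiler_attributes.h:)

/* Disables ftrace's call-site instrumentation only. */
#define notrace			__attribute__((__no_instrument_function__))

/* Keeping KASAN's checks out of a function needs this as well. */
#define __no_sanitize_address	__attribute__((no_sanitize_address))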

In my series for 32-bit, I re-ordered things so that all those calls are done before the
_access_begin(); can't you do the same on PPC64? (See
https://patchwork.ozlabs.org/project/linuxppc-dev/patch/f6eac65781b4a57220477c8864bca2b57f29a5d5.1597770847.git.christophe.leroy@csgroup.eu/)

Christophe

> [... full patch quoted unchanged in the reply; trimmed here, see the diff in the Patch section below ...]
Peter Zijlstra Oct. 16, 2020, 9:41 a.m. UTC | #3
On Fri, Oct 16, 2020 at 07:56:16AM +0100, Christoph Hellwig wrote:
> On Thu, Oct 15, 2020 at 10:01:54AM -0500, Christopher M. Riedl wrote:
> > Functions called between user_*_access_begin() and user_*_access_end()
> > should be either inlined or marked 'notrace' to prevent leaving
> > userspace access exposed. Mark any such functions relevant to signal
> > handling so that subsequent patches can call them inside uaccess blocks.
> 
> I don't think running this much code with uaccess enabled is a good
> idea.  Please refactor the code to reduce the critical sections with
> uaccess enabled.
> 
> Btw, does powerpc already have the objtool validation that we don't
> accidentally jump out of unsafe uaccess critical sections?

It does not; there was some effort on that a while ago, but I suspect
they're waiting for the ARM64 effort to land and build on that.
Christopher M. Riedl Oct. 20, 2020, 1:59 a.m. UTC | #4
On Fri Oct 16, 2020 at 4:02 AM CDT, Christophe Leroy wrote:
>
>
> On 15/10/2020 at 17:01, Christopher M. Riedl wrote:
> > Functions called between user_*_access_begin() and user_*_access_end()
> > should be either inlined or marked 'notrace' to prevent leaving
> > userspace access exposed. Mark any such functions relevant to signal
> > handling so that subsequent patches can call them inside uaccess blocks.
>
> Is it enough to mark it "notrace" ? I see that when I activate KASAN,
> there are still KASAN calls in
> those functions.
>

Maybe not enough after all :(

> In my series for 32 bits, I re-ordered stuff in order to do all those
> calls before doing the
> _access_begin(), can't you do the same on PPC64 ? (See
> https://patchwork.ozlabs.org/project/linuxppc-dev/patch/f6eac65781b4a57220477c8864bca2b57f29a5d5.1597770847.git.christophe.leroy@csgroup.eu/)
>

Yes, I will give this another shot in the next spin.

> Christophe
>
> > [... full patch quoted unchanged in the reply; trimmed here, see the diff in the Patch section below ...]
Michael Ellerman Oct. 20, 2020, 7:34 a.m. UTC | #5
Peter Zijlstra <peterz@infradead.org> writes:
> On Fri, Oct 16, 2020 at 07:56:16AM +0100, Christoph Hellwig wrote:
>> On Thu, Oct 15, 2020 at 10:01:54AM -0500, Christopher M. Riedl wrote:
>> > Functions called between user_*_access_begin() and user_*_access_end()
>> > should be either inlined or marked 'notrace' to prevent leaving
>> > userspace access exposed. Mark any such functions relevant to signal
>> > handling so that subsequent patches can call them inside uaccess blocks.
>> 
>> I don't think running this much code with uaccess enabled is a good
>> idea.  Please refactor the code to reduce the critical sections with
>> uaccess enabled.
>> 
>> Btw, does powerpc already have the objtool validation that we don't
>> accidentally jump out of unsafe uaccess critical sections?
>
> It does not; there was some effort on that a while ago, but I suspect
> they're waiting for the ARM64 effort to land and build on that.

Right, we don't have objtool support.

We would definitely like objtool support, at least for this uaccess
checking; I'm sure we have some escapes.

There was someone working on it in their own time, but last I heard that
was still WIP.

I didn't realise the ARM64 support was still not merged, so yeah having
that land first would probably simplify things, but we still need
someone who has time to work on it.

cheers

Patch

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ba2c987b8403..bf5d9654bd2c 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -84,7 +84,7 @@  extern unsigned long _get_SP(void);
  */
 bool tm_suspend_disabled __ro_after_init = false;
 
-static void check_if_tm_restore_required(struct task_struct *tsk)
+static void notrace check_if_tm_restore_required(struct task_struct *tsk)
 {
 	/*
 	 * If we are saving the current thread's registers, and the
@@ -151,7 +151,7 @@  void notrace __msr_check_and_clear(unsigned long bits)
 EXPORT_SYMBOL(__msr_check_and_clear);
 
 #ifdef CONFIG_PPC_FPU
-static void __giveup_fpu(struct task_struct *tsk)
+static void notrace __giveup_fpu(struct task_struct *tsk)
 {
 	unsigned long msr;
 
@@ -163,7 +163,7 @@  static void __giveup_fpu(struct task_struct *tsk)
 	tsk->thread.regs->msr = msr;
 }
 
-void giveup_fpu(struct task_struct *tsk)
+void notrace giveup_fpu(struct task_struct *tsk)
 {
 	check_if_tm_restore_required(tsk);
 
@@ -177,7 +177,7 @@  EXPORT_SYMBOL(giveup_fpu);
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
  */
-void flush_fp_to_thread(struct task_struct *tsk)
+void notrace flush_fp_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		/*
@@ -234,7 +234,7 @@  static inline void __giveup_fpu(struct task_struct *tsk) { }
 #endif /* CONFIG_PPC_FPU */
 
 #ifdef CONFIG_ALTIVEC
-static void __giveup_altivec(struct task_struct *tsk)
+static void notrace __giveup_altivec(struct task_struct *tsk)
 {
 	unsigned long msr;
 
@@ -246,7 +246,7 @@  static void __giveup_altivec(struct task_struct *tsk)
 	tsk->thread.regs->msr = msr;
 }
 
-void giveup_altivec(struct task_struct *tsk)
+void notrace giveup_altivec(struct task_struct *tsk)
 {
 	check_if_tm_restore_required(tsk);
 
@@ -285,7 +285,7 @@  EXPORT_SYMBOL(enable_kernel_altivec);
  * Make sure the VMX/Altivec register state in the
  * the thread_struct is up to date for task tsk.
  */
-void flush_altivec_to_thread(struct task_struct *tsk)
+void notrace flush_altivec_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
@@ -300,7 +300,7 @@  EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-static void __giveup_vsx(struct task_struct *tsk)
+static void notrace __giveup_vsx(struct task_struct *tsk)
 {
 	unsigned long msr = tsk->thread.regs->msr;
 
@@ -317,7 +317,7 @@  static void __giveup_vsx(struct task_struct *tsk)
 		__giveup_altivec(tsk);
 }
 
-static void giveup_vsx(struct task_struct *tsk)
+static void notrace giveup_vsx(struct task_struct *tsk)
 {
 	check_if_tm_restore_required(tsk);
 
@@ -352,7 +352,7 @@  void enable_kernel_vsx(void)
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
-void flush_vsx_to_thread(struct task_struct *tsk)
+void notrace flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index ddc32cc1b6cf..da2345a2abc6 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -378,7 +378,7 @@  static inline bool flush_coherent_icache(unsigned long addr)
  * @start: the start address
  * @stop: the stop address (exclusive)
  */
-static void invalidate_icache_range(unsigned long start, unsigned long stop)
+static void notrace invalidate_icache_range(unsigned long start, unsigned long stop)
 {
 	unsigned long shift = l1_icache_shift();
 	unsigned long bytes = l1_icache_bytes();
@@ -402,7 +402,7 @@  static void invalidate_icache_range(unsigned long start, unsigned long stop)
  * @start: the start address
  * @stop: the stop address (exclusive)
  */
-void flush_icache_range(unsigned long start, unsigned long stop)
+void notrace flush_icache_range(unsigned long start, unsigned long stop)
 {
 	if (flush_coherent_icache(start))
 		return;