
[2/5] powerpc/futex: GUAP support for futex ops

Message ID 20181026063513.30806-3-ruscur@russell.cc (mailing list archive)
State Superseded
Series Guarded Userspace Access Prevention on Radix

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success next/apply_patch Successfully applied
snowpatch_ozlabs/checkpatch success Test checkpatch on branch next

Commit Message

Russell Currey Oct. 26, 2018, 6:35 a.m. UTC
Wrap the futex operations in GUAP locks and unlocks.

Signed-off-by: Russell Currey <ruscur@russell.cc>
---
 arch/powerpc/include/asm/futex.h | 4 ++++
 1 file changed, 4 insertions(+)
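
For context: the unlock_user_access()/lock_user_access() helpers used in this
patch are introduced in patch 1 of the series and do not appear on this page.
A minimal sketch of the wrapping pattern they provide on Radix is below; the
config symbol, the AMR_LOCKED value and the function bodies are assumptions
made for illustration, not the actual implementation from patch 1.

/* Illustrative sketch only -- not the real patch 1 implementation.
 * Assumes <asm/reg.h> for mtspr()/SPRN_AMR; CONFIG_PPC_RADIX_GUAP and
 * AMR_LOCKED are hypothetical names. */
#ifdef CONFIG_PPC_RADIX_GUAP
static inline void unlock_user_access(void)
{
	/* Assumption: clear the AMR so kernel loads/stores to user
	 * addresses are permitted for the duration of the access. */
	mtspr(SPRN_AMR, 0);
}

static inline void lock_user_access(void)
{
	/* Assumption: restore a "deny all user access" AMR value. */
	mtspr(SPRN_AMR, AMR_LOCKED);
}
#else /* !CONFIG_PPC_RADIX_GUAP */
static inline void unlock_user_access(void) { }
static inline void lock_user_access(void) { }
#endif

Any user access (such as the lwarx/stwcx. sequences in the futex helpers
below) then has to be bracketed by an unlock_user_access()/lock_user_access()
pair, which is exactly what this patch adds.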

Comments

Christophe Leroy Oct. 26, 2018, 4:32 p.m. UTC | #1
Russell Currey <ruscur@russell.cc> wrote:

> Wrap the futex operations in GUAP locks and unlocks.

Does it mean futex doesn't work anymore once only patch 1 is applied?
If so, you should split patch 1 into two parts and reorder the patches
so that GUAP can only be activated once all the necessary changes are
done. Otherwise the series won't be bisectable.

Christophe

Russell Currey Oct. 29, 2018, 1:08 a.m. UTC | #2
On Fri, 2018-10-26 at 18:32 +0200, LEROY Christophe wrote:
> Russell Currey <ruscur@russell.cc> wrote:
> 
> > Wrap the futex operations in GUAP locks and unlocks.
> 
> Does it mean futex doesn't work anymore once only patch 1 is applied?
> If so, you should split patch 1 into two parts and reorder the patches
> so that GUAP can only be activated once all the necessary changes are
> done. Otherwise the series won't be bisectable.

Yeah, I agree.  I just wanted to remove some of the breadth from what
is already one gigantic patch.  Bisectability is more important than
that, however.

- Russell


Patch

diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 94542776a62d..3aed640ee9ef 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -35,6 +35,7 @@  static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 {
 	int oldval = 0, ret;
 
+	unlock_user_access();
 	pagefault_disable();
 
 	switch (op) {
@@ -62,6 +63,7 @@  static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 	if (!ret)
 		*oval = oldval;
 
+	lock_user_access();
 	return ret;
 }
 
@@ -75,6 +77,7 @@  futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	unlock_user_access();
         __asm__ __volatile__ (
         PPC_ATOMIC_ENTRY_BARRIER
 "1:     lwarx   %1,0,%3         # futex_atomic_cmpxchg_inatomic\n\
@@ -95,6 +98,7 @@  futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         : "cc", "memory");
 
 	*uval = prev;
+	lock_user_access();
         return ret;
 }