
[12/25] powerpc: ability to associate pkey to a vma

Message ID: 1504910713-7094-21-git-send-email-linuxram@us.ibm.com (mailing list archive)
State: Changes Requested
Series: powerpc: Free up RPAGE_RSV bits

Commit Message

Ram Pai Sept. 8, 2017, 10:45 p.m. UTC
Arch-independent code expects the arch to map a pkey into the vma's
protection bit setting. This patch provides that ability.

Signed-off-by: Ram Pai <linuxram@us.ibm.com>
---
 arch/powerpc/include/asm/mman.h  |    8 +++++++-
 arch/powerpc/include/asm/pkeys.h |   18 ++++++++++++++++++
 2 files changed, 25 insertions(+), 1 deletions(-)

Comments

Balbir Singh Oct. 18, 2017, 4:27 a.m. UTC | #1
On Fri,  8 Sep 2017 15:45:00 -0700
Ram Pai <linuxram@us.ibm.com> wrote:

> arch-independent code expects the arch to  map
> a  pkey  into the vma's protection bit setting.
> The patch provides that ability.
> 
> Signed-off-by: Ram Pai <linuxram@us.ibm.com>
> ---
>  arch/powerpc/include/asm/mman.h  |    8 +++++++-
>  arch/powerpc/include/asm/pkeys.h |   18 ++++++++++++++++++
>  2 files changed, 25 insertions(+), 1 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
> index 30922f6..067eec2 100644
> --- a/arch/powerpc/include/asm/mman.h
> +++ b/arch/powerpc/include/asm/mman.h
> @@ -13,6 +13,7 @@
>  
>  #include <asm/cputable.h>
>  #include <linux/mm.h>
> +#include <linux/pkeys.h>
>  #include <asm/cpu_has_feature.h>
>  
>  /*
> @@ -22,7 +23,12 @@
>  static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
>  		unsigned long pkey)
>  {
> -	return (prot & PROT_SAO) ? VM_SAO : 0;
> +#ifdef CONFIG_PPC64_MEMORY_PROTECTION_KEYS
> +	return (((prot & PROT_SAO) ? VM_SAO : 0) |
> +			pkey_to_vmflag_bits(pkey));
> +#else
> +	return ((prot & PROT_SAO) ? VM_SAO : 0);
> +#endif
>  }
>  #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
>  
> diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
> index 0cf115f..f13e913 100644
> --- a/arch/powerpc/include/asm/pkeys.h
> +++ b/arch/powerpc/include/asm/pkeys.h
> @@ -23,6 +23,24 @@
>  #define VM_PKEY_BIT4	VM_HIGH_ARCH_4
>  #endif
>  
> +/* override any generic PKEY Permission defines */
> +#define PKEY_DISABLE_EXECUTE   0x4
> +#define PKEY_ACCESS_MASK       (PKEY_DISABLE_ACCESS |\
> +				PKEY_DISABLE_WRITE  |\
> +				PKEY_DISABLE_EXECUTE)
> +
> +static inline u64 pkey_to_vmflag_bits(u16 pkey)
> +{
> +	if (!pkey_inited)
> +		return 0x0UL;
> +
> +	return (((pkey & 0x1UL) ? VM_PKEY_BIT0 : 0x0UL) |
> +		((pkey & 0x2UL) ? VM_PKEY_BIT1 : 0x0UL) |
> +		((pkey & 0x4UL) ? VM_PKEY_BIT2 : 0x0UL) |
> +		((pkey & 0x8UL) ? VM_PKEY_BIT3 : 0x0UL) |
> +		((pkey & 0x10UL) ? VM_PKEY_BIT4 : 0x0UL));
> +}

Assuming that there is a linear order from VM_PKEY_BIT0 to
VM_PKEY_BIT4, the conditional checks can be removed:

(pkey & 0x1fUL) << VM_PKEY_BIT0?


Balbir Singh
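
For illustration, a minimal sketch of the shift-based mapping suggested
above, assuming VM_PKEY_BIT0 through VM_PKEY_BIT4 occupy five consecutive
bits. VM_PKEY_SHIFT and ARCH_VM_PKEY_FLAGS are illustrative names used
only here; they are not part of the patch:

/* Illustrative only: assumes VM_PKEY_BIT0 == 1UL << VM_HIGH_ARCH_BIT_0 */
#define VM_PKEY_SHIFT		VM_HIGH_ARCH_BIT_0
#define ARCH_VM_PKEY_FLAGS	(VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
				 VM_PKEY_BIT3 | VM_PKEY_BIT4)

static inline u64 pkey_to_vmflag_bits(u16 pkey)
{
	if (!pkey_inited)
		return 0x0UL;

	/*
	 * With the VM_PKEY_BIT* flags contiguous, the 5-bit key value can
	 * be shifted straight into its vm_flags field instead of testing
	 * each bit individually.
	 */
	return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
}
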
Ram Pai Oct. 18, 2017, 9:01 p.m. UTC | #2
On Wed, Oct 18, 2017 at 03:27:33PM +1100, Balbir Singh wrote:
> On Fri,  8 Sep 2017 15:45:00 -0700
> Ram Pai <linuxram@us.ibm.com> wrote:
> 
> > arch-independent code expects the arch to  map
> > a  pkey  into the vma's protection bit setting.
> > The patch provides that ability.
> > 
> > Signed-off-by: Ram Pai <linuxram@us.ibm.com>
> > ---
> >  arch/powerpc/include/asm/mman.h  |    8 +++++++-
> >  arch/powerpc/include/asm/pkeys.h |   18 ++++++++++++++++++
> >  2 files changed, 25 insertions(+), 1 deletions(-)
> > 
> > diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
> > index 30922f6..067eec2 100644
> > --- a/arch/powerpc/include/asm/mman.h
> > +++ b/arch/powerpc/include/asm/mman.h
> > @@ -13,6 +13,7 @@
> >  
> >  #include <asm/cputable.h>
> >  #include <linux/mm.h>
> > +#include <linux/pkeys.h>
> >  #include <asm/cpu_has_feature.h>
> >  
> >  /*
> > @@ -22,7 +23,12 @@
> >  static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
> >  		unsigned long pkey)
> >  {
> > -	return (prot & PROT_SAO) ? VM_SAO : 0;
> > +#ifdef CONFIG_PPC64_MEMORY_PROTECTION_KEYS
> > +	return (((prot & PROT_SAO) ? VM_SAO : 0) |
> > +			pkey_to_vmflag_bits(pkey));
> > +#else
> > +	return ((prot & PROT_SAO) ? VM_SAO : 0);
> > +#endif
> >  }
> >  #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
> >  
> > diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
> > index 0cf115f..f13e913 100644
> > --- a/arch/powerpc/include/asm/pkeys.h
> > +++ b/arch/powerpc/include/asm/pkeys.h
> > @@ -23,6 +23,24 @@
> >  #define VM_PKEY_BIT4	VM_HIGH_ARCH_4
> >  #endif
> >  
> > +/* override any generic PKEY Permission defines */
> > +#define PKEY_DISABLE_EXECUTE   0x4
> > +#define PKEY_ACCESS_MASK       (PKEY_DISABLE_ACCESS |\
> > +				PKEY_DISABLE_WRITE  |\
> > +				PKEY_DISABLE_EXECUTE)
> > +
> > +static inline u64 pkey_to_vmflag_bits(u16 pkey)
> > +{
> > +	if (!pkey_inited)
> > +		return 0x0UL;
> > +
> > +	return (((pkey & 0x1UL) ? VM_PKEY_BIT0 : 0x0UL) |
> > +		((pkey & 0x2UL) ? VM_PKEY_BIT1 : 0x0UL) |
> > +		((pkey & 0x4UL) ? VM_PKEY_BIT2 : 0x0UL) |
> > +		((pkey & 0x8UL) ? VM_PKEY_BIT3 : 0x0UL) |
> > +		((pkey & 0x10UL) ? VM_PKEY_BIT4 : 0x0UL));
> > +}
> 
> Assuming that there is a linear order between VM_PKEY_BIT4 to
> VM_PKEY_BIT0, the conditional checks can be removed
> 
> (pkey & 0x1fUL) << VM_PKEY_BIT0?

Yes, currently they are linear. But I am afraid it will break without
notice someday when someone decides to change the values of VM_PKEY_BITx
to be non-contiguous. I could put in a build-time assertion, I suppose,
but thought this would be safe.

RP

> 
> 
> Balbir Singh
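
As a sketch of the build-time assertion mentioned in the reply above, a
check along these lines at the top of pkey_to_vmflag_bits() would turn a
future reshuffling of the VM_PKEY_BIT* values into a build failure rather
than a silent mis-mapping. This is illustrative only and assumes the
kernel's BUILD_BUG_ON() macro from <linux/bug.h>:

	/*
	 * Guard the contiguity assumption behind a shift-based mapping:
	 * fail the build if VM_PKEY_BIT0..VM_PKEY_BIT4 ever stop being
	 * five consecutive bits.
	 */
	BUILD_BUG_ON(VM_PKEY_BIT1 != (VM_PKEY_BIT0 << 1) ||
		     VM_PKEY_BIT2 != (VM_PKEY_BIT1 << 1) ||
		     VM_PKEY_BIT3 != (VM_PKEY_BIT2 << 1) ||
		     VM_PKEY_BIT4 != (VM_PKEY_BIT3 << 1));
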

Patch

diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 30922f6..067eec2 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -13,6 +13,7 @@ 
 
 #include <asm/cputable.h>
 #include <linux/mm.h>
+#include <linux/pkeys.h>
 #include <asm/cpu_has_feature.h>
 
 /*
@@ -22,7 +23,12 @@ 
 static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 		unsigned long pkey)
 {
-	return (prot & PROT_SAO) ? VM_SAO : 0;
+#ifdef CONFIG_PPC64_MEMORY_PROTECTION_KEYS
+	return (((prot & PROT_SAO) ? VM_SAO : 0) |
+			pkey_to_vmflag_bits(pkey));
+#else
+	return ((prot & PROT_SAO) ? VM_SAO : 0);
+#endif
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
 
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 0cf115f..f13e913 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -23,6 +23,24 @@ 
 #define VM_PKEY_BIT4	VM_HIGH_ARCH_4
 #endif
 
+/* override any generic PKEY Permission defines */
+#define PKEY_DISABLE_EXECUTE   0x4
+#define PKEY_ACCESS_MASK       (PKEY_DISABLE_ACCESS |\
+				PKEY_DISABLE_WRITE  |\
+				PKEY_DISABLE_EXECUTE)
+
+static inline u64 pkey_to_vmflag_bits(u16 pkey)
+{
+	if (!pkey_inited)
+		return 0x0UL;
+
+	return (((pkey & 0x1UL) ? VM_PKEY_BIT0 : 0x0UL) |
+		((pkey & 0x2UL) ? VM_PKEY_BIT1 : 0x0UL) |
+		((pkey & 0x4UL) ? VM_PKEY_BIT2 : 0x0UL) |
+		((pkey & 0x8UL) ? VM_PKEY_BIT3 : 0x0UL) |
+		((pkey & 0x10UL) ? VM_PKEY_BIT4 : 0x0UL));
+}
+
 #define arch_max_pkey()  pkeys_total
 #define AMR_RD_BIT 0x1UL
 #define AMR_WR_BIT 0x2UL