| Field | Value |
|---|---|
| Message ID | 20200408115926.1467567-19-hch@lst.de |
| State | Not Applicable |
| Delegated to | David Miller |
| Headers | show |
| Series | [01/28] x86/hyperv: use vmalloc_exec for the hypercall page (expand) |
On Wed, Apr 08, 2020 at 01:59:16PM +0200, Christoph Hellwig wrote: > To help enforcing the W^X protection don't allow remapping existing > pages as executable. > > Based on patch from Peter Zijlstra <peterz@infradead.org>. > > Signed-off-by: Christoph Hellwig <hch@lst.de> > --- > arch/x86/include/asm/pgtable_types.h | 6 ++++++ > include/asm-generic/pgtable.h | 4 ++++ > mm/vmalloc.c | 2 +- > 3 files changed, 11 insertions(+), 1 deletion(-) > > diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h > index 947867f112ea..2e7c442cc618 100644 > --- a/arch/x86/include/asm/pgtable_types.h > +++ b/arch/x86/include/asm/pgtable_types.h > @@ -282,6 +282,12 @@ typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; > > typedef struct { pgdval_t pgd; } pgd_t; > > +static inline pgprot_t pgprot_nx(pgprot_t prot) > +{ > + return __pgprot(pgprot_val(prot) | _PAGE_NX); > +} > +#define pgprot_nx pgprot_nx > + > #ifdef CONFIG_X86_PAE I reckon for arm64 we can do similar in our <asm/pgtable.h>: #define pgprot_nx(pgprot_t prot) \ __pgprot_modify(prot, 0, PTE_PXN) ... matching the style of our existing pgprot_*() modifier helpers. Mark. 
> > /* > diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h > index 329b8c8ca703..8c5f9c29698b 100644 > --- a/include/asm-generic/pgtable.h > +++ b/include/asm-generic/pgtable.h > @@ -491,6 +491,10 @@ static inline int arch_unmap_one(struct mm_struct *mm, > #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) > #endif > > +#ifndef pgprot_nx > +#define pgprot_nx(prot) (prot) > +#endif > + > #ifndef pgprot_noncached > #define pgprot_noncached(prot) (prot) > #endif > diff --git a/mm/vmalloc.c b/mm/vmalloc.c > index 7356b3f07bd8..334c75251ddb 100644 > --- a/mm/vmalloc.c > +++ b/mm/vmalloc.c > @@ -2390,7 +2390,7 @@ void *vmap(struct page **pages, unsigned int count, > if (!area) > return NULL; > > - if (map_kernel_range((unsigned long)area->addr, size, prot, > + if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot), > pages) < 0) { > vunmap(area->addr); > return NULL; > -- > 2.25.1 >
On Wed, Apr 08, 2020 at 01:38:36PM +0100, Mark Rutland wrote: > > +static inline pgprot_t pgprot_nx(pgprot_t prot) > > +{ > > + return __pgprot(pgprot_val(prot) | _PAGE_NX); > > +} > > +#define pgprot_nx pgprot_nx > > + > > #ifdef CONFIG_X86_PAE > > I reckon for arm64 we can do similar in our <asm/pgtable.h>: > > #define pgprot_nx(pgprot_t prot) \ > __pgprot_modify(prot, 0, PTE_PXN) > > ... matching the style of our existing pgprot_*() modifier helpers. I've added that for the next version with attribution to you.
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 947867f112ea..2e7c442cc618 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -282,6 +282,12 @@ typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; typedef struct { pgdval_t pgd; } pgd_t; +static inline pgprot_t pgprot_nx(pgprot_t prot) +{ + return __pgprot(pgprot_val(prot) | _PAGE_NX); +} +#define pgprot_nx pgprot_nx + #ifdef CONFIG_X86_PAE /* diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 329b8c8ca703..8c5f9c29698b 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -491,6 +491,10 @@ static inline int arch_unmap_one(struct mm_struct *mm, #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) #endif +#ifndef pgprot_nx +#define pgprot_nx(prot) (prot) +#endif + #ifndef pgprot_noncached #define pgprot_noncached(prot) (prot) #endif diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7356b3f07bd8..334c75251ddb 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2390,7 +2390,7 @@ void *vmap(struct page **pages, unsigned int count, if (!area) return NULL; - if (map_kernel_range((unsigned long)area->addr, size, prot, + if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot), pages) < 0) { vunmap(area->addr); return NULL;
To help enforcing the W^X protection don't allow remapping existing pages as executable. Based on patch from Peter Zijlstra <peterz@infradead.org>. Signed-off-by: Christoph Hellwig <hch@lst.de> --- arch/x86/include/asm/pgtable_types.h | 6 ++++++ include/asm-generic/pgtable.h | 4 ++++ mm/vmalloc.c | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-)