diff mbox series

ARC: Use 40-bit physical page mask for PAE

Message ID 20210426100801.41308-1-isaev@synopsys.com
State New
Headers show
Series ARC: Use 40-bit physical page mask for PAE | expand

Commit Message

Vladimir Isaev April 26, 2021, 10:08 a.m. UTC
32-bit PAGE_MASK can not be used as a mask for physical addresses
when PAE is enabled. PHYSICAL_PAGE_MASK must be used for physical
addresses instead of PAGE_MASK.

Signed-off-by: Vladimir Isaev <isaev@synopsys.com>
---
 arch/arc/include/asm/pgtable.h   | 12 +++---------
 arch/arc/include/uapi/asm/page.h |  7 +++++++
 arch/arc/mm/ioremap.c            |  4 ++--
 arch/arc/mm/tlb.c                |  2 +-
 4 files changed, 13 insertions(+), 12 deletions(-)

Comments

kernel test robot April 26, 2021, 2:39 p.m. UTC | #1
Hi Vladimir,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on v5.12]
[also build test ERROR on next-20210426]
[cannot apply to arc/for-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Vladimir-Isaev/ARC-Use-40-bit-physical-page-mask-for-PAE/20210426-180910
base:    9f4ad9e425a1d3b6a34617b8ea226d56a119a717
config: arc-allyesconfig (attached as .config)
compiler: arceb-elf-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/5c69bc74a4a7cd3c416b5d6d1809dd443220cbe0
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Vladimir-Isaev/ARC-Use-40-bit-physical-page-mask-for-PAE/20210426-180910
        git checkout 5c69bc74a4a7cd3c416b5d6d1809dd443220cbe0
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross W=1 ARCH=arc 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

>> error: arch/arc/include/uapi/asm/page.h: leak CONFIG_ARC_HAS_PAE40 to user-space
--
>> error: arch/arc/include/uapi/asm/page.h: leak CONFIG_ARC_HAS_PAE40 to user-space
   make[2]: *** [scripts/Makefile.headersinst:63: usr/include/asm/page.h] Error 1
   make[2]: Target '__headers' not remade because of errors.
   make[1]: *** [Makefile:1338: headers] Error 2
   make[1]: Target 'headers_install' not remade because of errors.
   make: *** [Makefile:215: __sub-make] Error 2
   make: Target 'headers_install' not remade because of errors.
--
>> error: arch/arc/include/uapi/asm/page.h: leak CONFIG_ARC_HAS_PAE40 to user-space
   make[2]: *** [scripts/Makefile.headersinst:63: usr/include/asm/page.h] Error 1
   make[2]: Target '__headers' not remade because of errors.
   make[1]: *** [Makefile:1338: headers] Error 2
   make[1]: Target 'modules_prepare' not remade because of errors.
   make: *** [Makefile:215: __sub-make] Error 2
   make: Target 'modules_prepare' not remade because of errors.
--
   scripts/genksyms/parse.y: warning: 9 shift/reduce conflicts [-Wconflicts-sr]
   scripts/genksyms/parse.y: warning: 5 reduce/reduce conflicts [-Wconflicts-rr]
>> error: arch/arc/include/uapi/asm/page.h: leak CONFIG_ARC_HAS_PAE40 to user-space
   make[2]: *** [scripts/Makefile.headersinst:63: usr/include/asm/page.h] Error 1
   make[2]: Target '__headers' not remade because of errors.
   make[1]: *** [Makefile:1338: headers] Error 2
   make[1]: Target 'prepare' not remade because of errors.
   make: *** [Makefile:215: __sub-make] Error 2
   make: Target 'prepare' not remade because of errors.

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Vineet Gupta April 26, 2021, 4:29 p.m. UTC | #2
+CC Arnd

On 4/26/21 3:08 AM, Vladimir Isaev wrote:
> 32-bit PAGE_MASK can not be used as a mask for physical addresses
> when PAE is enabled. PHYSICAL_PAGE_MASK must be used for physical
> addresses instead of PAGE_MASK.

Can you provide a bit more context: e.g. that without this change, exit/munmap on 5.x 
kernels was crashing — along with the actual stack trace.


> Signed-off-by: Vladimir Isaev <isaev@synopsys.com>

This also needs to be CC <stable>

> ---
>   arch/arc/include/asm/pgtable.h   | 12 +++---------
>   arch/arc/include/uapi/asm/page.h |  7 +++++++
>   arch/arc/mm/ioremap.c            |  4 ++--
>   arch/arc/mm/tlb.c                |  2 +-
>   4 files changed, 13 insertions(+), 12 deletions(-)
>
> diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
> index 163641726a2b..25c95fbc7021 100644
> --- a/arch/arc/include/asm/pgtable.h
> +++ b/arch/arc/include/asm/pgtable.h
> @@ -107,8 +107,8 @@
>   #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
>   
>   /* Set of bits not changed in pte_modify */
> -#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
> -
> +#define _PAGE_CHG_MASK	(PHYSICAL_PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
> +							       _PAGE_SPECIAL)

Bike shed: Can we call this PAGE_MASK_PHYS

>   /* More Abbrevaited helpers */
>   #define PAGE_U_NONE     __pgprot(___DEF)
>   #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
> @@ -132,13 +132,7 @@
>   #define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
>   #define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
>   
> -#ifdef CONFIG_ARC_HAS_PAE40
> -#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
> -#define MAX_POSSIBLE_PHYSMEM_BITS 40
> -#else
> -#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
> -#define MAX_POSSIBLE_PHYSMEM_BITS 32
> -#endif
> +#define PTE_BITS_NON_RWX_IN_PD1	(PHYSICAL_PAGE_MASK | _PAGE_CACHEABLE)
>   
>   /**************************************************************************
>    * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
> diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
> index 2a97e2718a21..8fecf2a2b592 100644
> --- a/arch/arc/include/uapi/asm/page.h
> +++ b/arch/arc/include/uapi/asm/page.h
> @@ -33,5 +33,12 @@
>   
>   #define PAGE_MASK	(~(PAGE_SIZE-1))
>   
> +#ifdef CONFIG_ARC_HAS_PAE40
> +#define MAX_POSSIBLE_PHYSMEM_BITS 40
> +#define PHYSICAL_PAGE_MASK	(0xff00000000ull | PAGE_MASK)
> +#else
> +#define MAX_POSSIBLE_PHYSMEM_BITS 32
> +#define PHYSICAL_PAGE_MASK	PAGE_MASK
> +#endif

Not a good idea, as you already saw the kernel build bot complaining. 
Granted we have the old PAGE_SIZE cruft there, but that's not a 
precedent for adding more.

>   
>   #endif /* _UAPI__ASM_ARC_PAGE_H */
> diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
> index fac4adc90204..eb109d57d544 100644
> --- a/arch/arc/mm/ioremap.c
> +++ b/arch/arc/mm/ioremap.c
> @@ -71,8 +71,8 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
>   	prot = pgprot_noncached(prot);
>   
>   	/* Mappings have to be page-aligned */
> -	off = paddr & ~PAGE_MASK;

This is offset *within* a page so upper bits must not matter. In fact, 
with this a bogus offset like 0xFF_FFFFFFFF can turn into something 
weird such as 0xFF_00000000

> -	paddr &= PAGE_MASK;
> +	off = paddr & ~PHYSICAL_PAGE_MASK;
> +	paddr &= PHYSICAL_PAGE_MASK;

This change is OK but feels weird nonetheless. ioremap is intended for 
actual IO regions and not just for making normal pages uncached. I 
know you tried the devmem trick to do this, but I don't think that is a 
"production" way to render uncached pages in the PAE region.

>   	size = PAGE_ALIGN(end + 1) - paddr;
>   
>   	/*
> diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
> index 9bb3c24f3677..15a3b92e9e72 100644
> --- a/arch/arc/mm/tlb.c
> +++ b/arch/arc/mm/tlb.c
> @@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
>   		      pte_t *ptep)
>   {
>   	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
> -	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
> +	phys_addr_t paddr = pte_val(*ptep) & PHYSICAL_PAGE_MASK;
>   	struct page *page = pfn_to_page(pte_pfn(*ptep));
>   
>   	create_tlb(vma, vaddr, ptep);
Vladimir Isaev April 26, 2021, 4:43 p.m. UTC | #3
Hi,

On Monday, April 26, 2021 7:30 PM, Vineet Gupta wrote:
> On 4/26/21 3:08 AM, Vladimir Isaev wrote:
> 
> >
> >   #endif /* _UAPI__ASM_ARC_PAGE_H */
> > diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
> > index fac4adc90204..eb109d57d544 100644
> > --- a/arch/arc/mm/ioremap.c
> > +++ b/arch/arc/mm/ioremap.c
> > @@ -71,8 +71,8 @@ void __iomem *ioremap_prot(phys_addr_t paddr,
> unsigned long size,
> >   	prot = pgprot_noncached(prot);
> >
> >   	/* Mappings have to be page-aligned */
> > -	off = paddr & ~PAGE_MASK;
> 
> This is offset *within* a page so upper bits must not matter. In fact,
> with this a bogus offset like 0xFF_FFFFFFFF can turn into something
> weird such as 0xFF_00000000

I understand, but the idea here is to use PHYSICAL_PAGE_MASK/PAGE_MASK_PHYS
for any phys_addr_t variable without thinking. So if off is actually an offset for
a virtual address, we can use unsigned long for it.

Thank you,
Vladimir Isaev
Vineet Gupta April 26, 2021, 5:16 p.m. UTC | #4
On 4/26/21 9:43 AM, Vladimir Isaev wrote:
> Hi,
>
> On Monday, April 26, 2021 7:30 PM, Vineet Gupta wrote:
>> On 4/26/21 3:08 AM, Vladimir Isaev wrote:
>>
>>>    #endif /* _UAPI__ASM_ARC_PAGE_H */
>>> diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
>>> index fac4adc90204..eb109d57d544 100644
>>> --- a/arch/arc/mm/ioremap.c
>>> +++ b/arch/arc/mm/ioremap.c
>>> @@ -71,8 +71,8 @@ void __iomem *ioremap_prot(phys_addr_t paddr,
>> unsigned long size,
>>>    	prot = pgprot_noncached(prot);
>>>
>>>    	/* Mappings have to be page-aligned */
>>> -	off = paddr & ~PAGE_MASK;
>> This is offset *within* a page so upper bits must not matter. In fact,
>> with this a bogus offset like 0xFF_FFFFFFFF can turn into something
>> weird such as 0xFF_00000000
> I understand, but idea here is to use PHYSICAL_PAGE_MASK/PAGE_MASK_PHYS
> for any phys_addr_t variable without thinking. So if off is actually offset for
> virtual address we can use unsigned long for it.

@off here is for a physical address, but it is intra-page, so it 
doesn't matter whether the address is virtual or physical. Indeed it is a good idea to fix it to 
be unsigned int (not long, since that is 64-bit attuned).

-Vineet
diff mbox series

Patch

diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 163641726a2b..25c95fbc7021 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -107,8 +107,8 @@ 
 #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
 
 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
-
+#define _PAGE_CHG_MASK	(PHYSICAL_PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
+							       _PAGE_SPECIAL)
 /* More Abbrevaited helpers */
 #define PAGE_U_NONE     __pgprot(___DEF)
 #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
@@ -132,13 +132,7 @@ 
 #define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
 #define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
 
-#ifdef CONFIG_ARC_HAS_PAE40
-#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 40
-#else
-#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 32
-#endif
+#define PTE_BITS_NON_RWX_IN_PD1	(PHYSICAL_PAGE_MASK | _PAGE_CACHEABLE)
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
index 2a97e2718a21..8fecf2a2b592 100644
--- a/arch/arc/include/uapi/asm/page.h
+++ b/arch/arc/include/uapi/asm/page.h
@@ -33,5 +33,12 @@ 
 
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
+#ifdef CONFIG_ARC_HAS_PAE40
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+#define PHYSICAL_PAGE_MASK	(0xff00000000ull | PAGE_MASK)
+#else
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#define PHYSICAL_PAGE_MASK	PAGE_MASK
+#endif
 
 #endif /* _UAPI__ASM_ARC_PAGE_H */
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index fac4adc90204..eb109d57d544 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -71,8 +71,8 @@  void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 	prot = pgprot_noncached(prot);
 
 	/* Mappings have to be page-aligned */
-	off = paddr & ~PAGE_MASK;
-	paddr &= PAGE_MASK;
+	off = paddr & ~PHYSICAL_PAGE_MASK;
+	paddr &= PHYSICAL_PAGE_MASK;
 	size = PAGE_ALIGN(end + 1) - paddr;
 
 	/*
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 9bb3c24f3677..15a3b92e9e72 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -576,7 +576,7 @@  void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 		      pte_t *ptep)
 {
 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
-	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
+	phys_addr_t paddr = pte_val(*ptep) & PHYSICAL_PAGE_MASK;
 	struct page *page = pfn_to_page(pte_pfn(*ptep));
 
 	create_tlb(vma, vaddr, ptep);