diff mbox series

[v2] ARC: Use 40-bit physical page mask for PAE

Message ID 20210427121237.2889-1-isaev@synopsys.com
State New
Headers show
Series [v2] ARC: Use 40-bit physical page mask for PAE | expand

Commit Message

Vladimir Isaev April 27, 2021, 12:12 p.m. UTC
32-bit PAGE_MASK cannot be used as a mask for physical addresses
when PAE is enabled. PAGE_MASK_PHYS must be used for physical
addresses instead of PAGE_MASK.

Without this, init gets SIGSEGV if pte_modify was called:

potentially unexpected fatal signal 11.
Path: /bin/busybox
CPU: 0 PID: 1 Comm: init Not tainted 5.12.0-rc5-00003-g1e43c377a79f-dirty
Insn could not be fetched
    @No matching VMA found
ECR: 0x00040000 EFA: 0x00000000 ERET: 0x00000000
STAT: 0x80080082 [IE U     ]   BTA: 0x00000000
 SP: 0x5f9ffe44  FP: 0x00000000 BLK: 0xaf3d4
LPS: 0x000d093e LPE: 0x000d0950 LPC: 0x00000000
r00: 0x00000002 r01: 0x5f9fff14 r02: 0x5f9fff20
r03: 0x00000000 r04: 0x00000a48 r05: 0x001855b8
r06: 0x00186654 r07: 0x00010034 r08: 0x000000e2
r09: 0x00000007 r10: 0x00000000 r11: 0x00000000
r12: 0x00000000 r13: 0x00000001 r14: 0x00000002
r15: 0x5f9fff14 r16: 0x5f9fff20 r17: 0x001855d0
r18: 0x00000001 r19: 0x00000000 r20: 0x00000000
r21: 0x00000000 r22: 0x00000000 r23: 0x00000000
r24: 0x00000000 r25: 0x0018a488
Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b

Signed-off-by: Vladimir Isaev <isaev@synopsys.com>
Reported-by: kernel test robot <lkp@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: stable@vger.kernel.org
---
Changes for v2:
 - PHYSICAL_PAGE_MASK -> PAGE_MASK_PHYS
 - off variable in ioremap_prot is now unsigned int and uses PAGE_MASK
 - Revised commit message
---
 arch/arc/include/asm/page.h      | 12 ++++++++++++
 arch/arc/include/asm/pgtable.h   | 12 +++---------
 arch/arc/include/uapi/asm/page.h |  1 -
 arch/arc/mm/ioremap.c            |  5 +++--
 arch/arc/mm/tlb.c                |  2 +-
 5 files changed, 19 insertions(+), 13 deletions(-)

Comments

Vineet Gupta May 10, 2021, 7:49 p.m. UTC | #1
On 4/27/21 5:12 AM, Vladimir Isaev wrote:
> 32-bit PAGE_MASK can not be used as a mask for physical addresses
> when PAE is enabled. PAGE_MASK_PHYS must be used for physical
> addresses instead of PAGE_MASK.
>
> Without this, init gets SIGSEGV if pte_modify was called:
>
> potentially unexpected fatal signal 11.
> Path: /bin/busybox
> CPU: 0 PID: 1 Comm: init Not tainted 5.12.0-rc5-00003-g1e43c377a79f-dirty
> Insn could not be fetched
>      @No matching VMA found
> ECR: 0x00040000 EFA: 0x00000000 ERET: 0x00000000
> STAT: 0x80080082 [IE U     ]   BTA: 0x00000000
>   SP: 0x5f9ffe44  FP: 0x00000000 BLK: 0xaf3d4
> LPS: 0x000d093e LPE: 0x000d0950 LPC: 0x00000000
> r00: 0x00000002 r01: 0x5f9fff14 r02: 0x5f9fff20
> r03: 0x00000000 r04: 0x00000a48 r05: 0x001855b8
> r06: 0x00186654 r07: 0x00010034 r08: 0x000000e2
> r09: 0x00000007 r10: 0x00000000 r11: 0x00000000
> r12: 0x00000000 r13: 0x00000001 r14: 0x00000002
> r15: 0x5f9fff14 r16: 0x5f9fff20 r17: 0x001855d0
> r18: 0x00000001 r19: 0x00000000 r20: 0x00000000
> r21: 0x00000000 r22: 0x00000000 r23: 0x00000000
> r24: 0x00000000 r25: 0x0018a488
> Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b
>
> Signed-off-by: Vladimir Isaev <isaev@synopsys.com>
> Reported-by: kernel test robot <lkp@intel.com>
> Cc: Vineet Gupta <vgupta@synopsys.com>
> Cc: stable@vger.kernel.org

Applied to for-curr !

Thx,
-Vineet

> ---
> Changes for v2:
>   - PHYSICAL_PAGE_MASK -> PAGE_MASK_PHYS
>   - off variable in ioremap_prot is now unsigned int and uses PAGE_MASK
>   - Revised commit message
> ---
>   arch/arc/include/asm/page.h      | 12 ++++++++++++
>   arch/arc/include/asm/pgtable.h   | 12 +++---------
>   arch/arc/include/uapi/asm/page.h |  1 -
>   arch/arc/mm/ioremap.c            |  5 +++--
>   arch/arc/mm/tlb.c                |  2 +-
>   5 files changed, 19 insertions(+), 13 deletions(-)
>
> diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
> index ad9b7fe4dba3..4a9d33372fe2 100644
> --- a/arch/arc/include/asm/page.h
> +++ b/arch/arc/include/asm/page.h
> @@ -7,6 +7,18 @@
>   
>   #include <uapi/asm/page.h>
>   
> +#ifdef CONFIG_ARC_HAS_PAE40
> +
> +#define MAX_POSSIBLE_PHYSMEM_BITS	40
> +#define PAGE_MASK_PHYS			(0xff00000000ull | PAGE_MASK)
> +
> +#else /* CONFIG_ARC_HAS_PAE40 */
> +
> +#define MAX_POSSIBLE_PHYSMEM_BITS	32
> +#define PAGE_MASK_PHYS			PAGE_MASK
> +
> +#endif /* CONFIG_ARC_HAS_PAE40 */
> +
>   #ifndef __ASSEMBLY__
>   
>   #define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
> diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
> index 163641726a2b..5878846f00cf 100644
> --- a/arch/arc/include/asm/pgtable.h
> +++ b/arch/arc/include/asm/pgtable.h
> @@ -107,8 +107,8 @@
>   #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
>   
>   /* Set of bits not changed in pte_modify */
> -#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
> -
> +#define _PAGE_CHG_MASK	(PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
> +							   _PAGE_SPECIAL)
>   /* More Abbrevaited helpers */
>   #define PAGE_U_NONE     __pgprot(___DEF)
>   #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
> @@ -132,13 +132,7 @@
>   #define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
>   #define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
>   
> -#ifdef CONFIG_ARC_HAS_PAE40
> -#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
> -#define MAX_POSSIBLE_PHYSMEM_BITS 40
> -#else
> -#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
> -#define MAX_POSSIBLE_PHYSMEM_BITS 32
> -#endif
> +#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK_PHYS | _PAGE_CACHEABLE)
>   
>   /**************************************************************************
>    * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
> diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
> index 2a97e2718a21..2a4ad619abfb 100644
> --- a/arch/arc/include/uapi/asm/page.h
> +++ b/arch/arc/include/uapi/asm/page.h
> @@ -33,5 +33,4 @@
>   
>   #define PAGE_MASK	(~(PAGE_SIZE-1))
>   
> -
>   #endif /* _UAPI__ASM_ARC_PAGE_H */
> diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
> index fac4adc90204..95c649fbc95a 100644
> --- a/arch/arc/mm/ioremap.c
> +++ b/arch/arc/mm/ioremap.c
> @@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
>   void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
>   			   unsigned long flags)
>   {
> +	unsigned int off;
>   	unsigned long vaddr;
>   	struct vm_struct *area;
> -	phys_addr_t off, end;
> +	phys_addr_t end;
>   	pgprot_t prot = __pgprot(flags);
>   
>   	/* Don't allow wraparound, zero size */
> @@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
>   
>   	/* Mappings have to be page-aligned */
>   	off = paddr & ~PAGE_MASK;
> -	paddr &= PAGE_MASK;
> +	paddr &= PAGE_MASK_PHYS;
>   	size = PAGE_ALIGN(end + 1) - paddr;
>   
>   	/*
> diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
> index 9bb3c24f3677..9c7c68247289 100644
> --- a/arch/arc/mm/tlb.c
> +++ b/arch/arc/mm/tlb.c
> @@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
>   		      pte_t *ptep)
>   {
>   	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
> -	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
> +	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
>   	struct page *page = pfn_to_page(pte_pfn(*ptep));
>   
>   	create_tlb(vma, vaddr, ptep);
diff mbox series

Patch

diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index ad9b7fe4dba3..4a9d33372fe2 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -7,6 +7,18 @@ 
 
 #include <uapi/asm/page.h>
 
+#ifdef CONFIG_ARC_HAS_PAE40
+
+#define MAX_POSSIBLE_PHYSMEM_BITS	40
+#define PAGE_MASK_PHYS			(0xff00000000ull | PAGE_MASK)
+
+#else /* CONFIG_ARC_HAS_PAE40 */
+
+#define MAX_POSSIBLE_PHYSMEM_BITS	32
+#define PAGE_MASK_PHYS			PAGE_MASK
+
+#endif /* CONFIG_ARC_HAS_PAE40 */
+
 #ifndef __ASSEMBLY__
 
 #define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 163641726a2b..5878846f00cf 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -107,8 +107,8 @@ 
 #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
 
 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
-
+#define _PAGE_CHG_MASK	(PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
+							   _PAGE_SPECIAL)
 /* More Abbrevaited helpers */
 #define PAGE_U_NONE     __pgprot(___DEF)
 #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
@@ -132,13 +132,7 @@ 
 #define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
 #define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
 
-#ifdef CONFIG_ARC_HAS_PAE40
-#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 40
-#else
-#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 32
-#endif
+#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK_PHYS | _PAGE_CACHEABLE)
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
index 2a97e2718a21..2a4ad619abfb 100644
--- a/arch/arc/include/uapi/asm/page.h
+++ b/arch/arc/include/uapi/asm/page.h
@@ -33,5 +33,4 @@ 
 
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
-
 #endif /* _UAPI__ASM_ARC_PAGE_H */
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index fac4adc90204..95c649fbc95a 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -53,9 +53,10 @@  EXPORT_SYMBOL(ioremap);
 void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 			   unsigned long flags)
 {
+	unsigned int off;
 	unsigned long vaddr;
 	struct vm_struct *area;
-	phys_addr_t off, end;
+	phys_addr_t end;
 	pgprot_t prot = __pgprot(flags);
 
 	/* Don't allow wraparound, zero size */
@@ -72,7 +73,7 @@  void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 
 	/* Mappings have to be page-aligned */
 	off = paddr & ~PAGE_MASK;
-	paddr &= PAGE_MASK;
+	paddr &= PAGE_MASK_PHYS;
 	size = PAGE_ALIGN(end + 1) - paddr;
 
 	/*
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 9bb3c24f3677..9c7c68247289 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -576,7 +576,7 @@  void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 		      pte_t *ptep)
 {
 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
-	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
+	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
 	struct page *page = pfn_to_page(pte_pfn(*ptep));
 
 	create_tlb(vma, vaddr, ptep);