linux-next: fix ups for clashes between akpm and powerpc trees

Message ID 20200603202655.0ad0eacc@canb.auug.org.au (mailing list archive)
State Not Applicable
Series linux-next: fix ups for clashes between akpm and powerpc trees

Checks

Context Check Description
snowpatch_ozlabs/apply_patch warning Failed to apply on branch powerpc/merge (4d8244d005b2a92c872a0a993c3aa94b5842e56b)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch powerpc/next (4336b9337824a60a0b10013c622caeee99460db5)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch linus/master (d6f9469a03d832dcd17041ed67774ffb5f3e73b3)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch powerpc/fixes (2f26ed1764b42a8c40d9c48441c73a70d805decf)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch linux-next (0e21d4620dd047da7952f44a2e1ac777ded2d57e)
snowpatch_ozlabs/apply_patch fail Failed to apply to any branch

Commit Message

Stephen Rothwell June 3, 2020, 10:26 a.m. UTC
Hi all,

Some things turned up in the powerpc tree today that required changes
to patches in the akpm tree, plus the following fixup patch provided
(mostly) by Michael. I have applied this as a single patch today, but
parts of it should probably be folded into other patches.

From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Wed, 3 Jun 2020 20:03:49 +1000
Subject: [PATCH] powerpc fixes for changes clashing with akpm tree changes

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h |  6 ++++++
 arch/powerpc/include/asm/nohash/32/pgtable.h | 10 +++++-----
 arch/powerpc/mm/kasan/8xx.c                  |  4 ++--
 arch/powerpc/mm/kasan/book3s_32.c            |  2 +-
 arch/powerpc/mm/nohash/8xx.c                 |  2 +-
 arch/powerpc/mm/pgtable.c                    |  2 +-
 arch/powerpc/mm/pgtable_32.c                 |  2 +-
 7 files changed, 17 insertions(+), 11 deletions(-)

Comments

Stephen Rothwell June 4, 2020, 6:52 a.m. UTC | #1
Hi all,

On Wed, 3 Jun 2020 20:26:55 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> Some things turned up in the powerpc tree today that required changes
> to patches in the akpm tree, plus the following fixup patch provided
> (mostly) by Michael. I have applied this as a single patch today, but
> parts of it should probably be folded into other patches.
> 
> From: Stephen Rothwell <sfr@canb.auug.org.au>
> Date: Wed, 3 Jun 2020 20:03:49 +1000
> Subject: [PATCH] powerpc fixes for changes clashing with akpm tree changes
> 
> Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
> ---

I applied this again today.  It is slightly different and now looks like this:

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 25c3cb8272c0..a6799723cd98 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1008,6 +1008,12 @@ extern struct page *p4d_page(p4d_t p4d);
 #define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
 #define p4d_page_vaddr(p4d)	__va(p4d_val(p4d) & ~P4D_MASKED_BITS)
 
+static inline unsigned long pgd_index(unsigned long address)
+{
+	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
+}
+#define pgd_index pgd_index
+
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index c188a6f64bcd..1927e1b653f2 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -205,10 +205,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 	*pmdp = __pmd(0);
 }
 
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
 /* to find an entry in a page-table-directory */
 #define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
 #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
@@ -241,7 +237,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
 	pte_basic_t old = pte_val(*p);
 	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
 	int num, i;
-	pmd_t *pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+	pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
 
 	if (!huge)
 		num = PAGE_SIZE / SZ_4K;
@@ -341,6 +337,10 @@ static inline int pte_young(pte_t pte)
 	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
 #endif
 
+#define pte_offset_kernel(dir, addr)	\
+	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
+				  pte_index(addr))
+
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
index db4ef44af22f..569d98a41881 100644
--- a/arch/powerpc/mm/kasan/8xx.c
+++ b/arch/powerpc/mm/kasan/8xx.c
@@ -10,7 +10,7 @@
 static int __init
 kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
 {
-	pmd_t *pmd = pmd_ptr_k(k_start);
+	pmd_t *pmd = pmd_off_k(k_start);
 	unsigned long k_cur, k_next;
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
@@ -59,7 +59,7 @@ int __init kasan_init_region(void *start, size_t size)
 		return ret;
 
 	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
index 4bc491a4a1fd..a32b4640b9de 100644
--- a/arch/powerpc/mm/kasan/book3s_32.c
+++ b/arch/powerpc/mm/kasan/book3s_32.c
@@ -46,7 +46,7 @@ int __init kasan_init_region(void *start, size_t size)
 	kasan_update_early_region(k_start, k_cur, __pte(0));
 
 	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 286441bbbe49..92e8929cbe3e 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -74,7 +74,7 @@ static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
 static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
 					     pgprot_t prot, int psize, bool new)
 {
-	pmd_t *pmdp = pmd_ptr_k(va);
+	pmd_t *pmdp = pmd_off_k(va);
 	pte_t *ptep;
 
 	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 45a0556089e8..1136257c3a99 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -264,7 +264,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 #if defined(CONFIG_PPC_8xx)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
 {
-	pmd_t *pmd = pmd_ptr(mm, addr);
+	pmd_t *pmd = pmd_off(mm, addr);
 	pte_basic_t val;
 	pte_basic_t *entry = &ptep->pte;
 	int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index e2d054c9575e..6eb4eab79385 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -40,7 +40,7 @@ notrace void __init early_ioremap_init(void)
 {
 	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
 	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
-	pmd_t *pmdp = pmd_ptr_k(addr);
+	pmd_t *pmdp = pmd_off_k(addr);
 
 	for (; (s32)(FIXADDR_TOP - addr) > 0;
 	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
Stephen Rothwell June 4, 2020, 7:49 a.m. UTC | #2
Hi all,

On Thu, 4 Jun 2020 16:52:46 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
> index c188a6f64bcd..1927e1b653f2 100644
> --- a/arch/powerpc/include/asm/nohash/32/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
> @@ -205,10 +205,6 @@ static inline void pmd_clear(pmd_t *pmdp)
>  	*pmdp = __pmd(0);
>  }
>  
> -
> -/* to find an entry in a kernel page-table-directory */
> -#define pgd_offset_k(address) pgd_offset(&init_mm, address)
> -
>  /* to find an entry in a page-table-directory */
>  #define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
>  #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
> @@ -241,7 +237,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
>  	pte_basic_t old = pte_val(*p);
>  	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
>  	int num, i;
> -	pmd_t *pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
> +	pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
>  
>  	if (!huge)
>  		num = PAGE_SIZE / SZ_4K;
> @@ -341,6 +337,10 @@ static inline int pte_young(pte_t pte)
>  	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
>  #endif
>  
> +#define pte_offset_kernel(dir, addr)	\
> +	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
> +				  pte_index(addr))
> +
>  /*
>   * Encode and decode a swap entry.
>   * Note that the bits we use in a PTE for representing a swap entry

Sorry, that ended up as:

diff --cc arch/powerpc/include/asm/nohash/32/pgtable.h
index 639f3b3713ec,eb8538c85077..1927e1b653f2
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@@ -204,13 -205,6 +205,9 @@@ static inline void pmd_clear(pmd_t *pmd
  	*pmdp = __pmd(0);
  }
  
- 
- /* to find an entry in a kernel page-table-directory */
- #define pgd_offset_k(address) pgd_offset(&init_mm, address)
- 
 +/* to find an entry in a page-table-directory */
 +#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
 +#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
  
  /*
   * PTE updates. This function is called whenever an existing
@@@ -240,7 -234,7 +237,7 @@@ static inline pte_basic_t pte_update(st
  	pte_basic_t old = pte_val(*p);
  	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
  	int num, i;
--	pmd_t *pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
++	pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
  
  	if (!huge)
  		num = PAGE_SIZE / SZ_4K;
@@@ -342,15 -334,6 +337,10 @@@ static inline int pte_young(pte_t pte
  	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
  #endif
  
- /* Find an entry in the third-level page table.. */
- #define pte_index(address)		\
- 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 +#define pte_offset_kernel(dir, addr)	\
 +	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
 +				  pte_index(addr))
- #define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
- static inline void pte_unmap(pte_t *pte) { }
 +
  /*
   * Encode and decode a swap entry.
   * Note that the bits we use in a PTE for representing a swap entry
Stephen Rothwell June 4, 2020, 8:38 a.m. UTC | #3
Hi all,

On Thu, 4 Jun 2020 16:52:46 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index 25c3cb8272c0..a6799723cd98 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -1008,6 +1008,12 @@ extern struct page *p4d_page(p4d_t p4d);
>  #define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
>  #define p4d_page_vaddr(p4d)	__va(p4d_val(p4d) & ~P4D_MASKED_BITS)
>  
> +static inline unsigned long pgd_index(unsigned long address)
> +{
> +	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
> +}
> +#define pgd_index pgd_index
> +
>  #define pte_ERROR(e) \
>  	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
>  #define pmd_ERROR(e) \

I have added that hunk to linux-next for tomorrow as a fix for
mm-consolidate-pgd_index-and-pgd_offset_k-definitions.

It's not strictly necessary, but Michael expressed a preference for the
inline function.  I was wondering: if pgd_index "must be a compile-time
constant" on one (or a few) architectures, then why not leave the
default as an inline function and special-case it as a macro where
needed ...
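
For reference, the trailing "#define pgd_index pgd_index" is the usual
marker that lets the generic header detect an arch override.  A minimal
sketch of the default-plus-override pattern under discussion
(illustrative only, not the exact mm tree code):

/* generic header: provide a fallback unless the arch already did */
#ifndef pgd_index
static inline unsigned long pgd_index(unsigned long address)
{
	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}
#define pgd_index pgd_index
#endif

/*
 * An architecture that needs a compile-time constant would keep a
 * macro instead, which the #ifndef above then leaves alone:
 * #define pgd_index(a)	(((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 */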
Stephen Rothwell June 4, 2020, 8:45 a.m. UTC | #4
Hi all,

On Thu, 4 Jun 2020 16:52:46 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
> index db4ef44af22f..569d98a41881 100644
> --- a/arch/powerpc/mm/kasan/8xx.c
> +++ b/arch/powerpc/mm/kasan/8xx.c
> @@ -10,7 +10,7 @@
>  static int __init
>  kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
>  {
> -	pmd_t *pmd = pmd_ptr_k(k_start);
> +	pmd_t *pmd = pmd_off_k(k_start);
>  	unsigned long k_cur, k_next;
>  
>  	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
> @@ -59,7 +59,7 @@ int __init kasan_init_region(void *start, size_t size)
>  		return ret;
>  
>  	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
> -		pmd_t *pmd = pmd_ptr_k(k_cur);
> +		pmd_t *pmd = pmd_off_k(k_cur);
>  		void *va = block + k_cur - k_start;
>  		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
>  
> diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
> index 4bc491a4a1fd..a32b4640b9de 100644
> --- a/arch/powerpc/mm/kasan/book3s_32.c
> +++ b/arch/powerpc/mm/kasan/book3s_32.c
> @@ -46,7 +46,7 @@ int __init kasan_init_region(void *start, size_t size)
>  	kasan_update_early_region(k_start, k_cur, __pte(0));
>  
>  	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
> -		pmd_t *pmd = pmd_ptr_k(k_cur);
> +		pmd_t *pmd = pmd_off_k(k_cur);
>  		void *va = block + k_cur - k_start;
>  		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
>  
> diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
> index 286441bbbe49..92e8929cbe3e 100644
> --- a/arch/powerpc/mm/nohash/8xx.c
> +++ b/arch/powerpc/mm/nohash/8xx.c
> @@ -74,7 +74,7 @@ static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
>  static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
>  					     pgprot_t prot, int psize, bool new)
>  {
> -	pmd_t *pmdp = pmd_ptr_k(va);
> +	pmd_t *pmdp = pmd_off_k(va);
>  	pte_t *ptep;
>  
>  	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
> diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
> index 45a0556089e8..1136257c3a99 100644
> --- a/arch/powerpc/mm/pgtable.c
> +++ b/arch/powerpc/mm/pgtable.c
> @@ -264,7 +264,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
>  #if defined(CONFIG_PPC_8xx)
>  void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
>  {
> -	pmd_t *pmd = pmd_ptr(mm, addr);
> +	pmd_t *pmd = pmd_off(mm, addr);
>  	pte_basic_t val;
>  	pte_basic_t *entry = &ptep->pte;
>  	int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K;
> diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
> index e2d054c9575e..6eb4eab79385 100644
> --- a/arch/powerpc/mm/pgtable_32.c
> +++ b/arch/powerpc/mm/pgtable_32.c
> @@ -40,7 +40,7 @@ notrace void __init early_ioremap_init(void)
>  {
>  	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
>  	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
> -	pmd_t *pmdp = pmd_ptr_k(addr);
> +	pmd_t *pmdp = pmd_off_k(addr);
>  
>  	for (; (s32)(FIXADDR_TOP - addr) > 0;
>  	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)

I have added the above hunks to linux-next for tomorrow as a fix for
mm-pgtable-add-shortcuts-for-accessing-kernel-pmd-and-pte.
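
For anyone chasing the rename: pmd_off() and pmd_off_k() are the
generic shortcuts that patch introduces, replacing the powerpc-local
pmd_ptr()/pmd_ptr_k().  Roughly, as a sketch of the generic helpers
(not the exact patch text):

static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_off(&init_mm, va);
}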
Stephen Rothwell June 4, 2020, 11:08 a.m. UTC | #5
Hi all,

On Thu, 4 Jun 2020 17:49:25 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> diff --cc arch/powerpc/include/asm/nohash/32/pgtable.h
> index 639f3b3713ec,eb8538c85077..1927e1b653f2
> --- a/arch/powerpc/include/asm/nohash/32/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
> @@@ -204,13 -205,6 +205,9 @@@ static inline void pmd_clear(pmd_t *pmd
>   	*pmdp = __pmd(0);
>   }
>   
> - 
> - /* to find an entry in a kernel page-table-directory */
> - #define pgd_offset_k(address) pgd_offset(&init_mm, address)
> - 
>  +/* to find an entry in a page-table-directory */
>  +#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
>  +#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
>   
>   /*
>    * PTE updates. This function is called whenever an existing
> @@@ -240,7 -234,7 +237,7 @@@ static inline pte_basic_t pte_update(st
>   	pte_basic_t old = pte_val(*p);
>   	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
>   	int num, i;
> --	pmd_t *pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
> ++	pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
>   
>   	if (!huge)
>   		num = PAGE_SIZE / SZ_4K;

I have added those hunks (more or less) to linux-next for tomorrow as a
fix for mm-consolidate-pgd_index-and-pgd_offset_k-definitions.
Stephen Rothwell June 4, 2020, 11:22 a.m. UTC | #6
Hi all,

On Thu, 4 Jun 2020 17:49:25 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> diff --cc arch/powerpc/include/asm/nohash/32/pgtable.h
> index 639f3b3713ec,eb8538c85077..1927e1b653f2
> --- a/arch/powerpc/include/asm/nohash/32/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
> @@@ -342,15 -334,6 +337,10 @@@ static inline int pte_young(pte_t pte
>   	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
>   #endif
>   
> - /* Find an entry in the third-level page table.. */
> - #define pte_index(address)		\
> - 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
>  +#define pte_offset_kernel(dir, addr)	\
>  +	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
>  +				  pte_index(addr))
> - #define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
> - static inline void pte_unmap(pte_t *pte) { }
>  +
>   /*
>    * Encode and decode a swap entry.
>    * Note that the bits we use in a PTE for representing a swap entry

I have added this hunk (sort of - see below) to linux-next for tomorrow
as a fix for mm-consolidate-pte_index-and-pte_offset_-definitions.

From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Thu, 4 Jun 2020 21:16:19 +1000
Subject: [PATCH] mm-consolidate-pte_index-and-pte_offset_-definitions-fix

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
---
 arch/powerpc/include/asm/nohash/32/pgtable.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index c188a6f64bcd..d94bcd117c5b 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -341,6 +341,10 @@ static inline int pte_young(pte_t pte)
 	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
 #endif
 
+#define pte_offset_kernel(dir, addr)	\
+	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
+				  pte_index(addr))
+
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
Michael Ellerman June 4, 2020, 11:28 a.m. UTC | #7
Stephen Rothwell <sfr@canb.auug.org.au> writes:
> Hi all,
>
> On Thu, 4 Jun 2020 16:52:46 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>>
>> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
>> index 25c3cb8272c0..a6799723cd98 100644
>> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
>> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
>> @@ -1008,6 +1008,12 @@ extern struct page *p4d_page(p4d_t p4d);
>>  #define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
>>  #define p4d_page_vaddr(p4d)	__va(p4d_val(p4d) & ~P4D_MASKED_BITS)
>>  
>> +static inline unsigned long pgd_index(unsigned long address)
>> +{
>> +	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
>> +}
>> +#define pgd_index pgd_index
>> +
>>  #define pte_ERROR(e) \
>>  	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
>>  #define pmd_ERROR(e) \
>
> I have added that hunk to linux-next for tomorrow as a fix for
> mm-consolidate-pgd_index-and-pgd_offset_k-definitions.
>
> It's not strictly necessary, but Michael expressed a preference for the
> inline function.

That was because we just recently converted it into a static inline to
avoid UBSAN warnings:

commit c2e929b18cea6cbf71364f22d742d9aad7f4677a
Author:     Qian Cai <cai@lca.pw>
AuthorDate: Thu Mar 5 23:48:52 2020 -0500

    powerpc/64s/pgtable: fix an undefined behaviour

    Booting a power9 server with hash MMU could trigger an undefined
    behaviour because pud_offset(p4d, 0) will do,

    0 >> (PAGE_SHIFT:16 + PTE_INDEX_SIZE:8 + H_PMD_INDEX_SIZE:10)

    Fix it by converting pud_index() and friends to static inline
    functions.

    UBSAN: shift-out-of-bounds in arch/powerpc/mm/ptdump/ptdump.c:282:15
    shift exponent 34 is too large for 32-bit type 'int'
    CPU: 6 PID: 1 Comm: swapper/0 Not tainted 5.6.0-rc4-next-20200303+ #13
    Call Trace:
    dump_stack+0xf4/0x164 (unreliable)
    ubsan_epilogue+0x18/0x78
    __ubsan_handle_shift_out_of_bounds+0x160/0x21c
    walk_pagetables+0x2cc/0x700
    walk_pud at arch/powerpc/mm/ptdump/ptdump.c:282
    (inlined by) walk_pagetables at arch/powerpc/mm/ptdump/ptdump.c:311
    ptdump_check_wx+0x8c/0xf0
    mark_rodata_ro+0x48/0x80
    kernel_init+0x74/0x194
    ret_from_kernel_thread+0x5c/0x74
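
As a toy illustration of the undefined behaviour (not kernel code): the
macro sees the literal argument's type, while the inline function
converts it to unsigned long first.

#define INDEX_MACRO(a)	((a) >> 34)	/* INDEX_MACRO(0): shifts a
					   32-bit int by 34, UB */

static inline unsigned long index_inline(unsigned long a)
{
	return a >> 34;		/* 0 is converted to unsigned long,
				   defined on 64-bit */
}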


> I was wondering: if pgd_index "must be a compile-time
> constant" on one (or a few) architectures, then why not leave the
> default as an inline function and special-case it as a macro where
> needed ...

AIUI that requirement comes from x86, which has:

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
...
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
...
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];


Which will produce a variable length array if pgd_index() isn't a
compile-time constant.
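
Concretely, if pgd_index() is a static inline then KERNEL_PGD_BOUNDARY
is no longer an integer constant expression, and u_pmds silently
becomes a C99 variable length array.  A toy version of the same trap,
with made-up constants:

#define SHIFT	39
#define NPTRS	512

static inline unsigned long idx(unsigned long a)
{
	return (a >> SHIFT) & (NPTRS - 1);	/* not a constant expression */
}

void pgd_alloc_like(void)
{
	int u[NPTRS - idx(0xc000000000000000UL)];	/* VLA */
	(void)u;
}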

cheers
Michael Ellerman June 4, 2020, 12:43 p.m. UTC | #8
Stephen Rothwell <sfr@canb.auug.org.au> writes:
> Hi all,
>
> On Thu, 4 Jun 2020 16:52:46 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>>
>> diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
>> index db4ef44af22f..569d98a41881 100644
>> --- a/arch/powerpc/mm/kasan/8xx.c
>> +++ b/arch/powerpc/mm/kasan/8xx.c
>> @@ -10,7 +10,7 @@
>>  static int __init
>>  kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
>>  {
>> -	pmd_t *pmd = pmd_ptr_k(k_start);
>> +	pmd_t *pmd = pmd_off_k(k_start);
>>  	unsigned long k_cur, k_next;
>>  
>>  	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
>> @@ -59,7 +59,7 @@ int __init kasan_init_region(void *start, size_t size)
>>  		return ret;
>>  
>>  	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
>> -		pmd_t *pmd = pmd_ptr_k(k_cur);
>> +		pmd_t *pmd = pmd_off_k(k_cur);
>>  		void *va = block + k_cur - k_start;
>>  		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
>>  
>> diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
>> index 4bc491a4a1fd..a32b4640b9de 100644
>> --- a/arch/powerpc/mm/kasan/book3s_32.c
>> +++ b/arch/powerpc/mm/kasan/book3s_32.c
>> @@ -46,7 +46,7 @@ int __init kasan_init_region(void *start, size_t size)
>>  	kasan_update_early_region(k_start, k_cur, __pte(0));
>>  
>>  	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
>> -		pmd_t *pmd = pmd_ptr_k(k_cur);
>> +		pmd_t *pmd = pmd_off_k(k_cur);
>>  		void *va = block + k_cur - k_start;
>>  		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
>>  
>> diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
>> index 286441bbbe49..92e8929cbe3e 100644
>> --- a/arch/powerpc/mm/nohash/8xx.c
>> +++ b/arch/powerpc/mm/nohash/8xx.c
>> @@ -74,7 +74,7 @@ static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
>>  static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
>>  					     pgprot_t prot, int psize, bool new)
>>  {
>> -	pmd_t *pmdp = pmd_ptr_k(va);
>> +	pmd_t *pmdp = pmd_off_k(va);
>>  	pte_t *ptep;
>>  
>>  	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
>> diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
>> index 45a0556089e8..1136257c3a99 100644
>> --- a/arch/powerpc/mm/pgtable.c
>> +++ b/arch/powerpc/mm/pgtable.c
>> @@ -264,7 +264,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
>>  #if defined(CONFIG_PPC_8xx)
>>  void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
>>  {
>> -	pmd_t *pmd = pmd_ptr(mm, addr);
>> +	pmd_t *pmd = pmd_off(mm, addr);
>>  	pte_basic_t val;
>>  	pte_basic_t *entry = &ptep->pte;
>>  	int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K;
>> diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
>> index e2d054c9575e..6eb4eab79385 100644
>> --- a/arch/powerpc/mm/pgtable_32.c
>> +++ b/arch/powerpc/mm/pgtable_32.c
>> @@ -40,7 +40,7 @@ notrace void __init early_ioremap_init(void)
>>  {
>>  	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
>>  	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
>> -	pmd_t *pmdp = pmd_ptr_k(addr);
>> +	pmd_t *pmdp = pmd_off_k(addr);
>>  
>>  	for (; (s32)(FIXADDR_TOP - addr) > 0;
>>  	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
>
> I have added the above hunks as to linux-next for tomorrow as a fix for
> mm-pgtable-add-shortcuts-for-accessing-kernel-pmd-and-pte.

Looks good. Thanks.

cheers

Patch

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 25c3cb8272c0..a6799723cd98 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1008,6 +1008,12 @@ extern struct page *p4d_page(p4d_t p4d);
 #define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
 #define p4d_page_vaddr(p4d)	__va(p4d_val(p4d) & ~P4D_MASKED_BITS)
 
+static inline unsigned long pgd_index(unsigned long address)
+{
+	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
+}
+#define pgd_index pgd_index
+
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index c188a6f64bcd..1927e1b653f2 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -205,10 +205,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 	*pmdp = __pmd(0);
 }
 
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
 /* to find an entry in a page-table-directory */
 #define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
 #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
@@ -241,7 +237,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
 	pte_basic_t old = pte_val(*p);
 	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
 	int num, i;
-	pmd_t *pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+	pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
 
 	if (!huge)
 		num = PAGE_SIZE / SZ_4K;
@@ -341,6 +337,10 @@ static inline int pte_young(pte_t pte)
 	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
 #endif
 
+#define pte_offset_kernel(dir, addr)	\
+	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
+				  pte_index(addr))
+
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
index db4ef44af22f..569d98a41881 100644
--- a/arch/powerpc/mm/kasan/8xx.c
+++ b/arch/powerpc/mm/kasan/8xx.c
@@ -10,7 +10,7 @@ 
 static int __init
 kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
 {
-	pmd_t *pmd = pmd_ptr_k(k_start);
+	pmd_t *pmd = pmd_off_k(k_start);
 	unsigned long k_cur, k_next;
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
@@ -59,7 +59,7 @@ int __init kasan_init_region(void *start, size_t size)
 		return ret;
 
 	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
index 4bc491a4a1fd..a32b4640b9de 100644
--- a/arch/powerpc/mm/kasan/book3s_32.c
+++ b/arch/powerpc/mm/kasan/book3s_32.c
@@ -46,7 +46,7 @@ int __init kasan_init_region(void *start, size_t size)
 	kasan_update_early_region(k_start, k_cur, __pte(0));
 
 	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 286441bbbe49..92e8929cbe3e 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -74,7 +74,7 @@ static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
 static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
 					     pgprot_t prot, int psize, bool new)
 {
-	pmd_t *pmdp = pmd_ptr_k(va);
+	pmd_t *pmdp = pmd_off_k(va);
 	pte_t *ptep;
 
 	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 45a0556089e8..1136257c3a99 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -264,7 +264,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 #if defined(CONFIG_PPC_8xx)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
 {
-	pmd_t *pmd = pmd_ptr(mm, addr);
+	pmd_t *pmd = pmd_off(mm, addr);
 	pte_basic_t val;
 	pte_basic_t *entry = &ptep->pte;
 	int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index e2d054c9575e..6eb4eab79385 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -40,7 +40,7 @@ notrace void __init early_ioremap_init(void)
 {
 	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
 	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
-	pmd_t *pmdp = pmd_ptr_k(addr);
+	pmd_t *pmdp = pmd_off_k(addr);
 
 	for (; (s32)(FIXADDR_TOP - addr) > 0;
 	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)