
[v6,15/18] powerpc/64s: Always define arch unmapped area calls

Message ID 20211201144153.2456614-16-npiggin@gmail.com (mailing list archive)
State Accepted
Series powerpc: Make hash MMU code build configurable

Commit Message

Nicholas Piggin Dec. 1, 2021, 2:41 p.m. UTC
To avoid any functional changes to radix paths when building with hash
MMU support disabled (and CONFIG_PPC_MM_SLICES=n), always define the
arch get_unmapped_area calls on 64s platforms.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/include/asm/book3s/64/hash.h |  4 ---
 arch/powerpc/include/asm/book3s/64/mmu.h  |  6 ++++
 arch/powerpc/mm/hugetlbpage.c             | 16 ++++++---
 arch/powerpc/mm/mmap.c                    | 40 +++++++++++++++++++----
 arch/powerpc/mm/slice.c                   | 20 ------------
 5 files changed, 51 insertions(+), 35 deletions(-)

Comments

Christophe Leroy Dec. 8, 2021, 9:38 a.m. UTC | #1
On 01/12/2021 at 15:41, Nicholas Piggin wrote:
> To avoid any functional changes to radix paths when building with hash
> MMU support disabled (and CONFIG_PPC_MM_SLICES=n), always define the
> arch get_unmapped_area calls on 64s platforms.
> 
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>   arch/powerpc/include/asm/book3s/64/hash.h |  4 ---
>   arch/powerpc/include/asm/book3s/64/mmu.h  |  6 ++++
>   arch/powerpc/mm/hugetlbpage.c             | 16 ++++++---
>   arch/powerpc/mm/mmap.c                    | 40 +++++++++++++++++++----
>   arch/powerpc/mm/slice.c                   | 20 ------------
>   5 files changed, 51 insertions(+), 35 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
> index 674fe0e890dc..a7a0572f3846 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
> @@ -99,10 +99,6 @@
>    * Defines the address of the vmemap area, in its own region on
>    * hash table CPUs.
>    */
> -#ifdef CONFIG_PPC_MM_SLICES
> -#define HAVE_ARCH_UNMAPPED_AREA
> -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
> -#endif /* CONFIG_PPC_MM_SLICES */
>   
>   /* PTEIDX nibble */
>   #define _PTEIDX_SECONDARY	0x8
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
> index c02f42d1031e..015d7d972d16 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> @@ -4,6 +4,12 @@
>   
>   #include <asm/page.h>
>   
> +#ifdef CONFIG_HUGETLB_PAGE
> +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
> +#endif
> +#define HAVE_ARCH_UNMAPPED_AREA
> +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
> +
>   #ifndef __ASSEMBLY__
>   /*
>    * Page size definition
> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
> index 82d8b368ca6d..ddead41e2194 100644
> --- a/arch/powerpc/mm/hugetlbpage.c
> +++ b/arch/powerpc/mm/hugetlbpage.c
> @@ -542,20 +542,26 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
>   	return page;
>   }
>   
> -#ifdef CONFIG_PPC_MM_SLICES
> +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
> +static inline int file_to_psize(struct file *file)
> +{
> +	struct hstate *hstate = hstate_file(file);
> +	return shift_to_mmu_psize(huge_page_shift(hstate));
> +}
> +
>   unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
>   					unsigned long len, unsigned long pgoff,
>   					unsigned long flags)
>   {
> -	struct hstate *hstate = hstate_file(file);
> -	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
> -
>   #ifdef CONFIG_PPC_RADIX_MMU
>   	if (radix_enabled())
>   		return radix__hugetlb_get_unmapped_area(file, addr, len,
>   						       pgoff, flags);
>   #endif
> -	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
> +#ifdef CONFIG_PPC_MM_SLICES
> +	return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
> +#endif
> +	BUG();

We shouldn't add new instances of BUG().

BUILD_BUG() should do the trick here.
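
(For reference, a minimal sketch of the difference, with hypothetical names and not taken from the patch: BUG() always compiles to a runtime trap, while BUILD_BUG() fails the build only if the call survives dead-code elimination.)

	static unsigned long sketch_get_area(unsigned long addr)
	{
	#ifdef CONFIG_PPC_MM_SLICES
		return addr;	/* real path; makes everything below dead code */
	#endif
		/*
		 * When the early return above is compiled in, the optimizer
		 * eliminates this call and the build succeeds. If the call
		 * survives, BUILD_BUG() turns the broken configuration into
		 * a compile error instead of a runtime trap.
		 */
		BUILD_BUG();
	}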

>   }
>   #endif
>   
> diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
> index ae683fdc716c..c475cf810aa8 100644
> --- a/arch/powerpc/mm/mmap.c
> +++ b/arch/powerpc/mm/mmap.c
> @@ -80,6 +80,7 @@ static inline unsigned long mmap_base(unsigned long rnd,
>   	return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
>   }
>   
> +#ifdef HAVE_ARCH_UNMAPPED_AREA
>   #ifdef CONFIG_PPC_RADIX_MMU
>   /*
>    * Same function as generic code used only for radix, because we don't need to overload
> @@ -181,11 +182,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
>   	 */
>   	return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
>   }
> +#endif
> +
> +unsigned long arch_get_unmapped_area(struct file *filp,
> +				     unsigned long addr,
> +				     unsigned long len,
> +				     unsigned long pgoff,
> +				     unsigned long flags)
> +{
> +#ifdef CONFIG_PPC_MM_SLICES
> +	return slice_get_unmapped_area(addr, len, flags,
> +				       mm_ctx_user_psize(&current->mm->context), 0);
> +#else
> +	BUG();

Same.

And the #else isn't needed
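
(i.e. the shape already used in hugetlb_get_unmapped_area above; a sketch of the suggested form:)

	unsigned long arch_get_unmapped_area(struct file *filp,
					     unsigned long addr,
					     unsigned long len,
					     unsigned long pgoff,
					     unsigned long flags)
	{
	#ifdef CONFIG_PPC_MM_SLICES
		return slice_get_unmapped_area(addr, len, flags,
					       mm_ctx_user_psize(&current->mm->context), 0);
	#endif
		/* Reached only when CONFIG_PPC_MM_SLICES=n; in every other
		 * configuration the return above exits first. */
		BUG();
	}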

> +#endif
> +}
> +
> +unsigned long arch_get_unmapped_area_topdown(struct file *filp,
> +					     const unsigned long addr0,
> +					     const unsigned long len,
> +					     const unsigned long pgoff,
> +					     const unsigned long flags)
> +{
> +#ifdef CONFIG_PPC_MM_SLICES
> +	return slice_get_unmapped_area(addr0, len, flags,
> +				       mm_ctx_user_psize(&current->mm->context), 1);
> +#else
> +	BUG();

Same

And the #else isn't needed

> +#endif
> +}
> +#endif /* HAVE_ARCH_UNMAPPED_AREA */
>   
>   static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
>   					unsigned long random_factor,
>   					struct rlimit *rlim_stack)
>   {
> +#ifdef CONFIG_PPC_RADIX_MMU
>   	if (mmap_is_legacy(rlim_stack)) {
>   		mm->mmap_base = TASK_UNMAPPED_BASE;
>   		mm->get_unmapped_area = radix__arch_get_unmapped_area;
> @@ -193,13 +225,9 @@ static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
>   		mm->mmap_base = mmap_base(random_factor, rlim_stack);
>   		mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
>   	}
> -}
> -#else
> -/* dummy */
> -extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
> -					unsigned long random_factor,
> -					struct rlimit *rlim_stack);
>   #endif
> +}
> +
>   /*
>    * This function, called very early during the creation of a new
>    * process VM image, sets up which VM layout function to use:
> diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
> index 82b45b1cb973..f42711f865f3 100644
> --- a/arch/powerpc/mm/slice.c
> +++ b/arch/powerpc/mm/slice.c
> @@ -639,26 +639,6 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
>   }
>   EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
>   
> -unsigned long arch_get_unmapped_area(struct file *filp,
> -				     unsigned long addr,
> -				     unsigned long len,
> -				     unsigned long pgoff,
> -				     unsigned long flags)
> -{
> -	return slice_get_unmapped_area(addr, len, flags,
> -				       mm_ctx_user_psize(&current->mm->context), 0);
> -}
> -
> -unsigned long arch_get_unmapped_area_topdown(struct file *filp,
> -					     const unsigned long addr0,
> -					     const unsigned long len,
> -					     const unsigned long pgoff,
> -					     const unsigned long flags)
> -{
> -	return slice_get_unmapped_area(addr0, len, flags,
> -				       mm_ctx_user_psize(&current->mm->context), 1);
> -}
> -
>   unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
>   {
>   	unsigned char *psizes;
>
Christophe Leroy Dec. 8, 2021, 10 a.m. UTC | #2
On 01/12/2021 at 15:41, Nicholas Piggin wrote:
> To avoid any functional changes to radix paths when building with hash
> MMU support disabled (and CONFIG_PPC_MM_SLICES=n), always define the
> arch get_unmapped_area calls on 64s platforms.
> 
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>   arch/powerpc/include/asm/book3s/64/hash.h |  4 ---
>   arch/powerpc/include/asm/book3s/64/mmu.h  |  6 ++++
>   arch/powerpc/mm/hugetlbpage.c             | 16 ++++++---
>   arch/powerpc/mm/mmap.c                    | 40 +++++++++++++++++++----
>   arch/powerpc/mm/slice.c                   | 20 ------------
>   5 files changed, 51 insertions(+), 35 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
> index 674fe0e890dc..a7a0572f3846 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
> @@ -99,10 +99,6 @@
>    * Defines the address of the vmemap area, in its own region on
>    * hash table CPUs.
>    */
> -#ifdef CONFIG_PPC_MM_SLICES
> -#define HAVE_ARCH_UNMAPPED_AREA
> -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
> -#endif /* CONFIG_PPC_MM_SLICES */
>   
>   /* PTEIDX nibble */
>   #define _PTEIDX_SECONDARY	0x8
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
> index c02f42d1031e..015d7d972d16 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> @@ -4,6 +4,12 @@
>   
>   #include <asm/page.h>
>   
> +#ifdef CONFIG_HUGETLB_PAGE
> +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
> +#endif
> +#define HAVE_ARCH_UNMAPPED_AREA
> +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
> +
>   #ifndef __ASSEMBLY__
>   /*
>    * Page size definition
> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
> index 82d8b368ca6d..ddead41e2194 100644
> --- a/arch/powerpc/mm/hugetlbpage.c
> +++ b/arch/powerpc/mm/hugetlbpage.c
> @@ -542,20 +542,26 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
>   	return page;
>   }
>   
> -#ifdef CONFIG_PPC_MM_SLICES
> +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA

Could use CONFIG_PPC_BOOK3S_64 instead

> +static inline int file_to_psize(struct file *file)

'inline' is not needed.

> +{
> +	struct hstate *hstate = hstate_file(file);
> +	return shift_to_mmu_psize(huge_page_shift(hstate));
> +}
> +
>   unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
>   					unsigned long len, unsigned long pgoff,
>   					unsigned long flags)
>   {
> -	struct hstate *hstate = hstate_file(file);
> -	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
> -
>   #ifdef CONFIG_PPC_RADIX_MMU
>   	if (radix_enabled())
>   		return radix__hugetlb_get_unmapped_area(file, addr, len,
>   						       pgoff, flags);
>   #endif
> -	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
> +#ifdef CONFIG_PPC_MM_SLICES
> +	return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
> +#endif
> +	BUG();
>   }
>   #endif
>   
> diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
> index ae683fdc716c..c475cf810aa8 100644
> --- a/arch/powerpc/mm/mmap.c
> +++ b/arch/powerpc/mm/mmap.c
> @@ -80,6 +80,7 @@ static inline unsigned long mmap_base(unsigned long rnd,
>   	return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
>   }
>   
> +#ifdef HAVE_ARCH_UNMAPPED_AREA

Could use CONFIG_PPC_BOOK3S_64 instead. Or better, put all that stuff in 
a file in mm/book3s64/ directory

>   #ifdef CONFIG_PPC_RADIX_MMU
>   /*
>    * Same function as generic code used only for radix, because we don't need to overload
> @@ -181,11 +182,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
>   	 */
>   	return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
>   }
> +#endif
> +
> +unsigned long arch_get_unmapped_area(struct file *filp,
> +				     unsigned long addr,
> +				     unsigned long len,
> +				     unsigned long pgoff,
> +				     unsigned long flags)
> +{
> +#ifdef CONFIG_PPC_MM_SLICES
> +	return slice_get_unmapped_area(addr, len, flags,
> +				       mm_ctx_user_psize(&current->mm->context), 0);
> +#else
> +	BUG();
> +#endif
> +}
> +
> +unsigned long arch_get_unmapped_area_topdown(struct file *filp,
> +					     const unsigned long addr0,
> +					     const unsigned long len,
> +					     const unsigned long pgoff,
> +					     const unsigned long flags)
> +{
> +#ifdef CONFIG_PPC_MM_SLICES
> +	return slice_get_unmapped_area(addr0, len, flags,
> +				       mm_ctx_user_psize(&current->mm->context), 1);
> +#else
> +	BUG();
> +#endif
> +}
> +#endif /* HAVE_ARCH_UNMAPPED_AREA */
>   
>   static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
>   					unsigned long random_factor,
>   					struct rlimit *rlim_stack)
>   {
> +#ifdef CONFIG_PPC_RADIX_MMU
>   	if (mmap_is_legacy(rlim_stack)) {
>   		mm->mmap_base = TASK_UNMAPPED_BASE;
>   		mm->get_unmapped_area = radix__arch_get_unmapped_area;
> @@ -193,13 +225,9 @@ static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
>   		mm->mmap_base = mmap_base(random_factor, rlim_stack);
>   		mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
>   	}
> -}
> -#else
> -/* dummy */
> -extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
> -					unsigned long random_factor,
> -					struct rlimit *rlim_stack);
>   #endif
> +}
> +
>   /*
>    * This function, called very early during the creation of a new
>    * process VM image, sets up which VM layout function to use:
> diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
> index 82b45b1cb973..f42711f865f3 100644
> --- a/arch/powerpc/mm/slice.c
> +++ b/arch/powerpc/mm/slice.c
> @@ -639,26 +639,6 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
>   }
>   EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
>   
> -unsigned long arch_get_unmapped_area(struct file *filp,
> -				     unsigned long addr,
> -				     unsigned long len,
> -				     unsigned long pgoff,
> -				     unsigned long flags)
> -{
> -	return slice_get_unmapped_area(addr, len, flags,
> -				       mm_ctx_user_psize(&current->mm->context), 0);
> -}
> -
> -unsigned long arch_get_unmapped_area_topdown(struct file *filp,
> -					     const unsigned long addr0,
> -					     const unsigned long len,
> -					     const unsigned long pgoff,
> -					     const unsigned long flags)
> -{
> -	return slice_get_unmapped_area(addr0, len, flags,
> -				       mm_ctx_user_psize(&current->mm->context), 1);
> -}
> -
>   unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
>   {
>   	unsigned char *psizes;
>
Nicholas Piggin Dec. 9, 2021, 8:25 a.m. UTC | #3
Excerpts from Christophe Leroy's message of December 8, 2021 7:38 pm:
> 
> 
> On 01/12/2021 at 15:41, Nicholas Piggin wrote:
>> To avoid any functional changes to radix paths when building with hash
>> MMU support disabled (and CONFIG_PPC_MM_SLICES=n), always define the
>> arch get_unmapped_area calls on 64s platforms.
>> 
>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>> ---
>>   arch/powerpc/include/asm/book3s/64/hash.h |  4 ---
>>   arch/powerpc/include/asm/book3s/64/mmu.h  |  6 ++++
>>   arch/powerpc/mm/hugetlbpage.c             | 16 ++++++---
>>   arch/powerpc/mm/mmap.c                    | 40 +++++++++++++++++++----
>>   arch/powerpc/mm/slice.c                   | 20 ------------
>>   5 files changed, 51 insertions(+), 35 deletions(-)
>> 
>> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
>> index 674fe0e890dc..a7a0572f3846 100644
>> --- a/arch/powerpc/include/asm/book3s/64/hash.h
>> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
>> @@ -99,10 +99,6 @@
>>    * Defines the address of the vmemap area, in its own region on
>>    * hash table CPUs.
>>    */
>> -#ifdef CONFIG_PPC_MM_SLICES
>> -#define HAVE_ARCH_UNMAPPED_AREA
>> -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
>> -#endif /* CONFIG_PPC_MM_SLICES */
>>   
>>   /* PTEIDX nibble */
>>   #define _PTEIDX_SECONDARY	0x8
>> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
>> index c02f42d1031e..015d7d972d16 100644
>> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
>> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
>> @@ -4,6 +4,12 @@
>>   
>>   #include <asm/page.h>
>>   
>> +#ifdef CONFIG_HUGETLB_PAGE
>> +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
>> +#endif
>> +#define HAVE_ARCH_UNMAPPED_AREA
>> +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
>> +
>>   #ifndef __ASSEMBLY__
>>   /*
>>    * Page size definition
>> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
>> index 82d8b368ca6d..ddead41e2194 100644
>> --- a/arch/powerpc/mm/hugetlbpage.c
>> +++ b/arch/powerpc/mm/hugetlbpage.c
>> @@ -542,20 +542,26 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
>>   	return page;
>>   }
>>   
>> -#ifdef CONFIG_PPC_MM_SLICES
>> +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
>> +static inline int file_to_psize(struct file *file)
>> +{
>> +	struct hstate *hstate = hstate_file(file);
>> +	return shift_to_mmu_psize(huge_page_shift(hstate));
>> +}
>> +
>>   unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
>>   					unsigned long len, unsigned long pgoff,
>>   					unsigned long flags)
>>   {
>> -	struct hstate *hstate = hstate_file(file);
>> -	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
>> -
>>   #ifdef CONFIG_PPC_RADIX_MMU
>>   	if (radix_enabled())
>>   		return radix__hugetlb_get_unmapped_area(file, addr, len,
>>   						       pgoff, flags);
>>   #endif
>> -	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
>> +#ifdef CONFIG_PPC_MM_SLICES
>> +	return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
>> +#endif
>> +	BUG();
> 
> We shouldn't add new instances of BUG().
> 
> BUILD_BUG() should do the trick here.
> 
>>   }
>>   #endif
>>   
>> diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
>> index ae683fdc716c..c475cf810aa8 100644
>> --- a/arch/powerpc/mm/mmap.c
>> +++ b/arch/powerpc/mm/mmap.c
>> @@ -80,6 +80,7 @@ static inline unsigned long mmap_base(unsigned long rnd,
>>   	return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
>>   }
>>   
>> +#ifdef HAVE_ARCH_UNMAPPED_AREA
>>   #ifdef CONFIG_PPC_RADIX_MMU
>>   /*
>>    * Same function as generic code used only for radix, because we don't need to overload
>> @@ -181,11 +182,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
>>   	 */
>>   	return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
>>   }
>> +#endif
>> +
>> +unsigned long arch_get_unmapped_area(struct file *filp,
>> +				     unsigned long addr,
>> +				     unsigned long len,
>> +				     unsigned long pgoff,
>> +				     unsigned long flags)
>> +{
>> +#ifdef CONFIG_PPC_MM_SLICES
>> +	return slice_get_unmapped_area(addr, len, flags,
>> +				       mm_ctx_user_psize(&current->mm->context), 0);
>> +#else
>> +	BUG();
> 
> Same.
> 
> And the #else isn't needed
> 
>> +#endif
>> +}
>> +
>> +unsigned long arch_get_unmapped_area_topdown(struct file *filp,
>> +					     const unsigned long addr0,
>> +					     const unsigned long len,
>> +					     const unsigned long pgoff,
>> +					     const unsigned long flags)
>> +{
>> +#ifdef CONFIG_PPC_MM_SLICES
>> +	return slice_get_unmapped_area(addr0, len, flags,
>> +				       mm_ctx_user_psize(&current->mm->context), 1);
>> +#else
>> +	BUG();
> 
> Same
> 
> And the #else isn't needed

Fair enough. I'll see if mpe can squash in an incremental patch.

Thanks,
Nick
Christophe Leroy Dec. 9, 2021, 8:28 a.m. UTC | #4
On 09/12/2021 at 09:25, Nicholas Piggin wrote:
> Excerpts from Christophe Leroy's message of December 8, 2021 7:38 pm:
>>
>>
>> On 01/12/2021 at 15:41, Nicholas Piggin wrote:
>>> To avoid any functional changes to radix paths when building with hash
>>> MMU support disabled (and CONFIG_PPC_MM_SLICES=n), always define the
>>> arch get_unmapped_area calls on 64s platforms.
>>>
>>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>>> ---
>>>    arch/powerpc/include/asm/book3s/64/hash.h |  4 ---
>>>    arch/powerpc/include/asm/book3s/64/mmu.h  |  6 ++++
>>>    arch/powerpc/mm/hugetlbpage.c             | 16 ++++++---
>>>    arch/powerpc/mm/mmap.c                    | 40 +++++++++++++++++++----
>>>    arch/powerpc/mm/slice.c                   | 20 ------------
>>>    5 files changed, 51 insertions(+), 35 deletions(-)
>>>
>>> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
>>> index 674fe0e890dc..a7a0572f3846 100644
>>> --- a/arch/powerpc/include/asm/book3s/64/hash.h
>>> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
>>> @@ -99,10 +99,6 @@
>>>     * Defines the address of the vmemap area, in its own region on
>>>     * hash table CPUs.
>>>     */
>>> -#ifdef CONFIG_PPC_MM_SLICES
>>> -#define HAVE_ARCH_UNMAPPED_AREA
>>> -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
>>> -#endif /* CONFIG_PPC_MM_SLICES */
>>>    
>>>    /* PTEIDX nibble */
>>>    #define _PTEIDX_SECONDARY	0x8
>>> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
>>> index c02f42d1031e..015d7d972d16 100644
>>> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
>>> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
>>> @@ -4,6 +4,12 @@
>>>    
>>>    #include <asm/page.h>
>>>    
>>> +#ifdef CONFIG_HUGETLB_PAGE
>>> +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
>>> +#endif
>>> +#define HAVE_ARCH_UNMAPPED_AREA
>>> +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
>>> +
>>>    #ifndef __ASSEMBLY__
>>>    /*
>>>     * Page size definition
>>> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
>>> index 82d8b368ca6d..ddead41e2194 100644
>>> --- a/arch/powerpc/mm/hugetlbpage.c
>>> +++ b/arch/powerpc/mm/hugetlbpage.c
>>> @@ -542,20 +542,26 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
>>>    	return page;
>>>    }
>>>    
>>> -#ifdef CONFIG_PPC_MM_SLICES
>>> +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
>>> +static inline int file_to_psize(struct file *file)
>>> +{
>>> +	struct hstate *hstate = hstate_file(file);
>>> +	return shift_to_mmu_psize(huge_page_shift(hstate));
>>> +}
>>> +
>>>    unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
>>>    					unsigned long len, unsigned long pgoff,
>>>    					unsigned long flags)
>>>    {
>>> -	struct hstate *hstate = hstate_file(file);
>>> -	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
>>> -
>>>    #ifdef CONFIG_PPC_RADIX_MMU
>>>    	if (radix_enabled())
>>>    		return radix__hugetlb_get_unmapped_area(file, addr, len,
>>>    						       pgoff, flags);
>>>    #endif
>>> -	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
>>> +#ifdef CONFIG_PPC_MM_SLICES
>>> +	return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
>>> +#endif
>>> +	BUG();
>>
>> We shouldn't add new instances of BUG().
>>
>> BUILD_BUG() should do the trick here.
>>
>>>    }
>>>    #endif
>>>    
>>> diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
>>> index ae683fdc716c..c475cf810aa8 100644
>>> --- a/arch/powerpc/mm/mmap.c
>>> +++ b/arch/powerpc/mm/mmap.c
>>> @@ -80,6 +80,7 @@ static inline unsigned long mmap_base(unsigned long rnd,
>>>    	return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
>>>    }
>>>    
>>> +#ifdef HAVE_ARCH_UNMAPPED_AREA
>>>    #ifdef CONFIG_PPC_RADIX_MMU
>>>    /*
>>>     * Same function as generic code used only for radix, because we don't need to overload
>>> @@ -181,11 +182,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
>>>    	 */
>>>    	return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
>>>    }
>>> +#endif
>>> +
>>> +unsigned long arch_get_unmapped_area(struct file *filp,
>>> +				     unsigned long addr,
>>> +				     unsigned long len,
>>> +				     unsigned long pgoff,
>>> +				     unsigned long flags)
>>> +{
>>> +#ifdef CONFIG_PPC_MM_SLICES
>>> +	return slice_get_unmapped_area(addr, len, flags,
>>> +				       mm_ctx_user_psize(&current->mm->context), 0);
>>> +#else
>>> +	BUG();
>>
>> Same.
>>
>> And the #else isn't needed
>>
>>> +#endif
>>> +}
>>> +
>>> +unsigned long arch_get_unmapped_area_topdown(struct file *filp,
>>> +					     const unsigned long addr0,
>>> +					     const unsigned long len,
>>> +					     const unsigned long pgoff,
>>> +					     const unsigned long flags)
>>> +{
>>> +#ifdef CONFIG_PPC_MM_SLICES
>>> +	return slice_get_unmapped_area(addr0, len, flags,
>>> +				       mm_ctx_user_psize(&current->mm->context), 1);
>>> +#else
>>> +	BUG();
>>
>> Same
>>
>> And the #else isn't needed
> 
> Fair enough. I'll see if mpe can squash in an incremental patch.
> 

Anyway, my follow-up series "Convert powerpc to default topdown mmap 
layout" cleans it up.

Christophe
Nicholas Piggin Dec. 9, 2021, 8:29 a.m. UTC | #5
Excerpts from Christophe Leroy's message of December 8, 2021 8:00 pm:
> 
> 
> On 01/12/2021 at 15:41, Nicholas Piggin wrote:
>> To avoid any functional changes to radix paths when building with hash
>> MMU support disabled (and CONFIG_PPC_MM_SLICES=n), always define the
>> arch get_unmapped_area calls on 64s platforms.
>> 
>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>> ---
>>   arch/powerpc/include/asm/book3s/64/hash.h |  4 ---
>>   arch/powerpc/include/asm/book3s/64/mmu.h  |  6 ++++
>>   arch/powerpc/mm/hugetlbpage.c             | 16 ++++++---
>>   arch/powerpc/mm/mmap.c                    | 40 +++++++++++++++++++----
>>   arch/powerpc/mm/slice.c                   | 20 ------------
>>   5 files changed, 51 insertions(+), 35 deletions(-)
>> 
>> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
>> index 674fe0e890dc..a7a0572f3846 100644
>> --- a/arch/powerpc/include/asm/book3s/64/hash.h
>> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
>> @@ -99,10 +99,6 @@
>>    * Defines the address of the vmemap area, in its own region on
>>    * hash table CPUs.
>>    */
>> -#ifdef CONFIG_PPC_MM_SLICES
>> -#define HAVE_ARCH_UNMAPPED_AREA
>> -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
>> -#endif /* CONFIG_PPC_MM_SLICES */
>>   
>>   /* PTEIDX nibble */
>>   #define _PTEIDX_SECONDARY	0x8
>> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
>> index c02f42d1031e..015d7d972d16 100644
>> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
>> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
>> @@ -4,6 +4,12 @@
>>   
>>   #include <asm/page.h>
>>   
>> +#ifdef CONFIG_HUGETLB_PAGE
>> +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
>> +#endif
>> +#define HAVE_ARCH_UNMAPPED_AREA
>> +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
>> +
>>   #ifndef __ASSEMBLY__
>>   /*
>>    * Page size definition
>> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
>> index 82d8b368ca6d..ddead41e2194 100644
>> --- a/arch/powerpc/mm/hugetlbpage.c
>> +++ b/arch/powerpc/mm/hugetlbpage.c
>> @@ -542,20 +542,26 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
>>   	return page;
>>   }
>>   
>> -#ifdef CONFIG_PPC_MM_SLICES
>> +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
> 
> Could use CONFIG_PPC_BOOK3S_64 instead

I was going to make it de-selectable with !HASH. I think your series
cleans this stuff up so I don't think it's a big deal.

> 
>> +static inline int file_to_psize(struct file *file)
> 
> 'inline' is not needed.

It is needed, otherwise a !SLICES config gives a defined-but-not-used
error.
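
(For context, a sketch of the warning being avoided, assuming GCC's usual behaviour: an unused plain 'static' function triggers -Wunused-function, while 'static inline' is exempt. Names are hypothetical.)

	/* Only caller is #ifdef'd out under CONFIG_PPC_MM_SLICES=n: */
	static int file_to_psize_plain(struct file *file)
	{
		return 0;	/* warning: 'file_to_psize_plain' defined but not used */
	}

	static inline int file_to_psize_inline(struct file *file)
	{
		return 0;	/* no warning: GCC exempts unused static inline functions */
	}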

> 
>> +{
>> +	struct hstate *hstate = hstate_file(file);
>> +	return shift_to_mmu_psize(huge_page_shift(hstate));
>> +}
>> +
>>   unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
>>   					unsigned long len, unsigned long pgoff,
>>   					unsigned long flags)
>>   {
>> -	struct hstate *hstate = hstate_file(file);
>> -	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
>> -
>>   #ifdef CONFIG_PPC_RADIX_MMU
>>   	if (radix_enabled())
>>   		return radix__hugetlb_get_unmapped_area(file, addr, len,
>>   						       pgoff, flags);
>>   #endif
>> -	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
>> +#ifdef CONFIG_PPC_MM_SLICES
>> +	return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
>> +#endif
>> +	BUG();
>>   }
>>   #endif
>>   
>> diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
>> index ae683fdc716c..c475cf810aa8 100644
>> --- a/arch/powerpc/mm/mmap.c
>> +++ b/arch/powerpc/mm/mmap.c
>> @@ -80,6 +80,7 @@ static inline unsigned long mmap_base(unsigned long rnd,
>>   	return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
>>   }
>>   
>> +#ifdef HAVE_ARCH_UNMAPPED_AREA
> 
> Could use CONFIG_PPC_BOOK3S_64 instead. Or better, put all that stuff in 
> a file in mm/book3s64/ directory

Seeing as you cleaned it up with your series, probably doesn't matter
much.

Thanks,
Nick
Nicholas Piggin Dec. 9, 2021, 9:30 a.m. UTC | #6
Excerpts from Nicholas Piggin's message of December 9, 2021 6:25 pm:
> Excerpts from Christophe Leroy's message of December 8, 2021 7:38 pm:
>> 
>> 
>> On 01/12/2021 at 15:41, Nicholas Piggin wrote:
>>> To avoid any functional changes to radix paths when building with hash
>>> MMU support disabled (and CONFIG_PPC_MM_SLICES=n), always define the
>>> arch get_unmapped_area calls on 64s platforms.
>>> 
>>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>>> ---
>>>   arch/powerpc/include/asm/book3s/64/hash.h |  4 ---
>>>   arch/powerpc/include/asm/book3s/64/mmu.h  |  6 ++++
>>>   arch/powerpc/mm/hugetlbpage.c             | 16 ++++++---
>>>   arch/powerpc/mm/mmap.c                    | 40 +++++++++++++++++++----
>>>   arch/powerpc/mm/slice.c                   | 20 ------------
>>>   5 files changed, 51 insertions(+), 35 deletions(-)
>>> 
>>> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
>>> index 674fe0e890dc..a7a0572f3846 100644
>>> --- a/arch/powerpc/include/asm/book3s/64/hash.h
>>> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
>>> @@ -99,10 +99,6 @@
>>>    * Defines the address of the vmemap area, in its own region on
>>>    * hash table CPUs.
>>>    */
>>> -#ifdef CONFIG_PPC_MM_SLICES
>>> -#define HAVE_ARCH_UNMAPPED_AREA
>>> -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
>>> -#endif /* CONFIG_PPC_MM_SLICES */
>>>   
>>>   /* PTEIDX nibble */
>>>   #define _PTEIDX_SECONDARY	0x8
>>> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
>>> index c02f42d1031e..015d7d972d16 100644
>>> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
>>> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
>>> @@ -4,6 +4,12 @@
>>>   
>>>   #include <asm/page.h>
>>>   
>>> +#ifdef CONFIG_HUGETLB_PAGE
>>> +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
>>> +#endif
>>> +#define HAVE_ARCH_UNMAPPED_AREA
>>> +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
>>> +
>>>   #ifndef __ASSEMBLY__
>>>   /*
>>>    * Page size definition
>>> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
>>> index 82d8b368ca6d..ddead41e2194 100644
>>> --- a/arch/powerpc/mm/hugetlbpage.c
>>> +++ b/arch/powerpc/mm/hugetlbpage.c
>>> @@ -542,20 +542,26 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
>>>   	return page;
>>>   }
>>>   
>>> -#ifdef CONFIG_PPC_MM_SLICES
>>> +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
>>> +static inline int file_to_psize(struct file *file)
>>> +{
>>> +	struct hstate *hstate = hstate_file(file);
>>> +	return shift_to_mmu_psize(huge_page_shift(hstate));
>>> +}
>>> +
>>>   unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
>>>   					unsigned long len, unsigned long pgoff,
>>>   					unsigned long flags)
>>>   {
>>> -	struct hstate *hstate = hstate_file(file);
>>> -	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
>>> -
>>>   #ifdef CONFIG_PPC_RADIX_MMU
>>>   	if (radix_enabled())
>>>   		return radix__hugetlb_get_unmapped_area(file, addr, len,
>>>   						       pgoff, flags);
>>>   #endif
>>> -	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
>>> +#ifdef CONFIG_PPC_MM_SLICES
>>> +	return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
>>> +#endif
>>> +	BUG();
>> 
>> We shouldn't add new instances of BUG().
>> 
>> BUILD_BUG() should do the trick here.
>> 
>>>   }
>>>   #endif
>>>   
>>> diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
>>> index ae683fdc716c..c475cf810aa8 100644
>>> --- a/arch/powerpc/mm/mmap.c
>>> +++ b/arch/powerpc/mm/mmap.c
>>> @@ -80,6 +80,7 @@ static inline unsigned long mmap_base(unsigned long rnd,
>>>   	return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
>>>   }
>>>   
>>> +#ifdef HAVE_ARCH_UNMAPPED_AREA
>>>   #ifdef CONFIG_PPC_RADIX_MMU
>>>   /*
>>>    * Same function as generic code used only for radix, because we don't need to overload
>>> @@ -181,11 +182,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
>>>   	 */
>>>   	return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
>>>   }
>>> +#endif
>>> +
>>> +unsigned long arch_get_unmapped_area(struct file *filp,
>>> +				     unsigned long addr,
>>> +				     unsigned long len,
>>> +				     unsigned long pgoff,
>>> +				     unsigned long flags)
>>> +{
>>> +#ifdef CONFIG_PPC_MM_SLICES
>>> +	return slice_get_unmapped_area(addr, len, flags,
>>> +				       mm_ctx_user_psize(&current->mm->context), 0);
>>> +#else
>>> +	BUG();
>> 
>> Same.
>> 
>> And the #else isn't needed
>> 
>>> +#endif
>>> +}
>>> +
>>> +unsigned long arch_get_unmapped_area_topdown(struct file *filp,
>>> +					     const unsigned long addr0,
>>> +					     const unsigned long len,
>>> +					     const unsigned long pgoff,
>>> +					     const unsigned long flags)
>>> +{
>>> +#ifdef CONFIG_PPC_MM_SLICES
>>> +	return slice_get_unmapped_area(addr0, len, flags,
>>> +				       mm_ctx_user_psize(&current->mm->context), 1);
>>> +#else
>>> +	BUG();
>> 
>> Same
>> 
>> And the #else isn't needed
> 
> Fair enough. I'll see if mpe can squash in an incremental patch.

Ah no, we can't do that here: arch_get_unmapped_area* is not static,
so BUILD_BUG() triggers.
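
(In sketch form, assuming the usual BUILD_BUG() implementation via a __compiletime_error attribute: the call must be optimized away for the build to succeed, and a non-static function body is always emitted.)

	/* Non-static: emitted unconditionally, so in a SLICES=n config the
	 * BUILD_BUG() call survives optimization and the build fails, even
	 * if nothing ever calls arch_get_unmapped_area. */
	unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags)
	{
		BUILD_BUG();
	}

	/* A static function with no remaining callers would instead be
	 * discarded whole, taking the BUILD_BUG() call with it. */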

I think we can just look at how it could be improved in future patches.

Thanks,
Nick

Patch

diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 674fe0e890dc..a7a0572f3846 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -99,10 +99,6 @@ 
  * Defines the address of the vmemap area, in its own region on
  * hash table CPUs.
  */
-#ifdef CONFIG_PPC_MM_SLICES
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#endif /* CONFIG_PPC_MM_SLICES */
 
 /* PTEIDX nibble */
 #define _PTEIDX_SECONDARY	0x8
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index c02f42d1031e..015d7d972d16 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -4,6 +4,12 @@ 
 
 #include <asm/page.h>
 
+#ifdef CONFIG_HUGETLB_PAGE
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 #ifndef __ASSEMBLY__
 /*
  * Page size definition
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 82d8b368ca6d..ddead41e2194 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -542,20 +542,26 @@  struct page *follow_huge_pd(struct vm_area_struct *vma,
 	return page;
 }
 
-#ifdef CONFIG_PPC_MM_SLICES
+#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+static inline int file_to_psize(struct file *file)
+{
+	struct hstate *hstate = hstate_file(file);
+	return shift_to_mmu_psize(huge_page_shift(hstate));
+}
+
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 					unsigned long len, unsigned long pgoff,
 					unsigned long flags)
 {
-	struct hstate *hstate = hstate_file(file);
-	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
-
 #ifdef CONFIG_PPC_RADIX_MMU
 	if (radix_enabled())
 		return radix__hugetlb_get_unmapped_area(file, addr, len,
 						       pgoff, flags);
 #endif
-	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
+#ifdef CONFIG_PPC_MM_SLICES
+	return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
+#endif
+	BUG();
 }
 #endif
 
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index ae683fdc716c..c475cf810aa8 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -80,6 +80,7 @@  static inline unsigned long mmap_base(unsigned long rnd,
 	return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
 }
 
+#ifdef HAVE_ARCH_UNMAPPED_AREA
 #ifdef CONFIG_PPC_RADIX_MMU
 /*
  * Same function as generic code used only for radix, because we don't need to overload
@@ -181,11 +182,42 @@  radix__arch_get_unmapped_area_topdown(struct file *filp,
 	 */
 	return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 }
+#endif
+
+unsigned long arch_get_unmapped_area(struct file *filp,
+				     unsigned long addr,
+				     unsigned long len,
+				     unsigned long pgoff,
+				     unsigned long flags)
+{
+#ifdef CONFIG_PPC_MM_SLICES
+	return slice_get_unmapped_area(addr, len, flags,
+				       mm_ctx_user_psize(&current->mm->context), 0);
+#else
+	BUG();
+#endif
+}
+
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+					     const unsigned long addr0,
+					     const unsigned long len,
+					     const unsigned long pgoff,
+					     const unsigned long flags)
+{
+#ifdef CONFIG_PPC_MM_SLICES
+	return slice_get_unmapped_area(addr0, len, flags,
+				       mm_ctx_user_psize(&current->mm->context), 1);
+#else
+	BUG();
+#endif
+}
+#endif /* HAVE_ARCH_UNMAPPED_AREA */
 
 static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
 					unsigned long random_factor,
 					struct rlimit *rlim_stack)
 {
+#ifdef CONFIG_PPC_RADIX_MMU
 	if (mmap_is_legacy(rlim_stack)) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = radix__arch_get_unmapped_area;
@@ -193,13 +225,9 @@  static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
 		mm->mmap_base = mmap_base(random_factor, rlim_stack);
 		mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
 	}
-}
-#else
-/* dummy */
-extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
-					unsigned long random_factor,
-					struct rlimit *rlim_stack);
 #endif
+}
+
 /*
  * This function, called very early during the creation of a new
  * process VM image, sets up which VM layout function to use:
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 82b45b1cb973..f42711f865f3 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -639,26 +639,6 @@  unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 }
 EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
 
-unsigned long arch_get_unmapped_area(struct file *filp,
-				     unsigned long addr,
-				     unsigned long len,
-				     unsigned long pgoff,
-				     unsigned long flags)
-{
-	return slice_get_unmapped_area(addr, len, flags,
-				       mm_ctx_user_psize(&current->mm->context), 0);
-}
-
-unsigned long arch_get_unmapped_area_topdown(struct file *filp,
-					     const unsigned long addr0,
-					     const unsigned long len,
-					     const unsigned long pgoff,
-					     const unsigned long flags)
-{
-	return slice_get_unmapped_area(addr0, len, flags,
-				       mm_ctx_user_psize(&current->mm->context), 1);
-}
-
 unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned char *psizes;