[RFC,1/5] powerpc/64s: update_mmu_cache inline the radix test

Message ID 20201219234813.830747-2-npiggin@gmail.com (mailing list archive)
State RFC
Series powerpc/64s/radix: Use non-atomic ops for PTE

Commit Message

Nicholas Piggin Dec. 19, 2020, 11:48 p.m. UTC
This allows the function to be entirely no-oped if hash support is
compiled out (not yet possible).
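
Hypothetically, once hash support can be configured out, radix_enabled()
becomes a compile-time constant and the compiler can drop the whole body.
A sketch of that end state (CONFIG_PPC_HASH_MMU is a made-up option name;
nothing like it exists at the time of this series):

/* hypothetical radix-only build: hash support compiled out */
#ifndef CONFIG_PPC_HASH_MMU
#define radix_enabled() (1)
#endif

static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *ptep)
{
	if (radix_enabled())	/* constant true: reduces to a no-op */
		return;
	hash__update_mmu_cache(vma, address, ptep);	/* dead code, elided */
}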

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/include/asm/book3s/pgtable.h | 11 ++++++++++-
 arch/powerpc/mm/book3s32/mmu.c            |  4 ++--
 arch/powerpc/mm/book3s64/hash_utils.c     |  7 ++-----
 3 files changed, 14 insertions(+), 8 deletions(-)

Comments

Christophe Leroy Dec. 20, 2020, 11:37 a.m. UTC | #1
On 20/12/2020 at 00:48, Nicholas Piggin wrote:
> This allows the function to be entirely no-oped if hash support is
> compiled out (not yet possible).
> 
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>   arch/powerpc/include/asm/book3s/pgtable.h | 11 ++++++++++-
>   arch/powerpc/mm/book3s32/mmu.c            |  4 ++--
>   arch/powerpc/mm/book3s64/hash_utils.c     |  7 ++-----
>   3 files changed, 14 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
> index 0e1263455d73..914e9fc7b069 100644
> --- a/arch/powerpc/include/asm/book3s/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/pgtable.h
> @@ -35,7 +35,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
>    * corresponding HPTE into the hash table ahead of time, instead of
>    * waiting for the inevitable extra hash-table miss exception.
>    */
> -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
> +void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
> +
> +static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
> +{
> +#ifdef CONFIG_PPC64

You shouldn't need that ifdef. radix_enabled() is always defined.
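
For illustration, the helper then reduces to the following (a sketch of
the suggested change; on platforms without radix support, radix_enabled()
should evaluate to a compile-time false, so the hash call is kept):

static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	if (radix_enabled())	/* compile-time false where radix is unsupported */
		return;
	hash__update_mmu_cache(vma, address, ptep);
}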

> +	if (radix_enabled())
> +		return;
> +#endif
> +	hash__update_mmu_cache(vma, address, ptep);
> +}
>   
>   #endif /* __ASSEMBLY__ */
>   #endif
> diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
> index 859e5bd603ac..c5a570ca37ff 100644
> --- a/arch/powerpc/mm/book3s32/mmu.c
> +++ b/arch/powerpc/mm/book3s32/mmu.c
> @@ -325,8 +325,8 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea)
>    *
>    * This must always be called with the pte lock held.
>    */
> -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
> -		      pte_t *ptep)
> +void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
> +			    pte_t *ptep)

Now the limit is 100 chars per line. This should fit on a single line I think.
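
For reference, the joined form is 91 characters by my count, so it fits:

void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)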

>   {
>   	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
>   		return;
> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
> index 73b06adb6eeb..d52a3dee7cf2 100644
> --- a/arch/powerpc/mm/book3s64/hash_utils.c
> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
> @@ -1667,8 +1667,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
>    *
>    * This must always be called with the pte lock held.
>    */
> -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
> -		      pte_t *ptep)
> +void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
> +			    pte_t *ptep)

Now the limit is 100 chars per line. This should fit on a single line I think.

>   {
>   	/*
>   	 * We don't need to worry about _PAGE_PRESENT here because we are
> @@ -1677,9 +1677,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
>   	unsigned long trap;
>   	bool is_exec;
>   
> -	if (radix_enabled())
> -		return;
> -
>   	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
>   	if (!pte_young(*ptep) || address >= TASK_SIZE)
>   		return;
>
Nicholas Piggin Dec. 22, 2020, 3:32 a.m. UTC | #2
Excerpts from Christophe Leroy's message of December 20, 2020 9:37 pm:
> 
> 
> On 20/12/2020 at 00:48, Nicholas Piggin wrote:
>> This allows the function to be entirely no-oped if hash support is
>> compiled out (not yet possible).
>> 
>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>> ---
>>   arch/powerpc/include/asm/book3s/pgtable.h | 11 ++++++++++-
>>   arch/powerpc/mm/book3s32/mmu.c            |  4 ++--
>>   arch/powerpc/mm/book3s64/hash_utils.c     |  7 ++-----
>>   3 files changed, 14 insertions(+), 8 deletions(-)
>> 
>> diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
>> index 0e1263455d73..914e9fc7b069 100644
>> --- a/arch/powerpc/include/asm/book3s/pgtable.h
>> +++ b/arch/powerpc/include/asm/book3s/pgtable.h
>> @@ -35,7 +35,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
>>    * corresponding HPTE into the hash table ahead of time, instead of
>>    * waiting for the inevitable extra hash-table miss exception.
>>    */
>> -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
>> +void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
>> +
>> +static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
>> +{
>> +#ifdef CONFIG_PPC64
> 
> You shouldn't need that ifdef. radix_enabled() is always defined.

True, thanks.

>> +	if (radix_enabled())
>> +		return;
>> +#endif
>> +	hash__update_mmu_cache(vma, address, ptep);
>> +}
>>   
>>   #endif /* __ASSEMBLY__ */
>>   #endif
>> diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
>> index 859e5bd603ac..c5a570ca37ff 100644
>> --- a/arch/powerpc/mm/book3s32/mmu.c
>> +++ b/arch/powerpc/mm/book3s32/mmu.c
>> @@ -325,8 +325,8 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea)
>>    *
>>    * This must always be called with the pte lock held.
>>    */
>> -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
>> -		      pte_t *ptep)
>> +void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
>> +			    pte_t *ptep)
> 
> Now the limit is 100 chars per line. This should fit on a single line I think.

I never quite know what to do here. The Linux limit is 100, but 80 is
still preferred AFAIK (i.e., don't make lots of lines beyond 80);
80-100 can be used in some cases where splitting the line doesn't
improve readability at 80 columns.

In this case keeping it on one line does (slightly) improve readability.

Thanks,
Nick

> 
>>   {
>>   	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
>>   		return;
>> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
>> index 73b06adb6eeb..d52a3dee7cf2 100644
>> --- a/arch/powerpc/mm/book3s64/hash_utils.c
>> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
>> @@ -1667,8 +1667,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
>>    *
>>    * This must always be called with the pte lock held.
>>    */
>> -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
>> -		      pte_t *ptep)
>> +void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
>> +			    pte_t *ptep)
> 
> Now the limit is 100 chars per line. This should fit on a single line I think.
> 
>>   {
>>   	/*
>>   	 * We don't need to worry about _PAGE_PRESENT here because we are
>> @@ -1677,9 +1677,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
>>   	unsigned long trap;
>>   	bool is_exec;
>>   
>> -	if (radix_enabled())
>> -		return;
>> -
>>   	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
>>   	if (!pte_young(*ptep) || address >= TASK_SIZE)
>>   		return;
>> 
>

Patch

diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 0e1263455d73..914e9fc7b069 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -35,7 +35,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  * corresponding HPTE into the hash table ahead of time, instead of
  * waiting for the inevitable extra hash-table miss exception.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+#ifdef CONFIG_PPC64
+	if (radix_enabled())
+		return;
+#endif
+	hash__update_mmu_cache(vma, address, ptep);
+}
 
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 859e5bd603ac..c5a570ca37ff 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -325,8 +325,8 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea)
  *
  * This must always be called with the pte lock held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *ptep)
+void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+			    pte_t *ptep)
 {
 	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return;
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 73b06adb6eeb..d52a3dee7cf2 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1667,8 +1667,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
  *
  * This must always be called with the pte lock held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *ptep)
+void hash__update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+			    pte_t *ptep)
 {
 	/*
 	 * We don't need to worry about _PAGE_PRESENT here because we are
@@ -1677,9 +1677,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	unsigned long trap;
 	bool is_exec;
 
-	if (radix_enabled())
-		return;
-
 	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
 	if (!pte_young(*ptep) || address >= TASK_SIZE)
 		return;