
[2/6] s390/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT

Message ID: 20220603101411.488970-3-anshuman.khandual@arm.com
State: New
Series: mm/mmap: Enable more platforms with ARCH_HAS_VM_GET_PAGE_PROT

Commit Message

Anshuman Khandual June 3, 2022, 10:14 a.m. UTC
This defines and exports a platform-specific custom vm_get_page_prot() by
subscribing to ARCH_HAS_VM_GET_PAGE_PROT. Subsequently, all the __SXXX and
__PXXX macros, which are no longer needed, can be dropped.

Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: linux-s390@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Acked-by: Sven Schnelle <svens@linux.ibm.com>
Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/s390/Kconfig               |  1 +
 arch/s390/include/asm/pgtable.h | 17 -----------------
 arch/s390/mm/mmap.c             | 33 +++++++++++++++++++++++++++++++++
 3 files changed, 34 insertions(+), 17 deletions(-)
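
For context, selecting ARCH_HAS_VM_GET_PAGE_PROT tells the generic mm code that
the architecture supplies its own vm_get_page_prot(); the generic fallback,
which indexes a protection_map[] table built from the __P/__S macros, is then
compiled out. A simplified sketch of that generic side (the exact upstream
implementation may differ slightly) looks roughly like this:

#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
/* Generic fallback: index the __P/__S-derived protection_map[] with the
 * low vm_flags bits (read/write/exec/shared). */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);
#endif	/* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */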

Comments

Christophe Leroy June 3, 2022, 12:25 p.m. UTC | #1
Le 03/06/2022 à 12:14, Anshuman Khandual a écrit :
> This defines and exports a platform-specific custom vm_get_page_prot() by
> subscribing to ARCH_HAS_VM_GET_PAGE_PROT. Subsequently, all the __SXXX and
> __PXXX macros, which are no longer needed, can be dropped.
> 
> Cc: Heiko Carstens <hca@linux.ibm.com>
> Cc: Vasily Gorbik <gor@linux.ibm.com>
> Cc: linux-s390@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
> Acked-by: Sven Schnelle <svens@linux.ibm.com>
> Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
>   arch/s390/Kconfig               |  1 +
>   arch/s390/include/asm/pgtable.h | 17 -----------------
>   arch/s390/mm/mmap.c             | 33 +++++++++++++++++++++++++++++++++
>   3 files changed, 34 insertions(+), 17 deletions(-)
> 
> diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
> index b17239ae7bd4..cdcf678deab1 100644
> --- a/arch/s390/Kconfig
> +++ b/arch/s390/Kconfig
> @@ -81,6 +81,7 @@ config S390
>   	select ARCH_HAS_SYSCALL_WRAPPER
>   	select ARCH_HAS_UBSAN_SANITIZE_ALL
>   	select ARCH_HAS_VDSO_DATA
> +	select ARCH_HAS_VM_GET_PAGE_PROT
>   	select ARCH_HAVE_NMI_SAFE_CMPXCHG
>   	select ARCH_INLINE_READ_LOCK
>   	select ARCH_INLINE_READ_LOCK_BH
> diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
> index a397b072a580..c63a05b5368a 100644
> --- a/arch/s390/include/asm/pgtable.h
> +++ b/arch/s390/include/asm/pgtable.h
> @@ -424,23 +424,6 @@ static inline int is_module_addr(void *addr)
>    * implies read permission.
>    */
>            /*xwr*/
> -#define __P000	PAGE_NONE
> -#define __P001	PAGE_RO
> -#define __P010	PAGE_RO
> -#define __P011	PAGE_RO
> -#define __P100	PAGE_RX
> -#define __P101	PAGE_RX
> -#define __P110	PAGE_RX
> -#define __P111	PAGE_RX
> -
> -#define __S000	PAGE_NONE
> -#define __S001	PAGE_RO
> -#define __S010	PAGE_RW
> -#define __S011	PAGE_RW
> -#define __S100	PAGE_RX
> -#define __S101	PAGE_RX
> -#define __S110	PAGE_RWX
> -#define __S111	PAGE_RWX
>   
>   /*
>    * Segment entry (large page) protection definitions.
> diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
> index d545f5c39f7e..11d75b8d5ec0 100644
> --- a/arch/s390/mm/mmap.c
> +++ b/arch/s390/mm/mmap.c
> @@ -188,3 +188,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
>   		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
>   	}
>   }
> +
> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
> +{
> +	switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
> +	case VM_NONE:
> +		return PAGE_NONE;
> +	case VM_READ:
> +	case VM_WRITE:
> +	case VM_WRITE | VM_READ:
> +		return PAGE_RO;
> +	case VM_EXEC:
> +	case VM_EXEC | VM_READ:
> +	case VM_EXEC | VM_WRITE:
> +	case VM_EXEC | VM_WRITE | VM_READ:
> +		return PAGE_RX;
> +	case VM_SHARED:
> +		return PAGE_NONE;
> +	case VM_SHARED | VM_READ:
> +		return PAGE_RO;
> +	case VM_SHARED | VM_WRITE:
> +	case VM_SHARED | VM_WRITE | VM_READ:
> +		return PAGE_RW;
> +	case VM_SHARED | VM_EXEC:
> +	case VM_SHARED | VM_EXEC | VM_READ:
> +		return PAGE_RX;
> +	case VM_SHARED | VM_EXEC | VM_WRITE:
> +	case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
> +		return PAGE_RWX;
> +	default:
> +		BUILD_BUG();
> +	}
> +}
> +EXPORT_SYMBOL(vm_get_page_prot);

Wasn't it demonstrated in previous discussions that a switch/case is 
suboptimal compared to a table cell read?

In order to get rid of the __Sxxx/__Pxxx macros, my preference would be to 
have architectures provide their own protection_map[] table, and to keep 
the generic vm_get_page_prot() for the architectures that don't need a 
specific version of it.

This comment applies to all following patches as well.
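
For reference, a rough sketch of the table-based arrangement described above,
reusing the vm_flags-to-pgprot mapping from the switch statement in this patch
(the symbol name, its linkage and the file placement are illustrative only):

/* Sketch: arch/s390/mm/mmap.c providing the table instead of a switch. */
pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_RO,
	[VM_WRITE]					= PAGE_RO,
	[VM_WRITE | VM_READ]				= PAGE_RO,
	[VM_EXEC]					= PAGE_RX,
	[VM_EXEC | VM_READ]				= PAGE_RX,
	[VM_EXEC | VM_WRITE]				= PAGE_RX,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_RX,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_RO,
	[VM_SHARED | VM_WRITE]				= PAGE_RW,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_RW,
	[VM_SHARED | VM_EXEC]				= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};

With such a table, the generic vm_get_page_prot() (a plain protection_map[]
lookup, as sketched earlier on this page) could keep serving s390 unchanged,
and the lookup reduces to a simple indexed load rather than the compare chain
or jump table a switch statement may compile to.
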
Anshuman Khandual June 5, 2022, 9:58 a.m. UTC | #2
On 6/3/22 17:55, Christophe Leroy wrote:
> 
> 
> Le 03/06/2022 à 12:14, Anshuman Khandual a écrit :
>> This defines and exports a platform-specific custom vm_get_page_prot() by
>> subscribing to ARCH_HAS_VM_GET_PAGE_PROT. Subsequently, all the __SXXX and
>> __PXXX macros, which are no longer needed, can be dropped.
>>
>> Cc: Heiko Carstens <hca@linux.ibm.com>
>> Cc: Vasily Gorbik <gor@linux.ibm.com>
>> Cc: linux-s390@vger.kernel.org
>> Cc: linux-kernel@vger.kernel.org
>> Acked-by: Sven Schnelle <svens@linux.ibm.com>
>> Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>> ---
>>   arch/s390/Kconfig               |  1 +
>>   arch/s390/include/asm/pgtable.h | 17 -----------------
>>   arch/s390/mm/mmap.c             | 33 +++++++++++++++++++++++++++++++++
>>   3 files changed, 34 insertions(+), 17 deletions(-)
>>
>> diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
>> index b17239ae7bd4..cdcf678deab1 100644
>> --- a/arch/s390/Kconfig
>> +++ b/arch/s390/Kconfig
>> @@ -81,6 +81,7 @@ config S390
>>   	select ARCH_HAS_SYSCALL_WRAPPER
>>   	select ARCH_HAS_UBSAN_SANITIZE_ALL
>>   	select ARCH_HAS_VDSO_DATA
>> +	select ARCH_HAS_VM_GET_PAGE_PROT
>>   	select ARCH_HAVE_NMI_SAFE_CMPXCHG
>>   	select ARCH_INLINE_READ_LOCK
>>   	select ARCH_INLINE_READ_LOCK_BH
>> diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
>> index a397b072a580..c63a05b5368a 100644
>> --- a/arch/s390/include/asm/pgtable.h
>> +++ b/arch/s390/include/asm/pgtable.h
>> @@ -424,23 +424,6 @@ static inline int is_module_addr(void *addr)
>>    * implies read permission.
>>    */
>>            /*xwr*/
>> -#define __P000	PAGE_NONE
>> -#define __P001	PAGE_RO
>> -#define __P010	PAGE_RO
>> -#define __P011	PAGE_RO
>> -#define __P100	PAGE_RX
>> -#define __P101	PAGE_RX
>> -#define __P110	PAGE_RX
>> -#define __P111	PAGE_RX
>> -
>> -#define __S000	PAGE_NONE
>> -#define __S001	PAGE_RO
>> -#define __S010	PAGE_RW
>> -#define __S011	PAGE_RW
>> -#define __S100	PAGE_RX
>> -#define __S101	PAGE_RX
>> -#define __S110	PAGE_RWX
>> -#define __S111	PAGE_RWX
>>   
>>   /*
>>    * Segment entry (large page) protection definitions.
>> diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
>> index d545f5c39f7e..11d75b8d5ec0 100644
>> --- a/arch/s390/mm/mmap.c
>> +++ b/arch/s390/mm/mmap.c
>> @@ -188,3 +188,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
>>   		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
>>   	}
>>   }
>> +
>> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
>> +{
>> +	switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
>> +	case VM_NONE:
>> +		return PAGE_NONE;
>> +	case VM_READ:
>> +	case VM_WRITE:
>> +	case VM_WRITE | VM_READ:
>> +		return PAGE_RO;
>> +	case VM_EXEC:
>> +	case VM_EXEC | VM_READ:
>> +	case VM_EXEC | VM_WRITE:
>> +	case VM_EXEC | VM_WRITE | VM_READ:
>> +		return PAGE_RX;
>> +	case VM_SHARED:
>> +		return PAGE_NONE;
>> +	case VM_SHARED | VM_READ:
>> +		return PAGE_RO;
>> +	case VM_SHARED | VM_WRITE:
>> +	case VM_SHARED | VM_WRITE | VM_READ:
>> +		return PAGE_RW;
>> +	case VM_SHARED | VM_EXEC:
>> +	case VM_SHARED | VM_EXEC | VM_READ:
>> +		return PAGE_RX;
>> +	case VM_SHARED | VM_EXEC | VM_WRITE:
>> +	case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
>> +		return PAGE_RWX;
>> +	default:
>> +		BUILD_BUG();
>> +	}
>> +}
>> +EXPORT_SYMBOL(vm_get_page_prot);
> 
> Wasn't it demonstrated in previous discussions that a switch/case is 
> suboptimal compared to a table cell read?

Right, but all these platform patches here were acked by the respective
platform folks. I assumed that they might have valued the simplicity of
the switch/case statements, while also dropping the __SXXX/__PXXX
macros, which is the final objective. Looks like that assumption was
not accurate.

> 
> In order to get rid of the __Sxxx/__Pxxx macros, my preference would be to 
> have architectures provide their own protection_map[] table, and to keep 
> the generic vm_get_page_prot() for the architectures that don't need a 
> specific version of it.

I will try and rework the patches as suggested.

> 
> This comment applies to all following patches as well.

Patch

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b17239ae7bd4..cdcf678deab1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -81,6 +81,7 @@ config S390
 	select ARCH_HAS_SYSCALL_WRAPPER
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARCH_HAS_VDSO_DATA
+	select ARCH_HAS_VM_GET_PAGE_PROT
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_INLINE_READ_LOCK
 	select ARCH_INLINE_READ_LOCK_BH
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index a397b072a580..c63a05b5368a 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -424,23 +424,6 @@ static inline int is_module_addr(void *addr)
  * implies read permission.
  */
          /*xwr*/
-#define __P000	PAGE_NONE
-#define __P001	PAGE_RO
-#define __P010	PAGE_RO
-#define __P011	PAGE_RO
-#define __P100	PAGE_RX
-#define __P101	PAGE_RX
-#define __P110	PAGE_RX
-#define __P111	PAGE_RX
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_RO
-#define __S010	PAGE_RW
-#define __S011	PAGE_RW
-#define __S100	PAGE_RX
-#define __S101	PAGE_RX
-#define __S110	PAGE_RWX
-#define __S111	PAGE_RWX
 
 /*
  * Segment entry (large page) protection definitions.
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index d545f5c39f7e..11d75b8d5ec0 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -188,3 +188,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+	case VM_NONE:
+		return PAGE_NONE;
+	case VM_READ:
+	case VM_WRITE:
+	case VM_WRITE | VM_READ:
+		return PAGE_RO;
+	case VM_EXEC:
+	case VM_EXEC | VM_READ:
+	case VM_EXEC | VM_WRITE:
+	case VM_EXEC | VM_WRITE | VM_READ:
+		return PAGE_RX;
+	case VM_SHARED:
+		return PAGE_NONE;
+	case VM_SHARED | VM_READ:
+		return PAGE_RO;
+	case VM_SHARED | VM_WRITE:
+	case VM_SHARED | VM_WRITE | VM_READ:
+		return PAGE_RW;
+	case VM_SHARED | VM_EXEC:
+	case VM_SHARED | VM_EXEC | VM_READ:
+		return PAGE_RX;
+	case VM_SHARED | VM_EXEC | VM_WRITE:
+	case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+		return PAGE_RWX;
+	default:
+		BUILD_BUG();
+	}
+}
+EXPORT_SYMBOL(vm_get_page_prot);
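
The EXPORT_SYMBOL() is kept because modular code may call vm_get_page_prot()
directly, for example when a driver derives vma->vm_page_prot from the VMA
flags in its mmap handler. A hypothetical module-side caller (all demo_* names
are made up for illustration) might look like:

#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long demo_device_base;	/* hypothetical, set at probe time */

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Hypothetical: physical page frame backing this device mapping. */
	unsigned long pfn = demo_device_base >> PAGE_SHIFT;

	/* Recompute the protection from the VMA flags using the arch-aware
	 * helper exported above. */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}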