
[v2,08/16] mm/vmemmap: Improve vmemmap_can_optimize and allow architectures to override

Message ID 20230616110826.344417-9-aneesh.kumar@linux.ibm.com (mailing list archive)
State Changes Requested
Series Add support for DAX vmemmap optimization for ppc64

Commit Message

Aneesh Kumar K V June 16, 2023, 11:08 a.m. UTC
DAX vmemmap optimization requires a minimum of 2 PAGE_SIZE areas within
the vmemmap such that the tail page mapping can point to the second
PAGE_SIZE area. Enforce that in the vmemmap_can_optimize() function.

Architectures like powerpc also want to enable vmemmap optimization
conditionally (only with radix MMU translation). Hence allow architecture
override.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 include/linux/mm.h | 30 ++++++++++++++++++++++++++----
 mm/mm_init.c       |  2 +-
 2 files changed, 27 insertions(+), 5 deletions(-)
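
To make the vmemmap-page arithmetic behind the 2-page requirement concrete,
here is a small standalone sketch (PAGE_SIZE = 4096 and
sizeof(struct page) = 64 are assumed values for illustration, not taken
from the patch):

	#include <stdio.h>

	#define PAGE_SHIFT		12	/* assumed 4K base pages */
	#define STRUCT_PAGE_SIZE	64	/* assumed sizeof(struct page) */
	#define VMEMMAP_RESERVE_NR	2

	/* mirrors the nr_vmemmap_pages computation in __vmemmap_can_optimize() */
	static int can_optimize(unsigned long nr_pages)
	{
		unsigned long nr_vmemmap_pages =
			(nr_pages * STRUCT_PAGE_SIZE) >> PAGE_SHIFT;

		return nr_vmemmap_pages > VMEMMAP_RESERVE_NR;
	}

	int main(void)
	{
		/* 2M compound page: 512 base pages -> 8 vmemmap pages -> optimizable */
		printf("2M:  %d\n", can_optimize(512));
		/* 64K compound page: 16 base pages -> 0 full vmemmap pages -> not */
		printf("64K: %d\n", can_optimize(16));
		return 0;
	}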

Comments

Joao Martins June 20, 2023, 11:53 a.m. UTC | #1
On 16/06/2023 12:08, Aneesh Kumar K.V wrote:
> DAX vmemmap optimization requires a minimum of 2 PAGE_SIZE areas within
> the vmemmap such that the tail page mapping can point to the second
> PAGE_SIZE area. Enforce that in the vmemmap_can_optimize() function.
> 
> Architectures like powerpc also want to enable vmemmap optimization
> conditionally (only with radix MMU translation). Hence allow architecture
> override.
> 
This makes sense. The enforcement here is not just for correctness but also
because you want to use VMEMMAP_RESERVE_NR, presumably?

I would suggest splitting this into two patches, one for the refactor and
another for the override, but I don't feel particularly strongly about it.

> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
>  include/linux/mm.h | 30 ++++++++++++++++++++++++++----
>  mm/mm_init.c       |  2 +-
>  2 files changed, 27 insertions(+), 5 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 27ce77080c79..9a45e61cd83f 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -31,6 +31,8 @@
>  #include <linux/memremap.h>
>  #include <linux/slab.h>
>  
> +#include <asm/page.h>
> +

Why is this include needed?

>  struct mempolicy;
>  struct anon_vma;
>  struct anon_vma_chain;
> @@ -3550,13 +3552,33 @@ void vmemmap_free(unsigned long start, unsigned long end,
>  		struct vmem_altmap *altmap);
>  #endif
>  
> +#define VMEMMAP_RESERVE_NR	2

see below

>  #ifdef CONFIG_ARCH_WANT_OPTIMIZE_VMEMMAP
> -static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
> -					   struct dev_pagemap *pgmap)
> +static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
> +					  struct dev_pagemap *pgmap)
>  {
> -	return is_power_of_2(sizeof(struct page)) &&
> -		pgmap && (pgmap_vmemmap_nr(pgmap) > 1) && !altmap;
> +	if (pgmap) {
> +		unsigned long nr_pages;
> +		unsigned long nr_vmemmap_pages;
> +
> +		nr_pages = pgmap_vmemmap_nr(pgmap);
> +		nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
> +		/*
> +		 * For vmemmap optimization with DAX we need minimum 2 vmemmap
> +		 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
> +		 */
> +		return is_power_of_2(sizeof(struct page)) &&
> +			(nr_vmemmap_pages > VMEMMAP_RESERVE_NR) && !altmap;
> +	}

It would be more readable (i.e. less indentation) if you just reverse this:

	unsigned long nr_vmemmap_pages;

	if (!pgmap || !is_power_of_2(sizeof(struct page)))
		return false;

	nr_vmemmap_pages = ((pgmap_vmemmap_nr(pgmap) *
			     sizeof(struct page)) >> PAGE_SHIFT);

	/*
	 * For vmemmap optimization with DAX we need minimum 2 vmemmap
	 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
	 */
	return (nr_vmemmap_pages > VMEMMAP_RESERVE_NR) && !altmap;
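
(This also hoists the is_power_of_2() check into the early return, so the
optimizable path reads straight through without extra nesting.)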


> +	return false;
>  }
> +/*
> + * If we don't have an architecture override, use the generic rule
> + */
> +#ifndef vmemmap_can_optimize
> +#define vmemmap_can_optimize __vmemmap_can_optimize
> +#endif
> +

The sparse-vmemmap code is trivial to change to dedup down to a single
vmemmap page (e.g. to align with hugetlb), which the architecture override
would hopefully allow. Which is to say: should VMEMMAP_RESERVE_NR get a
similar override to the above?
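
To illustrate, such an override could follow the same #ifndef pattern the
patch already uses for vmemmap_can_optimize (a hypothetical sketch, not
part of the posted patch):

	/*
	 * Let an architecture pick its own reserve count, e.g. 1 to dedup
	 * down to a single vmemmap page like hugetlb; 2 stays the generic
	 * default.
	 */
	#ifndef VMEMMAP_RESERVE_NR
	#define VMEMMAP_RESERVE_NR	2
	#endif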

>  #else
>  static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
>  					   struct dev_pagemap *pgmap)
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 7f7f9c677854..d1676afc94f1 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -1020,7 +1020,7 @@ static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
>  	if (!vmemmap_can_optimize(altmap, pgmap))
>  		return pgmap_vmemmap_nr(pgmap);
>  
> -	return 2 * (PAGE_SIZE / sizeof(struct page));
> +	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
>  }
>  
>  static void __ref memmap_init_compound(struct page *head,
Aneesh Kumar K V June 20, 2023, 2:29 p.m. UTC | #2
Joao Martins <joao.m.martins@oracle.com> writes:

> On 16/06/2023 12:08, Aneesh Kumar K.V wrote:
>> DAX vmemmap optimization requires a minimum of 2 PAGE_SIZE areas within
>> the vmemmap such that the tail page mapping can point to the second
>> PAGE_SIZE area. Enforce that in the vmemmap_can_optimize() function.
>> 
>> Architectures like powerpc also want to enable vmemmap optimization
>> conditionally (only with radix MMU translation). Hence allow architecture
>> override.
>> 
> This makes sense. The enforcement here is not just for correctness but also
> because you want to use VMEMMAP_RESERVE_NR, presumably?
>
> I would suggest splitting this into two patches, one for the refactor and
> another for the override, but I don't feel particularly strongly about it.
>

I will wait for feedback from others. If others also suggest splitting the
patch, I will do that.

>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>> ---
>>  include/linux/mm.h | 30 ++++++++++++++++++++++++++----
>>  mm/mm_init.c       |  2 +-
>>  2 files changed, 27 insertions(+), 5 deletions(-)
>> 
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index 27ce77080c79..9a45e61cd83f 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -31,6 +31,8 @@
>>  #include <linux/memremap.h>
>>  #include <linux/slab.h>
>>  
>> +#include <asm/page.h>
>> +
>
> Why is this include needed?

That was for PAGE_SHIFT. But we do pull that in through other include
dependencies, so I will see if I can drop it.

>
>>  struct mempolicy;
>>  struct anon_vma;
>>  struct anon_vma_chain;
>> @@ -3550,13 +3552,33 @@ void vmemmap_free(unsigned long start, unsigned long end,
>>  		struct vmem_altmap *altmap);
>>  #endif
>>  
>> +#define VMEMMAP_RESERVE_NR	2
>
> see below
>
>>  #ifdef CONFIG_ARCH_WANT_OPTIMIZE_VMEMMAP
>> -static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
>> -					   struct dev_pagemap *pgmap)
>> +static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
>> +					  struct dev_pagemap *pgmap)
>>  {
>> -	return is_power_of_2(sizeof(struct page)) &&
>> -		pgmap && (pgmap_vmemmap_nr(pgmap) > 1) && !altmap;
>> +	if (pgmap) {
>> +		unsigned long nr_pages;
>> +		unsigned long nr_vmemmap_pages;
>> +
>> +		nr_pages = pgmap_vmemmap_nr(pgmap);
>> +		nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
>> +		/*
>> +		 * For vmemmap optimization with DAX we need minimum 2 vmemmap
>
>
>
>> +		 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
>> +		 */
>> +		return is_power_of_2(sizeof(struct page)) &&
>> +			(nr_vmemmap_pages > VMEMMAP_RESERVE_NR) && !altmap;
>> +	}
>
> It would be more readable (i.e. less indentation) if you just reverse this:
>
> 	unsigned long nr_vmemmap_pages;
>
> 	if (!pgmap || !is_power_of_2(sizeof(struct page)))
> 		return false;
>
> 	nr_vmemmap_pages = ((pgmap_vmemmap_nr(pgmap) *
> 			     sizeof(struct page)) >> PAGE_SHIFT);
>
> 	/*
> 	 * For vmemmap optimization with DAX we need minimum 2 vmemmap
> 	 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
> 	 */
> 	return (nr_vmemmap_pages > VMEMMAP_RESERVE_NR) && !altmap;
>
>

Will update



>> +	return false;
>>  }
>> +/*
>> + * If we don't have an architecture override, use the generic rule
>> + */
>> +#ifndef vmemmap_can_optimize
>> +#define vmemmap_can_optimize __vmemmap_can_optimize
>> +#endif
>> +
>
> The sparse-vmemmap code is trivial to change to dedup down to a single
> vmemmap page (e.g. to align with hugetlb), which the architecture override
> would hopefully allow. Which is to say: should VMEMMAP_RESERVE_NR get a
> similar override to the above?

VMEMMAP_RESERVE_NR was added to avoid the bare `2` in the code below. The
reason we need the arch override is to add an additional check on ppc64, as
shown in this patch:

https://lore.kernel.org/linux-mm/20230616110826.344417-16-aneesh.kumar@linux.ibm.com/

bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
{
	if (radix_enabled())
		return __vmemmap_can_optimize(altmap, pgmap);

	return false;
}
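
For the override to actually take effect, the architecture also has to
define the macro so the generic fallback in include/linux/mm.h
(#define vmemmap_can_optimize __vmemmap_can_optimize) is skipped. A minimal
sketch of the ppc64 side (the exact header placement here is an assumption;
the linked patch has the real version):

	/* in the ppc64 pgtable header (sketch): defining the macro
	 * suppresses the generic fallback in include/linux/mm.h
	 */
	#define vmemmap_can_optimize vmemmap_can_optimize
	bool vmemmap_can_optimize(struct vmem_altmap *altmap,
				  struct dev_pagemap *pgmap);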


>
>>  #else
>>  static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
>>  					   struct dev_pagemap *pgmap)
>> diff --git a/mm/mm_init.c b/mm/mm_init.c
>> index 7f7f9c677854..d1676afc94f1 100644
>> --- a/mm/mm_init.c
>> +++ b/mm/mm_init.c
>> @@ -1020,7 +1020,7 @@ static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
>>  	if (!vmemmap_can_optimize(altmap, pgmap))
>>  		return pgmap_vmemmap_nr(pgmap);
>>  
>> -	return 2 * (PAGE_SIZE / sizeof(struct page));
>> +	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
>>  }
>>  
>>  static void __ref memmap_init_compound(struct page *head,

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 27ce77080c79..9a45e61cd83f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -31,6 +31,8 @@ 
 #include <linux/memremap.h>
 #include <linux/slab.h>
 
+#include <asm/page.h>
+
 struct mempolicy;
 struct anon_vma;
 struct anon_vma_chain;
@@ -3550,13 +3552,33 @@ void vmemmap_free(unsigned long start, unsigned long end,
 		struct vmem_altmap *altmap);
 #endif
 
+#define VMEMMAP_RESERVE_NR	2
 #ifdef CONFIG_ARCH_WANT_OPTIMIZE_VMEMMAP
-static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
-					   struct dev_pagemap *pgmap)
+static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
+					  struct dev_pagemap *pgmap)
 {
-	return is_power_of_2(sizeof(struct page)) &&
-		pgmap && (pgmap_vmemmap_nr(pgmap) > 1) && !altmap;
+	if (pgmap) {
+		unsigned long nr_pages;
+		unsigned long nr_vmemmap_pages;
+
+		nr_pages = pgmap_vmemmap_nr(pgmap);
+		nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
+		/*
+		 * For vmemmap optimization with DAX we need minimum 2 vmemmap
+		 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
+		 */
+		return is_power_of_2(sizeof(struct page)) &&
+			(nr_vmemmap_pages > VMEMMAP_RESERVE_NR) && !altmap;
+	}
+	return false;
 }
+/*
+ * If we don't have an architecture override, use the generic rule
+ */
+#ifndef vmemmap_can_optimize
+#define vmemmap_can_optimize __vmemmap_can_optimize
+#endif
+
 #else
 static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
 					   struct dev_pagemap *pgmap)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 7f7f9c677854..d1676afc94f1 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1020,7 +1020,7 @@ static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
 	if (!vmemmap_can_optimize(altmap, pgmap))
 		return pgmap_vmemmap_nr(pgmap);
 
-	return 2 * (PAGE_SIZE / sizeof(struct page));
+	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
 }
 
 static void __ref memmap_init_compound(struct page *head,