[04/10,net-next] page_pool: Add API to update numa node and flush page caches

Message ID: 20191016225028.2100206-5-jonathan.lemon@gmail.com
State: Changes Requested
Delegated to: David Miller
Series: page_pool cleanups

Commit Message

Jonathan Lemon Oct. 16, 2019, 10:50 p.m. UTC
From: Saeed Mahameed <saeedm@mellanox.com>

Add page_pool_update_nid() to be called from drivers when they detect
numa node changes.

It will do:
1) Flush the pool's page cache and ptr_ring.
2) Update page pool nid value to start allocating from the new numa
node.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
---
 include/net/page_pool.h | 10 ++++++++++
 net/core/page_pool.c    | 16 +++++++++++-----
 2 files changed, 21 insertions(+), 5 deletions(-)
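
For illustration only (not part of this patch): a minimal sketch of the
intended driver-side call site. The check is meant to live in the driver's
NAPI poll routine, where it is safe to touch the pool's alloc cache. The
mydrv_* names and structures below are hypothetical; only
page_pool_update_nid() and numa_mem_id() come from this patch and the
existing kernel respectively.

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_rx_queue *rq = container_of(napi, struct mydrv_rx_queue,
						 napi);
	int work_done;

	/* Cheap comparison; only flushes and updates the pool's nid when
	 * the node running this poll loop actually changed.
	 */
	page_pool_update_nid(rq->page_pool, numa_mem_id());

	work_done = mydrv_process_rx(rq, budget);
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}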

Comments

Ilias Apalodimas Oct. 17, 2019, 12:06 p.m. UTC | #1
Hi Saeed,

On Wed, Oct 16, 2019 at 03:50:22PM -0700, Jonathan Lemon wrote:
> From: Saeed Mahameed <saeedm@mellanox.com>
> 
> Add page_pool_update_nid() to be called from drivers when they detect
> numa node changes.
> 
> It will do:
> 1) Flush the pool's page cache and ptr_ring.
> 2) Update page pool nid value to start allocating from the new numa
> node.
> 
> Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
> Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
> ---
>  include/net/page_pool.h | 10 ++++++++++
>  net/core/page_pool.c    | 16 +++++++++++-----
>  2 files changed, 21 insertions(+), 5 deletions(-)
> 
> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> index 2cbcdbdec254..fb13cf6055ff 100644
> --- a/include/net/page_pool.h
> +++ b/include/net/page_pool.h
> @@ -226,4 +226,14 @@ static inline bool page_pool_put(struct page_pool *pool)
>  	return refcount_dec_and_test(&pool->user_cnt);
>  }
>  
> +/* Only safe from napi context or when user guarantees it is thread safe */
> +void __page_pool_flush(struct page_pool *pool);

This should be called per packet, right? Any noticeable impact on performance?

> +static inline void page_pool_update_nid(struct page_pool *pool, int new_nid)
> +{
> +	if (unlikely(pool->p.nid != new_nid)) {
> +		/* TODO: Add statistics/trace */
> +		__page_pool_flush(pool);
> +		pool->p.nid = new_nid;
> +	}
> +}
>  #endif /* _NET_PAGE_POOL_H */
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 5bc65587f1c4..678cf85f273a 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -373,16 +373,13 @@ void __page_pool_free(struct page_pool *pool)
>  }
>  EXPORT_SYMBOL(__page_pool_free);
>  
> -/* Request to shutdown: release pages cached by page_pool, and check
> - * for in-flight pages
> - */
> -bool __page_pool_request_shutdown(struct page_pool *pool)
> +void __page_pool_flush(struct page_pool *pool)
>  {
>  	struct page *page;
>  
>  	/* Empty alloc cache, assume caller made sure this is
>  	 * no-longer in use, and page_pool_alloc_pages() cannot be
> -	 * call concurrently.
> +	 * called concurrently.
>  	 */
>  	while (pool->alloc.count) {
>  		page = pool->alloc.cache[--pool->alloc.count];
> @@ -393,6 +390,15 @@ bool __page_pool_request_shutdown(struct page_pool *pool)
>  	 * be in-flight.
>  	 */
>  	__page_pool_empty_ring(pool);
> +}
> +EXPORT_SYMBOL(__page_pool_flush);

A later patch removes this; do we actually need it here?

> +
> +/* Request to shutdown: release pages cached by page_pool, and check
> + * for in-flight pages
> + */
> +bool __page_pool_request_shutdown(struct page_pool *pool)
> +{
> +	__page_pool_flush(pool);
>  
>  	return __page_pool_safe_to_destroy(pool);
>  }
> -- 
> 2.17.1
> 


Thanks
/Ilias
Saeed Mahameed Oct. 18, 2019, 9:07 p.m. UTC | #2
On Thu, 2019-10-17 at 15:06 +0300, Ilias Apalodimas wrote:
> Hi Saeed,
> 
> On Wed, Oct 16, 2019 at 03:50:22PM -0700, Jonathan Lemon wrote:
> > From: Saeed Mahameed <saeedm@mellanox.com>
> > 
> > Add page_pool_update_nid() to be called from drivers when they
> > detect
> > numa node changes.
> > 
> > It will do:
> > 1) Flush the pool's page cache and ptr_ring.
> > 2) Update page pool nid value to start allocating from the new numa
> > node.
> > 
> > Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
> > Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
> > ---
> >  include/net/page_pool.h | 10 ++++++++++
> >  net/core/page_pool.c    | 16 +++++++++++-----
> >  2 files changed, 21 insertions(+), 5 deletions(-)
> > 
> > diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> > index 2cbcdbdec254..fb13cf6055ff 100644
> > --- a/include/net/page_pool.h
> > +++ b/include/net/page_pool.h
> > @@ -226,4 +226,14 @@ static inline bool page_pool_put(struct
> > page_pool *pool)
> >  	return refcount_dec_and_test(&pool->user_cnt);
> >  }
> >  
> > +/* Only safe from napi context or when user guarantees it is
> > thread safe */
> > +void __page_pool_flush(struct page_pool *pool);
> 
> This should be called per packet right? Any noticeable impact on
> performance?
> 
No, once per NAPI poll, and only if a change in NUMA node is detected, so
very, very rare!

> > +static inline void page_pool_update_nid(struct page_pool *pool,
> > int new_nid)
> > +{
> > +	if (unlikely(pool->p.nid != new_nid)) {
> > +		/* TODO: Add statistics/trace */
> > +		__page_pool_flush(pool);
> > +		pool->p.nid = new_nid;
> > +	}
> > +}
> >  #endif /* _NET_PAGE_POOL_H */
> > diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> > index 5bc65587f1c4..678cf85f273a 100644
> > --- a/net/core/page_pool.c
> > +++ b/net/core/page_pool.c
> > @@ -373,16 +373,13 @@ void __page_pool_free(struct page_pool *pool)
> >  }
> >  EXPORT_SYMBOL(__page_pool_free);
> >  
> > -/* Request to shutdown: release pages cached by page_pool, and
> > check
> > - * for in-flight pages
> > - */
> > -bool __page_pool_request_shutdown(struct page_pool *pool)
> > +void __page_pool_flush(struct page_pool *pool)
> >  {
> >  	struct page *page;
> >  
> >  	/* Empty alloc cache, assume caller made sure this is
> >  	 * no-longer in use, and page_pool_alloc_pages() cannot be
> > -	 * call concurrently.
> > +	 * called concurrently.
> >  	 */
> >  	while (pool->alloc.count) {
> >  		page = pool->alloc.cache[--pool->alloc.count];
> > @@ -393,6 +390,15 @@ bool __page_pool_request_shutdown(struct
> > page_pool *pool)
> >  	 * be in-flight.
> >  	 */
> >  	__page_pool_empty_ring(pool);
> > +}
> > +EXPORT_SYMBOL(__page_pool_flush);
> 
> A later patch removes this, do we actually need it here?

I agree. Jonathan changed the design of my last patch in this series, and
this became redundant since he is going to do a lazy release of unwanted
pages rather than flushing the cache.

> 
> > +
> > +/* Request to shutdown: release pages cached by page_pool, and
> > check
> > + * for in-flight pages
> > + */
> > +bool __page_pool_request_shutdown(struct page_pool *pool)
> > +{
> > +	__page_pool_flush(pool);
> >  
> >  	return __page_pool_safe_to_destroy(pool);
> >  }
> > -- 
> > 2.17.1
> > 
> 
> Thanks
> /Ilias
Jonathan Lemon Oct. 18, 2019, 11:38 p.m. UTC | #3
On 18 Oct 2019, at 14:07, Saeed Mahameed wrote:

> On Thu, 2019-10-17 at 15:06 +0300, Ilias Apalodimas wrote:
>> Hi Saeed,
>>
>> On Wed, Oct 16, 2019 at 03:50:22PM -0700, Jonathan Lemon wrote:
>>> From: Saeed Mahameed <saeedm@mellanox.com>
>>>
>>> Add page_pool_update_nid() to be called from drivers when they
>>> detect
>>> numa node changes.
>>>
>>> It will do:
>>> 1) Flush the pool's page cache and ptr_ring.
>>> 2) Update page pool nid value to start allocating from the new numa
>>> node.
>>>
>>> Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
>>> Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
>>> ---
>>>  include/net/page_pool.h | 10 ++++++++++
>>>  net/core/page_pool.c    | 16 +++++++++++-----
>>>  2 files changed, 21 insertions(+), 5 deletions(-)
>>>
>>> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
>>> index 2cbcdbdec254..fb13cf6055ff 100644
>>> --- a/include/net/page_pool.h
>>> +++ b/include/net/page_pool.h
>>> @@ -226,4 +226,14 @@ static inline bool page_pool_put(struct
>>> page_pool *pool)
>>>  	return refcount_dec_and_test(&pool->user_cnt);
>>>  }
>>>
>>> +/* Only safe from napi context or when user guarantees it is
>>> thread safe */
>>> +void __page_pool_flush(struct page_pool *pool);
>>
>> This should be called per packet right? Any noticeable impact on
>> performance?
>>
> no, once per napi and only if a change in numa node is detected, so
> very very rare !
>
>>> +static inline void page_pool_update_nid(struct page_pool *pool,
>>> int new_nid)
>>> +{
>>> +	if (unlikely(pool->p.nid != new_nid)) {
>>> +		/* TODO: Add statistics/trace */
>>> +		__page_pool_flush(pool);
>>> +		pool->p.nid = new_nid;
>>> +	}
>>> +}
>>>  #endif /* _NET_PAGE_POOL_H */
>>> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
>>> index 5bc65587f1c4..678cf85f273a 100644
>>> --- a/net/core/page_pool.c
>>> +++ b/net/core/page_pool.c
>>> @@ -373,16 +373,13 @@ void __page_pool_free(struct page_pool *pool)
>>>  }
>>>  EXPORT_SYMBOL(__page_pool_free);
>>>
>>> -/* Request to shutdown: release pages cached by page_pool, and
>>> check
>>> - * for in-flight pages
>>> - */
>>> -bool __page_pool_request_shutdown(struct page_pool *pool)
>>> +void __page_pool_flush(struct page_pool *pool)
>>>  {
>>>  	struct page *page;
>>>
>>>  	/* Empty alloc cache, assume caller made sure this is
>>>  	 * no-longer in use, and page_pool_alloc_pages() cannot be
>>> -	 * call concurrently.
>>> +	 * called concurrently.
>>>  	 */
>>>  	while (pool->alloc.count) {
>>>  		page = pool->alloc.cache[--pool->alloc.count];
>>> @@ -393,6 +390,15 @@ bool __page_pool_request_shutdown(struct
>>> page_pool *pool)
>>>  	 * be in-flight.
>>>  	 */
>>>  	__page_pool_empty_ring(pool);
>>> +}
>>> +EXPORT_SYMBOL(__page_pool_flush);
>>
>> A later patch removes this, do we actually need it here?
>
> I agree, Jonathan changed the design of my last patch in this series
> and this became redundant as he is going to do lazy release of unwanted
> pages, rather than flushing the cache.

Yeah - I didn't want to take the latency hit when the node changed,
and would prefer to just amortize the cost over time.
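
For illustration only: a minimal sketch of what the lazy-release approach
mentioned above could look like, assuming a per-page check at recycle time.
__page_pool_recycle_ok() is a hypothetical helper, not the actual code from
the later patch; page_to_nid() and pool->p.nid are existing kernel/patch
symbols.

/* Recycle only pages that already sit on the pool's current node; pages
 * from a stale node are simply released, so the cache refills from the
 * new node over time instead of being flushed all at once.
 */
static bool __page_pool_recycle_ok(struct page_pool *pool, struct page *page)
{
	return page_to_nid(page) == pool->p.nid;
}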

Patch

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 2cbcdbdec254..fb13cf6055ff 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -226,4 +226,14 @@  static inline bool page_pool_put(struct page_pool *pool)
 	return refcount_dec_and_test(&pool->user_cnt);
 }
 
+/* Only safe from napi context or when user guarantees it is thread safe */
+void __page_pool_flush(struct page_pool *pool);
+static inline void page_pool_update_nid(struct page_pool *pool, int new_nid)
+{
+	if (unlikely(pool->p.nid != new_nid)) {
+		/* TODO: Add statistics/trace */
+		__page_pool_flush(pool);
+		pool->p.nid = new_nid;
+	}
+}
 #endif /* _NET_PAGE_POOL_H */
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 5bc65587f1c4..678cf85f273a 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -373,16 +373,13 @@  void __page_pool_free(struct page_pool *pool)
 }
 EXPORT_SYMBOL(__page_pool_free);
 
-/* Request to shutdown: release pages cached by page_pool, and check
- * for in-flight pages
- */
-bool __page_pool_request_shutdown(struct page_pool *pool)
+void __page_pool_flush(struct page_pool *pool)
 {
 	struct page *page;
 
 	/* Empty alloc cache, assume caller made sure this is
 	 * no-longer in use, and page_pool_alloc_pages() cannot be
-	 * call concurrently.
+	 * called concurrently.
 	 */
 	while (pool->alloc.count) {
 		page = pool->alloc.cache[--pool->alloc.count];
@@ -393,6 +390,15 @@  bool __page_pool_request_shutdown(struct page_pool *pool)
 	 * be in-flight.
 	 */
 	__page_pool_empty_ring(pool);
+}
+EXPORT_SYMBOL(__page_pool_flush);
+
+/* Request to shutdown: release pages cached by page_pool, and check
+ * for in-flight pages
+ */
+bool __page_pool_request_shutdown(struct page_pool *pool)
+{
+	__page_pool_flush(pool);
 
 	return __page_pool_safe_to_destroy(pool);
 }