
[net-next] page_pool: fix logic in __page_pool_get_cached

Message ID 20190813174509.494723-1-jonathan.lemon@gmail.com
State Accepted
Delegated to: David Miller
Series [net-next] page_pool: fix logic in __page_pool_get_cached

Commit Message

Jonathan Lemon Aug. 13, 2019, 5:45 p.m. UTC
__page_pool_get_cached() will return NULL when the ring is
empty, even if there are pages present in the lookaside cache.

It is also possible to refill the cache, and then return a
NULL page.

Restructure the logic to eliminate both cases.

Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
---
 net/core/page_pool.c | 39 ++++++++++++++++-----------------------
 1 file changed, 16 insertions(+), 23 deletions(-)
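
For context, the pre-patch function as reconstructed from the lines this
patch removes (a condensed sketch; the "failure mode" comments are
annotations, not kernel source):

static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;

	/* Failure mode 1: bailing out on an empty ring here skips the
	 * lookaside cache below, which may still hold pages.
	 */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
			/* Fast-path */
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
		/* Slower-path: Alloc array empty, time to refill */
		spin_lock(&r->consumer_lock);
		while ((page = __ptr_ring_consume(r))) {
			if (pool->alloc.count == PP_ALLOC_CACHE_REFILL)
				break;
			pool->alloc.cache[pool->alloc.count++] = page;
		}
		spin_unlock(&r->consumer_lock);
		/* Failure mode 2: if the ring runs dry mid-refill, the
		 * loop exits with page == NULL, so NULL is returned even
		 * though pages were just moved into the cache.
		 */
		return page;
	}

	/* Slow-path: Get page from locked ring queue */
	page = ptr_ring_consume(&pool->ring);
	return page;
}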

Comments

Jesper Dangaard Brouer Aug. 14, 2019, 12:29 p.m. UTC | #1
On Tue, 13 Aug 2019 10:45:09 -0700
Jonathan Lemon <jonathan.lemon@gmail.com> wrote:

> __page_pool_get_cached() will return NULL when the ring is
> empty, even if there are pages present in the lookaside cache.
> 
> It is also possible to refill the cache, and then return a
> NULL page.
> 
> Restructure the logic to eliminate both cases.
> 
> Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>

Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>

Thanks for catching and improving this!
Ilias Apalodimas Aug. 14, 2019, 12:49 p.m. UTC | #2
Hi Jonathan,

Thanks!

On Tue, Aug 13, 2019 at 10:45:09AM -0700, Jonathan Lemon wrote:
> __page_pool_get_cached() will return NULL when the ring is
> empty, even if there are pages present in the lookaside cache.
> 
> It is also possible to refill the cache, and then return a
> NULL page.
> 
> Restructure the logic to eliminate both cases.

Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>

> 
> Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
> ---
>  net/core/page_pool.c | 39 ++++++++++++++++-----------------------
>  1 file changed, 16 insertions(+), 23 deletions(-)
> 
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 68510eb869ea..de09a74a39a4 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -82,12 +82,9 @@ EXPORT_SYMBOL(page_pool_create);
>  static struct page *__page_pool_get_cached(struct page_pool *pool)
>  {
>  	struct ptr_ring *r = &pool->ring;
> +	bool refill = false;
>  	struct page *page;
>  
> -	/* Quicker fallback, avoid locks when ring is empty */
> -	if (__ptr_ring_empty(r))
> -		return NULL;
> -
>  	/* Test for safe-context, caller should provide this guarantee */
>  	if (likely(in_serving_softirq())) {
>  		if (likely(pool->alloc.count)) {
> @@ -95,27 +92,23 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
>  			page = pool->alloc.cache[--pool->alloc.count];
>  			return page;
>  		}
> -		/* Slower-path: Alloc array empty, time to refill
> -		 *
> -		 * Open-coded bulk ptr_ring consumer.
> -		 *
> -		 * Discussion: the ring consumer lock is not really
> -		 * needed due to the softirq/NAPI protection, but
> -		 * later need the ability to reclaim pages on the
> -		 * ring. Thus, keeping the locks.
> -		 */
> -		spin_lock(&r->consumer_lock);
> -		while ((page = __ptr_ring_consume(r))) {
> -			if (pool->alloc.count == PP_ALLOC_CACHE_REFILL)
> -				break;
> -			pool->alloc.cache[pool->alloc.count++] = page;
> -		}
> -		spin_unlock(&r->consumer_lock);
> -		return page;
> +		refill = true;
>  	}
>  
> -	/* Slow-path: Get page from locked ring queue */
> -	page = ptr_ring_consume(&pool->ring);
> +	/* Quicker fallback, avoid locks when ring is empty */
> +	if (__ptr_ring_empty(r))
> +		return NULL;
> +
> +	/* Slow-path: Get page from locked ring queue,
> +	 * refill alloc array if requested.
> +	 */
> +	spin_lock(&r->consumer_lock);
> +	page = __ptr_ring_consume(r);
> +	if (refill)
> +		pool->alloc.count = __ptr_ring_consume_batched(r,
> +							pool->alloc.cache,
> +							PP_ALLOC_CACHE_REFILL);
> +	spin_unlock(&r->consumer_lock);
>  	return page;
>  }
>  
> -- 
> 2.17.1
>
David Miller Aug. 15, 2019, 7:28 p.m. UTC | #3
From: Jonathan Lemon <jonathan.lemon@gmail.com>
Date: Tue, 13 Aug 2019 10:45:09 -0700

> __page_pool_get_cached() will return NULL when the ring is
> empty, even if there are pages present in the lookaside cache.
> 
> It is also possible to refill the cache, and then return a
> NULL page.
> 
> Restructure the logic to eliminate both cases.
> 
> Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>

Applied.

Patch

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 68510eb869ea..de09a74a39a4 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -82,12 +82,9 @@ EXPORT_SYMBOL(page_pool_create);
 static struct page *__page_pool_get_cached(struct page_pool *pool)
 {
 	struct ptr_ring *r = &pool->ring;
+	bool refill = false;
 	struct page *page;
 
-	/* Quicker fallback, avoid locks when ring is empty */
-	if (__ptr_ring_empty(r))
-		return NULL;
-
 	/* Test for safe-context, caller should provide this guarantee */
 	if (likely(in_serving_softirq())) {
 		if (likely(pool->alloc.count)) {
@@ -95,27 +92,23 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
 			page = pool->alloc.cache[--pool->alloc.count];
 			return page;
 		}
-		/* Slower-path: Alloc array empty, time to refill
-		 *
-		 * Open-coded bulk ptr_ring consumer.
-		 *
-		 * Discussion: the ring consumer lock is not really
-		 * needed due to the softirq/NAPI protection, but
-		 * later need the ability to reclaim pages on the
-		 * ring. Thus, keeping the locks.
-		 */
-		spin_lock(&r->consumer_lock);
-		while ((page = __ptr_ring_consume(r))) {
-			if (pool->alloc.count == PP_ALLOC_CACHE_REFILL)
-				break;
-			pool->alloc.cache[pool->alloc.count++] = page;
-		}
-		spin_unlock(&r->consumer_lock);
-		return page;
+		refill = true;
 	}
 
-	/* Slow-path: Get page from locked ring queue */
-	page = ptr_ring_consume(&pool->ring);
+	/* Quicker fallback, avoid locks when ring is empty */
+	if (__ptr_ring_empty(r))
+		return NULL;
+
+	/* Slow-path: Get page from locked ring queue,
+	 * refill alloc array if requested.
+	 */
+	spin_lock(&r->consumer_lock);
+	page = __ptr_ring_consume(r);
+	if (refill)
+		pool->alloc.count = __ptr_ring_consume_batched(r,
+							pool->alloc.cache,
+							PP_ALLOC_CACHE_REFILL);
+	spin_unlock(&r->consumer_lock);
 	return page;
 }
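
For reference, __page_pool_get_cached() as it reads with the patch
applied, assembled from the hunks above: the lookaside-cache fast path
now runs before the ring-empty check, and on refill the first consumed
page is returned directly while __ptr_ring_consume_batched() stores up
to PP_ALLOC_CACHE_REFILL further pages in the cache, so a successful
refill can no longer coincide with a NULL return.

static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	bool refill = false;
	struct page *page;

	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
			/* Fast-path */
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
		refill = true;
	}

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Slow-path: Get page from locked ring queue,
	 * refill alloc array if requested.
	 */
	spin_lock(&r->consumer_lock);
	page = __ptr_ring_consume(r);
	if (refill)
		pool->alloc.count = __ptr_ring_consume_batched(r,
							pool->alloc.cache,
							PP_ALLOC_CACHE_REFILL);
	spin_unlock(&r->consumer_lock);
	return page;
}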