
[bpf] xdp: Handle MEM_TYPE_XSK_BUFF_POOL correctly in xdp_return_buff()

Message ID 20201127171726.123627-1-bjorn.topel@gmail.com
State Superseded
Series [bpf] xdp: Handle MEM_TYPE_XSK_BUFF_POOL correctly in xdp_return_buff()

Commit Message

Björn Töpel Nov. 27, 2020, 5:17 p.m. UTC
From: Björn Töpel <bjorn.topel@intel.com>

It turns out that there is a path where xdp_return_buff() is passed
an XDP buffer of type MEM_TYPE_XSK_BUFF_POOL. This path is taken when
AF_XDP zero-copy mode is enabled and a buffer is redirected to a
DEVMAP with an attached XDP program that drops the buffer.

This change simply puts the handling of MEM_TYPE_XSK_BUFF_POOL back
into xdp_return_buff().

Reported-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Fixes: 82c41671ca4f ("xdp: Simplify xdp_return_{frame, frame_rx_napi, buff}")
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
---
 net/core/xdp.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)


base-commit: 9a44bc9449cfe7e39dbadf537ff669fb007a9e63
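For readers tracing the path described in the commit message, below is a
minimal, illustrative sketch of how a DEVMAP-attached program's drop verdict
can reach xdp_return_buff() while the buffer still carries
MEM_TYPE_XSK_BUFF_POOL. This is not the actual kernel code: the helper name
devmap_prog_run_and_drop() is hypothetical; only bpf_prog_run_xdp(),
xdp_return_buff() and the XDP_* verdicts are real kernel APIs.

#include <linux/filter.h>	/* bpf_prog_run_xdp() */
#include <net/xdp.h>		/* xdp_return_buff() */

static u32 devmap_prog_run_and_drop(struct bpf_prog *prog, struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	if (act != XDP_PASS) {
		/* With AF_XDP zero-copy enabled, xdp->rxq->mem.type is
		 * MEM_TYPE_XSK_BUFF_POOL here, so xdp_return_buff() must
		 * know how to hand the buffer back to the XSK buffer pool;
		 * that is the case this patch restores.
		 */
		xdp_return_buff(xdp);
	}
	return act;
}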

Comments

Maxim Mikityanskiy Nov. 30, 2020, 8:54 a.m. UTC | #1
On 2020-11-27 19:17, Björn Töpel wrote:
> From: Björn Töpel <bjorn.topel@intel.com>
> 
> It turns out that there is a path where xdp_return_buff() is passed
> an XDP buffer of type MEM_TYPE_XSK_BUFF_POOL. This path is taken when
> AF_XDP zero-copy mode is enabled and a buffer is redirected to a
> DEVMAP with an attached XDP program that drops the buffer.
> 
> This change simply puts the handling of MEM_TYPE_XSK_BUFF_POOL back
> into xdp_return_buff().
> 
> Reported-by: Maxim Mikityanskiy <maximmi@nvidia.com>
> Fixes: 82c41671ca4f ("xdp: Simplify xdp_return_{frame, frame_rx_napi, buff}")
> Signed-off-by: Björn Töpel <bjorn.topel@intel.com>

Thanks for addressing this!

Acked-by: Maxim Mikityanskiy <maximmi@nvidia.com>

> ---
>   net/core/xdp.c | 17 ++++++++++-------
>   1 file changed, 10 insertions(+), 7 deletions(-)
> 
> diff --git a/net/core/xdp.c b/net/core/xdp.c
> index 48aba933a5a8..491ad569a79c 100644
> --- a/net/core/xdp.c
> +++ b/net/core/xdp.c
> @@ -335,11 +335,10 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
>    * scenarios (e.g. queue full), it is possible to return the xdp_frame
>    * while still leveraging this protection.  The @napi_direct boolean
>    * is used for those calls sites.  Thus, allowing for faster recycling
> - * of xdp_frames/pages in those cases. This path is never used by the
> - * MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of
> - * the switch-statement.
> + * of xdp_frames/pages in those cases.
>    */
> -static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
> +static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
> +			 struct xdp_buff *xdp)
>   {
>   	struct xdp_mem_allocator *xa;
>   	struct page *page;
> @@ -361,6 +360,10 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
>   		page = virt_to_page(data); /* Assumes order0 page*/
>   		put_page(page);
>   		break;
> +	case MEM_TYPE_XSK_BUFF_POOL:
> +		/* NB! Only valid from an xdp_buff! */
> +		xsk_buff_free(xdp);
> +		break;
>   	default:
>   		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
>   		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
> @@ -370,19 +373,19 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
>   
>   void xdp_return_frame(struct xdp_frame *xdpf)
>   {
> -	__xdp_return(xdpf->data, &xdpf->mem, false);
> +	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
>   }
>   EXPORT_SYMBOL_GPL(xdp_return_frame);
>   
>   void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
>   {
> -	__xdp_return(xdpf->data, &xdpf->mem, true);
> +	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
>   }
>   EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
>   
>   void xdp_return_buff(struct xdp_buff *xdp)
>   {
> -	__xdp_return(xdp->data, &xdp->rxq->mem, true);
> +	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
>   }
>   
>   /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
> 
> base-commit: 9a44bc9449cfe7e39dbadf537ff669fb007a9e63
>
patchwork-bot+netdevbpf@kernel.org Nov. 30, 2020, 10:10 p.m. UTC | #2
Hello:

This patch was applied to bpf/bpf.git (refs/heads/master):

On Fri, 27 Nov 2020 18:17:26 +0100 you wrote:
> From: Björn Töpel <bjorn.topel@intel.com>
> 
> It turns out that there is a path where xdp_return_buff() is passed
> an XDP buffer of type MEM_TYPE_XSK_BUFF_POOL. This path is taken when
> AF_XDP zero-copy mode is enabled and a buffer is redirected to a
> DEVMAP with an attached XDP program that drops the buffer.
> 
> [...]

Here is the summary with links:
  - [bpf] xdp: Handle MEM_TYPE_XSK_BUFF_POOL correctly in xdp_return_buff()
    https://git.kernel.org/bpf/bpf/c/ed1182dc004d

You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html

Patch

diff --git a/net/core/xdp.c b/net/core/xdp.c
index 48aba933a5a8..491ad569a79c 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -335,11 +335,10 @@  EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
  * scenarios (e.g. queue full), it is possible to return the xdp_frame
  * while still leveraging this protection.  The @napi_direct boolean
  * is used for those calls sites.  Thus, allowing for faster recycling
- * of xdp_frames/pages in those cases. This path is never used by the
- * MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of
- * the switch-statement.
+ * of xdp_frames/pages in those cases.
  */
-static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+			 struct xdp_buff *xdp)
 {
 	struct xdp_mem_allocator *xa;
 	struct page *page;
@@ -361,6 +360,10 @@  static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 		page = virt_to_page(data); /* Assumes order0 page*/
 		put_page(page);
 		break;
+	case MEM_TYPE_XSK_BUFF_POOL:
+		/* NB! Only valid from an xdp_buff! */
+		xsk_buff_free(xdp);
+		break;
 	default:
 		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
 		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
@@ -370,19 +373,19 @@  static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, false);
+	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, true);
+	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
 
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-	__xdp_return(xdp->data, &xdp->rxq->mem, true);
+	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
 }
 
 /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
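A note on why only xdp_return_buff() passes the xdp_buff through: at the time
of this patch, converting a zero-copy buffer into an xdp_frame (via
xdp_convert_buff_to_frame()) copies the payload out of the XSK pool, so an
xdp_frame should never carry MEM_TYPE_XSK_BUFF_POOL and the frame-based
return paths can safely pass NULL. The sketch below illustrates that
asymmetry under this assumption; drop_xdp_any() is an illustrative name, not
part of the patch.

#include <net/xdp.h>

static void drop_xdp_any(struct xdp_buff *xdp, bool as_frame)
{
	if (as_frame) {
		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

		/* Conversion copies zero-copy payloads out of the XSK pool,
		 * so xdpf->mem.type is never MEM_TYPE_XSK_BUFF_POOL and the
		 * NULL xdp argument inside xdp_return_frame() is safe.
		 */
		if (!xdpf) {
			xdp_return_buff(xdp);	/* conversion failed; free the buff */
			return;
		}
		xdp_return_frame(xdpf);
	} else {
		/* Keeps the original memory type; with this patch the
		 * MEM_TYPE_XSK_BUFF_POOL case is freed via xsk_buff_free().
		 */
		xdp_return_buff(xdp);
	}
}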