
[bpf-next,V3,11/14] virtio_net: setup xdp_rxq_info

Message ID 151471811006.30703.5247549257959215999.stgit@firesoul
State Superseded, archived
Delegated to: BPF Maintainers
Series: xdp: new XDP rx-queue info concept

Commit Message

Jesper Dangaard Brouer Dec. 31, 2017, 11:01 a.m. UTC
The virtio_net driver doesn't dynamically change the RX-ring queue
layout and backing pages, but instead rejects XDP setup if the
conditions for XDP are not met.  Thus, the xdp_rxq_info also remains
fairly static.  This allows us to simply add the reg/unreg calls to
the net_device open/close functions.

Driver hook points for xdp_rxq_info:
 * reg  : virtnet_open
 * unreg: virtnet_close

V3:
 - bugfix, also setup xdp.rxq in receive_mergeable()
 - Tested bpf-sample prog inside guest on a virtio_net device

Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: virtualization@lists.linux-foundation.org
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
---
 drivers/net/virtio_net.c |   14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)
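
For context, a minimal consumer-side sketch of what the xdp_rxq_info
registration above enables: once the later patch in this series exposes
the rx-queue info to BPF programs, an XDP program attached to a
virtio_net device can read the receive-queue index directly from its
context.  This is a hypothetical illustration, not part of this patch;
it assumes the rx_queue_index field in struct xdp_md added later in the
series, and a bpf_helpers.h header providing the SEC() macro.

/* Hypothetical XDP program: per-rx-queue policy, enabled by the
 * xdp_rxq_info that virtnet_open() registers for each receive queue.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("xdp")
int xdp_rxq_demo(struct xdp_md *ctx)
{
	/* Filled in from the per-queue xdp_rxq_info (rq->xdp_rxq). */
	__u32 rxq = ctx->rx_queue_index;

	/* Example policy: drop packets arriving on queue 0. */
	if (rxq == 0)
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Such an object file could be attached with iproute2, for example:
ip link set dev $DEV xdp obj prog.o sec xdp.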

Comments

Jason Wang Jan. 2, 2018, 3:38 a.m. UTC | #1
On Dec. 31, 2017, 19:01, Jesper Dangaard Brouer wrote:
> The virtio_net driver doesn't dynamically change the RX-ring queue
> layout and backing pages, but instead rejects XDP setup if the
> conditions for XDP are not met.  Thus, the xdp_rxq_info also remains
> fairly static.  This allows us to simply add the reg/unreg calls to
> the net_device open/close functions.
>
> Driver hook points for xdp_rxq_info:
>   * reg  : virtnet_open
>   * unreg: virtnet_close
>
> V3:
>   - bugfix, also setup xdp.rxq in receive_mergeable()
>   - Tested bpf-sample prog inside guest on a virtio_net device
>
> Cc: "Michael S. Tsirkin" <mst@redhat.com>
> Cc: Jason Wang <jasowang@redhat.com>
> Cc: virtualization@lists.linux-foundation.org
> Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
> ---
>   drivers/net/virtio_net.c |   14 +++++++++++++-
>   1 file changed, 13 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 6fb7b658a6cc..ed8299343728 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -31,6 +31,7 @@
>   #include <linux/average.h>
>   #include <linux/filter.h>
>   #include <net/route.h>
> +#include <net/xdp.h>
>   
>   static int napi_weight = NAPI_POLL_WEIGHT;
>   module_param(napi_weight, int, 0444);
> @@ -115,6 +116,8 @@ struct receive_queue {
>   
>   	/* Name of this receive queue: input.$index */
>   	char name[40];
> +
> +	struct xdp_rxq_info xdp_rxq;
>   };
>   
>   struct virtnet_info {
> @@ -559,6 +562,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
>   		xdp.data = xdp.data_hard_start + xdp_headroom;
>   		xdp_set_data_meta_invalid(&xdp);
>   		xdp.data_end = xdp.data + len;
> +		xdp.rxq = &rq->xdp_rxq;
>   		orig_data = xdp.data;
>   		act = bpf_prog_run_xdp(xdp_prog, &xdp);
>   
> @@ -692,6 +696,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>   		xdp.data = data + vi->hdr_len;
>   		xdp_set_data_meta_invalid(&xdp);
>   		xdp.data_end = xdp.data + (len - vi->hdr_len);
> +		xdp.rxq = &rq->xdp_rxq;
> +
>   		act = bpf_prog_run_xdp(xdp_prog, &xdp);
>   
>   		if (act != XDP_PASS)
> @@ -1225,13 +1231,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
>   static int virtnet_open(struct net_device *dev)
>   {
>   	struct virtnet_info *vi = netdev_priv(dev);
> -	int i;
> +	int i, err;
>   
>   	for (i = 0; i < vi->max_queue_pairs; i++) {
>   		if (i < vi->curr_queue_pairs)
>   			/* Make sure we have some buffers: if oom use wq. */
>   			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
>   				schedule_delayed_work(&vi->refill, 0);
> +
> +		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
> +		if (err < 0)
> +			return err;
> +
>   		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
>   		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
>   	}
> @@ -1560,6 +1571,7 @@ static int virtnet_close(struct net_device *dev)
>   	cancel_delayed_work_sync(&vi->refill);
>   
>   	for (i = 0; i < vi->max_queue_pairs; i++) {
> +		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
>   		napi_disable(&vi->rq[i].napi);
>   		virtnet_napi_tx_disable(&vi->sq[i].napi);
>   	}
>

Reviewed-by: Jason Wang <jasowang@redhat.com>

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6fb7b658a6cc..ed8299343728 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -31,6 +31,7 @@ 
 #include <linux/average.h>
 #include <linux/filter.h>
 #include <net/route.h>
+#include <net/xdp.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -115,6 +116,8 @@  struct receive_queue {
 
 	/* Name of this receive queue: input.$index */
 	char name[40];
+
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct virtnet_info {
@@ -559,6 +562,7 @@  static struct sk_buff *receive_small(struct net_device *dev,
 		xdp.data = xdp.data_hard_start + xdp_headroom;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + len;
+		xdp.rxq = &rq->xdp_rxq;
 		orig_data = xdp.data;
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
@@ -692,6 +696,8 @@  static struct sk_buff *receive_mergeable(struct net_device *dev,
 		xdp.data = data + vi->hdr_len;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + (len - vi->hdr_len);
+		xdp.rxq = &rq->xdp_rxq;
+
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
 		if (act != XDP_PASS)
@@ -1225,13 +1231,18 @@  static int virtnet_poll(struct napi_struct *napi, int budget)
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	int i;
+	int i, err;
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
+
+		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
+		if (err < 0)
+			return err;
+
 		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
 		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
 	}
@@ -1560,6 +1571,7 @@  static int virtnet_close(struct net_device *dev)
 	cancel_delayed_work_sync(&vi->refill);
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
+		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
 		napi_disable(&vi->rq[i].napi);
 		virtnet_napi_tx_disable(&vi->sq[i].napi);
 	}