From patchwork Fri Jan 13 02:51:23 2017
X-Patchwork-Submitter: John Fastabend
X-Patchwork-Id: 714744
X-Patchwork-Delegate: davem@davemloft.net
From: John Fastabend
Subject: [net PATCH v3 3/5] virtio_net: factor out xdp handler for readability
To: jasowang@redhat.com, mst@redhat.com
Cc: john.r.fastabend@intel.com, netdev@vger.kernel.org,
    john.fastabend@gmail.com, alexei.starovoitov@gmail.com,
    daniel@iogearbox.net
Date: Thu, 12 Jan 2017 18:51:23 -0800
Message-ID: <20170113025123.4535.75989.stgit@john-Precision-Tower-5810>
In-Reply-To: <20170113024908.4535.8835.stgit@john-Precision-Tower-5810>
References: <20170113024908.4535.8835.stgit@john-Precision-Tower-5810>
User-Agent: StGit/0.17.1-dirty
X-Mailing-List: netdev@vger.kernel.org

At this point do_xdp_prog() is mostly if/else branches handling the
different receive modes of virtio_net. So remove it and run the XDP
program directly from the per-mode receive handlers.

Signed-off-by: John Fastabend
---
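[ Note for reviewers, not part of the commit message: both receive
  paths now open-code the same XDP_TX queue selection,

      qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();

  The standalone C sketch below shows the intent of that arithmetic:
  the last xdp_queue_pairs send queues are reserved so each CPU maps to
  its own XDP transmit queue. The queue counts are hypothetical and a
  plain loop variable stands in for smp_processor_id(). ]

/* qp_sketch.c - illustration only, not kernel code */
#include <stdio.h>

int main(void)
{
        unsigned int curr_queue_pairs = 8; /* hypothetical: 4 regular + 4 XDP */
        unsigned int xdp_queue_pairs = 4;  /* hypothetical: one per online CPU */
        unsigned int cpu;

        for (cpu = 0; cpu < xdp_queue_pairs; cpu++) {
                /* same arithmetic as the patch; 'cpu' stands in for
                 * smp_processor_id() */
                unsigned int qp = curr_queue_pairs - xdp_queue_pairs + cpu;
                printf("cpu %u -> sq[%u]\n", cpu, qp);
        }
        return 0;
}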
 drivers/net/virtio_net.c |   76 +++++++++++++++++-----------------------------
 1 file changed, 28 insertions(+), 48 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 43cb2e0..ec54644 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -388,49 +388,6 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
         virtqueue_kick(sq->vq);
 }
 
-static u32 do_xdp_prog(struct virtnet_info *vi,
-                       struct receive_queue *rq,
-                       struct bpf_prog *xdp_prog,
-                       void *data, int len)
-{
-        int hdr_padded_len;
-        struct xdp_buff xdp;
-        void *buf;
-        unsigned int qp;
-        u32 act;
-
-        if (vi->mergeable_rx_bufs) {
-                hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
-                xdp.data = data + hdr_padded_len;
-                xdp.data_end = xdp.data + (len - vi->hdr_len);
-                buf = data;
-        } else { /* small buffers */
-                struct sk_buff *skb = data;
-
-                xdp.data = skb->data;
-                xdp.data_end = xdp.data + len;
-                buf = skb->data;
-        }
-
-        act = bpf_prog_run_xdp(xdp_prog, &xdp);
-        switch (act) {
-        case XDP_PASS:
-                return XDP_PASS;
-        case XDP_TX:
-                qp = vi->curr_queue_pairs -
-                        vi->xdp_queue_pairs +
-                        smp_processor_id();
-                xdp.data = buf;
-                virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
-                return XDP_TX;
-        default:
-                bpf_warn_invalid_xdp_action(act);
-        case XDP_ABORTED:
-        case XDP_DROP:
-                return XDP_DROP;
-        }
-}
-
 static struct sk_buff *receive_small(struct net_device *dev,
                                      struct virtnet_info *vi,
                                      struct receive_queue *rq,
@@ -446,19 +403,30 @@ static struct sk_buff *receive_small(struct net_device *dev,
         xdp_prog = rcu_dereference(rq->xdp_prog);
         if (xdp_prog) {
                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+                struct xdp_buff xdp;
+                unsigned int qp;
                 u32 act;
 
                 if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
                         goto err_xdp;
-                act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
+
+                xdp.data = skb->data;
+                xdp.data_end = xdp.data + len;
+                act = bpf_prog_run_xdp(xdp_prog, &xdp);
                 switch (act) {
                 case XDP_PASS:
                         break;
                 case XDP_TX:
+                        qp = vi->curr_queue_pairs -
+                                vi->xdp_queue_pairs +
+                                smp_processor_id();
+                        virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, skb);
                         rcu_read_unlock();
                         goto xdp_xmit;
-                case XDP_DROP:
                 default:
+                        bpf_warn_invalid_xdp_action(act);
+                case XDP_ABORTED:
+                case XDP_DROP:
                         goto err_xdp;
                 }
         }
@@ -575,7 +543,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
         rcu_read_lock();
         xdp_prog = rcu_dereference(rq->xdp_prog);
         if (xdp_prog) {
+                int desc_room = sizeof(struct virtio_net_hdr_mrg_rxbuf);
                 struct page *xdp_page;
+                struct xdp_buff xdp;
+                unsigned int qp;
+                void *data;
                 u32 act;
 
                 /* This happens when rx buffer size is underestimated */
@@ -598,8 +570,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                 if (unlikely(hdr->hdr.gso_type))
                         goto err_xdp;
 
-                act = do_xdp_prog(vi, rq, xdp_prog,
-                                  page_address(xdp_page) + offset, len);
+                data = page_address(xdp_page) + offset;
+                xdp.data = data + desc_room;
+                xdp.data_end = xdp.data + (len - vi->hdr_len);
+                act = bpf_prog_run_xdp(xdp_prog, &xdp);
                 switch (act) {
                 case XDP_PASS:
                         /* We can only create skb based on xdp_page. */
@@ -613,13 +587,19 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                         }
                         break;
                 case XDP_TX:
+                        qp = vi->curr_queue_pairs -
+                                vi->xdp_queue_pairs +
+                                smp_processor_id();
+                        virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
                         ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
                         if (unlikely(xdp_page != page))
                                 goto err_xdp;
                         rcu_read_unlock();
                         goto xdp_xmit;
-                case XDP_DROP:
                 default:
+                        bpf_warn_invalid_xdp_action(act);
+                case XDP_ABORTED:
+                case XDP_DROP:
                         if (unlikely(xdp_page != page))
                                 __free_pages(xdp_page, 0);
                         ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
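
[ Note for reviewers, not part of the commit: with do_xdp_prog() gone,
  each handler builds its xdp_buff inline, and the only real difference
  between the two is where the packet starts relative to the virtio-net
  header. A minimal compilable sketch of the two framings follows; the
  struct and function names are illustrative, not kernel API, and the
  sizes in main() are hypothetical. ]

/* framing_sketch.c - illustration only, not kernel code */
#include <stdio.h>
#include <stddef.h>

struct xdp_buff_sketch {
        void *data;      /* first byte the XDP program may read */
        void *data_end;  /* one past the last readable byte */
};

/* Small-buffer path: as in the patch, skb->data and len already
 * describe the packet without the virtio-net header, so the frame is
 * handed to the program as-is. */
static void frame_small(struct xdp_buff_sketch *xdp, void *skb_data,
                        unsigned int len)
{
        xdp->data = skb_data;
        xdp->data_end = (char *)xdp->data + len;
}

/* Mergeable path: the buffer still begins with the (padded)
 * virtio_net_hdr_mrg_rxbuf, so skip desc_room bytes at the front and
 * subtract hdr_len from the total length, mirroring the patch. */
static void frame_mergeable(struct xdp_buff_sketch *xdp, void *page_data,
                            unsigned int len, size_t desc_room,
                            unsigned int hdr_len)
{
        xdp->data = (char *)page_data + desc_room;
        xdp->data_end = (char *)xdp->data + (len - hdr_len);
}

int main(void)
{
        char buf[2048];                /* pretend receive buffer */
        struct xdp_buff_sketch xdp;

        /* hypothetical sizes: 12-byte merge header, 60-byte packet */
        frame_mergeable(&xdp, buf, 72, 12, 12);
        printf("mergeable frame length: %td\n",
               (char *)xdp.data_end - (char *)xdp.data);

        frame_small(&xdp, buf, 60);
        printf("small frame length: %td\n",
               (char *)xdp.data_end - (char *)xdp.data);
        return 0;
}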