From patchwork Sat Feb 4 18:20:33 2017
X-Patchwork-Submitter: Eric Dumazet
X-Patchwork-Id: 724123
X-Patchwork-Delegate: davem@davemloft.net
From: Eric Dumazet
To: "David S . Miller"
Cc: netdev, Eric Dumazet, Eric Dumazet
Subject: [PATCH net-next 4/9] amd8111e: add GRO support
Date: Sat, 4 Feb 2017 10:20:33 -0800
Message-Id: <20170204182038.3752-5-edumazet@google.com>
In-Reply-To: <20170204182038.3752-1-edumazet@google.com>
References: <20170204182038.3752-1-edumazet@google.com>
X-Mailing-List: netdev@vger.kernel.org

Use napi_complete_done() instead of __napi_complete() to:

1) Get support of gro_flush_timeout if opted in.
2) Not rearm interrupts for busy-polling users.
3) Use the standard NAPI API.
4) Get rid of baroque code and ease maintenance.
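
The resulting poll handler follows the standard NAPI completion
pattern. As a rough sketch (the foo_* names below are placeholders,
not amd8111e functions):

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		/* Process up to @budget packets, handing each skb to
		 * napi_gro_receive().
		 */
		int work_done = foo_clean_rx(napi, budget);

		/* Only when the budget was not exhausted may polling
		 * stop.  napi_complete_done() returns false when NAPI
		 * must stay scheduled (e.g. a busy-polling user owns
		 * this context, or gro_flush_timeout armed a timer);
		 * in that case interrupts must not be re-enabled.
		 */
		if (work_done < budget && napi_complete_done(napi, work_done))
			foo_enable_rx_irq();

		return work_done;
	}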
Signed-off-by: Eric Dumazet
---
 drivers/net/ethernet/amd/amd8111e.c | 165 ++++++++++++++++--------------------
 1 file changed, 73 insertions(+), 92 deletions(-)

diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 9595f1bc535b73306720e4d0005fe58d2025..27b18af29863c38c5308c58701bd46c305b2 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -695,125 +695,106 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 	void __iomem *mmio = lp->mmio;
 	struct sk_buff *skb,*new_skb;
 	int min_pkt_len, status;
-	unsigned int intr0;
 	int num_rx_pkt = 0;
 	short pkt_len;
 #if AMD8111E_VLAN_TAG_USED
 	short vtag;
 #endif
-	int rx_pkt_limit = budget;
-	unsigned long flags;
 
-	if (rx_pkt_limit <= 0)
-		goto rx_not_empty;
+	while (num_rx_pkt < budget) {
+		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+		if (status & OWN_BIT)
+			break;
 
-	do{
-		/* process receive packets until we use the quota.
-		 * If we own the next entry, it's a new packet. Send it up.
+		/* There is a tricky error noted by John Murphy,
+		 * to Russ Nelson: Even with
+		 * full-sized * buffers it's possible for a
+		 * jabber packet to use two buffers, with only
+		 * the last correctly noting the error.
 		 */
-		while(1) {
-			status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
-			if (status & OWN_BIT)
-				break;
-
-			/* There is a tricky error noted by John Murphy,
-			 * to Russ Nelson: Even with
-			 * full-sized * buffers it's possible for a
-			 * jabber packet to use two buffers, with only
-			 * the last correctly noting the error.
-			 */
-			if(status & ERR_BIT) {
-				/* resetting flags */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				goto err_next_pkt;
-			}
-			/* check for STP and ENP */
-			if(!((status & STP_BIT) && (status & ENP_BIT))){
-				/* resetting flags */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				goto err_next_pkt;
-			}
-			pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+		if (status & ERR_BIT) {
+			/* resetting flags */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			goto err_next_pkt;
+		}
+		/* check for STP and ENP */
+		if (!((status & STP_BIT) && (status & ENP_BIT))){
+			/* resetting flags */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			goto err_next_pkt;
+		}
+		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
 #if AMD8111E_VLAN_TAG_USED
-			vtag = status & TT_MASK;
-			/*MAC will strip vlan tag*/
-			if (vtag != 0)
-				min_pkt_len =MIN_PKT_LEN - 4;
+		vtag = status & TT_MASK;
+		/* MAC will strip vlan tag */
+		if (vtag != 0)
+			min_pkt_len = MIN_PKT_LEN - 4;
 		else
 #endif
-				min_pkt_len =MIN_PKT_LEN;
+			min_pkt_len = MIN_PKT_LEN;
 
-			if (pkt_len < min_pkt_len) {
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				lp->drv_rx_errors++;
-				goto err_next_pkt;
-			}
-			if(--rx_pkt_limit < 0)
-				goto rx_not_empty;
-			new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
-			if (!new_skb) {
-				/* if allocation fail,
-				 * ignore that pkt and go to next one
-				 */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				lp->drv_rx_errors++;
-				goto err_next_pkt;
-			}
+		if (pkt_len < min_pkt_len) {
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			lp->drv_rx_errors++;
+			goto err_next_pkt;
+		}
+		new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
+		if (!new_skb) {
+			/* if allocation fail,
+			 * ignore that pkt and go to next one
+			 */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			lp->drv_rx_errors++;
+			goto err_next_pkt;
+		}
 
-			skb_reserve(new_skb, 2);
-			skb = lp->rx_skbuff[rx_index];
-			pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
-					 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
-			skb_put(skb, pkt_len);
-			lp->rx_skbuff[rx_index] = new_skb;
-			lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
-								   new_skb->data,
-								   lp->rx_buff_len-2,
-								   PCI_DMA_FROMDEVICE);
+		skb_reserve(new_skb, 2);
+		skb = lp->rx_skbuff[rx_index];
+		pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
+				 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+		skb_put(skb, pkt_len);
+		lp->rx_skbuff[rx_index] = new_skb;
+		lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+							   new_skb->data,
+							   lp->rx_buff_len-2,
+							   PCI_DMA_FROMDEVICE);
 
-			skb->protocol = eth_type_trans(skb, dev);
+		skb->protocol = eth_type_trans(skb, dev);
 #if AMD8111E_VLAN_TAG_USED
-			if (vtag == TT_VLAN_TAGGED){
-				u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
-				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-			}
-#endif
-			netif_receive_skb(skb);
-			/*COAL update rx coalescing parameters*/
-			lp->coal_conf.rx_packets++;
-			lp->coal_conf.rx_bytes += pkt_len;
-			num_rx_pkt++;
-
-		err_next_pkt:
-			lp->rx_ring[rx_index].buff_phy_addr
-				= cpu_to_le32(lp->rx_dma_addr[rx_index]);
-			lp->rx_ring[rx_index].buff_count =
-				cpu_to_le16(lp->rx_buff_len-2);
-			wmb();
-			lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
-			rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+		if (vtag == TT_VLAN_TAGGED){
+			u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 		}
-		/* Check the interrupt status register for more packets in the
-		 * mean time. Process them since we have not used up our quota.
-		 */
-		intr0 = readl(mmio + INT0);
-		/*Ack receive packets */
-		writel(intr0 & RINT0,mmio + INT0);
+#endif
+		napi_gro_receive(napi, skb);
+		/* COAL update rx coalescing parameters */
+		lp->coal_conf.rx_packets++;
+		lp->coal_conf.rx_bytes += pkt_len;
+		num_rx_pkt++;
+
+err_next_pkt:
+		lp->rx_ring[rx_index].buff_phy_addr
+			= cpu_to_le32(lp->rx_dma_addr[rx_index]);
+		lp->rx_ring[rx_index].buff_count =
+			cpu_to_le16(lp->rx_buff_len-2);
+		wmb();
+		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+	}
 
-	} while(intr0 & RINT0);
+	if ((num_rx_pkt < budget) &&
+	    napi_complete_done(napi, num_rx_pkt)) {
+		unsigned long flags;
 
-	if (rx_pkt_limit > 0) {
 		/* Receive descriptor is empty now */
 		spin_lock_irqsave(&lp->lock, flags);
-		__napi_complete(napi);
 		writel(VAL0|RINTEN0, mmio + INTEN0);
 		writel(VAL2 | RDMD0, mmio + CMD0);
 		spin_unlock_irqrestore(&lp->lock, flags);
 	}
-rx_not_empty:
 	return num_rx_pkt;
 }
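
The GRO side of the change is the switch from netif_receive_skb() to
napi_gro_receive(), which lets the core merge consecutive TCP segments
into one large skb before they climb the protocol layers. A minimal
sketch of that delivery step (foo_deliver() is a hypothetical helper,
not driver code):

	static void foo_deliver(struct napi_struct *napi,
				struct net_device *dev, struct sk_buff *skb)
	{
		skb->protocol = eth_type_trans(skb, dev);
		/* was netif_receive_skb(skb); GRO may coalesce here */
		napi_gro_receive(napi, skb);
	}

Note that gro_flush_timeout is opt-in: it stays at 0 (disabled) unless
set through /sys/class/net/<dev>/gro_flush_timeout.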