From patchwork Wed Oct 28 01:52:07 2009
X-Patchwork-Submitter: "Kirsher, Jeffrey T"
X-Patchwork-Id: 37037
X-Patchwork-Delegate: davem@davemloft.net
From: Jeff Kirsher
Subject: [net-next-2.6 PATCH 09/20] igb: move rx_buffer_len into the ring structure
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, gospo@redhat.com, Alexander Duyck,
 Jeff Kirsher
Date: Tue, 27 Oct 2009 18:52:07 -0700
Message-ID: <20091028015206.12470.50840.stgit@localhost.localdomain>
In-Reply-To: <20091028014858.12470.99520.stgit@localhost.localdomain>
References: <20091028014858.12470.99520.stgit@localhost.localdomain>
User-Agent: StGIT/0.14.3
X-Mailing-List: netdev@vger.kernel.org

From: Alexander Duyck

This patch moves the rx_buffer_len value into the ring structure, so that
each receive queue carries its own buffer length. This allows greater
flexibility, such as supporting packet split on only some queues or
enabling virtualization.
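A purely illustrative aside, not part of the patch: the user-space sketch
below models the ownership change with invented scaffolding (only the field
and constant names mirror the driver, everything else is a stand-in) to show
what a per-ring length permits that the old adapter-wide field could not.

#include <stdio.h>

#define IGB_RXBUFFER_128            128
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
#define NUM_RX_QUEUES                 4

struct igb_ring {
	unsigned int queue_index;
	unsigned int rx_buffer_len;	/* after this patch: owned by the ring */
};

struct igb_adapter {
	/* before this patch a single adapter-wide rx_buffer_len lived here,
	 * forcing every RX queue to use the same buffer size */
	struct igb_ring rx_ring[NUM_RX_QUEUES];
};

int main(void)
{
	struct igb_adapter adapter;
	unsigned int i;

	for (i = 0; i < NUM_RX_QUEUES; i++) {
		adapter.rx_ring[i].queue_index = i;
		/* per-ring ownership lets one queue run header split with a
		 * small buffer while the others keep full-frame buffers */
		adapter.rx_ring[i].rx_buffer_len = (i == 0) ?
				IGB_RXBUFFER_128 : MAXIMUM_ETHERNET_VLAN_SIZE;
		printf("queue %u: rx_buffer_len = %u\n",
		       adapter.rx_ring[i].queue_index,
		       adapter.rx_ring[i].rx_buffer_len);
	}
	return 0;
}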
Signed-off-by: Alexander Duyck
Signed-off-by: Jeff Kirsher
---
 drivers/net/igb/igb.h      |    3 +--
 drivers/net/igb/igb_main.c |   41 ++++++++++++++++++++++-------------------
 2 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index de26862..00ff274 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -198,7 +198,7 @@ struct igb_ring {
 		/* RX */
 		struct {
 			struct igb_rx_queue_stats rx_stats;
-			u64 rx_queue_drops;
+			u32 rx_buffer_len;
 		};
 	};
 };
@@ -218,7 +218,6 @@ struct igb_adapter {
 	struct vlan_group *vlgrp;
 	u16 mng_vlan_id;
 	u32 bd_number;
-	u32 rx_buffer_len;
 	u32 wol;
 	u32 en_mng_pt;
 	u16 link_speed;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ff16b7a..04e860d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -443,6 +443,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = i;
 		ring->pdev = adapter->pdev;
+		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 	}

 	igb_cache_ring_register(adapter);
@@ -1863,7 +1864,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)

 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
-	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

@@ -2358,8 +2358,8 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
 	writel(0, ring->tail);

 	/* set descriptor configuration */
-	if (adapter->rx_buffer_len < IGB_RXBUFFER_1024) {
-		srrctl = ALIGN(adapter->rx_buffer_len, 64) <<
+	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
+		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
 		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
 		srrctl |= IGB_RXBUFFER_16384 >>
@@ -2370,7 +2370,7 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
 #endif
 		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
 	} else {
-		srrctl = ALIGN(adapter->rx_buffer_len, 1024) >>
+		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
 		         E1000_SRRCTL_BSIZEPKT_SHIFT;
 		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 	}
@@ -2619,7 +2619,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-	struct igb_adapter *adapter = rx_ring->q_vector->adapter;
 	struct igb_buffer *buffer_info;
 	unsigned long size;
 	unsigned int i;
@@ -2632,7 +2631,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 		if (buffer_info->dma) {
 			pci_unmap_single(rx_ring->pdev,
 			                 buffer_info->dma,
-			                 adapter->rx_buffer_len,
+			                 rx_ring->rx_buffer_len,
 			                 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
 		}
@@ -3746,6 +3745,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	u32 rx_buffer_len, i;

 	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3763,9 +3763,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	/* igb_down has a dependency on max_frame_size */
 	adapter->max_frame_size = max_frame;

-	if (netif_running(netdev))
-		igb_down(adapter);
-
 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
 	 * larger slab size.
@@ -3773,16 +3770,22 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	 */
 	if (max_frame <= IGB_RXBUFFER_1024)
-		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
+		rx_buffer_len = IGB_RXBUFFER_1024;
 	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
-		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 	else
-		adapter->rx_buffer_len = IGB_RXBUFFER_128;
+		rx_buffer_len = IGB_RXBUFFER_128;
+
+	if (netif_running(netdev))
+		igb_down(adapter);

 	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
 		 netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;

+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+
 	if (netif_running(netdev))
 		igb_up(adapter);
 	else
@@ -4828,7 +4831,7 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
 	dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
 }

-static inline u16 igb_get_hlen(struct igb_adapter *adapter,
+static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                                union e1000_adv_rx_desc *rx_desc)
 {
 	/* HW will not DMA in data larger than the given buffer, even if it
@@ -4837,8 +4840,8 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
 	 */
 	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
 	            E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-	if (hlen > adapter->rx_buffer_len)
-		hlen = adapter->rx_buffer_len;
+	if (hlen > rx_ring->rx_buffer_len)
+		hlen = rx_ring->rx_buffer_len;
 	return hlen;
 }
@@ -4888,14 +4891,14 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,

 		if (buffer_info->dma) {
 			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_buffer_len,
+					 rx_ring->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
-			if (adapter->rx_buffer_len >= IGB_RXBUFFER_1024) {
+			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
 				skb_put(skb, length);
 				goto send_up;
 			}
-			skb_put(skb, igb_get_hlen(adapter, rx_desc));
+			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
 		}

 		if (length) {
@@ -5034,7 +5037,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,

 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
-	bufsz = adapter->rx_buffer_len;
+	bufsz = rx_ring->rx_buffer_len;

 	while (cleaned_count--) {
 		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
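For readers tracing the igb_change_mtu() hunk above: a stand-alone, hedged
rendering of the buffer-size selection it now computes into a local before
fanning the result out to every ring. The constant values are the customary
ones for this driver generation and are stated here as assumptions, not taken
from the patch.

#include <stdio.h>

#define ETH_HLEN                     14	/* Ethernet header */
#define ETH_FCS_LEN                   4	/* frame checksum */
#define IGB_RXBUFFER_128            128	/* header buffer when splitting */
#define IGB_RXBUFFER_1024          1024
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522

/* mirrors the if/else chain in igb_change_mtu() */
static unsigned int pick_rx_buffer_len(unsigned int max_frame)
{
	if (max_frame <= IGB_RXBUFFER_1024)
		return IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		return MAXIMUM_ETHERNET_VLAN_SIZE;
	/* jumbo frames fall back to header split, so the fixed buffer
	 * only needs to hold packet headers */
	return IGB_RXBUFFER_128;
}

int main(void)
{
	unsigned int mtus[] = { 576, 1500, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
		unsigned int max_frame = mtus[i] + ETH_HLEN + ETH_FCS_LEN;
		printf("mtu %4u -> rx_buffer_len %u\n",
		       mtus[i], pick_rx_buffer_len(max_frame));
	}
	return 0;
}

Run as-is this prints 1024, 1522 and 128 for the three MTUs, matching the
three branches the patch rewrites from adapter->rx_buffer_len assignments to
a local that is then copied into each rx_ring.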