Patch Detail

GET: Show a patch.
PATCH: Update a patch.
PUT: Update a patch.
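Below is the raw GET request for patch 982291 and the JSON it returns. The short Python sketch that follows shows one way to fetch the same resource programmatically and read a few of the fields visible in the response; it assumes the third-party "requests" library, which is not part of Patchwork itself.

import requests

# Minimal sketch of the GET operation described above.
# BASE_URL matches the instance shown in the request/response below.
BASE_URL = "http://patchwork.ozlabs.org/api"

def get_patch(patch_id: int) -> dict:
    """Fetch a single patch and return its JSON representation."""
    response = requests.get(f"{BASE_URL}/patches/{patch_id}/")
    response.raise_for_status()
    return response.json()

patch = get_patch(982291)
print(patch["name"])                # "[v8,06/11] igc: Add transmit and receive fastpath ..."
print(patch["state"])               # "accepted"
print(patch["submitter"]["email"])  # "sasha.neftin@intel.com"
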
GET /api/patches/982291/?format=api
{ "id": 982291, "url": "http://patchwork.ozlabs.org/api/patches/982291/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20181011071722.1799-1-sasha.neftin@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20181011071722.1799-1-sasha.neftin@intel.com>", "list_archive_url": null, "date": "2018-10-11T07:17:22", "name": "[v8,06/11] igc: Add transmit and receive fastpath and interrupt handlers", "commit_ref": null, "pull_url": null, "state": "accepted", "archived": false, "hash": "e706c93f9557a46b3a022beb327faa6b54bf7701", "submitter": { "id": 69860, "url": "http://patchwork.ozlabs.org/api/people/69860/?format=api", "name": "Sasha Neftin", "email": "sasha.neftin@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20181011071722.1799-1-sasha.neftin@intel.com/mbox/", "series": [ { "id": 70170, "url": "http://patchwork.ozlabs.org/api/series/70170/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=70170", "date": "2018-10-11T07:17:13", "name": "[v8,01/11] igc: Add skeletal frame for Intel(R) 2.5G Ethernet Controller support.", "version": 8, "mbox": "http://patchwork.ozlabs.org/series/70170/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/982291/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/982291/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.133; helo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 42W2Ng0k0Dz9sj2\n\tfor <incoming@patchwork.ozlabs.org>;\n\tThu, 11 Oct 2018 18:17:39 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 6FF60880D2;\n\tThu, 11 Oct 2018 07:17:37 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id hL-tYyiLmIvK; Thu, 11 Oct 2018 07:17:31 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id B14B0880E2;\n\tThu, 11 Oct 2018 07:17:28 +0000 (UTC)", "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 3BFCE1C1507\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 11 Oct 2018 07:17:28 +0000 (UTC)", "from localhost (localhost 
[127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 2F243880C7\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 11 Oct 2018 07:17:28 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id A3JRCtJU3GWi for <intel-wired-lan@lists.osuosl.org>;\n\tThu, 11 Oct 2018 07:17:25 +0000 (UTC)", "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id D7C67880C4\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 11 Oct 2018 07:17:25 +0000 (UTC)", "from orsmga004.jf.intel.com ([10.7.209.38])\n\tby fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t11 Oct 2018 00:17:25 -0700", "from ccdlinuxdev08.iil.intel.com ([143.185.161.150])\n\tby orsmga004.jf.intel.com with ESMTP; 11 Oct 2018 00:17:23 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.54,367,1534834800\"; d=\"scan'208\";a=\"240395348\"", "From": "Sasha Neftin <sasha.neftin@intel.com>", "To": "sasha.neftin@intel.com,\n\tintel-wired-lan@lists.osuosl.org", "Date": "Thu, 11 Oct 2018 10:17:22 +0300", "Message-Id": "<20181011071722.1799-1-sasha.neftin@intel.com>", "X-Mailer": "git-send-email 2.11.0", "Subject": "[Intel-wired-lan] [PATCH v8 06/11] igc: Add transmit and receive\n\tfastpath and interrupt handlers", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "MIME-Version": "1.0", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "This patch adds support for allocating, configuring, and freeing Tx/Rx ring\nresources. 
With these changes in place the descriptor queues are in a\nstate where they are ready to transmit or receive if provided buffers.\n\nThis also adds the transmit and receive fastpath and interrupt handlers.\nWith this code in place the network device is now able to send and receive\nframes over the network interface using a single queue.\n\nSasha Neftin (v2):\nremoved obsolete code\nrefactor transmit and receive methods\n\nSasha Neftin (v3):\nremoved unused mac type\ncode optimization, remome switch statement where it is not necessary\nresolve conflicts\n\nSasha Neftin (v4):\naddress comments\nfix comments\nremove obsolete Helbert's patch\nfix xmas tree layout\nremove obsolete IGC_SET_FLAG definition\nre-work PAGE_SIZE definition\nfix code indentation\nre-work igc_tx_olinfo_status method\nreplace e1000_ prefix with igc_ prefix\nremove unused descriptors definition\n\nSasha Neftin (v5):\ncode clean\nremove obsolete flash_address reference\nremove unused defines\n\nSasha Neftin (v6):\nremove duplication of define MAX_Q_VECTORS\nminor cosmetic changes\n\nSasha Neftin (v7):\nno changes\n\nSasha Neftin (v8):\nfix whitespaces in comments\nreplace space with tab\nremove unneeded forward declarations\n\nSigned-off-by: Sasha Neftin <sasha.neftin@intel.com>\n---\n drivers/net/ethernet/intel/igc/igc.h | 66 ++\n drivers/net/ethernet/intel/igc/igc_base.h | 15 +\n drivers/net/ethernet/intel/igc/igc_defines.h | 45 ++\n drivers/net/ethernet/intel/igc/igc_main.c | 1123 +++++++++++++++++++++++++-\n 4 files changed, 1205 insertions(+), 44 deletions(-)", "diff": "diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h\nindex 7bb19328b899..88ee451e36fd 100644\n--- a/drivers/net/ethernet/intel/igc/igc.h\n+++ b/drivers/net/ethernet/intel/igc/igc.h\n@@ -32,13 +32,31 @@ extern char igc_driver_version[];\n #define IGC_START_ITR\t\t\t648 /* ~6000 ints/sec */\n #define IGC_FLAG_HAS_MSI\t\tBIT(0)\n #define IGC_FLAG_QUEUE_PAIRS\t\tBIT(4)\n+#define IGC_FLAG_NEED_LINK_UPDATE\tBIT(9)\n #define IGC_FLAG_HAS_MSIX\t\tBIT(13)\n+#define IGC_FLAG_VLAN_PROMISC\t\tBIT(15)\n \n #define IGC_START_ITR\t\t\t648 /* ~6000 ints/sec */\n #define IGC_4K_ITR\t\t\t980\n #define IGC_20K_ITR\t\t\t196\n #define IGC_70K_ITR\t\t\t56\n \n+#define IGC_DEFAULT_ITR\t\t3 /* dynamic */\n+#define IGC_MAX_ITR_USECS\t10000\n+#define IGC_MIN_ITR_USECS\t10\n+#define NON_Q_VECTORS\t\t1\n+#define MAX_MSIX_ENTRIES\t10\n+\n+/* TX/RX descriptor defines */\n+#define IGC_DEFAULT_TXD\t\t256\n+#define IGC_DEFAULT_TX_WORK\t128\n+#define IGC_MIN_TXD\t\t80\n+#define IGC_MAX_TXD\t\t4096\n+\n+#define IGC_DEFAULT_RXD\t\t256\n+#define IGC_MIN_RXD\t\t80\n+#define IGC_MAX_RXD\t\t4096\n+\n /* Transmit and receive queues */\n #define IGC_MAX_RX_QUEUES\t\t4\n #define IGC_MAX_TX_QUEUES\t\t4\n@@ -85,6 +103,16 @@ extern char igc_driver_version[];\n #define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)\n #endif\n \n+/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/\n+#define IGC_RX_BUFFER_WRITE\t16 /* Must be power of 2 */\n+\n+/* igc_test_staterr - tests bits within Rx descriptor status and error fields */\n+static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,\n+\t\t\t\t const u32 stat_err_bits)\n+{\n+\treturn rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);\n+}\n+\n enum igc_state_t {\n \t__IGC_TESTING,\n \t__IGC_RESETTING,\n@@ -92,6 +120,27 @@ enum igc_state_t {\n \t__IGC_PTP_TX_IN_PROGRESS,\n };\n \n+enum igc_tx_flags {\n+\t/* cmd_type flags */\n+\tIGC_TX_FLAGS_VLAN\t= 0x01,\n+\tIGC_TX_FLAGS_TSO\t= 0x02,\n+\tIGC_TX_FLAGS_TSTAMP\t= 0x04,\n+\n+\t/* olinfo flags */\n+\tIGC_TX_FLAGS_IPV4\t= 0x10,\n+\tIGC_TX_FLAGS_CSUM\t= 0x20,\n+};\n+\n+/* The largest size we can write to the descriptor is 65535. In order to\n+ * maintain a power of two alignment we have to limit ourselves to 32K.\n+ */\n+#define IGC_MAX_TXD_PWR\t\t15\n+#define IGC_MAX_DATA_PER_TXD\tBIT(IGC_MAX_TXD_PWR)\n+\n+/* Tx Descriptors needed, worst case */\n+#define TXD_USE_COUNT(S)\tDIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)\n+#define DESC_NEEDED\t(MAX_SKB_FRAGS + 4)\n+\n /* wrapper around a pointer to a socket buffer,\n * so a DMA handle can be stored along with the buffer\n */\n@@ -123,6 +172,7 @@ struct igc_tx_queue_stats {\n \tu64 packets;\n \tu64 bytes;\n \tu64 restart_queue;\n+\tu64 restart_queue2;\n };\n \n struct igc_rx_queue_stats {\n@@ -181,11 +231,14 @@ struct igc_ring {\n \t\t/* TX */\n \t\tstruct {\n \t\t\tstruct igc_tx_queue_stats tx_stats;\n+\t\t\tstruct u64_stats_sync tx_syncp;\n+\t\t\tstruct u64_stats_sync tx_syncp2;\n \t\t};\n \t\t/* RX */\n \t\tstruct {\n \t\t\tstruct igc_rx_queue_stats rx_stats;\n \t\t\tstruct igc_rx_packet_stats pkt_stats;\n+\t\t\tstruct u64_stats_sync rx_syncp;\n \t\t\tstruct sk_buff *skb;\n \t\t};\n \t};\n@@ -258,11 +311,17 @@ struct igc_adapter {\n \tstruct work_struct watchdog_task;\n \tstruct work_struct dma_err_task;\n \n+\tu8 tx_timeout_factor;\n+\n \tint msg_enable;\n \tu32 max_frame_size;\n+\tu32 min_frame_size;\n \n \t/* OS defined structs */\n \tstruct pci_dev *pdev;\n+\t/* lock for statistics */\n+\tspinlock_t stats64_lock;\n+\tstruct rtnl_link_stats64 stats64;\n \n \t/* structs defined in igc_hw.h */\n \tstruct igc_hw hw;\n@@ -275,8 +334,13 @@ struct igc_adapter {\n \tu16 tx_ring_count;\n \tu16 rx_ring_count;\n \n+\tu32 *shadow_vfta;\n+\n \tu32 rss_queues;\n \n+\t/* lock for RX network flow classification filter */\n+\tspinlock_t nfc_lock;\n+\n \tstruct igc_mac_addr *mac_table;\n };\n \n@@ -332,6 +396,8 @@ static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)\n \n #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))\n \n+#define IGC_TXD_DCMD\t(IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)\n+\n #define IGC_RX_DESC(R, i) \\\n \t(&(((union igc_adv_rx_desc *)((R)->desc))[i]))\n #define IGC_TX_DESC(R, i) \\\ndiff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h\nindex 4bdb4ecf3bc8..3078a18f70a9 100644\n--- a/drivers/net/ethernet/intel/igc/igc_base.h\n+++ b/drivers/net/ethernet/intel/igc/igc_base.h\n@@ -21,6 +21,18 @@ union igc_adv_tx_desc {\n \t} wb;\n };\n \n+/* Adv Transmit Descriptor Config Masks */\n+#define IGC_ADVTXD_MAC_TSTAMP\t0x00080000 /* IEEE1588 Timestamp packet */\n+#define IGC_ADVTXD_DTYP_CTXT\t0x00200000 /* Advanced Context Descriptor */\n+#define IGC_ADVTXD_DTYP_DATA\t0x00300000 /* Advanced Data Descriptor */\n+#define IGC_ADVTXD_DCMD_EOP\t0x01000000 /* End of Packet */\n+#define IGC_ADVTXD_DCMD_IFCS\t0x02000000 /* Insert FCS (Ethernet 
CRC) */\n+#define IGC_ADVTXD_DCMD_RS\t0x08000000 /* Report Status */\n+#define IGC_ADVTXD_DCMD_DEXT\t0x20000000 /* Descriptor extension (1=Adv) */\n+#define IGC_ADVTXD_DCMD_VLE\t0x40000000 /* VLAN pkt enable */\n+#define IGC_ADVTXD_DCMD_TSE\t0x80000000 /* TCP Seg enable */\n+#define IGC_ADVTXD_PAYLEN_SHIFT\t14 /* Adv desc PAYLEN shift */\n+\n struct igc_adv_data_desc {\n \t__le64 buffer_addr; /* Address of the descriptor's data buffer */\n \tunion {\n@@ -75,6 +87,9 @@ union igc_adv_rx_desc {\n \t} wb; /* writeback */\n };\n \n+/* Adv Transmit Descriptor Config Masks */\n+#define IGC_ADVTXD_PAYLEN_SHIFT\t14 /* Adv desc PAYLEN shift */\n+\n /* Additional Transmit Descriptor Control definitions */\n #define IGC_TXDCTL_QUEUE_ENABLE\t0x02000000 /* Ena specific Tx Queue */\n \ndiff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h\nindex dbc30dead461..c8a321358cf6 100644\n--- a/drivers/net/ethernet/intel/igc/igc_defines.h\n+++ b/drivers/net/ethernet/intel/igc/igc_defines.h\n@@ -84,6 +84,29 @@\n #define IGC_GPIE_EIAME\t\t0x40000000\n #define IGC_GPIE_PBA\t\t0x80000000\n \n+/* Transmit Descriptor bit definitions */\n+#define IGC_TXD_DTYP_D\t\t0x00100000 /* Data Descriptor */\n+#define IGC_TXD_DTYP_C\t\t0x00000000 /* Context Descriptor */\n+#define IGC_TXD_POPTS_IXSM\t0x01 /* Insert IP checksum */\n+#define IGC_TXD_POPTS_TXSM\t0x02 /* Insert TCP/UDP checksum */\n+#define IGC_TXD_CMD_EOP\t\t0x01000000 /* End of Packet */\n+#define IGC_TXD_CMD_IFCS\t0x02000000 /* Insert FCS (Ethernet CRC) */\n+#define IGC_TXD_CMD_IC\t\t0x04000000 /* Insert Checksum */\n+#define IGC_TXD_CMD_RS\t\t0x08000000 /* Report Status */\n+#define IGC_TXD_CMD_RPS\t\t0x10000000 /* Report Packet Sent */\n+#define IGC_TXD_CMD_DEXT\t0x20000000 /* Desc extension (0 = legacy) */\n+#define IGC_TXD_CMD_VLE\t\t0x40000000 /* Add VLAN tag */\n+#define IGC_TXD_CMD_IDE\t\t0x80000000 /* Enable Tidv register */\n+#define IGC_TXD_STAT_DD\t\t0x00000001 /* Descriptor Done */\n+#define IGC_TXD_STAT_EC\t\t0x00000002 /* Excess Collisions */\n+#define IGC_TXD_STAT_LC\t\t0x00000004 /* Late Collisions */\n+#define IGC_TXD_STAT_TU\t\t0x00000008 /* Transmit underrun */\n+#define IGC_TXD_CMD_TCP\t\t0x01000000 /* TCP packet */\n+#define IGC_TXD_CMD_IP\t\t0x02000000 /* IP packet */\n+#define IGC_TXD_CMD_TSE\t\t0x04000000 /* TCP Seg enable */\n+#define IGC_TXD_STAT_TC\t\t0x00000004 /* Tx Underrun */\n+#define IGC_TXD_EXTCMD_TSTAMP\t0x00000010 /* IEEE1588 Timestamp packet */\n+\n /* Transmit Control */\n #define IGC_TCTL_EN\t\t0x00000002 /* enable Tx */\n #define IGC_TCTL_PSP\t\t0x00000008 /* pad short packets */\n@@ -111,6 +134,25 @@\n #define IGC_RCTL_RDMTS_HALF\t0x00000000 /* Rx desc min thresh size */\n #define IGC_RCTL_BAM\t\t0x00008000 /* broadcast enable */\n \n+/* Receive Descriptor bit definitions */\n+#define IGC_RXD_STAT_EOP\t0x02 /* End of Packet */\n+\n+#define IGC_RXDEXT_STATERR_CE\t\t0x01000000\n+#define IGC_RXDEXT_STATERR_SE\t\t0x02000000\n+#define IGC_RXDEXT_STATERR_SEQ\t\t0x04000000\n+#define IGC_RXDEXT_STATERR_CXE\t\t0x10000000\n+#define IGC_RXDEXT_STATERR_TCPE\t\t0x20000000\n+#define IGC_RXDEXT_STATERR_IPE\t\t0x40000000\n+#define IGC_RXDEXT_STATERR_RXE\t\t0x80000000\n+\n+/* Same mask, but for extended and packet split descriptors */\n+#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \\\n+\tIGC_RXDEXT_STATERR_CE |\t\\\n+\tIGC_RXDEXT_STATERR_SE |\t\\\n+\tIGC_RXDEXT_STATERR_SEQ |\t\\\n+\tIGC_RXDEXT_STATERR_CXE |\t\\\n+\tIGC_RXDEXT_STATERR_RXE)\n+\n /* Header split receive */\n #define 
IGC_RFCTL_IPV6_EX_DIS\t0x00010000\n #define IGC_RFCTL_LEF\t\t0x00040000\n@@ -123,6 +165,9 @@\n #define IGC_RCTL_PMCF\t\t0x00800000 /* pass MAC control frames */\n #define IGC_RCTL_SECRC\t\t0x04000000 /* Strip Ethernet CRC */\n \n+#define I225_RXPBSIZE_DEFAULT\t0x000000A2 /* RXPBSIZE default */\n+#define I225_TXPBSIZE_DEFAULT\t0x04000014 /* TXPBSIZE default */\n+\n #define IGC_N0_QUEUE -1\n \n #endif /* _IGC_DEFINES_H_ */\ndiff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c\nindex 373ccea86fb0..db7b6820e0f0 100644\n--- a/drivers/net/ethernet/intel/igc/igc_main.c\n+++ b/drivers/net/ethernet/intel/igc/igc_main.c\n@@ -52,6 +52,8 @@ static void igc_free_q_vectors(struct igc_adapter *adapter);\n static void igc_irq_disable(struct igc_adapter *adapter);\n static void igc_irq_enable(struct igc_adapter *adapter);\n static void igc_configure_msix(struct igc_adapter *adapter);\n+static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,\n+\t\t\t\t struct igc_rx_buffer *bi);\n \n enum latency_range {\n \tlowest_latency = 0,\n@@ -219,6 +221,19 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)\n }\n \n /**\n+ * igc_clean_all_tx_rings - Free Tx Buffers for all queues\n+ * @adapter: board private structure\n+ */\n+static void igc_clean_all_tx_rings(struct igc_adapter *adapter)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < adapter->num_tx_queues; i++)\n+\t\tif (adapter->tx_ring[i])\n+\t\t\tigc_clean_tx_ring(adapter->tx_ring[i]);\n+}\n+\n+/**\n * igc_setup_tx_resources - allocate Tx resources (Descriptors)\n * @tx_ring: tx descriptor ring (for a specific queue) to setup\n *\n@@ -326,6 +341,19 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)\n }\n \n /**\n+ * igc_clean_all_rx_rings - Free Rx Buffers for all queues\n+ * @adapter: board private structure\n+ */\n+static void igc_clean_all_rx_rings(struct igc_adapter *adapter)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < adapter->num_rx_queues; i++)\n+\t\tif (adapter->rx_ring[i])\n+\t\t\tigc_clean_rx_ring(adapter->rx_ring[i]);\n+}\n+\n+/**\n * igc_free_rx_resources - Free Rx Resources\n * @rx_ring: ring to clean the resources from\n *\n@@ -666,60 +694,613 @@ static int igc_set_mac(struct net_device *netdev, void *p)\n \treturn 0;\n }\n \n+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)\n+{\n+}\n+\n+static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)\n+{\n+\tstruct net_device *netdev = tx_ring->netdev;\n+\n+\tnetif_stop_subqueue(netdev, tx_ring->queue_index);\n+\n+\t/* memory barriier comment */\n+\tsmp_mb();\n+\n+\t/* We need to check again in a case another CPU has just\n+\t * made room available.\n+\t */\n+\tif (igc_desc_unused(tx_ring) < size)\n+\t\treturn -EBUSY;\n+\n+\t/* A reprieve! 
*/\n+\tnetif_wake_subqueue(netdev, tx_ring->queue_index);\n+\n+\tu64_stats_update_begin(&tx_ring->tx_syncp2);\n+\ttx_ring->tx_stats.restart_queue2++;\n+\tu64_stats_update_end(&tx_ring->tx_syncp2);\n+\n+\treturn 0;\n+}\n+\n+static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)\n+{\n+\tif (igc_desc_unused(tx_ring) >= size)\n+\t\treturn 0;\n+\treturn __igc_maybe_stop_tx(tx_ring, size);\n+}\n+\n+static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)\n+{\n+\t/* set type for advanced descriptor with frame checksum insertion */\n+\tu32 cmd_type = IGC_ADVTXD_DTYP_DATA |\n+\t\t IGC_ADVTXD_DCMD_DEXT |\n+\t\t IGC_ADVTXD_DCMD_IFCS;\n+\n+\treturn cmd_type;\n+}\n+\n+static void igc_tx_olinfo_status(struct igc_ring *tx_ring,\n+\t\t\t\t union igc_adv_tx_desc *tx_desc,\n+\t\t\t\t u32 tx_flags, unsigned int paylen)\n+{\n+\tu32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;\n+\n+\t/* insert L4 checksum */\n+\tolinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *\n+\t\t\t ((IGC_TXD_POPTS_TXSM << 8) /\n+\t\t\t IGC_TX_FLAGS_CSUM);\n+\n+\t/* insert IPv4 checksum */\n+\tolinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *\n+\t\t\t (((IGC_TXD_POPTS_IXSM << 8)) /\n+\t\t\t IGC_TX_FLAGS_IPV4);\n+\n+\ttx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);\n+}\n+\n+static int igc_tx_map(struct igc_ring *tx_ring,\n+\t\t struct igc_tx_buffer *first,\n+\t\t const u8 hdr_len)\n+{\n+\tstruct sk_buff *skb = first->skb;\n+\tstruct igc_tx_buffer *tx_buffer;\n+\tunion igc_adv_tx_desc *tx_desc;\n+\tu32 tx_flags = first->tx_flags;\n+\tstruct skb_frag_struct *frag;\n+\tu16 i = tx_ring->next_to_use;\n+\tunsigned int data_len, size;\n+\tdma_addr_t dma;\n+\tu32 cmd_type = igc_tx_cmd_type(skb, tx_flags);\n+\n+\ttx_desc = IGC_TX_DESC(tx_ring, i);\n+\n+\tigc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);\n+\n+\tsize = skb_headlen(skb);\n+\tdata_len = skb->data_len;\n+\n+\tdma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);\n+\n+\ttx_buffer = first;\n+\n+\tfor (frag = &skb_shinfo(skb)->frags[0];; frag++) {\n+\t\tif (dma_mapping_error(tx_ring->dev, dma))\n+\t\t\tgoto dma_error;\n+\n+\t\t/* record length, and DMA address */\n+\t\tdma_unmap_len_set(tx_buffer, len, size);\n+\t\tdma_unmap_addr_set(tx_buffer, dma, dma);\n+\n+\t\ttx_desc->read.buffer_addr = cpu_to_le64(dma);\n+\n+\t\twhile (unlikely(size > IGC_MAX_DATA_PER_TXD)) {\n+\t\t\ttx_desc->read.cmd_type_len =\n+\t\t\t\tcpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);\n+\n+\t\t\ti++;\n+\t\t\ttx_desc++;\n+\t\t\tif (i == tx_ring->count) {\n+\t\t\t\ttx_desc = IGC_TX_DESC(tx_ring, 0);\n+\t\t\t\ti = 0;\n+\t\t\t}\n+\t\t\ttx_desc->read.olinfo_status = 0;\n+\n+\t\t\tdma += IGC_MAX_DATA_PER_TXD;\n+\t\t\tsize -= IGC_MAX_DATA_PER_TXD;\n+\n+\t\t\ttx_desc->read.buffer_addr = cpu_to_le64(dma);\n+\t\t}\n+\n+\t\tif (likely(!data_len))\n+\t\t\tbreak;\n+\n+\t\ttx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);\n+\n+\t\ti++;\n+\t\ttx_desc++;\n+\t\tif (i == tx_ring->count) {\n+\t\t\ttx_desc = IGC_TX_DESC(tx_ring, 0);\n+\t\t\ti = 0;\n+\t\t}\n+\t\ttx_desc->read.olinfo_status = 0;\n+\n+\t\tsize = skb_frag_size(frag);\n+\t\tdata_len -= size;\n+\n+\t\tdma = skb_frag_dma_map(tx_ring->dev, frag, 0,\n+\t\t\t\t size, DMA_TO_DEVICE);\n+\n+\t\ttx_buffer = &tx_ring->tx_buffer_info[i];\n+\t}\n+\n+\t/* write last descriptor with RS and EOP bits */\n+\tcmd_type |= size | IGC_TXD_DCMD;\n+\ttx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);\n+\n+\tnetdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);\n+\n+\t/* set the timestamp 
*/\n+\tfirst->time_stamp = jiffies;\n+\n+\t/* Force memory writes to complete before letting h/w know there\n+\t * are new descriptors to fetch. (Only applicable for weak-ordered\n+\t * memory model archs, such as IA-64).\n+\t *\n+\t * We also need this memory barrier to make certain all of the\n+\t * status bits have been updated before next_to_watch is written.\n+\t */\n+\twmb();\n+\n+\t/* set next_to_watch value indicating a packet is present */\n+\tfirst->next_to_watch = tx_desc;\n+\n+\ti++;\n+\tif (i == tx_ring->count)\n+\t\ti = 0;\n+\n+\ttx_ring->next_to_use = i;\n+\n+\t/* Make sure there is space in the ring for the next send. */\n+\tigc_maybe_stop_tx(tx_ring, DESC_NEEDED);\n+\n+\tif (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {\n+\t\twritel(i, tx_ring->tail);\n+\n+\t\t/* we need this if more than one processor can write to our tail\n+\t\t * at a time, it synchronizes IO on IA64/Altix systems\n+\t\t */\n+\t\tmmiowb();\n+\t}\n+\n+\treturn 0;\n+dma_error:\n+\tdev_err(tx_ring->dev, \"TX DMA map failed\\n\");\n+\ttx_buffer = &tx_ring->tx_buffer_info[i];\n+\n+\t/* clear dma mappings for failed tx_buffer_info map */\n+\twhile (tx_buffer != first) {\n+\t\tif (dma_unmap_len(tx_buffer, len))\n+\t\t\tdma_unmap_page(tx_ring->dev,\n+\t\t\t\t dma_unmap_addr(tx_buffer, dma),\n+\t\t\t\t dma_unmap_len(tx_buffer, len),\n+\t\t\t\t DMA_TO_DEVICE);\n+\t\tdma_unmap_len_set(tx_buffer, len, 0);\n+\n+\t\tif (i-- == 0)\n+\t\t\ti += tx_ring->count;\n+\t\ttx_buffer = &tx_ring->tx_buffer_info[i];\n+\t}\n+\n+\tif (dma_unmap_len(tx_buffer, len))\n+\t\tdma_unmap_single(tx_ring->dev,\n+\t\t\t\t dma_unmap_addr(tx_buffer, dma),\n+\t\t\t\t dma_unmap_len(tx_buffer, len),\n+\t\t\t\t DMA_TO_DEVICE);\n+\tdma_unmap_len_set(tx_buffer, len, 0);\n+\n+\tdev_kfree_skb_any(tx_buffer->skb);\n+\ttx_buffer->skb = NULL;\n+\n+\ttx_ring->next_to_use = i;\n+\n+\treturn -1;\n+}\n+\n+static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,\n+\t\t\t\t struct igc_ring *tx_ring)\n+{\n+\tu16 count = TXD_USE_COUNT(skb_headlen(skb));\n+\t__be16 protocol = vlan_get_protocol(skb);\n+\tstruct igc_tx_buffer *first;\n+\tu32 tx_flags = 0;\n+\tunsigned short f;\n+\tu8 hdr_len = 0;\n+\n+\t/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,\n+\t *\t+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,\n+\t *\t+ 2 desc gap to keep tail from touching head,\n+\t *\t+ 1 desc for context descriptor,\n+\t * otherwise try next time\n+\t */\n+\tfor (f = 0; f < skb_shinfo(skb)->nr_frags; f++)\n+\t\tcount += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);\n+\n+\tif (igc_maybe_stop_tx(tx_ring, count + 3)) {\n+\t\t/* this is a hard error */\n+\t\treturn NETDEV_TX_BUSY;\n+\t}\n+\n+\t/* record the location of the first descriptor for this packet */\n+\tfirst = &tx_ring->tx_buffer_info[tx_ring->next_to_use];\n+\tfirst->skb = skb;\n+\tfirst->bytecount = skb->len;\n+\tfirst->gso_segs = 1;\n+\n+\tskb_tx_timestamp(skb);\n+\n+\t/* record initial flags and protocol */\n+\tfirst->tx_flags = tx_flags;\n+\tfirst->protocol = protocol;\n+\n+\tigc_tx_csum(tx_ring, first);\n+\n+\tigc_tx_map(tx_ring, first, hdr_len);\n+\n+\treturn NETDEV_TX_OK;\n+}\n+\n+static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,\n+\t\t\t\t\t\t struct sk_buff *skb)\n+{\n+\tunsigned int r_idx = skb->queue_mapping;\n+\n+\tif (r_idx >= adapter->num_tx_queues)\n+\t\tr_idx = r_idx % adapter->num_tx_queues;\n+\n+\treturn adapter->tx_ring[r_idx];\n+}\n+\n static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,\n \t\t\t\t struct net_device *netdev)\n 
{\n-\tdev_kfree_skb_any(skb);\n-\treturn NETDEV_TX_OK;\n+\tstruct igc_adapter *adapter = netdev_priv(netdev);\n+\n+\t/* The minimum packet size with TCTL.PSP set is 17 so pad the skb\n+\t * in order to meet this minimum size requirement.\n+\t */\n+\tif (skb->len < 17) {\n+\t\tif (skb_padto(skb, 17))\n+\t\t\treturn NETDEV_TX_OK;\n+\t\tskb->len = 17;\n+\t}\n+\n+\treturn igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));\n }\n \n-static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)\n+static inline void igc_rx_hash(struct igc_ring *ring,\n+\t\t\t union igc_adv_rx_desc *rx_desc,\n+\t\t\t struct sk_buff *skb)\n {\n-\treturn ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;\n+\tif (ring->netdev->features & NETIF_F_RXHASH)\n+\t\tskb_set_hash(skb,\n+\t\t\t le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),\n+\t\t\t PKT_HASH_TYPE_L3);\n }\n \n-static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,\n-\t\t\t\t struct igc_rx_buffer *bi)\n+/**\n+ * igc_process_skb_fields - Populate skb header fields from Rx descriptor\n+ * @rx_ring: rx descriptor ring packet is being transacted on\n+ * @rx_desc: pointer to the EOP Rx descriptor\n+ * @skb: pointer to current skb being populated\n+ *\n+ * This function checks the ring, descriptor, and packet information in\n+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and\n+ * other fields within the skb.\n+ */\n+static void igc_process_skb_fields(struct igc_ring *rx_ring,\n+\t\t\t\t union igc_adv_rx_desc *rx_desc,\n+\t\t\t\t struct sk_buff *skb)\n {\n-\tstruct page *page = bi->page;\n-\tdma_addr_t dma;\n+\tigc_rx_hash(rx_ring, rx_desc, skb);\n \n-\t/* since we are recycling buffers we should seldom need to alloc */\n-\tif (likely(page))\n-\t\treturn true;\n+\tskb_record_rx_queue(skb, rx_ring->queue_index);\n \n-\t/* alloc new page for storage */\n-\tpage = dev_alloc_pages(igc_rx_pg_order(rx_ring));\n-\tif (unlikely(!page)) {\n-\t\trx_ring->rx_stats.alloc_failed++;\n-\t\treturn false;\n+\tskb->protocol = eth_type_trans(skb, rx_ring->netdev);\n+}\n+\n+static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,\n+\t\t\t\t\t const unsigned int size)\n+{\n+\tstruct igc_rx_buffer *rx_buffer;\n+\n+\trx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];\n+\tprefetchw(rx_buffer->page);\n+\n+\t/* we are reusing so sync this buffer for CPU use */\n+\tdma_sync_single_range_for_cpu(rx_ring->dev,\n+\t\t\t\t rx_buffer->dma,\n+\t\t\t\t rx_buffer->page_offset,\n+\t\t\t\t size,\n+\t\t\t\t DMA_FROM_DEVICE);\n+\n+\trx_buffer->pagecnt_bias--;\n+\n+\treturn rx_buffer;\n+}\n+\n+/**\n+ * igc_add_rx_frag - Add contents of Rx buffer to sk_buff\n+ * @rx_ring: rx descriptor ring to transact packets on\n+ * @rx_buffer: buffer containing page to add\n+ * @skb: sk_buff to place the data into\n+ * @size: size of buffer to be added\n+ *\n+ * This function will add the data contained in rx_buffer->page to the skb.\n+ */\n+static void igc_add_rx_frag(struct igc_ring *rx_ring,\n+\t\t\t struct igc_rx_buffer *rx_buffer,\n+\t\t\t struct sk_buff *skb,\n+\t\t\t unsigned int size)\n+{\n+#if (PAGE_SIZE < 8192)\n+\tunsigned int truesize = igc_rx_pg_size(rx_ring) / 2;\n+\n+\tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,\n+\t\t\trx_buffer->page_offset, size, truesize);\n+\trx_buffer->page_offset ^= truesize;\n+#else\n+\tunsigned int truesize = ring_uses_build_skb(rx_ring) ?\n+\t\t\t\tSKB_DATA_ALIGN(IGC_SKB_PAD + size) :\n+\t\t\t\tSKB_DATA_ALIGN(size);\n+\tskb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 
rx_buffer->page,\n+\t\t\trx_buffer->page_offset, size, truesize);\n+\trx_buffer->page_offset += truesize;\n+#endif\n+}\n+\n+static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,\n+\t\t\t\t struct igc_rx_buffer *rx_buffer,\n+\t\t\t\t union igc_adv_rx_desc *rx_desc,\n+\t\t\t\t unsigned int size)\n+{\n+\tvoid *va = page_address(rx_buffer->page) + rx_buffer->page_offset;\n+#if (PAGE_SIZE < 8192)\n+\tunsigned int truesize = igc_rx_pg_size(rx_ring) / 2;\n+#else\n+\tunsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +\n+\t\t\t\tSKB_DATA_ALIGN(IGC_SKB_PAD + size);\n+#endif\n+\tstruct sk_buff *skb;\n+\n+\t/* prefetch first cache line of first page */\n+\tprefetch(va);\n+#if L1_CACHE_BYTES < 128\n+\tprefetch(va + L1_CACHE_BYTES);\n+#endif\n+\n+\t/* build an skb around the page buffer */\n+\tskb = build_skb(va - IGC_SKB_PAD, truesize);\n+\tif (unlikely(!skb))\n+\t\treturn NULL;\n+\n+\t/* update pointers within the skb to store the data */\n+\tskb_reserve(skb, IGC_SKB_PAD);\n+\t __skb_put(skb, size);\n+\n+\t/* update buffer offset */\n+#if (PAGE_SIZE < 8192)\n+\trx_buffer->page_offset ^= truesize;\n+#else\n+\trx_buffer->page_offset += truesize;\n+#endif\n+\n+\treturn skb;\n+}\n+\n+static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,\n+\t\t\t\t\t struct igc_rx_buffer *rx_buffer,\n+\t\t\t\t\t union igc_adv_rx_desc *rx_desc,\n+\t\t\t\t\t unsigned int size)\n+{\n+\tvoid *va = page_address(rx_buffer->page) + rx_buffer->page_offset;\n+#if (PAGE_SIZE < 8192)\n+\tunsigned int truesize = igc_rx_pg_size(rx_ring) / 2;\n+#else\n+\tunsigned int truesize = SKB_DATA_ALIGN(size);\n+#endif\n+\tunsigned int headlen;\n+\tstruct sk_buff *skb;\n+\n+\t/* prefetch first cache line of first page */\n+\tprefetch(va);\n+#if L1_CACHE_BYTES < 128\n+\tprefetch(va + L1_CACHE_BYTES);\n+#endif\n+\n+\t/* allocate a skb to store the frags */\n+\tskb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);\n+\tif (unlikely(!skb))\n+\t\treturn NULL;\n+\n+\t/* Determine available headroom for copy */\n+\theadlen = size;\n+\tif (headlen > IGC_RX_HDR_LEN)\n+\t\theadlen = eth_get_headlen(va, IGC_RX_HDR_LEN);\n+\n+\t/* align pull length to size of long to optimize memcpy performance */\n+\tmemcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));\n+\n+\t/* update all of the pointers */\n+\tsize -= headlen;\n+\tif (size) {\n+\t\tskb_add_rx_frag(skb, 0, rx_buffer->page,\n+\t\t\t\t(va + headlen) - page_address(rx_buffer->page),\n+\t\t\t\tsize, truesize);\n+#if (PAGE_SIZE < 8192)\n+\trx_buffer->page_offset ^= truesize;\n+#else\n+\trx_buffer->page_offset += truesize;\n+#endif\n+\t} else {\n+\t\trx_buffer->pagecnt_bias++;\n \t}\n \n-\t/* map page for use */\n-\tdma = dma_map_page_attrs(rx_ring->dev, page, 0,\n-\t\t\t\t igc_rx_pg_size(rx_ring),\n-\t\t\t\t DMA_FROM_DEVICE,\n-\t\t\t\t IGC_RX_DMA_ATTR);\n+\treturn skb;\n+}\n \n-\t/* if mapping failed free memory back to system since\n-\t * there isn't much point in holding memory we can't use\n+/**\n+ * igc_reuse_rx_page - page flip buffer and store it back on the ring\n+ * @rx_ring: rx descriptor ring to store buffers on\n+ * @old_buff: donor buffer to have page reused\n+ *\n+ * Synchronizes page for reuse by the adapter\n+ */\n+static void igc_reuse_rx_page(struct igc_ring *rx_ring,\n+\t\t\t struct igc_rx_buffer *old_buff)\n+{\n+\tu16 nta = rx_ring->next_to_alloc;\n+\tstruct igc_rx_buffer *new_buff;\n+\n+\tnew_buff = &rx_ring->rx_buffer_info[nta];\n+\n+\t/* update, and store next to alloc */\n+\tnta++;\n+\trx_ring->next_to_alloc = (nta < 
rx_ring->count) ? nta : 0;\n+\n+\t/* Transfer page from old buffer to new buffer.\n+\t * Move each member individually to avoid possible store\n+\t * forwarding stalls.\n \t */\n-\tif (dma_mapping_error(rx_ring->dev, dma)) {\n-\t\t__free_page(page);\n+\tnew_buff->dma\t\t= old_buff->dma;\n+\tnew_buff->page\t\t= old_buff->page;\n+\tnew_buff->page_offset\t= old_buff->page_offset;\n+\tnew_buff->pagecnt_bias\t= old_buff->pagecnt_bias;\n+}\n \n-\t\trx_ring->rx_stats.alloc_failed++;\n+static inline bool igc_page_is_reserved(struct page *page)\n+{\n+\treturn (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);\n+}\n+\n+static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)\n+{\n+\tunsigned int pagecnt_bias = rx_buffer->pagecnt_bias;\n+\tstruct page *page = rx_buffer->page;\n+\n+\t/* avoid re-using remote pages */\n+\tif (unlikely(igc_page_is_reserved(page)))\n+\t\treturn false;\n+\n+#if (PAGE_SIZE < 8192)\n+\t/* if we are only owner of page we can reuse it */\n+\tif (unlikely((page_ref_count(page) - pagecnt_bias) > 1))\n \t\treturn false;\n+#else\n+#define IGC_LAST_OFFSET \\\n+\t(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)\n+\n+\tif (rx_buffer->page_offset > IGC_LAST_OFFSET)\n+\t\treturn false;\n+#endif\n+\n+\t/* If we have drained the page fragment pool we need to update\n+\t * the pagecnt_bias and page count so that we fully restock the\n+\t * number of references the driver holds.\n+\t */\n+\tif (unlikely(!pagecnt_bias)) {\n+\t\tpage_ref_add(page, USHRT_MAX);\n+\t\trx_buffer->pagecnt_bias = USHRT_MAX;\n \t}\n \n-\tbi->dma = dma;\n-\tbi->page = page;\n-\tbi->page_offset = igc_rx_offset(rx_ring);\n-\tbi->pagecnt_bias = 1;\n+\treturn true;\n+}\n+\n+/**\n+ * igc_is_non_eop - process handling of non-EOP buffers\n+ * @rx_ring: Rx ring being processed\n+ * @rx_desc: Rx descriptor for current buffer\n+ * @skb: current socket buffer containing buffer in progress\n+ *\n+ * This function updates next to clean. If the buffer is an EOP buffer\n+ * this function exits returning false, otherwise it will place the\n+ * sk_buff in the next buffer to be chained and return true indicating\n+ * that this is in fact a non-EOP buffer.\n+ */\n+static bool igc_is_non_eop(struct igc_ring *rx_ring,\n+\t\t\t union igc_adv_rx_desc *rx_desc)\n+{\n+\tu32 ntc = rx_ring->next_to_clean + 1;\n+\n+\t/* fetch, update, and store next to clean */\n+\tntc = (ntc < rx_ring->count) ? 
ntc : 0;\n+\trx_ring->next_to_clean = ntc;\n+\n+\tprefetch(IGC_RX_DESC(rx_ring, ntc));\n+\n+\tif (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))\n+\t\treturn false;\n \n \treturn true;\n }\n \n /**\n+ * igc_cleanup_headers - Correct corrupted or empty headers\n+ * @rx_ring: rx descriptor ring packet is being transacted on\n+ * @rx_desc: pointer to the EOP Rx descriptor\n+ * @skb: pointer to current skb being fixed\n+ *\n+ * Address the case where we are pulling data in on pages only\n+ * and as such no data is present in the skb header.\n+ *\n+ * In addition if skb is not at least 60 bytes we need to pad it so that\n+ * it is large enough to qualify as a valid Ethernet frame.\n+ *\n+ * Returns true if an error was encountered and skb was freed.\n+ */\n+static bool igc_cleanup_headers(struct igc_ring *rx_ring,\n+\t\t\t\tunion igc_adv_rx_desc *rx_desc,\n+\t\t\t\tstruct sk_buff *skb)\n+{\n+\tif (unlikely((igc_test_staterr(rx_desc,\n+\t\t\t\t IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {\n+\t\tstruct net_device *netdev = rx_ring->netdev;\n+\n+\t\tif (!(netdev->features & NETIF_F_RXALL)) {\n+\t\t\tdev_kfree_skb_any(skb);\n+\t\t\treturn true;\n+\t\t}\n+\t}\n+\n+\t/* if eth_skb_pad returns an error the skb was freed */\n+\tif (eth_skb_pad(skb))\n+\t\treturn true;\n+\n+\treturn false;\n+}\n+\n+static void igc_put_rx_buffer(struct igc_ring *rx_ring,\n+\t\t\t struct igc_rx_buffer *rx_buffer)\n+{\n+\tif (igc_can_reuse_rx_page(rx_buffer)) {\n+\t\t/* hand second half of page back to the ring */\n+\t\tigc_reuse_rx_page(rx_ring, rx_buffer);\n+\t} else {\n+\t\t/* We are not reusing the buffer so unmap it and free\n+\t\t * any references we are holding to it\n+\t\t */\n+\t\tdma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,\n+\t\t\t\t igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,\n+\t\t\t\t IGC_RX_DMA_ATTR);\n+\t\t__page_frag_cache_drain(rx_buffer->page,\n+\t\t\t\t\trx_buffer->pagecnt_bias);\n+\t}\n+\n+\t/* clear contents of rx_buffer */\n+\trx_buffer->page = NULL;\n+}\n+\n+/**\n * igc_alloc_rx_buffers - Replace used receive buffers; packet split\n * @adapter: address of board private structure\n */\n@@ -788,6 +1369,314 @@ static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)\n \t}\n }\n \n+static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)\n+{\n+\tunsigned int total_bytes = 0, total_packets = 0;\n+\tstruct igc_ring *rx_ring = q_vector->rx.ring;\n+\tstruct sk_buff *skb = rx_ring->skb;\n+\tu16 cleaned_count = igc_desc_unused(rx_ring);\n+\n+\twhile (likely(total_packets < budget)) {\n+\t\tunion igc_adv_rx_desc *rx_desc;\n+\t\tstruct igc_rx_buffer *rx_buffer;\n+\t\tunsigned int size;\n+\n+\t\t/* return some buffers to hardware, one at a time is too slow */\n+\t\tif (cleaned_count >= IGC_RX_BUFFER_WRITE) {\n+\t\t\tigc_alloc_rx_buffers(rx_ring, cleaned_count);\n+\t\t\tcleaned_count = 0;\n+\t\t}\n+\n+\t\trx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);\n+\t\tsize = le16_to_cpu(rx_desc->wb.upper.length);\n+\t\tif (!size)\n+\t\t\tbreak;\n+\n+\t\t/* This memory barrier is needed to keep us from reading\n+\t\t * any other fields out of the rx_desc until we know the\n+\t\t * descriptor has been written back\n+\t\t */\n+\t\tdma_rmb();\n+\n+\t\trx_buffer = igc_get_rx_buffer(rx_ring, size);\n+\n+\t\t/* retrieve a buffer from the ring */\n+\t\tif (skb)\n+\t\t\tigc_add_rx_frag(rx_ring, rx_buffer, skb, size);\n+\t\telse if (ring_uses_build_skb(rx_ring))\n+\t\t\tskb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);\n+\t\telse\n+\t\t\tskb = igc_construct_skb(rx_ring, 
rx_buffer,\n+\t\t\t\t\t\trx_desc, size);\n+\n+\t\t/* exit if we failed to retrieve a buffer */\n+\t\tif (!skb) {\n+\t\t\trx_ring->rx_stats.alloc_failed++;\n+\t\t\trx_buffer->pagecnt_bias++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tigc_put_rx_buffer(rx_ring, rx_buffer);\n+\t\tcleaned_count++;\n+\n+\t\t/* fetch next buffer in frame if non-eop */\n+\t\tif (igc_is_non_eop(rx_ring, rx_desc))\n+\t\t\tcontinue;\n+\n+\t\t/* verify the packet layout is correct */\n+\t\tif (igc_cleanup_headers(rx_ring, rx_desc, skb)) {\n+\t\t\tskb = NULL;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* probably a little skewed due to removing CRC */\n+\t\ttotal_bytes += skb->len;\n+\n+\t\t/* populate checksum, timestamp, VLAN, and protocol */\n+\t\tigc_process_skb_fields(rx_ring, rx_desc, skb);\n+\n+\t\tnapi_gro_receive(&q_vector->napi, skb);\n+\n+\t\t/* reset skb pointer */\n+\t\tskb = NULL;\n+\n+\t\t/* update budget accounting */\n+\t\ttotal_packets++;\n+\t}\n+\n+\t/* place incomplete frames back on ring for completion */\n+\trx_ring->skb = skb;\n+\n+\tu64_stats_update_begin(&rx_ring->rx_syncp);\n+\trx_ring->rx_stats.packets += total_packets;\n+\trx_ring->rx_stats.bytes += total_bytes;\n+\tu64_stats_update_end(&rx_ring->rx_syncp);\n+\tq_vector->rx.total_packets += total_packets;\n+\tq_vector->rx.total_bytes += total_bytes;\n+\n+\tif (cleaned_count)\n+\t\tigc_alloc_rx_buffers(rx_ring, cleaned_count);\n+\n+\treturn total_packets;\n+}\n+\n+static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)\n+{\n+\treturn ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;\n+}\n+\n+static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,\n+\t\t\t\t struct igc_rx_buffer *bi)\n+{\n+\tstruct page *page = bi->page;\n+\tdma_addr_t dma;\n+\n+\t/* since we are recycling buffers we should seldom need to alloc */\n+\tif (likely(page))\n+\t\treturn true;\n+\n+\t/* alloc new page for storage */\n+\tpage = dev_alloc_pages(igc_rx_pg_order(rx_ring));\n+\tif (unlikely(!page)) {\n+\t\trx_ring->rx_stats.alloc_failed++;\n+\t\treturn false;\n+\t}\n+\n+\t/* map page for use */\n+\tdma = dma_map_page_attrs(rx_ring->dev, page, 0,\n+\t\t\t\t igc_rx_pg_size(rx_ring),\n+\t\t\t\t DMA_FROM_DEVICE,\n+\t\t\t\t IGC_RX_DMA_ATTR);\n+\n+\t/* if mapping failed free memory back to system since\n+\t * there isn't much point in holding memory we can't use\n+\t */\n+\tif (dma_mapping_error(rx_ring->dev, dma)) {\n+\t\t__free_page(page);\n+\n+\t\trx_ring->rx_stats.alloc_failed++;\n+\t\treturn false;\n+\t}\n+\n+\tbi->dma = dma;\n+\tbi->page = page;\n+\tbi->page_offset = igc_rx_offset(rx_ring);\n+\tbi->pagecnt_bias = 1;\n+\n+\treturn true;\n+}\n+\n+/**\n+ * igc_clean_tx_irq - Reclaim resources after transmit completes\n+ * @q_vector: pointer to q_vector containing needed info\n+ * @napi_budget: Used to determine if we are in netpoll\n+ *\n+ * returns true if ring is completely cleaned\n+ */\n+static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)\n+{\n+\tstruct igc_adapter *adapter = q_vector->adapter;\n+\tunsigned int total_bytes = 0, total_packets = 0;\n+\tunsigned int budget = q_vector->tx.work_limit;\n+\tstruct igc_ring *tx_ring = q_vector->tx.ring;\n+\tunsigned int i = tx_ring->next_to_clean;\n+\tstruct igc_tx_buffer *tx_buffer;\n+\tunion igc_adv_tx_desc *tx_desc;\n+\n+\tif (test_bit(__IGC_DOWN, &adapter->state))\n+\t\treturn true;\n+\n+\ttx_buffer = &tx_ring->tx_buffer_info[i];\n+\ttx_desc = IGC_TX_DESC(tx_ring, i);\n+\ti -= tx_ring->count;\n+\n+\tdo {\n+\t\tunion igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;\n+\n+\t\t/* if next_to_watch is not 
set then there is no work pending */\n+\t\tif (!eop_desc)\n+\t\t\tbreak;\n+\n+\t\t/* prevent any other reads prior to eop_desc */\n+\t\tsmp_rmb();\n+\n+\t\t/* if DD is not set pending work has not been completed */\n+\t\tif (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))\n+\t\t\tbreak;\n+\n+\t\t/* clear next_to_watch to prevent false hangs */\n+\t\ttx_buffer->next_to_watch = NULL;\n+\n+\t\t/* update the statistics for this packet */\n+\t\ttotal_bytes += tx_buffer->bytecount;\n+\t\ttotal_packets += tx_buffer->gso_segs;\n+\n+\t\t/* free the skb */\n+\t\tnapi_consume_skb(tx_buffer->skb, napi_budget);\n+\n+\t\t/* unmap skb header data */\n+\t\tdma_unmap_single(tx_ring->dev,\n+\t\t\t\t dma_unmap_addr(tx_buffer, dma),\n+\t\t\t\t dma_unmap_len(tx_buffer, len),\n+\t\t\t\t DMA_TO_DEVICE);\n+\n+\t\t/* clear tx_buffer data */\n+\t\tdma_unmap_len_set(tx_buffer, len, 0);\n+\n+\t\t/* clear last DMA location and unmap remaining buffers */\n+\t\twhile (tx_desc != eop_desc) {\n+\t\t\ttx_buffer++;\n+\t\t\ttx_desc++;\n+\t\t\ti++;\n+\t\t\tif (unlikely(!i)) {\n+\t\t\t\ti -= tx_ring->count;\n+\t\t\t\ttx_buffer = tx_ring->tx_buffer_info;\n+\t\t\t\ttx_desc = IGC_TX_DESC(tx_ring, 0);\n+\t\t\t}\n+\n+\t\t\t/* unmap any remaining paged data */\n+\t\t\tif (dma_unmap_len(tx_buffer, len)) {\n+\t\t\t\tdma_unmap_page(tx_ring->dev,\n+\t\t\t\t\t dma_unmap_addr(tx_buffer, dma),\n+\t\t\t\t\t dma_unmap_len(tx_buffer, len),\n+\t\t\t\t\t DMA_TO_DEVICE);\n+\t\t\t\tdma_unmap_len_set(tx_buffer, len, 0);\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* move us one more past the eop_desc for start of next pkt */\n+\t\ttx_buffer++;\n+\t\ttx_desc++;\n+\t\ti++;\n+\t\tif (unlikely(!i)) {\n+\t\t\ti -= tx_ring->count;\n+\t\t\ttx_buffer = tx_ring->tx_buffer_info;\n+\t\t\ttx_desc = IGC_TX_DESC(tx_ring, 0);\n+\t\t}\n+\n+\t\t/* issue prefetch for next Tx descriptor */\n+\t\tprefetch(tx_desc);\n+\n+\t\t/* update budget accounting */\n+\t\tbudget--;\n+\t} while (likely(budget));\n+\n+\tnetdev_tx_completed_queue(txring_txq(tx_ring),\n+\t\t\t\t total_packets, total_bytes);\n+\n+\ti += tx_ring->count;\n+\ttx_ring->next_to_clean = i;\n+\tu64_stats_update_begin(&tx_ring->tx_syncp);\n+\ttx_ring->tx_stats.bytes += total_bytes;\n+\ttx_ring->tx_stats.packets += total_packets;\n+\tu64_stats_update_end(&tx_ring->tx_syncp);\n+\tq_vector->tx.total_bytes += total_bytes;\n+\tq_vector->tx.total_packets += total_packets;\n+\n+\tif (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {\n+\t\tstruct igc_hw *hw = &adapter->hw;\n+\n+\t\t/* Detect a transmit hang in hardware, this serializes the\n+\t\t * check with the clearing of time_stamp and movement of i\n+\t\t */\n+\t\tclear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);\n+\t\tif (tx_buffer->next_to_watch &&\n+\t\t time_after(jiffies, tx_buffer->time_stamp +\n+\t\t (adapter->tx_timeout_factor * HZ)) &&\n+\t\t !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {\n+\t\t\t/* detected Tx unit hang */\n+\t\t\tdev_err(tx_ring->dev,\n+\t\t\t\t\"Detected Tx Unit Hang\\n\"\n+\t\t\t\t\" Tx Queue <%d>\\n\"\n+\t\t\t\t\" TDH <%x>\\n\"\n+\t\t\t\t\" TDT <%x>\\n\"\n+\t\t\t\t\" next_to_use <%x>\\n\"\n+\t\t\t\t\" next_to_clean <%x>\\n\"\n+\t\t\t\t\"buffer_info[next_to_clean]\\n\"\n+\t\t\t\t\" time_stamp <%lx>\\n\"\n+\t\t\t\t\" next_to_watch <%p>\\n\"\n+\t\t\t\t\" jiffies <%lx>\\n\"\n+\t\t\t\t\" desc.status 
<%x>\\n\",\n+\t\t\t\ttx_ring->queue_index,\n+\t\t\t\trd32(IGC_TDH(tx_ring->reg_idx)),\n+\t\t\t\treadl(tx_ring->tail),\n+\t\t\t\ttx_ring->next_to_use,\n+\t\t\t\ttx_ring->next_to_clean,\n+\t\t\t\ttx_buffer->time_stamp,\n+\t\t\t\ttx_buffer->next_to_watch,\n+\t\t\t\tjiffies,\n+\t\t\t\ttx_buffer->next_to_watch->wb.status);\n+\t\t\t\tnetif_stop_subqueue(tx_ring->netdev,\n+\t\t\t\t\t\t tx_ring->queue_index);\n+\n+\t\t\t/* we are about to reset, no point in enabling stuff */\n+\t\t\treturn true;\n+\t\t}\n+\t}\n+\n+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)\n+\tif (unlikely(total_packets &&\n+\t\t netif_carrier_ok(tx_ring->netdev) &&\n+\t\t igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {\n+\t\t/* Make sure that anybody stopping the queue after this\n+\t\t * sees the new next_to_clean.\n+\t\t */\n+\t\tsmp_mb();\n+\t\tif (__netif_subqueue_stopped(tx_ring->netdev,\n+\t\t\t\t\t tx_ring->queue_index) &&\n+\t\t !(test_bit(__IGC_DOWN, &adapter->state))) {\n+\t\t\tnetif_wake_subqueue(tx_ring->netdev,\n+\t\t\t\t\t tx_ring->queue_index);\n+\n+\t\t\tu64_stats_update_begin(&tx_ring->tx_syncp);\n+\t\t\ttx_ring->tx_stats.restart_queue++;\n+\t\t\tu64_stats_update_end(&tx_ring->tx_syncp);\n+\t\t}\n+\t}\n+\n+\treturn !!budget;\n+}\n+\n /**\n * igc_ioctl - I/O control method\n * @netdev: network interface device structure\n@@ -842,6 +1731,10 @@ static void igc_update_stats(struct igc_adapter *adapter)\n {\n }\n \n+static void igc_nfc_filter_exit(struct igc_adapter *adapter)\n+{\n+}\n+\n /**\n * igc_down - Close the interface\n * @adapter: board private structure\n@@ -849,21 +1742,83 @@ static void igc_update_stats(struct igc_adapter *adapter)\n static void igc_down(struct igc_adapter *adapter)\n {\n \tstruct net_device *netdev = adapter->netdev;\n+\tstruct igc_hw *hw = &adapter->hw;\n+\tu32 tctl, rctl;\n \tint i = 0;\n \n \tset_bit(__IGC_DOWN, &adapter->state);\n \n+\t/* disable receives in the hardware */\n+\trctl = rd32(IGC_RCTL);\n+\twr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);\n+\t/* flush and sleep below */\n+\n+\tigc_nfc_filter_exit(adapter);\n+\n \t/* set trans_start so we don't get spurious watchdogs during reset */\n \tnetif_trans_update(netdev);\n \n \tnetif_carrier_off(netdev);\n \tnetif_tx_stop_all_queues(netdev);\n \n-\tfor (i = 0; i < adapter->num_q_vectors; i++)\n-\t\tnapi_disable(&adapter->q_vector[i]->napi);\n+\t/* disable transmits in the hardware */\n+\ttctl = rd32(IGC_TCTL);\n+\ttctl &= ~IGC_TCTL_EN;\n+\twr32(IGC_TCTL, tctl);\n+\t/* flush both disables and wait for them to finish */\n+\twrfl();\n+\tusleep_range(10000, 20000);\n+\n+\tigc_irq_disable(adapter);\n+\n+\tadapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;\n+\n+\tfor (i = 0; i < adapter->num_q_vectors; i++) {\n+\t\tif (adapter->q_vector[i]) {\n+\t\t\tnapi_synchronize(&adapter->q_vector[i]->napi);\n+\t\t\tnapi_disable(&adapter->q_vector[i]->napi);\n+\t\t}\n+\t}\n+\n+\tdel_timer_sync(&adapter->watchdog_timer);\n+\tdel_timer_sync(&adapter->phy_info_timer);\n+\n+\t/* record the stats before reset*/\n+\tspin_lock(&adapter->stats64_lock);\n+\tigc_update_stats(adapter);\n+\tspin_unlock(&adapter->stats64_lock);\n \n \tadapter->link_speed = 0;\n \tadapter->link_duplex = 0;\n+\n+\tif (!pci_channel_offline(adapter->pdev))\n+\t\tigc_reset(adapter);\n+\n+\t/* clear VLAN promisc flag so VFTA will be updated if necessary */\n+\tadapter->flags &= ~IGC_FLAG_VLAN_PROMISC;\n+\n+\tigc_clean_all_tx_rings(adapter);\n+\tigc_clean_all_rx_rings(adapter);\n+}\n+\n+static void igc_reinit_locked(struct igc_adapter *adapter)\n+{\n+\tWARN_ON(in_interrupt());\n+\twhile 
(test_and_set_bit(__IGC_RESETTING, &adapter->state))\n+\t\tusleep_range(1000, 2000);\n+\tigc_down(adapter);\n+\tigc_up(adapter);\n+\tclear_bit(__IGC_RESETTING, &adapter->state);\n+}\n+\n+static void igc_reset_task(struct work_struct *work)\n+{\n+\tstruct igc_adapter *adapter;\n+\n+\tadapter = container_of(work, struct igc_adapter, reset_task);\n+\n+\tnetdev_err(adapter->netdev, \"Reset adapter\\n\");\n+\tigc_reinit_locked(adapter);\n }\n \n /**\n@@ -1321,6 +2276,15 @@ static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)\n }\n \n /**\n+ * igc_watchdog - Timer Call-back\n+ * @data: pointer to adapter cast into an unsigned long\n+ */\n+static void igc_watchdog(struct timer_list *t)\n+{\n+\tstruct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);\n+}\n+\n+/**\n * igc_update_ring_itr - update the dynamic ITR value based on packet size\n * @q_vector: pointer to q_vector\n *\n@@ -1637,9 +2601,13 @@ static int igc_poll(struct napi_struct *napi, int budget)\n \t\t\t\t\t\t napi);\n \tbool clean_complete = true;\n \tint work_done = 0;\n-\tint cleaned = 0;\n+\n+\tif (q_vector->tx.ring)\n+\t\tclean_complete = igc_clean_tx_irq(q_vector, budget);\n \n \tif (q_vector->rx.ring) {\n+\t\tint cleaned = igc_clean_rx_irq(q_vector, budget);\n+\n \t\twork_done += cleaned;\n \t\tif (cleaned >= budget)\n \t\t\tclean_complete = false;\n@@ -2403,6 +3371,14 @@ static int igc_probe(struct pci_dev *pdev,\n \tnetdev->min_mtu = ETH_MIN_MTU;\n \tnetdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;\n \n+\t/* configure RXPBSIZE and TXPBSIZE */\n+\twr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);\n+\twr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);\n+\n+\ttimer_setup(&adapter->watchdog_timer, igc_watchdog, 0);\n+\n+\tINIT_WORK(&adapter->reset_task, igc_reset_task);\n+\n \t/* reset the hardware with the new settings */\n \tigc_reset(adapter);\n \n@@ -2456,7 +3432,10 @@ static void igc_remove(struct pci_dev *pdev)\n \tstruct igc_adapter *adapter = netdev_priv(netdev);\n \n \tset_bit(__IGC_DOWN, &adapter->state);\n-\tflush_scheduled_work();\n+\n+\tdel_timer_sync(&adapter->watchdog_timer);\n+\n+\tcancel_work_sync(&adapter->reset_task);\n \n \t/* Release control of h/w to f/w. If f/w is AMT enabled, this\n \t * would have already happened in close and is redundant.\n@@ -2464,10 +3443,16 @@ static void igc_remove(struct pci_dev *pdev)\n \tigc_release_hw_control(adapter);\n \tunregister_netdev(netdev);\n \n-\tpci_release_selected_regions(pdev,\n-\t\t\t\t pci_select_bars(pdev, IORESOURCE_MEM));\n+\tigc_clear_interrupt_scheme(adapter);\n+\tpci_iounmap(pdev, adapter->io_addr);\n+\tpci_release_mem_regions(pdev);\n \n+\tkfree(adapter->mac_table);\n+\tkfree(adapter->shadow_vfta);\n \tfree_netdev(netdev);\n+\n+\tpci_disable_pcie_error_reporting(pdev);\n+\n \tpci_disable_device(pdev);\n }\n \n@@ -2478,6 +3463,39 @@ static struct pci_driver igc_driver = {\n \t.remove = igc_remove,\n };\n \n+static void igc_set_flag_queue_pairs(struct igc_adapter *adapter,\n+\t\t\t\t const u32 max_rss_queues)\n+{\n+\t/* Determine if we need to pair queues. */\n+\t/* If rss_queues > half of max_rss_queues, pair the queues in\n+\t * order to conserve interrupts due to limited supply.\n+\t */\n+\tif (adapter->rss_queues > (max_rss_queues / 2))\n+\t\tadapter->flags |= IGC_FLAG_QUEUE_PAIRS;\n+\telse\n+\t\tadapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;\n+}\n+\n+static unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)\n+{\n+\tunsigned int max_rss_queues;\n+\n+\t/* Determine the maximum number of RSS queues supported. 
*/\n+\tmax_rss_queues = IGC_MAX_RX_QUEUES;\n+\n+\treturn max_rss_queues;\n+}\n+\n+static void igc_init_queue_configuration(struct igc_adapter *adapter)\n+{\n+\tu32 max_rss_queues;\n+\n+\tmax_rss_queues = igc_get_max_rss_queues(adapter);\n+\tadapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());\n+\n+\tigc_set_flag_queue_pairs(adapter, max_rss_queues);\n+}\n+\n /**\n * igc_sw_init - Initialize general software structures (struct igc_adapter)\n * @adapter: board private structure to initialize\n@@ -2492,21 +3510,38 @@ static int igc_sw_init(struct igc_adapter *adapter)\n \tstruct pci_dev *pdev = adapter->pdev;\n \tstruct igc_hw *hw = &adapter->hw;\n \n-\t/* PCI config space info */\n+\tint size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;\n \n-\thw->vendor_id = pdev->vendor;\n-\thw->device_id = pdev->device;\n-\thw->subsystem_vendor_id = pdev->subsystem_vendor;\n-\thw->subsystem_device_id = pdev->subsystem_device;\n+\tpci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);\n \n-\tpci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);\n+\t/* set default ring sizes */\n+\tadapter->tx_ring_count = IGC_DEFAULT_TXD;\n+\tadapter->rx_ring_count = IGC_DEFAULT_RXD;\n \n-\tpci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);\n+\t/* set default ITR values */\n+\tadapter->rx_itr_setting = IGC_DEFAULT_ITR;\n+\tadapter->tx_itr_setting = IGC_DEFAULT_ITR;\n+\n+\t/* set default work limits */\n+\tadapter->tx_work_limit = IGC_DEFAULT_TX_WORK;\n \n \t/* adjust max frame to be at least the size of a standard frame */\n \tadapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +\n-\t\t\t\t\tVLAN_HLEN;\n+\t\t\t\tVLAN_HLEN;\n+\tadapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;\n+\n+\tspin_lock_init(&adapter->nfc_lock);\n+\tspin_lock_init(&adapter->stats64_lock);\n+\t/* Assume MSI-X interrupts, will be checked during IRQ allocation */\n+\tadapter->flags |= IGC_FLAG_HAS_MSIX;\n+\n+\tadapter->mac_table = kzalloc(size, GFP_ATOMIC);\n+\tif (!adapter->mac_table)\n+\t\treturn -ENOMEM;\n+\n+\tigc_init_queue_configuration(adapter);\n \n+\t/* This call may decrease the number of queues */\n \tif (igc_init_interrupt_scheme(adapter, true)) {\n \t\tdev_err(&pdev->dev, \"Unable to allocate memory for queues\\n\");\n \t\treturn -ENOMEM;\n", "prefixes": [ "v8", "06/11" ] }
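
The PATCH and PUT operations listed at the top modify a patch, for example changing its state or delegate. The sketch below shows a partial update; it assumes Patchwork's token authentication ("Authorization: Token <key>") and that the caller has maintainer rights on the project, both of which depend on the deployment rather than on anything shown in this response.

import requests

BASE_URL = "http://patchwork.ozlabs.org/api"

def set_patch_state(patch_id: int, state: str, token: str) -> dict:
    """Partially update a patch (HTTP PATCH), e.g. move it to a new state."""
    response = requests.patch(
        f"{BASE_URL}/patches/{patch_id}/",
        json={"state": state},
        headers={"Authorization": f"Token {token}"},
    )
    response.raise_for_status()
    return response.json()

# Hypothetical usage: mark the patch shown above as accepted.
# set_patch_state(982291, "accepted", token="<maintainer API token>")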