get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
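
The GET request shown below can also be issued programmatically. Here is a minimal sketch using Python's requests library; only the URL and the field names are taken from the response that follows, and the client code itself is illustrative rather than part of Patchwork.

import requests

# Read access to the Patchwork REST API needs no authentication.
url = "http://patchwork.ozlabs.org/api/patches/963013/"
resp = requests.get(url)
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # "[v7,04/11] igc: Add interrupt support"
print(patch["state"])  # "superseded"
print(patch["mbox"])   # mbox download URL for the patch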

GET /api/patches/963013/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 963013,
    "url": "http://patchwork.ozlabs.org/api/patches/963013/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180828155954.26416-1-sasha.neftin@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20180828155954.26416-1-sasha.neftin@intel.com>",
    "list_archive_url": null,
    "date": "2018-08-28T15:59:54",
    "name": "[v7,04/11] igc: Add interrupt support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "ed63e460767fe308769cc020a1368b2d5c024fcb",
    "submitter": {
        "id": 69860,
        "url": "http://patchwork.ozlabs.org/api/people/69860/?format=api",
        "name": "Sasha Neftin",
        "email": "sasha.neftin@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180828155954.26416-1-sasha.neftin@intel.com/mbox/",
    "series": [
        {
            "id": 62908,
            "url": "http://patchwork.ozlabs.org/api/series/62908/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=62908",
            "date": "2018-08-28T15:59:52",
            "name": "[v7,01/11] igc: Add skeletal frame for Intel(R) 2.5G Ethernet Controller support.",
            "version": 7,
            "mbox": "http://patchwork.ozlabs.org/series/62908/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/963013/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/963013/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.136; helo=silver.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 420D3y0x09z9s1c\n\tfor <incoming@patchwork.ozlabs.org>;\n\tWed, 29 Aug 2018 02:00:14 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id 91A4D2224A;\n\tTue, 28 Aug 2018 16:00:12 +0000 (UTC)",
            "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 3MFaxkk1R8b7; Tue, 28 Aug 2018 16:00:07 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby silver.osuosl.org (Postfix) with ESMTP id D98AE220E5;\n\tTue, 28 Aug 2018 16:00:07 +0000 (UTC)",
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id DC24B1C2272\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 28 Aug 2018 16:00:06 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id D8D3385692\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 28 Aug 2018 16:00:06 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id C6Az4dUk9Ipd for <intel-wired-lan@lists.osuosl.org>;\n\tTue, 28 Aug 2018 16:00:03 +0000 (UTC)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id 4BA1785AE8\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 28 Aug 2018 16:00:02 +0000 (UTC)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t28 Aug 2018 08:59:57 -0700",
            "from ccdlinuxdev08.iil.intel.com ([143.185.161.150])\n\tby orsmga002.jf.intel.com with ESMTP; 28 Aug 2018 08:59:55 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.53,300,1531810800\"; d=\"scan'208\";a=\"87055409\"",
        "From": "Sasha Neftin <sasha.neftin@intel.com>",
        "To": "sasha.neftin@intel.com,\n\tintel-wired-lan@lists.osuosl.org",
        "Date": "Tue, 28 Aug 2018 18:59:54 +0300",
        "Message-Id": "<20180828155954.26416-1-sasha.neftin@intel.com>",
        "X-Mailer": "git-send-email 2.11.0",
        "Subject": "[Intel-wired-lan] [PATCH v7 04/11] igc: Add interrupt support",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.24",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "This patch set adds interrupt support for the igc interfaces.\n\nSasha Neftin (v2):\nfixed comments\n\nSasha Neftin (v3):\nminor cosmetic changes\n\nSasha Neftin (v4):\naddress comments\nmove IGC_NO_QUEUE to .h file\nadd header to irqreturn_t igc_msix_other method\nfix comment in igc_write_ivar method\nfix xmas tree layout\nfix code indentation\nreplace e1000_ prefix with igc_ prefix\nintroduce BIT() macro\n\nSasha Neftin (v5):\nfix xmas tree tree layout\nfix code indentation\n\nSasha Neftin (v6):\nminor cosmetic changes\n\nSasha Neftin (v7):\nfix comments spacing\n\nSigned-off-by: Sasha Neftin <sasha.neftin@intel.com>\n---\n drivers/net/ethernet/intel/igc/igc.h         |  131 ++++\n drivers/net/ethernet/intel/igc/igc_defines.h |   40 +\n drivers/net/ethernet/intel/igc/igc_hw.h      |   84 +++\n drivers/net/ethernet/intel/igc/igc_main.c    | 1030 ++++++++++++++++++++++++++\n 4 files changed, 1285 insertions(+)",
    "diff": "diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h\nindex cccc02b999c1..a279bf2716b9 100644\n--- a/drivers/net/ethernet/intel/igc/igc.h\n+++ b/drivers/net/ethernet/intel/igc/igc.h\n@@ -28,6 +28,17 @@\n extern char igc_driver_name[];\n extern char igc_driver_version[];\n \n+/* Interrupt defines */\n+#define IGC_START_ITR\t\t\t648 /* ~6000 ints/sec */\n+#define IGC_FLAG_HAS_MSI\t\tBIT(0)\n+#define IGC_FLAG_QUEUE_PAIRS\t\tBIT(4)\n+#define IGC_FLAG_HAS_MSIX\t\tBIT(13)\n+\n+#define IGC_START_ITR\t\t\t648 /* ~6000 ints/sec */\n+#define IGC_4K_ITR\t\t\t980\n+#define IGC_20K_ITR\t\t\t196\n+#define IGC_70K_ITR\t\t\t56\n+\n /* Transmit and receive queues */\n #define IGC_MAX_RX_QUEUES\t\t4\n #define IGC_MAX_TX_QUEUES\t\t4\n@@ -42,10 +53,100 @@ enum igc_state_t {\n \t__IGC_PTP_TX_IN_PROGRESS,\n };\n \n+struct igc_tx_queue_stats {\n+\tu64 packets;\n+\tu64 bytes;\n+\tu64 restart_queue;\n+};\n+\n+struct igc_rx_queue_stats {\n+\tu64 packets;\n+\tu64 bytes;\n+\tu64 drops;\n+\tu64 csum_err;\n+\tu64 alloc_failed;\n+};\n+\n+struct igc_rx_packet_stats {\n+\tu64 ipv4_packets;      /* IPv4 headers processed */\n+\tu64 ipv4e_packets;     /* IPv4E headers with extensions processed */\n+\tu64 ipv6_packets;      /* IPv6 headers processed */\n+\tu64 ipv6e_packets;     /* IPv6E headers with extensions processed */\n+\tu64 tcp_packets;       /* TCP headers processed */\n+\tu64 udp_packets;       /* UDP headers processed */\n+\tu64 sctp_packets;      /* SCTP headers processed */\n+\tu64 nfs_packets;       /* NFS headers processe */\n+\tu64 other_packets;\n+};\n+\n+struct igc_ring_container {\n+\tstruct igc_ring *ring;          /* pointer to linked list of rings */\n+\tunsigned int total_bytes;       /* total bytes processed this int */\n+\tunsigned int total_packets;     /* total packets processed this int */\n+\tu16 work_limit;                 /* total work allowed per interrupt */\n+\tu8 count;                       /* total number of rings in vector */\n+\tu8 itr;                         /* current ITR setting for ring */\n+};\n+\n+struct igc_ring {\n+\tstruct igc_q_vector *q_vector;  /* backlink to q_vector */\n+\tstruct net_device *netdev;      /* back pointer to net_device */\n+\tstruct device *dev;             /* device for dma mapping */\n+\tunion {                         /* array of buffer info structs */\n+\t\tstruct igc_tx_buffer *tx_buffer_info;\n+\t\tstruct igc_rx_buffer *rx_buffer_info;\n+\t};\n+\tvoid *desc;                     /* descriptor ring memory */\n+\tunsigned long flags;            /* ring specific flags */\n+\tvoid __iomem *tail;             /* pointer to ring tail register */\n+\tdma_addr_t dma;                 /* phys address of the ring */\n+\tunsigned int size;              /* length of desc. ring in bytes */\n+\n+\tu16 count;                      /* number of desc. 
in the ring */\n+\tu8 queue_index;                 /* logical index of the ring*/\n+\tu8 reg_idx;                     /* physical index of the ring */\n+\n+\t/* everything past this point are written often */\n+\tu16 next_to_clean;\n+\tu16 next_to_use;\n+\tu16 next_to_alloc;\n+\n+\tunion {\n+\t\t/* TX */\n+\t\tstruct {\n+\t\t\tstruct igc_tx_queue_stats tx_stats;\n+\t\t};\n+\t\t/* RX */\n+\t\tstruct {\n+\t\t\tstruct igc_rx_queue_stats rx_stats;\n+\t\t\tstruct igc_rx_packet_stats pkt_stats;\n+#ifdef CONFIG_IGC_DISABLE_PACKET_SPLIT\n+\t\t\tu16 rx_buffer_len;\n+#else\n+\t\t\tstruct sk_buff *skb;\n+#endif\n+\t\t};\n+\t};\n+} ____cacheline_internodealigned_in_smp;\n+\n struct igc_q_vector {\n \tstruct igc_adapter *adapter;    /* backlink */\n+\tvoid __iomem *itr_register;\n+\tu32 eims_value;                 /* EIMS mask value */\n+\n+\tu16 itr_val;\n+\tu8 set_itr;\n+\n+\tstruct igc_ring_container rx, tx;\n \n \tstruct napi_struct napi;\n+\n+\tstruct rcu_head rcu;    /* to avoid race with update stats on free */\n+\tchar name[IFNAMSIZ + 9];\n+\tstruct net_device poll_dev;\n+\n+\t/* for dynamic allocation of rings associated with this q_vector */\n+\tstruct igc_ring ring[0] ____cacheline_internodealigned_in_smp;\n };\n \n struct igc_mac_addr {\n@@ -65,13 +166,35 @@ struct igc_adapter {\n \tunsigned long state;\n \tunsigned int flags;\n \tunsigned int num_q_vectors;\n+\n+\tstruct msix_entry *msix_entries;\n+\n+\t/* TX */\n+\tu16 tx_work_limit;\n+\tint num_tx_queues;\n+\tstruct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];\n+\n+\t/* RX */\n+\tint num_rx_queues;\n+\tstruct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];\n+\n+\tstruct timer_list watchdog_timer;\n+\tstruct timer_list dma_err_timer;\n+\tstruct timer_list phy_info_timer;\n+\n \tu16 link_speed;\n \tu16 link_duplex;\n \n \tu8 port_num;\n \n \tu8 __iomem *io_addr;\n+\t/* Interrupt Throttle Rate */\n+\tu32 rx_itr_setting;\n+\tu32 tx_itr_setting;\n+\n+\tstruct work_struct reset_task;\n \tstruct work_struct watchdog_task;\n+\tstruct work_struct dma_err_task;\n \n \tint msg_enable;\n \tu32 max_frame_size;\n@@ -81,8 +204,16 @@ struct igc_adapter {\n \n \t/* structs defined in igc_hw.h */\n \tstruct igc_hw hw;\n+\tstruct igc_hw_stats stats;\n \n \tstruct igc_q_vector *q_vector[MAX_Q_VECTORS];\n+\tu32 eims_enable_mask;\n+\tu32 eims_other;\n+\n+\tu16 tx_ring_count;\n+\tu16 rx_ring_count;\n+\n+\tu32 rss_queues;\n \n \tstruct igc_mac_addr *mac_table;\n };\ndiff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h\nindex 3817cbc0a6ad..fcd14dfb7517 100644\n--- a/drivers/net/ethernet/intel/igc/igc_defines.h\n+++ b/drivers/net/ethernet/intel/igc/igc_defines.h\n@@ -45,4 +45,44 @@\n #define IGC_STATUS_SPEED_100\t0x00000040      /* Speed 100Mb/s */\n #define IGC_STATUS_SPEED_1000\t0x00000080      /* Speed 1000Mb/s */\n \n+/* Interrupt Cause Read */\n+#define IGC_ICR_TXDW\t\tBIT(0)\t/* Transmit desc written back */\n+#define IGC_ICR_TXQE\t\tBIT(1)\t/* Transmit Queue empty */\n+#define IGC_ICR_LSC\t\tBIT(2)\t/* Link Status Change */\n+#define IGC_ICR_RXSEQ\t\tBIT(3)\t/* Rx sequence error */\n+#define IGC_ICR_RXDMT0\t\tBIT(4)\t/* Rx desc min. 
threshold (0) */\n+#define IGC_ICR_RXO\t\tBIT(6)\t/* Rx overrun */\n+#define IGC_ICR_RXT0\t\tBIT(7)\t/* Rx timer intr (ring 0) */\n+#define IGC_ICR_DRSTA\t\tBIT(30)\t/* Device Reset Asserted */\n+#define IGC_ICS_RXT0\t\tIGC_ICR_RXT0      /* Rx timer intr */\n+\n+#define IMS_ENABLE_MASK ( \\\n+\tIGC_IMS_RXT0   |    \\\n+\tIGC_IMS_TXDW   |    \\\n+\tIGC_IMS_RXDMT0 |    \\\n+\tIGC_IMS_RXSEQ  |    \\\n+\tIGC_IMS_LSC)\n+\n+/* Interrupt Mask Set */\n+#define IGC_IMS_TXDW\t\tIGC_ICR_TXDW\t/* Tx desc written back */\n+#define IGC_IMS_RXSEQ\t\tIGC_ICR_RXSEQ\t/* Rx sequence error */\n+#define IGC_IMS_LSC\t\tIGC_ICR_LSC\t/* Link Status Change */\n+#define IGC_IMS_DOUTSYNC\tIGC_ICR_DOUTSYNC /* NIC DMA out of sync */\n+#define IGC_IMS_DRSTA\t\tIGC_ICR_DRSTA\t/* Device Reset Asserted */\n+#define IGC_IMS_RXT0\t\tIGC_ICR_RXT0\t/* Rx timer intr */\n+#define IGC_IMS_RXDMT0\t\tIGC_ICR_RXDMT0\t/* Rx desc min. threshold */\n+\n+#define IGC_QVECTOR_MASK\t0x7FFC\t\t/* Q-vector mask */\n+#define IGC_ITR_VAL_MASK\t0x04\t\t/* ITR value mask */\n+\n+#define IGC_ICR_DOUTSYNC\t0x10000000 /* NIC DMA out of sync */\n+#define IGC_EITR_CNT_IGNR\t0x80000000 /* Don't reset counters on write */\n+#define IGC_IVAR_VALID\t\t0x80\n+#define IGC_GPIE_NSICR\t\t0x00000001\n+#define IGC_GPIE_MSIX_MODE\t0x00000010\n+#define IGC_GPIE_EIAME\t\t0x40000000\n+#define IGC_GPIE_PBA\t\t0x80000000\n+\n+#define IGC_N0_QUEUE -1\n+\n #endif /* _IGC_DEFINES_H_ */\ndiff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h\nindex caed846fdb14..46e4e1aa860c 100644\n--- a/drivers/net/ethernet/intel/igc/igc_hw.h\n+++ b/drivers/net/ethernet/intel/igc/igc_hw.h\n@@ -114,6 +114,90 @@ struct igc_hw {\n \tu8  revision_id;\n };\n \n+/* Statistics counters collected by the MAC */\n+struct igc_hw_stats {\n+\tu64 crcerrs;\n+\tu64 algnerrc;\n+\tu64 symerrs;\n+\tu64 rxerrc;\n+\tu64 mpc;\n+\tu64 scc;\n+\tu64 ecol;\n+\tu64 mcc;\n+\tu64 latecol;\n+\tu64 colc;\n+\tu64 dc;\n+\tu64 tncrs;\n+\tu64 sec;\n+\tu64 cexterr;\n+\tu64 rlec;\n+\tu64 xonrxc;\n+\tu64 xontxc;\n+\tu64 xoffrxc;\n+\tu64 xofftxc;\n+\tu64 fcruc;\n+\tu64 prc64;\n+\tu64 prc127;\n+\tu64 prc255;\n+\tu64 prc511;\n+\tu64 prc1023;\n+\tu64 prc1522;\n+\tu64 gprc;\n+\tu64 bprc;\n+\tu64 mprc;\n+\tu64 gptc;\n+\tu64 gorc;\n+\tu64 gotc;\n+\tu64 rnbc;\n+\tu64 ruc;\n+\tu64 rfc;\n+\tu64 roc;\n+\tu64 rjc;\n+\tu64 mgprc;\n+\tu64 mgpdc;\n+\tu64 mgptc;\n+\tu64 tor;\n+\tu64 tot;\n+\tu64 tpr;\n+\tu64 tpt;\n+\tu64 ptc64;\n+\tu64 ptc127;\n+\tu64 ptc255;\n+\tu64 ptc511;\n+\tu64 ptc1023;\n+\tu64 ptc1522;\n+\tu64 mptc;\n+\tu64 bptc;\n+\tu64 tsctc;\n+\tu64 tsctfc;\n+\tu64 iac;\n+\tu64 icrxptc;\n+\tu64 icrxatc;\n+\tu64 ictxptc;\n+\tu64 ictxatc;\n+\tu64 ictxqec;\n+\tu64 ictxqmtc;\n+\tu64 icrxdmtc;\n+\tu64 icrxoc;\n+\tu64 cbtmpc;\n+\tu64 htdpmc;\n+\tu64 cbrdpc;\n+\tu64 cbrmpc;\n+\tu64 rpthc;\n+\tu64 hgptc;\n+\tu64 htcbdpc;\n+\tu64 hgorc;\n+\tu64 hgotc;\n+\tu64 lenerrs;\n+\tu64 scvpc;\n+\tu64 hrmpc;\n+\tu64 doosync;\n+\tu64 o2bgptc;\n+\tu64 o2bspc;\n+\tu64 b2ospc;\n+\tu64 b2ogprc;\n+};\n+\n /* These functions must be implemented by drivers */\n s32  igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);\n s32  igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);\ndiff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c\nindex 25d76bd5ba84..dcab1b10661c 100644\n--- a/drivers/net/ethernet/intel/igc/igc_main.c\n+++ b/drivers/net/ethernet/intel/igc/igc_main.c\n@@ -41,6 +41,36 @@ static int igc_sw_init(struct igc_adapter *);\n static void 
igc_configure(struct igc_adapter *adapter);\n static void igc_power_down_link(struct igc_adapter *adapter);\n static void igc_set_default_mac_filter(struct igc_adapter *adapter);\n+static irqreturn_t igc_msix_ring(int irq, void *data);\n+static void igc_write_itr(struct igc_q_vector *q_vector);\n+static int igc_request_msix(struct igc_adapter *adapter);\n+static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);\n+static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);\n+static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix);\n+static int igc_alloc_q_vectors(struct igc_adapter *adapter);\n+static int igc_poll(struct napi_struct *napi, int budget);\n+static void igc_set_interrupt_capability(struct igc_adapter *adapter,\n+\t\t\t\t\t bool msix);\n+static void igc_reset_interrupt_capability(struct igc_adapter *adapter);\n+static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx);\n+static void igc_clear_interrupt_scheme(struct igc_adapter *adapter);\n+static void igc_free_q_vectors(struct igc_adapter *adapter);\n+static void igc_irq_disable(struct igc_adapter *adapter);\n+static void igc_irq_enable(struct igc_adapter *adapter);\n+static void igc_configure_msix(struct igc_adapter *adapter);\n+static void igc_free_irq(struct igc_adapter *adapter);\n+static void igc_ring_irq_enable(struct igc_q_vector *q_vector);\n+static void igc_set_itr(struct igc_q_vector *q_vector);\n+static void igc_update_ring_itr(struct igc_q_vector *q_vector);\n+static void igc_update_itr(struct igc_q_vector *q_vector,\n+\t\t\t   struct igc_ring_container *ring_container);\n+\n+enum latency_range {\n+\tlowest_latency = 0,\n+\tlow_latency = 1,\n+\tbulk_latency = 2,\n+\tlatency_invalid = 255\n+};\n \n static void igc_reset(struct igc_adapter *adapter)\n {\n@@ -154,6 +184,7 @@ static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)\n  **/\n static void igc_up(struct igc_adapter *adapter)\n {\n+\tstruct igc_hw *hw = &adapter->hw;\n \tint i = 0;\n \n \t/* hardware has been reset, we need to reload some things */\n@@ -163,6 +194,15 @@ static void igc_up(struct igc_adapter *adapter)\n \n \tfor (i = 0; i < adapter->num_q_vectors; i++)\n \t\tnapi_enable(&adapter->q_vector[i]->napi);\n+\n+\tif (adapter->msix_entries)\n+\t\tigc_configure_msix(adapter);\n+\telse\n+\t\tigc_assign_vector(adapter->q_vector[0], 0);\n+\n+\t/* Clear any pending interrupts. 
*/\n+\trd32(IGC_ICR);\n+\tigc_irq_enable(adapter);\n }\n \n /**\n@@ -310,6 +350,958 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter)\n }\n \n /**\n+ *  igc_msix_other - msix other interrupt handler\n+ *  @irq: interrupt number\n+ *  @data: pointer to a q_vector\n+ **/\n+static irqreturn_t igc_msix_other(int irq, void *data)\n+{\n+\tstruct igc_adapter *adapter = data;\n+\tstruct igc_hw *hw = &adapter->hw;\n+\tu32 icr = rd32(IGC_ICR);\n+\n+\t/* reading ICR causes bit 31 of EICR to be cleared */\n+\tif (icr & IGC_ICR_DRSTA)\n+\t\tschedule_work(&adapter->reset_task);\n+\n+\tif (icr & IGC_ICR_DOUTSYNC) {\n+\t\t/* HW is reporting DMA is out of sync */\n+\t\tadapter->stats.doosync++;\n+\t}\n+\n+\tif (icr & IGC_ICR_LSC) {\n+\t\thw->mac.get_link_status = 1;\n+\t\t/* guard against interrupt when we're going down */\n+\t\tif (!test_bit(__IGC_DOWN, &adapter->state))\n+\t\t\tmod_timer(&adapter->watchdog_timer, jiffies + 1);\n+\t}\n+\n+\twr32(IGC_EIMS, adapter->eims_other);\n+\n+\treturn IRQ_HANDLED;\n+}\n+\n+/**\n+ *  igc_write_ivar - configure ivar for given MSI-X vector\n+ *  @hw: pointer to the HW structure\n+ *  @msix_vector: vector number we are allocating to a given ring\n+ *  @index: row index of IVAR register to write within IVAR table\n+ *  @offset: column offset of in IVAR, should be multiple of 8\n+ *\n+ *  The IVAR table consists of 2 columns,\n+ *  each containing an cause allocation for an Rx and Tx ring, and a\n+ *  variable number of rows depending on the number of queues supported.\n+ **/\n+static void igc_write_ivar(struct igc_hw *hw, int msix_vector,\n+\t\t\t   int index, int offset)\n+{\n+\tu32 ivar = array_rd32(IGC_IVAR0, index);\n+\n+\t/* clear any bits that are currently set */\n+\tivar &= ~((u32)0xFF << offset);\n+\n+\t/* write vector and valid bit */\n+\tivar |= (msix_vector | IGC_IVAR_VALID) << offset;\n+\n+\tarray_wr32(IGC_IVAR0, index, ivar);\n+}\n+\n+static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)\n+{\n+\tstruct igc_adapter *adapter = q_vector->adapter;\n+\tstruct igc_hw *hw = &adapter->hw;\n+\tint rx_queue = IGC_N0_QUEUE;\n+\tint tx_queue = IGC_N0_QUEUE;\n+\n+\tif (q_vector->rx.ring)\n+\t\trx_queue = q_vector->rx.ring->reg_idx;\n+\tif (q_vector->tx.ring)\n+\t\ttx_queue = q_vector->tx.ring->reg_idx;\n+\n+\tswitch (hw->mac.type) {\n+\tcase igc_i225:\n+\t\tif (rx_queue > IGC_N0_QUEUE)\n+\t\t\tigc_write_ivar(hw, msix_vector,\n+\t\t\t\t       rx_queue >> 1,\n+\t\t\t\t       (rx_queue & 0x1) << 4);\n+\t\tif (tx_queue > IGC_N0_QUEUE)\n+\t\t\tigc_write_ivar(hw, msix_vector,\n+\t\t\t\t       tx_queue >> 1,\n+\t\t\t\t       ((tx_queue & 0x1) << 4) + 8);\n+\t\tq_vector->eims_value = BIT(msix_vector);\n+\t\tbreak;\n+\tdefault:\n+\t\tWARN_ONCE(hw->mac.type != igc_i225, \"Wrong MAC type\\n\");\n+\t\tbreak;\n+\t}\n+\n+\t/* add q_vector eims value to global eims_enable_mask */\n+\tadapter->eims_enable_mask |= q_vector->eims_value;\n+\n+\t/* configure q_vector to set itr on first interrupt */\n+\tq_vector->set_itr = 1;\n+}\n+\n+/**\n+ *  igc_configure_msix - Configure MSI-X hardware\n+ *  @adapter: Pointer to adapter structure\n+ *\n+ *  igc_configure_msix sets up the hardware to properly\n+ *  generate MSI-X interrupts.\n+ **/\n+static void igc_configure_msix(struct igc_adapter *adapter)\n+{\n+\tstruct igc_hw *hw = &adapter->hw;\n+\tint i, vector = 0;\n+\tu32 tmp;\n+\n+\tadapter->eims_enable_mask = 0;\n+\n+\t/* set vector for other causes, i.e. 
link changes */\n+\tswitch (hw->mac.type) {\n+\tcase igc_i225:\n+\t\t/* Turn on MSI-X capability first, or our settings\n+\t\t * won't stick.  And it will take days to debug.\n+\t\t */\n+\t\twr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |\n+\t\t     IGC_GPIE_PBA | IGC_GPIE_EIAME |\n+\t\t     IGC_GPIE_NSICR);\n+\n+\t\t/* enable msix_other interrupt */\n+\t\tadapter->eims_other = BIT(vector);\n+\t\ttmp = (vector++ | IGC_IVAR_VALID) << 8;\n+\n+\t\twr32(IGC_IVAR_MISC, tmp);\n+\t\tbreak;\n+\tdefault:\n+\t\t/* do nothing, since nothing else supports MSI-X */\n+\t\tbreak;\n+\t} /* switch (hw->mac.type) */\n+\n+\tadapter->eims_enable_mask |= adapter->eims_other;\n+\n+\tfor (i = 0; i < adapter->num_q_vectors; i++)\n+\t\tigc_assign_vector(adapter->q_vector[i], vector++);\n+\n+\twrfl();\n+}\n+\n+/**\n+ *  igc_request_msix - Initialize MSI-X interrupts\n+ *  @adapter: Pointer to adapter structure\n+ *\n+ *  igc_request_msix allocates MSI-X vectors and requests interrupts from the\n+ *  kernel.\n+ **/\n+static int igc_request_msix(struct igc_adapter *adapter)\n+{\n+\tint i = 0, err = 0, vector = 0, free_vector = 0;\n+\tstruct net_device *netdev = adapter->netdev;\n+\n+\terr = request_irq(adapter->msix_entries[vector].vector,\n+\t\t\t  &igc_msix_other, 0, netdev->name, adapter);\n+\tif (err)\n+\t\tgoto err_out;\n+\n+\tfor (i = 0; i < adapter->num_q_vectors; i++) {\n+\t\tstruct igc_q_vector *q_vector = adapter->q_vector[i];\n+\n+\t\tvector++;\n+\n+\t\tq_vector->itr_register = adapter->io_addr + IGC_EITR(vector);\n+\n+\t\tif (q_vector->rx.ring && q_vector->tx.ring)\n+\t\t\tsprintf(q_vector->name, \"%s-TxRx-%u\", netdev->name,\n+\t\t\t\tq_vector->rx.ring->queue_index);\n+\t\telse if (q_vector->tx.ring)\n+\t\t\tsprintf(q_vector->name, \"%s-tx-%u\", netdev->name,\n+\t\t\t\tq_vector->tx.ring->queue_index);\n+\t\telse if (q_vector->rx.ring)\n+\t\t\tsprintf(q_vector->name, \"%s-rx-%u\", netdev->name,\n+\t\t\t\tq_vector->rx.ring->queue_index);\n+\t\telse\n+\t\t\tsprintf(q_vector->name, \"%s-unused\", netdev->name);\n+\n+\t\terr = request_irq(adapter->msix_entries[vector].vector,\n+\t\t\t\t  igc_msix_ring, 0, q_vector->name,\n+\t\t\t\t  q_vector);\n+\t\tif (err)\n+\t\t\tgoto err_free;\n+\t}\n+\n+\tigc_configure_msix(adapter);\n+\treturn 0;\n+\n+err_free:\n+\t/* free already assigned IRQs */\n+\tfree_irq(adapter->msix_entries[free_vector++].vector, adapter);\n+\n+\tvector--;\n+\tfor (i = 0; i < vector; i++) {\n+\t\tfree_irq(adapter->msix_entries[free_vector++].vector,\n+\t\t\t adapter->q_vector[i]);\n+\t}\n+err_out:\n+\treturn err;\n+}\n+\n+/**\n+ *  igc_reset_q_vector - Reset config for interrupt vector\n+ *  @adapter: board private structure to initialize\n+ *  @v_idx: Index of vector to be reset\n+ *\n+ *  If NAPI is enabled it will delete any references to the\n+ *  NAPI struct. 
This is preparation for igc_free_q_vector.\n+ **/\n+static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)\n+{\n+\tstruct igc_q_vector *q_vector = adapter->q_vector[v_idx];\n+\n+\t/* if we're coming from igc_set_interrupt_capability, the vectors are\n+\t * not yet allocated\n+\t */\n+\tif (!q_vector)\n+\t\treturn;\n+\n+\tif (q_vector->tx.ring)\n+\t\tadapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;\n+\n+\tif (q_vector->rx.ring)\n+\t\tadapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;\n+\n+\tnetif_napi_del(&q_vector->napi);\n+}\n+\n+static void igc_reset_interrupt_capability(struct igc_adapter *adapter)\n+{\n+\tint v_idx = adapter->num_q_vectors;\n+\n+\tif (adapter->msix_entries) {\n+\t\tpci_disable_msix(adapter->pdev);\n+\t\tkfree(adapter->msix_entries);\n+\t\tadapter->msix_entries = NULL;\n+\t} else if (adapter->flags & IGC_FLAG_HAS_MSI) {\n+\t\tpci_disable_msi(adapter->pdev);\n+\t}\n+\n+\twhile (v_idx--)\n+\t\tigc_reset_q_vector(adapter, v_idx);\n+}\n+\n+/**\n+ *  igc_clear_interrupt_scheme - reset the device to a state of no interrupts\n+ *  @adapter: Pointer to adapter structure\n+ *\n+ *  This function resets the device so that it has 0 rx queues, tx queues, and\n+ *  MSI-X interrupts allocated.\n+ */\n+static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)\n+{\n+\tigc_free_q_vectors(adapter);\n+\tigc_reset_interrupt_capability(adapter);\n+}\n+\n+/**\n+ *  igc_free_q_vectors - Free memory allocated for interrupt vectors\n+ *  @adapter: board private structure to initialize\n+ *\n+ *  This function frees the memory allocated to the q_vectors.  In addition if\n+ *  NAPI is enabled it will delete any references to the NAPI struct prior\n+ *  to freeing the q_vector.\n+ **/\n+static void igc_free_q_vectors(struct igc_adapter *adapter)\n+{\n+\tint v_idx = adapter->num_q_vectors;\n+\n+\tadapter->num_tx_queues = 0;\n+\tadapter->num_rx_queues = 0;\n+\tadapter->num_q_vectors = 0;\n+\n+\twhile (v_idx--) {\n+\t\tigc_reset_q_vector(adapter, v_idx);\n+\t\tigc_free_q_vector(adapter, v_idx);\n+\t}\n+}\n+\n+/**\n+ *  igc_free_q_vector - Free memory allocated for specific interrupt vector\n+ *  @adapter: board private structure to initialize\n+ *  @v_idx: Index of vector to be freed\n+ *\n+ *  This function frees the memory allocated to the q_vector.\n+ **/\n+static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)\n+{\n+\tstruct igc_q_vector *q_vector = adapter->q_vector[v_idx];\n+\n+\tadapter->q_vector[v_idx] = NULL;\n+\n+\t/* igc_get_stats64() might access the rings on this vector,\n+\t * we must wait a grace period before freeing it.\n+\t */\n+\tif (q_vector)\n+\t\tkfree_rcu(q_vector, rcu);\n+}\n+\n+/**\n+ *  igc_update_ring_itr - update the dynamic ITR value based on packet size\n+ *  @q_vector: pointer to q_vector\n+ *\n+ *  Stores a new ITR value based on strictly on packet size.  This\n+ *  algorithm is less sophisticated than that used in igc_update_itr,\n+ *  due to the difficulty of synchronizing statistics across multiple\n+ *  receive rings.  
The divisors and thresholds used by this function\n+ *  were determined based on theoretical maximum wire speed and testing\n+ *  data, in order to minimize response time while increasing bulk\n+ *  throughput.\n+ *  NOTE: This function is called only when operating in a multiqueue\n+ *  receive environment.\n+ **/\n+static void igc_update_ring_itr(struct igc_q_vector *q_vector)\n+{\n+\tstruct igc_adapter *adapter = q_vector->adapter;\n+\tint new_val = q_vector->itr_val;\n+\tint avg_wire_size = 0;\n+\tunsigned int packets;\n+\n+\t/* For non-gigabit speeds, just fix the interrupt rate at 4000\n+\t * ints/sec - ITR timer value of 120 ticks.\n+\t */\n+\tswitch (adapter->link_speed) {\n+\tcase SPEED_10:\n+\tcase SPEED_100:\n+\t\tnew_val = IGC_4K_ITR;\n+\t\tgoto set_itr_val;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tpackets = q_vector->rx.total_packets;\n+\tif (packets)\n+\t\tavg_wire_size = q_vector->rx.total_bytes / packets;\n+\n+\tpackets = q_vector->tx.total_packets;\n+\tif (packets)\n+\t\tavg_wire_size = max_t(u32, avg_wire_size,\n+\t\t\t\t      q_vector->tx.total_bytes / packets);\n+\n+\t/* if avg_wire_size isn't set no work was done */\n+\tif (!avg_wire_size)\n+\t\tgoto clear_counts;\n+\n+\t/* Add 24 bytes to size to account for CRC, preamble, and gap */\n+\tavg_wire_size += 24;\n+\n+\t/* Don't starve jumbo frames */\n+\tavg_wire_size = min(avg_wire_size, 3000);\n+\n+\t/* Give a little boost to mid-size frames */\n+\tif (avg_wire_size > 300 && avg_wire_size < 1200)\n+\t\tnew_val = avg_wire_size / 3;\n+\telse\n+\t\tnew_val = avg_wire_size / 2;\n+\n+\t/* conservative mode (itr 3) eliminates the lowest_latency setting */\n+\tif (new_val < IGC_20K_ITR &&\n+\t    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||\n+\t    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))\n+\t\tnew_val = IGC_20K_ITR;\n+\n+set_itr_val:\n+\tif (new_val != q_vector->itr_val) {\n+\t\tq_vector->itr_val = new_val;\n+\t\tq_vector->set_itr = 1;\n+\t}\n+clear_counts:\n+\tq_vector->rx.total_bytes = 0;\n+\tq_vector->rx.total_packets = 0;\n+\tq_vector->tx.total_bytes = 0;\n+\tq_vector->tx.total_packets = 0;\n+}\n+\n+/**\n+ *  igc_update_itr - update the dynamic ITR value based on statistics\n+ *  @q_vector: pointer to q_vector\n+ *  @ring_container: ring info to update the itr for\n+ *\n+ *  Stores a new ITR value based on packets and byte\n+ *  counts during the last interrupt.  The advantage of per interrupt\n+ *  computation is faster updates and more accurate ITR for the current\n+ *  traffic pattern.  
Constants in this function were computed\n+ *  based on theoretical maximum wire speed and thresholds were set based\n+ *  on testing data as well as attempting to minimize response time\n+ *  while increasing bulk throughput.\n+ *  NOTE: These calculations are only valid when operating in a single-\n+ *  queue environment.\n+ **/\n+static void igc_update_itr(struct igc_q_vector *q_vector,\n+\t\t\t   struct igc_ring_container *ring_container)\n+{\n+\tunsigned int packets = ring_container->total_packets;\n+\tunsigned int bytes = ring_container->total_bytes;\n+\tu8 itrval = ring_container->itr;\n+\n+\t/* no packets, exit with status unchanged */\n+\tif (packets == 0)\n+\t\treturn;\n+\n+\tswitch (itrval) {\n+\tcase lowest_latency:\n+\t\t/* handle TSO and jumbo frames */\n+\t\tif (bytes / packets > 8000)\n+\t\t\titrval = bulk_latency;\n+\t\telse if ((packets < 5) && (bytes > 512))\n+\t\t\titrval = low_latency;\n+\t\tbreak;\n+\tcase low_latency:  /* 50 usec aka 20000 ints/s */\n+\t\tif (bytes > 10000) {\n+\t\t\t/* this if handles the TSO accounting */\n+\t\t\tif (bytes / packets > 8000)\n+\t\t\t\titrval = bulk_latency;\n+\t\t\telse if ((packets < 10) || ((bytes / packets) > 1200))\n+\t\t\t\titrval = bulk_latency;\n+\t\t\telse if ((packets > 35))\n+\t\t\t\titrval = lowest_latency;\n+\t\t} else if (bytes / packets > 2000) {\n+\t\t\titrval = bulk_latency;\n+\t\t} else if (packets <= 2 && bytes < 512) {\n+\t\t\titrval = lowest_latency;\n+\t\t}\n+\t\tbreak;\n+\tcase bulk_latency: /* 250 usec aka 4000 ints/s */\n+\t\tif (bytes > 25000) {\n+\t\t\tif (packets > 35)\n+\t\t\t\titrval = low_latency;\n+\t\t} else if (bytes < 1500) {\n+\t\t\titrval = low_latency;\n+\t\t}\n+\t\tbreak;\n+\t}\n+\n+\t/* clear work counters since we have the values we need */\n+\tring_container->total_bytes = 0;\n+\tring_container->total_packets = 0;\n+\n+\t/* write updated itr to ring container */\n+\tring_container->itr = itrval;\n+}\n+\n+static void igc_ring_irq_enable(struct igc_q_vector *q_vector)\n+{\n+\tstruct igc_adapter *adapter = q_vector->adapter;\n+\tstruct igc_hw *hw = &adapter->hw;\n+\n+\tif ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||\n+\t    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {\n+\t\tif (adapter->num_q_vectors == 1)\n+\t\t\tigc_set_itr(q_vector);\n+\t\telse\n+\t\t\tigc_update_ring_itr(q_vector);\n+\t}\n+\n+\tif (!test_bit(__IGC_DOWN, &adapter->state)) {\n+\t\tif (adapter->msix_entries)\n+\t\t\twr32(IGC_EIMS, q_vector->eims_value);\n+\t\telse\n+\t\t\tigc_irq_enable(adapter);\n+\t}\n+}\n+\n+static void igc_set_itr(struct igc_q_vector *q_vector)\n+{\n+\tstruct igc_adapter *adapter = q_vector->adapter;\n+\tu32 new_itr = q_vector->itr_val;\n+\tu8 current_itr = 0;\n+\n+\t/* for non-gigabit speeds, just fix the interrupt rate at 4000 */\n+\tswitch (adapter->link_speed) {\n+\tcase SPEED_10:\n+\tcase SPEED_100:\n+\t\tcurrent_itr = 0;\n+\t\tnew_itr = IGC_4K_ITR;\n+\t\tgoto set_itr_now;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tigc_update_itr(q_vector, &q_vector->tx);\n+\tigc_update_itr(q_vector, &q_vector->rx);\n+\n+\tcurrent_itr = max(q_vector->rx.itr, q_vector->tx.itr);\n+\n+\t/* conservative mode (itr 3) eliminates the lowest_latency setting */\n+\tif (current_itr == lowest_latency &&\n+\t    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||\n+\t    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))\n+\t\tcurrent_itr = low_latency;\n+\n+\tswitch (current_itr) {\n+\t/* counts and packets in update_itr are dependent on these numbers */\n+\tcase lowest_latency:\n+\t\tnew_itr = 
IGC_70K_ITR; /* 70,000 ints/sec */\n+\t\tbreak;\n+\tcase low_latency:\n+\t\tnew_itr = IGC_20K_ITR; /* 20,000 ints/sec */\n+\t\tbreak;\n+\tcase bulk_latency:\n+\t\tnew_itr = IGC_4K_ITR;  /* 4,000 ints/sec */\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+set_itr_now:\n+\tif (new_itr != q_vector->itr_val) {\n+\t\t/* this attempts to bias the interrupt rate towards Bulk\n+\t\t * by adding intermediate steps when interrupt rate is\n+\t\t * increasing\n+\t\t */\n+\t\tnew_itr = new_itr > q_vector->itr_val ?\n+\t\t\t  max((new_itr * q_vector->itr_val) /\n+\t\t\t  (new_itr + (q_vector->itr_val >> 2)),\n+\t\t\t  new_itr) : new_itr;\n+\t\t/* Don't write the value here; it resets the adapter's\n+\t\t * internal timer, and causes us to delay far longer than\n+\t\t * we should between interrupts.  Instead, we write the ITR\n+\t\t * value at the beginning of the next interrupt so the timing\n+\t\t * ends up being correct.\n+\t\t */\n+\t\tq_vector->itr_val = new_itr;\n+\t\tq_vector->set_itr = 1;\n+\t}\n+}\n+\n+/**\n+ *  igc_poll - NAPI Rx polling callback\n+ *  @napi: napi polling structure\n+ *  @budget: count of how many packets we should handle\n+ **/\n+static int igc_poll(struct napi_struct *napi, int budget)\n+{\n+\tstruct igc_q_vector *q_vector = container_of(napi,\n+\t\t\t\t\t\t     struct igc_q_vector,\n+\t\t\t\t\t\t     napi);\n+\tbool clean_complete = true;\n+\tint work_done = 0;\n+\tint cleaned = 0;\n+\n+\tif (q_vector->rx.ring) {\n+\t\twork_done += cleaned;\n+\t\tif (cleaned >= budget)\n+\t\t\tclean_complete = false;\n+\t}\n+\n+\t/* If all work not completed, return budget and keep polling */\n+\tif (!clean_complete)\n+\t\treturn budget;\n+\n+\t/* If not enough Rx work done, exit the polling mode */\n+\tnapi_complete_done(napi, work_done);\n+\tigc_ring_irq_enable(q_vector);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ *  igc_set_interrupt_capability - set MSI or MSI-X if supported\n+ *  @adapter: Pointer to adapter structure\n+ *\n+ *  Attempt to configure interrupts using the best available\n+ *  capabilities of the hardware and kernel.\n+ **/\n+static void igc_set_interrupt_capability(struct igc_adapter *adapter,\n+\t\t\t\t\t bool msix)\n+{\n+\tint numvecs, i;\n+\tint err;\n+\n+\tif (!msix)\n+\t\tgoto msi_only;\n+\tadapter->flags |= IGC_FLAG_HAS_MSIX;\n+\n+\t/* Number of supported queues. 
*/\n+\tadapter->num_rx_queues = adapter->rss_queues;\n+\n+\tadapter->num_tx_queues = adapter->rss_queues;\n+\n+\t/* start with one vector for every Rx queue */\n+\tnumvecs = adapter->num_rx_queues;\n+\n+\t/* if Tx handler is separate add 1 for every Tx queue */\n+\tif (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))\n+\t\tnumvecs += adapter->num_tx_queues;\n+\n+\t/* store the number of vectors reserved for queues */\n+\tadapter->num_q_vectors = numvecs;\n+\n+\t/* add 1 vector for link status interrupts */\n+\tnumvecs++;\n+\n+\tadapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),\n+\t\t\t\t\tGFP_KERNEL);\n+\n+\tif (!adapter->msix_entries)\n+\t\treturn;\n+\n+\t/* populate entry values */\n+\tfor (i = 0; i < numvecs; i++)\n+\t\tadapter->msix_entries[i].entry = i;\n+\n+\terr = pci_enable_msix_range(adapter->pdev,\n+\t\t\t\t    adapter->msix_entries,\n+\t\t\t\t    numvecs,\n+\t\t\t\t    numvecs);\n+\tif (err > 0)\n+\t\treturn;\n+\n+\tkfree(adapter->msix_entries);\n+\tadapter->msix_entries = NULL;\n+\n+\tigc_reset_interrupt_capability(adapter);\n+\n+msi_only:\n+\tadapter->flags &= ~IGC_FLAG_HAS_MSIX;\n+\n+\tadapter->rss_queues = 1;\n+\tadapter->flags |= IGC_FLAG_QUEUE_PAIRS;\n+\tadapter->num_rx_queues = 1;\n+\tadapter->num_tx_queues = 1;\n+\tadapter->num_q_vectors = 1;\n+\tif (!pci_enable_msi(adapter->pdev))\n+\t\tadapter->flags |= IGC_FLAG_HAS_MSI;\n+}\n+\n+static void igc_add_ring(struct igc_ring *ring,\n+\t\t\t struct igc_ring_container *head)\n+{\n+\thead->ring = ring;\n+\thead->count++;\n+}\n+\n+/**\n+ *  igc_alloc_q_vector - Allocate memory for a single interrupt vector\n+ *  @adapter: board private structure to initialize\n+ *  @v_count: q_vectors allocated on adapter, used for ring interleaving\n+ *  @v_idx: index of vector in adapter struct\n+ *  @txr_count: total number of Tx rings to allocate\n+ *  @txr_idx: index of first Tx ring to allocate\n+ *  @rxr_count: total number of Rx rings to allocate\n+ *  @rxr_idx: index of first Rx ring to allocate\n+ *\n+ *  We allocate one q_vector.  
If allocation fails we return -ENOMEM.\n+ **/\n+static int igc_alloc_q_vector(struct igc_adapter *adapter,\n+\t\t\t      unsigned int v_count, unsigned int v_idx,\n+\t\t\t      unsigned int txr_count, unsigned int txr_idx,\n+\t\t\t      unsigned int rxr_count, unsigned int rxr_idx)\n+{\n+\tstruct igc_q_vector *q_vector;\n+\tstruct igc_ring *ring;\n+\tint ring_count, size;\n+\n+\t/* igc only supports 1 Tx and/or 1 Rx queue per vector */\n+\tif (txr_count > 1 || rxr_count > 1)\n+\t\treturn -ENOMEM;\n+\n+\tring_count = txr_count + rxr_count;\n+\tsize = sizeof(struct igc_q_vector) +\n+\t\t(sizeof(struct igc_ring) * ring_count);\n+\n+\t/* allocate q_vector and rings */\n+\tq_vector = adapter->q_vector[v_idx];\n+\tif (!q_vector)\n+\t\tq_vector = kzalloc(size, GFP_KERNEL);\n+\telse\n+\t\tmemset(q_vector, 0, size);\n+\tif (!q_vector)\n+\t\treturn -ENOMEM;\n+\n+\t/* initialize NAPI */\n+\tnetif_napi_add(adapter->netdev, &q_vector->napi,\n+\t\t       igc_poll, 64);\n+\n+\t/* tie q_vector and adapter together */\n+\tadapter->q_vector[v_idx] = q_vector;\n+\tq_vector->adapter = adapter;\n+\n+\t/* initialize work limits */\n+\tq_vector->tx.work_limit = adapter->tx_work_limit;\n+\n+\t/* initialize ITR configuration */\n+\tq_vector->itr_register = adapter->io_addr + IGC_EITR(0);\n+\tq_vector->itr_val = IGC_START_ITR;\n+\n+\t/* initialize pointer to rings */\n+\tring = q_vector->ring;\n+\n+\t/* initialize ITR */\n+\tif (rxr_count) {\n+\t\t/* rx or rx/tx vector */\n+\t\tif (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)\n+\t\t\tq_vector->itr_val = adapter->rx_itr_setting;\n+\t} else {\n+\t\t/* tx only vector */\n+\t\tif (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)\n+\t\t\tq_vector->itr_val = adapter->tx_itr_setting;\n+\t}\n+\n+\tif (txr_count) {\n+\t\t/* assign generic ring traits */\n+\t\tring->dev = &adapter->pdev->dev;\n+\t\tring->netdev = adapter->netdev;\n+\n+\t\t/* configure backlink on ring */\n+\t\tring->q_vector = q_vector;\n+\n+\t\t/* update q_vector Tx values */\n+\t\tigc_add_ring(ring, &q_vector->tx);\n+\n+\t\t/* apply Tx specific ring traits */\n+\t\tring->count = adapter->tx_ring_count;\n+\t\tring->queue_index = txr_idx;\n+\n+\t\t/* assign ring to adapter */\n+\t\tadapter->tx_ring[txr_idx] = ring;\n+\n+\t\t/* push pointer to next ring */\n+\t\tring++;\n+\t}\n+\n+\tif (rxr_count) {\n+\t\t/* assign generic ring traits */\n+\t\tring->dev = &adapter->pdev->dev;\n+\t\tring->netdev = adapter->netdev;\n+\n+\t\t/* configure backlink on ring */\n+\t\tring->q_vector = q_vector;\n+\n+\t\t/* update q_vector Rx values */\n+\t\tigc_add_ring(ring, &q_vector->rx);\n+\n+\t\t/* apply Rx specific ring traits */\n+\t\tring->count = adapter->rx_ring_count;\n+\t\tring->queue_index = rxr_idx;\n+\n+\t\t/* assign ring to adapter */\n+\t\tadapter->rx_ring[rxr_idx] = ring;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ *  igc_alloc_q_vectors - Allocate memory for interrupt vectors\n+ *  @adapter: board private structure to initialize\n+ *\n+ *  We allocate one q_vector per queue interrupt.  
If allocation fails we\n+ *  return -ENOMEM.\n+ **/\n+static int igc_alloc_q_vectors(struct igc_adapter *adapter)\n+{\n+\tint rxr_remaining = adapter->num_rx_queues;\n+\tint txr_remaining = adapter->num_tx_queues;\n+\tint rxr_idx = 0, txr_idx = 0, v_idx = 0;\n+\tint q_vectors = adapter->num_q_vectors;\n+\tint err;\n+\n+\tif (q_vectors >= (rxr_remaining + txr_remaining)) {\n+\t\tfor (; rxr_remaining; v_idx++) {\n+\t\t\terr = igc_alloc_q_vector(adapter, q_vectors, v_idx,\n+\t\t\t\t\t\t 0, 0, 1, rxr_idx);\n+\n+\t\t\tif (err)\n+\t\t\t\tgoto err_out;\n+\n+\t\t\t/* update counts and index */\n+\t\t\trxr_remaining--;\n+\t\t\trxr_idx++;\n+\t\t}\n+\t}\n+\n+\tfor (; v_idx < q_vectors; v_idx++) {\n+\t\tint rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);\n+\t\tint tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);\n+\n+\t\terr = igc_alloc_q_vector(adapter, q_vectors, v_idx,\n+\t\t\t\t\t tqpv, txr_idx, rqpv, rxr_idx);\n+\n+\t\tif (err)\n+\t\t\tgoto err_out;\n+\n+\t\t/* update counts and index */\n+\t\trxr_remaining -= rqpv;\n+\t\ttxr_remaining -= tqpv;\n+\t\trxr_idx++;\n+\t\ttxr_idx++;\n+\t}\n+\n+\treturn 0;\n+\n+err_out:\n+\tadapter->num_tx_queues = 0;\n+\tadapter->num_rx_queues = 0;\n+\tadapter->num_q_vectors = 0;\n+\n+\twhile (v_idx--)\n+\t\tigc_free_q_vector(adapter, v_idx);\n+\n+\treturn -ENOMEM;\n+}\n+\n+/**\n+ *  igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors\n+ *  @adapter: Pointer to adapter structure\n+ *\n+ *  This function initializes the interrupts and allocates all of the queues.\n+ **/\n+static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)\n+{\n+\tstruct pci_dev *pdev = adapter->pdev;\n+\tint err = 0;\n+\n+\tigc_set_interrupt_capability(adapter, msix);\n+\n+\terr = igc_alloc_q_vectors(adapter);\n+\tif (err) {\n+\t\tdev_err(&pdev->dev, \"Unable to allocate memory for vectors\\n\");\n+\t\tgoto err_alloc_q_vectors;\n+\t}\n+\n+\treturn 0;\n+\n+err_alloc_q_vectors:\n+\tigc_reset_interrupt_capability(adapter);\n+\treturn err;\n+}\n+\n+static void igc_free_irq(struct igc_adapter *adapter)\n+{\n+\tif (adapter->msix_entries) {\n+\t\tint vector = 0, i;\n+\n+\t\tfree_irq(adapter->msix_entries[vector++].vector, adapter);\n+\n+\t\tfor (i = 0; i < adapter->num_q_vectors; i++)\n+\t\t\tfree_irq(adapter->msix_entries[vector++].vector,\n+\t\t\t\t adapter->q_vector[i]);\n+\t} else {\n+\t\tfree_irq(adapter->pdev->irq, adapter);\n+\t}\n+}\n+\n+/**\n+ *  igc_irq_disable - Mask off interrupt generation on the NIC\n+ *  @adapter: board private structure\n+ **/\n+static void igc_irq_disable(struct igc_adapter *adapter)\n+{\n+\tstruct igc_hw *hw = &adapter->hw;\n+\n+\tif (adapter->msix_entries) {\n+\t\tu32 regval = rd32(IGC_EIAM);\n+\n+\t\twr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);\n+\t\twr32(IGC_EIMC, adapter->eims_enable_mask);\n+\t\tregval = rd32(IGC_EIAC);\n+\t\twr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);\n+\t}\n+\n+\twr32(IGC_IAM, 0);\n+\twr32(IGC_IMC, ~0);\n+\twrfl();\n+\n+\tif (adapter->msix_entries) {\n+\t\tint vector = 0, i;\n+\n+\t\tsynchronize_irq(adapter->msix_entries[vector++].vector);\n+\n+\t\tfor (i = 0; i < adapter->num_q_vectors; i++)\n+\t\t\tsynchronize_irq(adapter->msix_entries[vector++].vector);\n+\t} else {\n+\t\tsynchronize_irq(adapter->pdev->irq);\n+\t}\n+}\n+\n+/**\n+ *  igc_irq_enable - Enable default interrupt generation settings\n+ *  @adapter: board private structure\n+ **/\n+static void igc_irq_enable(struct igc_adapter *adapter)\n+{\n+\tstruct igc_hw *hw = &adapter->hw;\n+\n+\tif (adapter->msix_entries) 
{\n+\t\tu32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;\n+\t\tu32 regval = rd32(IGC_EIAC);\n+\n+\t\twr32(IGC_EIAC, regval | adapter->eims_enable_mask);\n+\t\tregval = rd32(IGC_EIAM);\n+\t\twr32(IGC_EIAM, regval | adapter->eims_enable_mask);\n+\t\twr32(IGC_EIMS, adapter->eims_enable_mask);\n+\t\twr32(IGC_IMS, ims);\n+\t} else {\n+\t\twr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);\n+\t\twr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);\n+\t}\n+}\n+\n+/**\n+ *  igc_request_irq - initialize interrupts\n+ *  @adapter: Pointer to adapter structure\n+ *\n+ *  Attempts to configure interrupts using the best available\n+ *  capabilities of the hardware and kernel.\n+ **/\n+static int igc_request_irq(struct igc_adapter *adapter)\n+{\n+\tint err = 0;\n+\n+\tif (adapter->flags & IGC_FLAG_HAS_MSIX) {\n+\t\terr = igc_request_msix(adapter);\n+\t\tif (!err)\n+\t\t\tgoto request_done;\n+\t\t/* fall back to MSI */\n+\n+\t\tigc_clear_interrupt_scheme(adapter);\n+\t\terr = igc_init_interrupt_scheme(adapter, false);\n+\t\tif (err)\n+\t\t\tgoto request_done;\n+\t\tigc_configure(adapter);\n+\t}\n+\n+request_done:\n+\treturn err;\n+}\n+\n+static irqreturn_t igc_msix_ring(int irq, void *data)\n+{\n+\tstruct igc_q_vector *q_vector = data;\n+\n+\t/* Write the ITR value calculated from the previous interrupt. */\n+\tigc_write_itr(q_vector);\n+\n+\tnapi_schedule(&q_vector->napi);\n+\n+\treturn IRQ_HANDLED;\n+}\n+\n+static void igc_write_itr(struct igc_q_vector *q_vector)\n+{\n+\tu32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;\n+\n+\tif (!q_vector->set_itr)\n+\t\treturn;\n+\n+\tif (!itr_val)\n+\t\titr_val = IGC_ITR_VAL_MASK;\n+\n+\titr_val |= IGC_EITR_CNT_IGNR;\n+\n+\twritel(itr_val, q_vector->itr_register);\n+\tq_vector->set_itr = 0;\n+}\n+\n+/**\n  *  igc_open - Called when a network interface is made active\n  *  @netdev: network interface device structure\n  *\n@@ -325,6 +1317,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)\n {\n \tstruct igc_adapter *adapter = netdev_priv(netdev);\n \tstruct igc_hw *hw = &adapter->hw;\n+\tint err = 0;\n \tint i = 0;\n \n \t/* disallow open during test */\n@@ -340,15 +1333,40 @@ static int __igc_open(struct net_device *netdev, bool resuming)\n \n \tigc_configure(adapter);\n \n+\terr = igc_request_irq(adapter);\n+\tif (err)\n+\t\tgoto err_req_irq;\n+\n+\t/* Notify the stack of the actual queue counts. */\n+\tnetif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);\n+\tif (err)\n+\t\tgoto err_set_queues;\n+\n+\terr = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);\n+\tif (err)\n+\t\tgoto err_set_queues;\n+\n \tclear_bit(__IGC_DOWN, &adapter->state);\n \n \tfor (i = 0; i < adapter->num_q_vectors; i++)\n \t\tnapi_enable(&adapter->q_vector[i]->napi);\n \n+\t/* Clear any pending interrupts. */\n+\trd32(IGC_ICR);\n+\tigc_irq_enable(adapter);\n+\n \t/* start the watchdog. 
*/\n \thw->mac.get_link_status = 1;\n \n \treturn IGC_SUCCESS;\n+\n+err_set_queues:\n+\tigc_free_irq(adapter);\n+err_req_irq:\n+\tigc_release_hw_control(adapter);\n+\tigc_power_down_link(adapter);\n+\n+\treturn err;\n }\n \n static int igc_open(struct net_device *netdev)\n@@ -377,6 +1395,8 @@ static int __igc_close(struct net_device *netdev, bool suspending)\n \n \tigc_release_hw_control(adapter);\n \n+\tigc_free_irq(adapter);\n+\n \treturn 0;\n }\n \n@@ -598,6 +1618,8 @@ static int igc_probe(struct pci_dev *pdev,\n err_register:\n \tigc_release_hw_control(adapter);\n err_sw_init:\n+\tigc_clear_interrupt_scheme(adapter);\n+\tiounmap(adapter->io_addr);\n err_ioremap:\n \tfree_netdev(netdev);\n err_alloc_etherdev:\n@@ -675,6 +1697,14 @@ static int igc_sw_init(struct igc_adapter *adapter)\n \tadapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +\n \t\t\t\t\tVLAN_HLEN;\n \n+\tif (igc_init_interrupt_scheme(adapter, true)) {\n+\t\tdev_err(&pdev->dev, \"Unable to allocate memory for queues\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Explicitly disable IRQ since the NIC can be in any state. */\n+\tigc_irq_disable(adapter);\n+\n \tset_bit(__IGC_DOWN, &adapter->state);\n \n \treturn 0;\n",
    "prefixes": [
        "v7",
        "04/11"
    ]
}
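
The patch and put operations listed at the top require write access, which the Patchwork REST API grants through token authentication. The sketch below assumes a hypothetical maintainer token and shows a partial update of the state and archived fields; the values are examples only and do not reflect the actual history of this patch.

import requests

url = "http://patchwork.ozlabs.org/api/patches/963013/"
# Placeholder token; a real maintainer token is issued per Patchwork user account.
headers = {"Authorization": "Token 0123456789abcdef"}

# PATCH sends a partial update: only the listed fields are changed.
resp = requests.patch(url, headers=headers, json={
    "state": "accepted",   # example value; the patch above is "superseded"
    "archived": True,
})
resp.raise_for_status()
print(resp.json()["state"])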