get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch (full update).
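
Retrieving a patch is a plain HTTP GET against its URL. A minimal sketch, assuming Python 3 with the third-party "requests" library (any HTTP client works; the library is not part of this API):

    import requests

    # Fetch one patch as JSON from the Patchwork REST API.
    resp = requests.get("http://patchwork.ozlabs.org/api/patches/867756/")
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])   # "[7/9] ixgbevf: allocate the rings as part of q_vector"
    print(patch["state"])  # "accepted"

The raw request and response below show the full payload.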

GET /api/patches/867756/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 867756,
    "url": "http://patchwork.ozlabs.org/api/patches/867756/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180131005143.19264.40074.stgit@localhost6.localdomain6/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20180131005143.19264.40074.stgit@localhost6.localdomain6>",
    "list_archive_url": null,
    "date": "2018-01-31T00:51:43",
    "name": "[7/9] ixgbevf: allocate the rings as part of q_vector",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "d9cf7739694610dffa0d6e8e9d3c1a8c10e0a816",
    "submitter": {
        "id": 1670,
        "url": "http://patchwork.ozlabs.org/api/people/1670/?format=api",
        "name": "Tantilov, Emil S",
        "email": "emil.s.tantilov@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180131005143.19264.40074.stgit@localhost6.localdomain6/mbox/",
    "series": [
        {
            "id": 26180,
            "url": "http://patchwork.ozlabs.org/api/series/26180/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=26180",
            "date": "2018-01-31T00:51:07",
            "name": "ixgbevf: build_skb support and related changes",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/26180/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/867756/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/867756/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.137; helo=fraxinus.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
        "Received": [
            "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3zWPgM0hVBz9ryQ\n\tfor <incoming@patchwork.ozlabs.org>;\n\tWed, 31 Jan 2018 11:45:51 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id 710E587D9C;\n\tWed, 31 Jan 2018 00:45:49 +0000 (UTC)",
            "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 8MqkUna7exad; Wed, 31 Jan 2018 00:45:48 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id 47CAE87DD9;\n\tWed, 31 Jan 2018 00:45:48 +0000 (UTC)",
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id D03861C4377\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 31 Jan 2018 00:45:46 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id CC09788354\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 31 Jan 2018 00:45:46 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id IIv2gJz+ZHfJ for <intel-wired-lan@lists.osuosl.org>;\n\tWed, 31 Jan 2018 00:45:45 +0000 (UTC)",
            "from mga17.intel.com (mga17.intel.com [192.55.52.151])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id 94566876E9\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 31 Jan 2018 00:45:45 +0000 (UTC)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby fmsmga107.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t30 Jan 2018 16:45:44 -0800",
            "from estantil-desk3.jf.intel.com (HELO localhost6.localdomain6)\n\t([134.134.177.100])\n\tby FMSMGA003.fm.intel.com with ESMTP; 30 Jan 2018 16:45:44 -0800"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.46,437,1511856000\"; d=\"scan'208\";a=\"23723887\"",
        "From": "Emil Tantilov <emil.s.tantilov@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Tue, 30 Jan 2018 16:51:43 -0800",
        "Message-ID": "<20180131005143.19264.40074.stgit@localhost6.localdomain6>",
        "In-Reply-To": "<20180131005015.19264.44085.stgit@localhost6.localdomain6>",
        "References": "<20180131005015.19264.44085.stgit@localhost6.localdomain6>",
        "User-Agent": "StGit/0.17.1-17-ge4e0",
        "MIME-Version": "1.0",
        "Subject": "[Intel-wired-lan] [PATCH 7/9] ixgbevf: allocate the rings as part\n\tof q_vector",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.24",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "Make it so that all rings allocations are made as part of q_vector.\nThe advantage to this is that we can keep all of the memory related to\na single interrupt in one page.\n\nThe goal is to bring the logic of handling rings closer to ixgbe.\n\nSigned-off-by: Emil Tantilov <emil.s.tantilov@intel.com>\n---\n drivers/net/ethernet/intel/ixgbevf/ixgbevf.h      |    7 \n drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c |  392 +++++++++------------\n 2 files changed, 182 insertions(+), 217 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h\nindex fe7111c..97e1267 100644\n--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h\n+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h\n@@ -97,6 +97,7 @@ enum ixgbevf_ring_state_t {\n \n struct ixgbevf_ring {\n \tstruct ixgbevf_ring *next;\n+\tstruct ixgbevf_q_vector *q_vector;\t/* backpointer to q_vector */\n \tstruct net_device *netdev;\n \tstruct device *dev;\n \tvoid *desc;\t\t\t/* descriptor ring memory */\n@@ -128,7 +129,7 @@ struct ixgbevf_ring {\n \t */\n \tu16 reg_idx;\n \tint queue_index; /* needed for multiqueue queue management */\n-};\n+} ____cacheline_internodealigned_in_smp;\n \n /* How many Rx Buffers do we bundle into one write to the hardware ? */\n #define IXGBEVF_RX_BUFFER_WRITE\t16\t/* Must be power of 2 */\n@@ -241,7 +242,11 @@ struct ixgbevf_q_vector {\n \tu16 itr; /* Interrupt throttle rate written to EITR */\n \tstruct napi_struct napi;\n \tstruct ixgbevf_ring_container rx, tx;\n+\tstruct rcu_head rcu;    /* to avoid race with update stats on free */\n \tchar name[IFNAMSIZ + 9];\n+\n+\t/* for dynamic allocation of rings associated with this q_vector */\n+\tstruct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;\n #ifdef CONFIG_NET_RX_BUSY_POLL\n \tunsigned int state;\n #define IXGBEVF_QV_STATE_IDLE\t\t0\ndiff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c\nindex b381127..754efb4 100644\n--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c\n+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c\n@@ -1270,85 +1270,6 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)\n \treturn IRQ_HANDLED;\n }\n \n-static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,\n-\t\t\t\t     int r_idx)\n-{\n-\tstruct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];\n-\n-\ta->rx_ring[r_idx]->next = q_vector->rx.ring;\n-\tq_vector->rx.ring = a->rx_ring[r_idx];\n-\tq_vector->rx.count++;\n-}\n-\n-static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,\n-\t\t\t\t     int t_idx)\n-{\n-\tstruct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];\n-\n-\ta->tx_ring[t_idx]->next = q_vector->tx.ring;\n-\tq_vector->tx.ring = a->tx_ring[t_idx];\n-\tq_vector->tx.count++;\n-}\n-\n-/**\n- * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors\n- * @adapter: board private structure to initialize\n- *\n- * This function maps descriptor rings to the queue-specific vectors\n- * we were allotted through the MSI-X enabling code.  Ideally, we'd have\n- * one vector per ring/queue, but on a constrained vector budget, we\n- * group the rings as \"efficiently\" as possible.  
You would add new\n- * mapping configurations in here.\n- **/\n-static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)\n-{\n-\tint q_vectors;\n-\tint v_start = 0;\n-\tint rxr_idx = 0, txr_idx = 0;\n-\tint rxr_remaining = adapter->num_rx_queues;\n-\tint txr_remaining = adapter->num_tx_queues;\n-\tint i, j;\n-\tint rqpv, tqpv;\n-\n-\tq_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;\n-\n-\t/* The ideal configuration...\n-\t * We have enough vectors to map one per queue.\n-\t */\n-\tif (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {\n-\t\tfor (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)\n-\t\t\tmap_vector_to_rxq(adapter, v_start, rxr_idx);\n-\n-\t\tfor (; txr_idx < txr_remaining; v_start++, txr_idx++)\n-\t\t\tmap_vector_to_txq(adapter, v_start, txr_idx);\n-\t\treturn 0;\n-\t}\n-\n-\t/* If we don't have enough vectors for a 1-to-1\n-\t * mapping, we'll have to group them so there are\n-\t * multiple queues per vector.\n-\t */\n-\t/* Re-adjusting *qpv takes care of the remainder. */\n-\tfor (i = v_start; i < q_vectors; i++) {\n-\t\trqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);\n-\t\tfor (j = 0; j < rqpv; j++) {\n-\t\t\tmap_vector_to_rxq(adapter, i, rxr_idx);\n-\t\t\trxr_idx++;\n-\t\t\trxr_remaining--;\n-\t\t}\n-\t}\n-\tfor (i = v_start; i < q_vectors; i++) {\n-\t\ttqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);\n-\t\tfor (j = 0; j < tqpv; j++) {\n-\t\t\tmap_vector_to_txq(adapter, i, txr_idx);\n-\t\t\ttxr_idx++;\n-\t\t\ttxr_remaining--;\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n /**\n  * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts\n  * @adapter: board private structure\n@@ -1421,20 +1342,6 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)\n \treturn err;\n }\n \n-static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)\n-{\n-\tint i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;\n-\n-\tfor (i = 0; i < q_vectors; i++) {\n-\t\tstruct ixgbevf_q_vector *q_vector = adapter->q_vector[i];\n-\n-\t\tq_vector->rx.ring = NULL;\n-\t\tq_vector->tx.ring = NULL;\n-\t\tq_vector->rx.count = 0;\n-\t\tq_vector->tx.count = 0;\n-\t}\n-}\n-\n /**\n  * ixgbevf_request_irq - initialize interrupts\n  * @adapter: board private structure\n@@ -1474,8 +1381,6 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)\n \t\tfree_irq(adapter->msix_entries[i].vector,\n \t\t\t adapter->q_vector[i]);\n \t}\n-\n-\tixgbevf_reset_q_vectors(adapter);\n }\n \n /**\n@@ -2457,105 +2362,171 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)\n }\n \n /**\n- * ixgbevf_alloc_queues - Allocate memory for all rings\n+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported\n+ * @adapter: board private structure to initialize\n+ *\n+ * Attempt to configure the interrupts using the best available\n+ * capabilities of the hardware and the kernel.\n+ **/\n+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)\n+{\n+\tint vector, v_budget;\n+\n+\t/* It's easy to be greedy for MSI-X vectors, but it really\n+\t * doesn't do us much good if we have a lot more vectors\n+\t * than CPU's.  
So let's be conservative and only ask for\n+\t * (roughly) the same number of vectors as there are CPU's.\n+\t * The default is to use pairs of vectors.\n+\t */\n+\tv_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);\n+\tv_budget = min_t(int, v_budget, num_online_cpus());\n+\tv_budget += NON_Q_VECTORS;\n+\n+\tadapter->msix_entries = kcalloc(v_budget,\n+\t\t\t\t\tsizeof(struct msix_entry), GFP_KERNEL);\n+\tif (!adapter->msix_entries)\n+\t\treturn -ENOMEM;\n+\n+\tfor (vector = 0; vector < v_budget; vector++)\n+\t\tadapter->msix_entries[vector].entry = vector;\n+\n+\t/* A failure in MSI-X entry allocation isn't fatal, but the VF driver\n+\t * does not support any other modes, so we will simply fail here. Note\n+\t * that we clean up the msix_entries pointer else-where.\n+\t */\n+\treturn ixgbevf_acquire_msix_vectors(adapter, v_budget);\n+}\n+\n+static void ixgbevf_add_ring(struct ixgbevf_ring *ring,\n+\t\t\t     struct ixgbevf_ring_container *head)\n+{\n+\tring->next = head->ring;\n+\thead->ring = ring;\n+\thead->count++;\n+}\n+\n+/**\n+ * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector\n  * @adapter: board private structure to initialize\n+ * @v_idx: index of vector in adapter struct\n+ * @txr_count: number of Tx rings for q vector\n+ * @txr_idx: index of first Tx ring to assign\n+ * @rxr_count: number of Rx rings for q vector\n+ * @rxr_idx: index of first Rx ring to assign\n  *\n- * We allocate one ring per queue at run-time since we don't know the\n- * number of queues at compile-time.  The polling_netdev array is\n- * intended for Multiqueue, but should work fine with a single queue.\n+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.\n  **/\n-static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)\n+static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,\n+\t\t\t\t  int txr_count, int txr_idx,\n+\t\t\t\t  int rxr_count, int rxr_idx)\n {\n+\tstruct ixgbevf_q_vector *q_vector;\n \tstruct ixgbevf_ring *ring;\n-\tint rx = 0, tx = 0;\n+\tint ring_count, size;\n+\n+\tring_count = txr_count + rxr_count;\n+\tsize = sizeof(*q_vector) + (sizeof(*ring) * ring_count);\n+\n+\t/* allocate q_vector and rings */\n+\tq_vector = kzalloc(size, GFP_KERNEL);\n+\tif (!q_vector)\n+\t\treturn -ENOMEM;\n+\n+\t/* initialize NAPI */\n+\tnetif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);\n+\n+\t/* tie q_vector and adapter together */\n+\tadapter->q_vector[v_idx] = q_vector;\n+\tq_vector->adapter = adapter;\n+\tq_vector->v_idx = v_idx;\n \n-\tfor (; tx < adapter->num_tx_queues; tx++) {\n-\t\tring = kzalloc(sizeof(*ring), GFP_KERNEL);\n-\t\tif (!ring)\n-\t\t\tgoto err_allocation;\n+\t/* initialize pointer to rings */\n+\tring = q_vector->ring;\n \n+\twhile (txr_count) {\n+\t\t/* assign generic ring traits */\n \t\tring->dev = &adapter->pdev->dev;\n \t\tring->netdev = adapter->netdev;\n+\n+\t\t/* configure backlink on ring */\n+\t\tring->q_vector = q_vector;\n+\n+\t\t/* update q_vector Tx values */\n+\t\tixgbevf_add_ring(ring, &q_vector->tx);\n+\n+\t\t/* apply Tx specific ring traits */\n \t\tring->count = adapter->tx_ring_count;\n-\t\tring->queue_index = tx;\n-\t\tring->reg_idx = tx;\n+\t\tring->queue_index = txr_idx;\n+\t\tring->reg_idx = txr_idx;\n \n-\t\tadapter->tx_ring[tx] = ring;\n-\t}\n+\t\t/* assign ring to adapter */\n+\t\t adapter->tx_ring[txr_idx] = ring;\n+\n+\t\t/* update count and index */\n+\t\ttxr_count--;\n+\t\ttxr_idx++;\n \n-\tfor (; rx < adapter->num_rx_queues; rx++) {\n-\t\tring = 
kzalloc(sizeof(*ring), GFP_KERNEL);\n-\t\tif (!ring)\n-\t\t\tgoto err_allocation;\n+\t\t/* push pointer to next ring */\n+\t\tring++;\n+\t}\n \n+\twhile (rxr_count) {\n+\t\t/* assign generic ring traits */\n \t\tring->dev = &adapter->pdev->dev;\n \t\tring->netdev = adapter->netdev;\n \n+\t\t/* configure backlink on ring */\n+\t\tring->q_vector = q_vector;\n+\n+\t\t/* update q_vector Rx values */\n+\t\tixgbevf_add_ring(ring, &q_vector->rx);\n+\n+\t\t/* apply Rx specific ring traits */\n \t\tring->count = adapter->rx_ring_count;\n-\t\tring->queue_index = rx;\n-\t\tring->reg_idx = rx;\n+\t\tring->queue_index = rxr_idx;\n+\t\tring->reg_idx = rxr_idx;\n \n-\t\tadapter->rx_ring[rx] = ring;\n-\t}\n+\t\t/* assign ring to adapter */\n+\t\tadapter->rx_ring[rxr_idx] = ring;\n \n-\treturn 0;\n+\t\t/* update count and index */\n+\t\trxr_count--;\n+\t\trxr_idx++;\n \n-err_allocation:\n-\twhile (tx) {\n-\t\tkfree(adapter->tx_ring[--tx]);\n-\t\tadapter->tx_ring[tx] = NULL;\n+\t\t/* push pointer to next ring */\n+\t\tring++;\n \t}\n \n-\twhile (rx) {\n-\t\tkfree(adapter->rx_ring[--rx]);\n-\t\tadapter->rx_ring[rx] = NULL;\n-\t}\n-\treturn -ENOMEM;\n+\treturn 0;\n }\n \n /**\n- * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported\n+ * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector\n  * @adapter: board private structure to initialize\n+ * @v_idx: index of vector in adapter struct\n  *\n- * Attempt to configure the interrupts using the best available\n- * capabilities of the hardware and the kernel.\n+ * This function frees the memory allocated to the q_vector.  In addition if\n+ * NAPI is enabled it will delete any references to the NAPI struct prior\n+ * to freeing the q_vector.\n  **/\n-static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)\n+static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)\n {\n-\tstruct net_device *netdev = adapter->netdev;\n-\tint err;\n-\tint vector, v_budget;\n+\tstruct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];\n+\tstruct ixgbevf_ring *ring;\n \n-\t/* It's easy to be greedy for MSI-X vectors, but it really\n-\t * doesn't do us much good if we have a lot more vectors\n-\t * than CPU's.  
So let's be conservative and only ask for\n-\t * (roughly) the same number of vectors as there are CPU's.\n-\t * The default is to use pairs of vectors.\n-\t */\n-\tv_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);\n-\tv_budget = min_t(int, v_budget, num_online_cpus());\n-\tv_budget += NON_Q_VECTORS;\n+\tixgbevf_for_each_ring(ring, q_vector->tx)\n+\t\tadapter->tx_ring[ring->queue_index] = NULL;\n \n-\t/* A failure in MSI-X entry allocation isn't fatal, but it does\n-\t * mean we disable MSI-X capabilities of the adapter.\n-\t */\n-\tadapter->msix_entries = kcalloc(v_budget,\n-\t\t\t\t\tsizeof(struct msix_entry), GFP_KERNEL);\n-\tif (!adapter->msix_entries)\n-\t\treturn -ENOMEM;\n+\tixgbevf_for_each_ring(ring, q_vector->rx)\n+\t\tadapter->rx_ring[ring->queue_index] = NULL;\n \n-\tfor (vector = 0; vector < v_budget; vector++)\n-\t\tadapter->msix_entries[vector].entry = vector;\n+\tadapter->q_vector[v_idx] = NULL;\n+\tnetif_napi_del(&q_vector->napi);\n \n-\terr = ixgbevf_acquire_msix_vectors(adapter, v_budget);\n-\tif (err)\n-\t\treturn err;\n-\n-\terr = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);\n-\tif (err)\n-\t\treturn err;\n-\n-\treturn netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);\n+\t/* ixgbevf_get_stats() might access the rings on this vector,\n+\t * we must wait a grace period before freeing it.\n+\t */\n+\tkfree_rcu(q_vector, rcu);\n }\n \n /**\n@@ -2567,35 +2538,53 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)\n  **/\n static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)\n {\n-\tint q_idx, num_q_vectors;\n-\tstruct ixgbevf_q_vector *q_vector;\n+\tint q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;\n+\tint rxr_remaining = adapter->num_rx_queues;\n+\tint txr_remaining = adapter->num_tx_queues;\n+\tint rxr_idx = 0, txr_idx = 0, v_idx = 0;\n+\tint err;\n+\n+\tif (q_vectors >= (rxr_remaining + txr_remaining)) {\n+\t\tfor (; rxr_remaining; v_idx++, q_vectors--) {\n+\t\t\tint rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);\n+\n+\t\t\terr = ixgbevf_alloc_q_vector(adapter, v_idx,\n+\t\t\t\t\t\t     0, 0, rqpv, rxr_idx);\n+\t\t\tif (err)\n+\t\t\t\tgoto err_out;\n+\n+\t\t\t/* update counts and index */\n+\t\t\trxr_remaining -= rqpv;\n+\t\t\trxr_idx += rqpv;\n+\t\t}\n+\t}\n+\n+\tfor (; q_vectors; v_idx++, q_vectors--) {\n+\t\tint rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);\n+\t\tint tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);\n \n-\tnum_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;\n+\t\terr = ixgbevf_alloc_q_vector(adapter, v_idx,\n+\t\t\t\t\t     tqpv, txr_idx,\n+\t\t\t\t\t     rqpv, rxr_idx);\n \n-\tfor (q_idx = 0; q_idx < num_q_vectors; q_idx++) {\n-\t\tq_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);\n-\t\tif (!q_vector)\n+\t\tif (err)\n \t\t\tgoto err_out;\n-\t\tq_vector->adapter = adapter;\n-\t\tq_vector->v_idx = q_idx;\n-\t\tnetif_napi_add(adapter->netdev, &q_vector->napi,\n-\t\t\t       ixgbevf_poll, 64);\n-\t\tadapter->q_vector[q_idx] = q_vector;\n+\n+\t\t/* update counts and index */\n+\t\trxr_remaining -= rqpv;\n+\t\trxr_idx += rqpv;\n+\t\ttxr_remaining -= tqpv;\n+\t\ttxr_idx += tqpv;\n \t}\n \n \treturn 0;\n \n err_out:\n-\twhile (q_idx) {\n-\t\tq_idx--;\n-\t\tq_vector = adapter->q_vector[q_idx];\n-#ifdef CONFIG_NET_RX_BUSY_POLL\n-\t\tnapi_hash_del(&q_vector->napi);\n-#endif\n-\t\tnetif_napi_del(&q_vector->napi);\n-\t\tkfree(q_vector);\n-\t\tadapter->q_vector[q_idx] = NULL;\n+\twhile (v_idx) {\n+\t\tv_idx--;\n+\t\tixgbevf_free_q_vector(adapter, 
v_idx);\n \t}\n+\n \treturn -ENOMEM;\n }\n \n@@ -2609,17 +2598,11 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)\n  **/\n static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)\n {\n-\tint q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;\n-\n-\tfor (q_idx = 0; q_idx < num_q_vectors; q_idx++) {\n-\t\tstruct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];\n+\tint q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;\n \n-\t\tadapter->q_vector[q_idx] = NULL;\n-#ifdef CONFIG_NET_RX_BUSY_POLL\n-\t\tnapi_hash_del(&q_vector->napi);\n-#endif\n-\t\tnetif_napi_del(&q_vector->napi);\n-\t\tkfree(q_vector);\n+\twhile (q_vectors) {\n+\t\tq_vectors--;\n+\t\tixgbevf_free_q_vector(adapter, q_vectors);\n \t}\n }\n \n@@ -2663,12 +2646,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)\n \t\tgoto err_alloc_q_vectors;\n \t}\n \n-\terr = ixgbevf_alloc_queues(adapter);\n-\tif (err) {\n-\t\tpr_err(\"Unable to allocate memory for queues\\n\");\n-\t\tgoto err_alloc_queues;\n-\t}\n-\n \thw_dbg(&adapter->hw, \"Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\\n\",\n \t       (adapter->num_rx_queues > 1) ? \"Enabled\" :\n \t       \"Disabled\", adapter->num_rx_queues, adapter->num_tx_queues);\n@@ -2676,8 +2653,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)\n \tset_bit(__IXGBEVF_DOWN, &adapter->state);\n \n \treturn 0;\n-err_alloc_queues:\n-\tixgbevf_free_q_vectors(adapter);\n err_alloc_q_vectors:\n \tixgbevf_reset_interrupt_capability(adapter);\n err_set_interrupt:\n@@ -2693,17 +2668,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)\n  **/\n static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)\n {\n-\tint i;\n-\n-\tfor (i = 0; i < adapter->num_tx_queues; i++) {\n-\t\tkfree(adapter->tx_ring[i]);\n-\t\tadapter->tx_ring[i] = NULL;\n-\t}\n-\tfor (i = 0; i < adapter->num_rx_queues; i++) {\n-\t\tkfree(adapter->rx_ring[i]);\n-\t\tadapter->rx_ring[i] = NULL;\n-\t}\n-\n \tadapter->num_tx_queues = 0;\n \tadapter->num_rx_queues = 0;\n \n@@ -3307,12 +3271,6 @@ int ixgbevf_open(struct net_device *netdev)\n \n \tixgbevf_configure(adapter);\n \n-\t/* Map the Tx/Rx rings to the vectors we were allotted.\n-\t * if request_irq will be called in this function map_rings\n-\t * must be called *before* up_complete\n-\t */\n-\tixgbevf_map_rings_to_vectors(adapter);\n-\n \terr = ixgbevf_request_irq(adapter);\n \tif (err)\n \t\tgoto err_req_irq;\n@@ -4042,6 +4000,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,\n \n \tstats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;\n \n+\trcu_read_lock();\n \tfor (i = 0; i < adapter->num_rx_queues; i++) {\n \t\tring = adapter->rx_ring[i];\n \t\tdo {\n@@ -4063,6 +4022,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,\n \t\tstats->tx_bytes += bytes;\n \t\tstats->tx_packets += packets;\n \t}\n+\trcu_read_unlock();\n }\n \n #define IXGBEVF_MAX_MAC_HDR_LEN\t\t127\n",
    "prefixes": [
        "7/9"
    ]
}
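
Updates go to the same URL via PATCH (partial) or PUT (full). A hedged sketch of a partial update, again assuming Python with "requests"; "TOKEN" is a placeholder for an API token whose user has maintainer rights on the project:

    import requests

    # Change only the listed fields via HTTP PATCH; all other fields are untouched.
    resp = requests.patch(
        "http://patchwork.ozlabs.org/api/patches/867756/",
        headers={"Authorization": "Token TOKEN"},  # TOKEN is a placeholder
        json={"state": "accepted"},
    )
    resp.raise_for_status()
    print(resp.json()["state"])  # "accepted"

Write access requires authentication; the GET shown above works anonymously.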