get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Fully update a patch; all writable fields are replaced by the request body.

GET /api/patches/522602/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 522602,
    "url": "http://patchwork.ozlabs.org/api/patches/522602/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20150924233547.15221.59274.stgit@jbrandeb-hsm1.jf.intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20150924233547.15221.59274.stgit@jbrandeb-hsm1.jf.intel.com>",
    "list_archive_url": null,
    "date": "2015-09-24T23:35:47",
    "name": "[net-next,v1] drivers/net/intel: use napi_complete_done()",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "4957e58531c058a00b1868e0bb829fa7acbbc526",
    "submitter": {
        "id": 189,
        "url": "http://patchwork.ozlabs.org/api/people/189/?format=api",
        "name": "Jesse Brandeburg",
        "email": "jesse.brandeburg@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20150924233547.15221.59274.stgit@jbrandeb-hsm1.jf.intel.com/mbox/",
    "series": [],
    "comments": "http://patchwork.ozlabs.org/api/patches/522602/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/522602/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@lists.osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Received": [
            "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\tby ozlabs.org (Postfix) with ESMTP id A3F20140783\n\tfor <incoming@patchwork.ozlabs.org>;\n\tFri, 25 Sep 2015 09:35:53 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id BACB72F28D;\n\tThu, 24 Sep 2015 23:35:52 +0000 (UTC)",
            "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id kMyPQ26t98qq; Thu, 24 Sep 2015 23:35:50 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby silver.osuosl.org (Postfix) with ESMTP id EE66526E25;\n\tThu, 24 Sep 2015 23:35:49 +0000 (UTC)",
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id 1E2B31CEB78\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 24 Sep 2015 23:35:49 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 183AA90BEB\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 24 Sep 2015 23:35:49 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id TxrGR-TbFvTv for <intel-wired-lan@lists.osuosl.org>;\n\tThu, 24 Sep 2015 23:35:47 +0000 (UTC)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id CD9E98BBF9\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 24 Sep 2015 23:35:47 +0000 (UTC)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby orsmga102.jf.intel.com with ESMTP; 24 Sep 2015 16:35:48 -0700",
            "from jbrandeb-hsm1.jf.intel.com ([134.134.3.89])\n\tby orsmga002.jf.intel.com with ESMTP; 24 Sep 2015 16:35:47 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.17,583,1437462000\"; d=\"scan'208\";a=\"812724359\"",
        "From": "Jesse Brandeburg <jesse.brandeburg@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Thu, 24 Sep 2015 16:35:47 -0700",
        "Message-ID": "<20150924233547.15221.59274.stgit@jbrandeb-hsm1.jf.intel.com>",
        "User-Agent": "StGit/0.17.1-dirty",
        "MIME-Version": "1.0",
        "Subject": "[Intel-wired-lan] [net-next PATCH v1] drivers/net/intel: use\n\tnapi_complete_done()",
        "X-BeenThere": "intel-wired-lan@lists.osuosl.org",
        "X-Mailman-Version": "2.1.18-1",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.lists.osuosl.org>",
        "List-Unsubscribe": "<http://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@lists.osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@lists.osuosl.org?subject=help>",
        "List-Subscribe": "<http://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@lists.osuosl.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@lists.osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@lists.osuosl.org>"
    },
    "content": "As per Eric Dumazet's previous patches:\n(see commit (24d2e4a50737) - tg3: use napi_complete_done())\n\nQuoting verbatim:\nUsing napi_complete_done() instead of napi_complete() allows\nus to use /sys/class/net/ethX/gro_flush_timeout\n\nGRO layer can aggregate more packets if the flush is delayed a bit,\nwithout having to set too big coalescing parameters that impact\nlatencies.\n</end quote>\n\nTested\nconfiguration: low latency via ethtool -C ethx adaptive-rx off\n\t\t\t\trx-usecs 10 adaptive-tx off tx-usecs 15\nworkload: streaming rx using netperf TCP_MAERTS\n\nigb:\n# ethtool -C enp5s0f0 rx-usecs 10\n# echo 0 >  /sys/class/net/enp5s0f0/gro_flush_timeout\n# netperf -H 10.0.0.1 -t TCP_MAERTS -D1 -v9\nMIGRATED TCP MAERTS TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 10.0.0.1 () port 0 AF_INET : demo\n...\nInterim result:  941.48 10^6bits/s over 1.000 seconds ending at 1440193171.589\n\nAlignment      Offset         Bytes    Bytes       Recvs   Bytes    Sends\nLocal  Remote  Local  Remote  Xfered   Per                 Per\nRecv   Send    Recv   Send             Recv (avg)          Send (avg)\n    8       8      0       0 1176930056  1475.36    797726   16384.00  71905\n\n# echo 10000 >  /sys/class/net/enp5s0f0/gro_flush_timeout\n# netperf -H 10.0.0.1 -t TCP_MAERTS D1 -v9\nMIGRATED TCP MAERTS TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 10.0.0.1 () port 0 AF_INET : demo\n...\nInterim result:  941.49 10^6bits/s over 0.997 seconds ending at 1440193142.763\n\nAlignment      Offset         Bytes    Bytes       Recvs   Bytes    Sends\nLocal  Remote  Local  Remote  Xfered   Per                 Per\nRecv   Send    Recv   Send             Recv (avg)          Send (avg)\n    8       8      0       0 1175182320  50476.00     23282   16384.00  71816\n\ni40e:\nHard to test because the traffic is incoming so fast (24Gb/s) that GRO\nalways receives 87kB, even at the highest interrupt rate.\n\nOther drivers were only compile tested.\n\nSigned-off-by: Jesse 
Brandeburg <jesse.brandeburg@intel.com>\nCC: Don Skidmore <donald.c.skidmore@intel.com>\nCC: Todd Fujinaka <todd.fujinaka@intel.com>\n---\n drivers/net/ethernet/intel/e1000/e1000_main.c     |    2 +-\n drivers/net/ethernet/intel/e1000e/netdev.c        |    2 +-\n drivers/net/ethernet/intel/fm10k/fm10k_main.c     |   21 ++++++++++++---------\n drivers/net/ethernet/intel/i40e/i40e_txrx.c       |    8 ++++++--\n drivers/net/ethernet/intel/i40evf/i40e_txrx.c     |    8 ++++++--\n drivers/net/ethernet/intel/igb/igb_main.c         |   17 +++++++++++------\n drivers/net/ethernet/intel/igbvf/netdev.c         |    2 +-\n drivers/net/ethernet/intel/ixgbe/ixgbe_main.c     |   14 +++++++++-----\n drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c |   14 ++++++++------\n 9 files changed, 55 insertions(+), 33 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c\nindex 36d1d48..e3d0761 100644\n--- a/drivers/net/ethernet/intel/e1000/e1000_main.c\n+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c\n@@ -3820,7 +3820,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)\n \tif (work_done < budget) {\n \t\tif (likely(adapter->itr_setting & 3))\n \t\t\te1000_set_itr(adapter);\n-\t\tnapi_complete(napi);\n+\t\tnapi_complete_done(napi, work_done);\n \t\tif (!test_bit(__E1000_DOWN, &adapter->flags))\n \t\t\te1000_irq_enable(adapter);\n \t}\ndiff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c\nindex a501e77..22a8f02 100644\n--- a/drivers/net/ethernet/intel/e1000e/netdev.c\n+++ b/drivers/net/ethernet/intel/e1000e/netdev.c\n@@ -2693,7 +2693,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)\n \tif (work_done < weight) {\n \t\tif (adapter->itr_setting & 3)\n \t\t\te1000_set_itr(adapter);\n-\t\tnapi_complete(napi);\n+\t\tnapi_complete_done(napi, work_done);\n \t\tif (!test_bit(__E1000_DOWN, &adapter->state)) {\n \t\t\tif (adapter->msix_entries)\n \t\t\t\tew32(IMS, adapter->rx_ring->ims_val);\ndiff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c\nindex a272b9f..746a198 100644\n--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c\n+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c\n@@ -593,9 +593,9 @@ static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,\n \tnapi_gro_receive(&q_vector->napi, skb);\n }\n \n-static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,\n-\t\t\t       struct fm10k_ring *rx_ring,\n-\t\t\t       int budget)\n+static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,\n+\t\t\t      struct fm10k_ring *rx_ring,\n+\t\t\t      int budget)\n {\n \tstruct sk_buff *skb = rx_ring->skb;\n \tunsigned int total_bytes = 0, total_packets = 0;\n@@ -662,7 +662,7 @@ 
static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,\n \tq_vector->rx.total_packets += total_packets;\n \tq_vector->rx.total_bytes += total_bytes;\n \n-\treturn total_packets < budget;\n+\treturn total_packets;\n }\n \n #define VXLAN_HLEN (sizeof(struct udphdr) + 8)\n@@ -1422,7 +1422,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget)\n \tstruct fm10k_q_vector *q_vector =\n \t\t\t       container_of(napi, struct fm10k_q_vector, napi);\n \tstruct fm10k_ring *ring;\n-\tint per_ring_budget;\n+\tint per_ring_budget, work_done = 0;\n \tbool clean_complete = true;\n \n \tfm10k_for_each_ring(ring, q_vector->tx)\n@@ -1440,16 +1440,19 @@ static int fm10k_poll(struct napi_struct *napi, int budget)\n \telse\n \t\tper_ring_budget = budget;\n \n-\tfm10k_for_each_ring(ring, q_vector->rx)\n-\t\tclean_complete &= fm10k_clean_rx_irq(q_vector, ring,\n-\t\t\t\t\t\t     per_ring_budget);\n+\tfm10k_for_each_ring(ring, q_vector->rx) {\n+\t\tint work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);\n+\n+\t\twork_done += work;\n+\t\tclean_complete &= !!(work < per_ring_budget);\n+\t}\n \n \t/* If all work not completed, return budget and keep polling */\n \tif (!clean_complete)\n \t\treturn budget;\n \n \t/* all work done, exit the polling mode */\n-\tnapi_complete(napi);\n+\tnapi_complete_done(napi, work_done);\n \n \t/* re-enable the q_vector */\n \tfm10k_qv_enable(q_vector);\ndiff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c\nindex 99f464f..b61a09f 100644\n--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c\n+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c\n@@ -1818,7 +1818,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)\n \tbool clean_complete = true;\n \tbool arm_wb = false;\n \tint budget_per_ring;\n-\tint cleaned;\n+\tint work_done = 0;\n \n \tif (test_bit(__I40E_DOWN, &vsi->state)) {\n \t\tnapi_complete(napi);\n@@ -1844,10 +1844,14 @@ int i40e_napi_poll(struct napi_struct *napi, int 
budget)\n \tbudget_per_ring = max(budget/q_vector->num_ringpairs, 1);\n \n \ti40e_for_each_ring(ring, q_vector->rx) {\n+\t\tint cleaned;\n+\n \t\tif (ring_is_ps_enabled(ring))\n \t\t\tcleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);\n \t\telse\n \t\t\tcleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);\n+\n+\t\twork_done += cleaned;\n \t\t/* if we didn't clean as many as budgeted, we must be done */\n \t\tclean_complete &= (budget_per_ring != cleaned);\n \t}\n@@ -1864,7 +1868,7 @@ tx_only:\n \t\tq_vector->arm_wb_state = false;\n \n \t/* Work is done so exit the polling mode and re-enable the interrupt */\n-\tnapi_complete(napi);\n+\tnapi_complete_done(napi, work_done);\n \tif (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {\n \t\ti40e_update_enable_itr(vsi, q_vector);\n \t} else { /* Legacy mode */\ndiff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\nindex 5d3a8bd..97493a4 100644\n--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\n+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c\n@@ -1266,7 +1266,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)\n \tbool clean_complete = true;\n \tbool arm_wb = false;\n \tint budget_per_ring;\n-\tint cleaned;\n+\tint work_done = 0;\n \n \tif (test_bit(__I40E_DOWN, &vsi->state)) {\n \t\tnapi_complete(napi);\n@@ -1292,10 +1292,14 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)\n \tbudget_per_ring = max(budget/q_vector->num_ringpairs, 1);\n \n \ti40e_for_each_ring(ring, q_vector->rx) {\n+\t\tint cleaned;\n+\n \t\tif (ring_is_ps_enabled(ring))\n \t\t\tcleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);\n \t\telse\n \t\t\tcleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);\n+\n+\t\twork_done += cleaned;\n \t\t/* if we didn't clean as many as budgeted, we must be done */\n \t\tclean_complete &= (budget_per_ring != cleaned);\n \t}\n@@ -1312,7 +1316,7 @@ tx_only:\n \t\tq_vector->arm_wb_state = false;\n \n \t/* Work is done so exit the 
polling mode and re-enable the interrupt */\n-\tnapi_complete(napi);\n+\tnapi_complete_done(napi, work_done);\n \ti40e_update_enable_itr(vsi, q_vector);\n \treturn 0;\n }\ndiff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c\nindex cef6de9..09efac7 100644\n--- a/drivers/net/ethernet/intel/igb/igb_main.c\n+++ b/drivers/net/ethernet/intel/igb/igb_main.c\n@@ -151,7 +151,7 @@ static void igb_setup_dca(struct igb_adapter *);\n #endif /* CONFIG_IGB_DCA */\n static int igb_poll(struct napi_struct *, int);\n static bool igb_clean_tx_irq(struct igb_q_vector *);\n-static bool igb_clean_rx_irq(struct igb_q_vector *, int);\n+static int igb_clean_rx_irq(struct igb_q_vector *, int);\n static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);\n static void igb_tx_timeout(struct net_device *);\n static void igb_reset_task(struct work_struct *);\n@@ -6376,6 +6376,7 @@ static int igb_poll(struct napi_struct *napi, int budget)\n \t\t\t\t\t\t     struct igb_q_vector,\n \t\t\t\t\t\t     napi);\n \tbool clean_complete = true;\n+\tint work_done = 0;\n \n #ifdef CONFIG_IGB_DCA\n \tif (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)\n@@ -6384,15 +6385,19 @@ static int igb_poll(struct napi_struct *napi, int budget)\n \tif (q_vector->tx.ring)\n \t\tclean_complete = igb_clean_tx_irq(q_vector);\n \n-\tif (q_vector->rx.ring)\n-\t\tclean_complete &= igb_clean_rx_irq(q_vector, budget);\n+\tif (q_vector->rx.ring) {\n+\t\tint cleaned = igb_clean_rx_irq(q_vector, budget);\n+\n+\t\twork_done += cleaned;\n+\t\tclean_complete &= (cleaned < budget);\n+\t}\n \n \t/* If all work not completed, return budget and keep polling */\n \tif (!clean_complete)\n \t\treturn budget;\n \n \t/* If not enough Rx work done, exit the polling mode */\n-\tnapi_complete(napi);\n+\tnapi_complete_done(napi, work_done);\n \tigb_ring_irq_enable(q_vector);\n \n \treturn 0;\n@@ -6916,7 +6921,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,\n \tskb->protocol 
= eth_type_trans(skb, rx_ring->netdev);\n }\n \n-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)\n+static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)\n {\n \tstruct igb_ring *rx_ring = q_vector->rx.ring;\n \tstruct sk_buff *skb = rx_ring->skb;\n@@ -6990,7 +6995,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)\n \tif (cleaned_count)\n \t\tigb_alloc_rx_buffers(rx_ring, cleaned_count);\n \n-\treturn total_packets < budget;\n+\treturn total_packets;\n }\n \n static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,\ndiff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c\nindex e86d41e..297af80 100644\n--- a/drivers/net/ethernet/intel/igbvf/netdev.c\n+++ b/drivers/net/ethernet/intel/igbvf/netdev.c\n@@ -1211,7 +1211,7 @@ static int igbvf_poll(struct napi_struct *napi, int budget)\n \n \t/* If not enough Rx work done, exit the polling mode */\n \tif (work_done < budget) {\n-\t\tnapi_complete(napi);\n+\t\tnapi_complete_done(napi, work_done);\n \n \t\tif (adapter->requested_itr & 3)\n \t\t\tigbvf_set_itr(adapter);\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\nindex 6e24352..693f2da 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\n@@ -2772,7 +2772,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)\n \t\t\t\tcontainer_of(napi, struct ixgbe_q_vector, napi);\n \tstruct ixgbe_adapter *adapter = q_vector->adapter;\n \tstruct ixgbe_ring *ring;\n-\tint per_ring_budget;\n+\tint per_ring_budget, work_done = 0;\n \tbool clean_complete = true;\n \n #ifdef CONFIG_IXGBE_DCA\n@@ -2794,9 +2794,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)\n \telse\n \t\tper_ring_budget = budget;\n \n-\tixgbe_for_each_ring(ring, q_vector->rx)\n-\t\tclean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,\n-\t\t\t\t   
per_ring_budget) < per_ring_budget);\n+\tixgbe_for_each_ring(ring, q_vector->rx) {\n+\t\tint cleaned = ixgbe_clean_rx_irq(q_vector, ring,\n+\t\t\t\t\t\t per_ring_budget);\n+\n+\t\twork_done += cleaned;\n+\t\tclean_complete &= (cleaned < per_ring_budget);\n+\t}\n \n \tixgbe_qv_unlock_napi(q_vector);\n \t/* If all work not completed, return budget and keep polling */\n@@ -2804,7 +2808,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)\n \t\treturn budget;\n \n \t/* all work done, exit the polling mode */\n-\tnapi_complete(napi);\n+\tnapi_complete_done(napi, work_done);\n \tif (adapter->rx_itr_setting & 1)\n \t\tixgbe_set_itr(q_vector);\n \tif (!test_bit(__IXGBE_DOWN, &adapter->state))\ndiff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c\nindex 3bcfd78..592ff23 100644\n--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c\n+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c\n@@ -1008,7 +1008,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)\n \t\tcontainer_of(napi, struct ixgbevf_q_vector, napi);\n \tstruct ixgbevf_adapter *adapter = q_vector->adapter;\n \tstruct ixgbevf_ring *ring;\n-\tint per_ring_budget;\n+\tint per_ring_budget, work_done = 0;\n \tbool clean_complete = true;\n \n \tixgbevf_for_each_ring(ring, q_vector->tx)\n@@ -1027,10 +1027,12 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)\n \telse\n \t\tper_ring_budget = budget;\n \n-\tixgbevf_for_each_ring(ring, q_vector->rx)\n-\t\tclean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,\n-\t\t\t\t\t\t\tper_ring_budget)\n-\t\t\t\t   < per_ring_budget);\n+\tixgbevf_for_each_ring(ring, q_vector->rx) {\n+\t\tint cleaned = ixgbevf_clean_rx_irq(q_vector, ring,\n+\t\t\t\t\t\t   per_ring_budget);\n+\t\twork_done += cleaned;\n+\t\tclean_complete &= (cleaned < per_ring_budget);\n+\t}\n \n #ifdef CONFIG_NET_RX_BUSY_POLL\n \tixgbevf_qv_unlock_napi(q_vector);\n@@ -1040,7 +1042,7 @@ static int ixgbevf_poll(struct 
napi_struct *napi, int budget)\n \tif (!clean_complete)\n \t\treturn budget;\n \t/* all work done, exit the polling mode */\n-\tnapi_complete(napi);\n+\tnapi_complete_done(napi, work_done);\n \tif (adapter->rx_itr_setting & 1)\n \t\tixgbevf_set_itr(q_vector);\n \tif (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&\n",
    "prefixes": [
        "net-next",
        "v1"
    ]
}