get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Fully update a patch (replaces the writable fields).

GET /api/patches/1044948/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1044948,
    "url": "http://patchwork.ozlabs.org/api/patches/1044948/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190219230414.25627-2-anirudh.venkataramanan@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20190219230414.25627-2-anirudh.venkataramanan@intel.com>",
    "list_archive_url": null,
    "date": "2019-02-19T23:04:01",
    "name": "[S15,01/14] ice: Fix for adaptive interrupt moderation",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "6531351261d1713bc5756e700ec08909e935a8df",
    "submitter": {
        "id": 73601,
        "url": "http://patchwork.ozlabs.org/api/people/73601/?format=api",
        "name": "Anirudh Venkataramanan",
        "email": "anirudh.venkataramanan@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190219230414.25627-2-anirudh.venkataramanan@intel.com/mbox/",
    "series": [
        {
            "id": 93015,
            "url": "http://patchwork.ozlabs.org/api/series/93015/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=93015",
            "date": "2019-02-19T23:04:04",
            "name": "Bug fixes and minor clean up for ice",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/93015/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1044948/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/1044948/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.133; helo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 443xBn7354z9s6w\n\tfor <incoming@patchwork.ozlabs.org>;\n\tWed, 20 Feb 2019 10:04:33 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 9D8DB861B3;\n\tTue, 19 Feb 2019 23:04:32 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id G2Q7uWH7LtA4; Tue, 19 Feb 2019 23:04:26 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 680BA86CD3;\n\tTue, 19 Feb 2019 23:04:26 +0000 (UTC)",
            "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\tby ash.osuosl.org (Postfix) with ESMTP id 4DDA61BF2EA\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 19 Feb 2019 23:04:23 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id 4B28823355\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 19 Feb 2019 23:04:23 +0000 (UTC)",
            "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id DRbeoMZYipvu for <intel-wired-lan@lists.osuosl.org>;\n\tTue, 19 Feb 2019 23:04:16 +0000 (UTC)",
            "from mga06.intel.com (mga06.intel.com [134.134.136.31])\n\tby silver.osuosl.org (Postfix) with ESMTPS id 23F0D23236\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tTue, 19 Feb 2019 23:04:16 +0000 (UTC)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n\tby orsmga104.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t19 Feb 2019 15:04:15 -0800",
            "from shasta.jf.intel.com ([10.166.241.11])\n\tby fmsmga002.fm.intel.com with ESMTP; 19 Feb 2019 15:04:14 -0800"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.58,388,1544515200\"; d=\"scan'208\";a=\"144858758\"",
        "From": "Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Tue, 19 Feb 2019 15:04:01 -0800",
        "Message-Id": "<20190219230414.25627-2-anirudh.venkataramanan@intel.com>",
        "X-Mailer": "git-send-email 2.14.5",
        "In-Reply-To": "<20190219230414.25627-1-anirudh.venkataramanan@intel.com>",
        "References": "<20190219230414.25627-1-anirudh.venkataramanan@intel.com>",
        "Subject": "[Intel-wired-lan] [PATCH S15 01/14] ice: Fix for adaptive interrupt\n\tmoderation",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "commit 63f545ed1285 (\"ice: Add support for adaptive interrupt moderation\")\nwas meant to add support for adaptive interrupt moderation but there was\nan error on my part while formatting the patch, and thus only part of the\npatch ended up being submitted.\n\nThis patch rectifies the error by adding the rest of the code.\n\nFixes: 63f545ed1285 (\"ice: Add support for adaptive interrupt moderation\")\nSigned-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\n---\n drivers/net/ethernet/intel/ice/ice.h      |   1 +\n drivers/net/ethernet/intel/ice/ice_txrx.c | 292 +++++++++++++++++++++++++++---\n drivers/net/ethernet/intel/ice/ice_txrx.h |   6 +\n 3 files changed, 275 insertions(+), 24 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex 8b659ac3be94..933297e81fb4 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -309,6 +309,7 @@ struct ice_q_vector {\n \t * value to the device\n \t */\n \tu8 intrl;\n+\tu8 itr_countdown;\t/* when 0 should adjust adaptive ITR */\n } ____cacheline_internodealigned_in_smp;\n \n enum ice_pf_flags {\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c\nindex 2f5981dbdff9..549bcaa1c6d0 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.c\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c\n@@ -1097,18 +1097,257 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)\n \treturn failure ? budget : (int)total_rx_pkts;\n }\n \n+static unsigned int ice_itr_divisor(struct ice_port_info *pi)\n+{\n+\tswitch (pi->phy.link_info.link_speed) {\n+\tcase ICE_AQ_LINK_SPEED_40GB:\n+\t\treturn ICE_ITR_ADAPTIVE_MIN_INC * 1024;\n+\tcase ICE_AQ_LINK_SPEED_25GB:\n+\tcase ICE_AQ_LINK_SPEED_20GB:\n+\t\treturn ICE_ITR_ADAPTIVE_MIN_INC * 512;\n+\tcase ICE_AQ_LINK_SPEED_100MB:\n+\t\treturn ICE_ITR_ADAPTIVE_MIN_INC * 32;\n+\tdefault:\n+\t\treturn ICE_ITR_ADAPTIVE_MIN_INC * 256;\n+\t}\n+}\n+\n+/**\n+ * ice_update_itr - update the adaptive ITR value based on statistics\n+ * @q_vector: structure containing interrupt and ring information\n+ * @rc: structure containing ring performance data\n+ *\n+ * Stores a new ITR value based on packets and byte\n+ * counts during the last interrupt.  The advantage of per interrupt\n+ * computation is faster updates and more accurate ITR for the current\n+ * traffic pattern.  
Constants in this function were computed\n+ * based on theoretical maximum wire speed and thresholds were set based\n+ * on testing data as well as attempting to minimize response time\n+ * while increasing bulk throughput.\n+ */\n+static void\n+ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)\n+{\n+\tunsigned int avg_wire_size, packets, bytes, itr;\n+\tunsigned long next_update = jiffies;\n+\tbool container_is_rx;\n+\n+\tif (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))\n+\t\treturn;\n+\n+\t/* If itr_countdown is set it means we programmed an ITR within\n+\t * the last 4 interrupt cycles. This has a side effect of us\n+\t * potentially firing an early interrupt. In order to work around\n+\t * this we need to throw out any data received for a few\n+\t * interrupts following the update.\n+\t */\n+\tif (q_vector->itr_countdown) {\n+\t\titr = rc->target_itr;\n+\t\tgoto clear_counts;\n+\t}\n+\n+\tcontainer_is_rx = (&q_vector->rx == rc);\n+\t/* For Rx we want to push the delay up and default to low latency.\n+\t * for Tx we want to pull the delay down and default to high latency.\n+\t */\n+\titr = container_is_rx ?\n+\t\tICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :\n+\t\tICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;\n+\n+\t/* If we didn't update within up to 1 - 2 jiffies we can assume\n+\t * that either packets are coming in so slow there hasn't been\n+\t * any work, or that there is so much work that NAPI is dealing\n+\t * with interrupt moderation and we don't need to do anything.\n+\t */\n+\tif (time_after(next_update, rc->next_update))\n+\t\tgoto clear_counts;\n+\n+\tpackets = rc->total_pkts;\n+\tbytes = rc->total_bytes;\n+\n+\tif (container_is_rx) {\n+\t\t/* If Rx there are 1 to 4 packets and bytes are less than\n+\t\t * 9000 assume insufficient data to use bulk rate limiting\n+\t\t * approach unless Tx is already in bulk rate limiting. 
We\n+\t\t * are likely latency driven.\n+\t\t */\n+\t\tif (packets && packets < 4 && bytes < 9000 &&\n+\t\t    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {\n+\t\t\titr = ICE_ITR_ADAPTIVE_LATENCY;\n+\t\t\tgoto adjust_by_size;\n+\t\t}\n+\t} else if (packets < 4) {\n+\t\t/* If we have Tx and Rx ITR maxed and Tx ITR is running in\n+\t\t * bulk mode and we are receiving 4 or fewer packets just\n+\t\t * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so\n+\t\t * that the Rx can relax.\n+\t\t */\n+\t\tif (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&\n+\t\t    (q_vector->rx.target_itr & ICE_ITR_MASK) ==\n+\t\t    ICE_ITR_ADAPTIVE_MAX_USECS)\n+\t\t\tgoto clear_counts;\n+\t} else if (packets > 32) {\n+\t\t/* If we have processed over 32 packets in a single interrupt\n+\t\t * for Tx assume we need to switch over to \"bulk\" mode.\n+\t\t */\n+\t\trc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;\n+\t}\n+\n+\t/* We have no packets to actually measure against. This means\n+\t * either one of the other queues on this vector is active or\n+\t * we are a Tx queue doing TSO with too high of an interrupt rate.\n+\t *\n+\t * Between 4 and 56 we can assume that our current interrupt delay\n+\t * is only slightly too low. As such we should increase it by a small\n+\t * fixed amount.\n+\t */\n+\tif (packets < 56) {\n+\t\titr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;\n+\t\tif ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {\n+\t\t\titr &= ICE_ITR_ADAPTIVE_LATENCY;\n+\t\t\titr += ICE_ITR_ADAPTIVE_MAX_USECS;\n+\t\t}\n+\t\tgoto clear_counts;\n+\t}\n+\n+\tif (packets <= 256) {\n+\t\titr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);\n+\t\titr &= ICE_ITR_MASK;\n+\n+\t\t/* Between 56 and 112 is our \"goldilocks\" zone where we are\n+\t\t * working out \"just right\". 
Just report that our current\n+\t\t * ITR is good for us.\n+\t\t */\n+\t\tif (packets <= 112)\n+\t\t\tgoto clear_counts;\n+\n+\t\t/* If packet count is 128 or greater we are likely looking\n+\t\t * at a slight overrun of the delay we want. Try halving\n+\t\t * our delay to see if that will cut the number of packets\n+\t\t * in half per interrupt.\n+\t\t */\n+\t\titr >>= 1;\n+\t\titr &= ICE_ITR_MASK;\n+\t\tif (itr < ICE_ITR_ADAPTIVE_MIN_USECS)\n+\t\t\titr = ICE_ITR_ADAPTIVE_MIN_USECS;\n+\n+\t\tgoto clear_counts;\n+\t}\n+\n+\t/* The paths below assume we are dealing with a bulk ITR since\n+\t * number of packets is greater than 256. We are just going to have\n+\t * to compute a value and try to bring the count under control,\n+\t * though for smaller packet sizes there isn't much we can do as\n+\t * NAPI polling will likely be kicking in sooner rather than later.\n+\t */\n+\titr = ICE_ITR_ADAPTIVE_BULK;\n+\n+adjust_by_size:\n+\t/* If packet counts are 256 or greater we can assume we have a gross\n+\t * overestimation of what the rate should be. Instead of trying to fine\n+\t * tune it just use the formula below to try and dial in an exact value\n+\t * gives the current packet size of the frame.\n+\t */\n+\tavg_wire_size = bytes / packets;\n+\n+\t/* The following is a crude approximation of:\n+\t *  wmem_default / (size + overhead) = desired_pkts_per_int\n+\t *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate\n+\t *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value\n+\t *\n+\t * Assuming wmem_default is 212992 and overhead is 640 bytes per\n+\t * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the\n+\t * formula down to\n+\t *\n+\t *  (170 * (size + 24)) / (size + 640) = ITR\n+\t *\n+\t * We first do some math on the packet size and then finally bitshift\n+\t * by 8 after rounding up. 
We also have to account for PCIe link speed\n+\t * difference as ITR scales based on this.\n+\t */\n+\tif (avg_wire_size <= 60) {\n+\t\t/* Start at 250k ints/sec */\n+\t\tavg_wire_size = 4096;\n+\t} else if (avg_wire_size <= 380) {\n+\t\t/* 250K ints/sec to 60K ints/sec */\n+\t\tavg_wire_size *= 40;\n+\t\tavg_wire_size += 1696;\n+\t} else if (avg_wire_size <= 1084) {\n+\t\t/* 60K ints/sec to 36K ints/sec */\n+\t\tavg_wire_size *= 15;\n+\t\tavg_wire_size += 11452;\n+\t} else if (avg_wire_size <= 1980) {\n+\t\t/* 36K ints/sec to 30K ints/sec */\n+\t\tavg_wire_size *= 5;\n+\t\tavg_wire_size += 22420;\n+\t} else {\n+\t\t/* plateau at a limit of 30K ints/sec */\n+\t\tavg_wire_size = 32256;\n+\t}\n+\n+\t/* If we are in low latency mode halve our delay which doubles the\n+\t * rate to somewhere between 100K to 16K ints/sec\n+\t */\n+\tif (itr & ICE_ITR_ADAPTIVE_LATENCY)\n+\t\tavg_wire_size >>= 1;\n+\n+\t/* Resultant value is 256 times larger than it needs to be. This\n+\t * gives us room to adjust the value as needed to either increase\n+\t * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.\n+\t *\n+\t * Use addition as we have already recorded the new latency flag\n+\t * for the ITR value.\n+\t */\n+\titr += DIV_ROUND_UP(avg_wire_size,\n+\t\t\t    ice_itr_divisor(q_vector->vsi->port_info)) *\n+\t       ICE_ITR_ADAPTIVE_MIN_INC;\n+\n+\tif ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {\n+\t\titr &= ICE_ITR_ADAPTIVE_LATENCY;\n+\t\titr += ICE_ITR_ADAPTIVE_MAX_USECS;\n+\t}\n+\n+clear_counts:\n+\t/* write back value */\n+\trc->target_itr = itr;\n+\n+\t/* next update should occur within next jiffy */\n+\trc->next_update = next_update + 1;\n+\n+\trc->total_bytes = 0;\n+\trc->total_pkts = 0;\n+}\n+\n /**\n  * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register\n  * @itr_idx: interrupt throttling index\n- * @reg_itr: interrupt throttling value adjusted based on ITR granularity\n+ * @itr: interrupt throttling value in usecs\n  
*/\n-static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)\n+static u32 ice_buildreg_itr(int itr_idx, u16 itr)\n {\n+\t/* The itr value is reported in microseconds, and the register value is\n+\t * recorded in 2 microsecond units. For this reason we only need to\n+\t * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this\n+\t * granularity as a shift instead of division. The mask makes sure the\n+\t * ITR value is never odd so we don't accidentally write into the field\n+\t * prior to the ITR field.\n+\t */\n+\titr &= ICE_ITR_MASK;\n+\n \treturn GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |\n \t\t(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |\n-\t\t(reg_itr << GLINT_DYN_CTL_INTERVAL_S);\n+\t\t(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));\n }\n \n+/* The act of updating the ITR will cause it to immediately trigger. In order\n+ * to prevent this from throwing off adaptive update statistics we defer the\n+ * update so that it can only happen so often. So after either Tx or Rx are\n+ * updated we make the adaptive scheme wait until either the ITR completely\n+ * expires via the next_update expiration or we have been through at least\n+ * 3 interrupts.\n+ */\n+#define ITR_COUNTDOWN_START 3\n+\n /**\n  * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt\n  * @vsi: the VSI associated with the q_vector\n@@ -1117,10 +1356,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)\n static void\n ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)\n {\n-\tstruct ice_hw *hw = &vsi->back->hw;\n-\tstruct ice_ring_container *rc;\n+\tstruct ice_ring_container *tx = &q_vector->tx;\n+\tstruct ice_ring_container *rx = &q_vector->rx;\n \tu32 itr_val;\n \n+\t/* This will do nothing if dynamic updates are not enabled */\n+\tice_update_itr(q_vector, tx);\n+\tice_update_itr(q_vector, rx);\n+\n \t/* This block of logic allows us to get away with only updating\n \t * one ITR value with each interrupt. 
The idea is to perform a\n \t * pseudo-lazy update with the following criteria.\n@@ -1129,35 +1372,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)\n \t * 2. If we must reduce an ITR that is given highest priority.\n \t * 3. We then give priority to increasing ITR based on amount.\n \t */\n-\tif (q_vector->rx.target_itr < q_vector->rx.current_itr) {\n-\t\trc = &q_vector->rx;\n+\tif (rx->target_itr < rx->current_itr) {\n \t\t/* Rx ITR needs to be reduced, this is highest priority */\n-\t\titr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);\n-\t\trc->current_itr = rc->target_itr;\n-\t} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||\n-\t\t   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <\n-\t\t    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {\n-\t\trc = &q_vector->tx;\n+\t\titr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);\n+\t\trx->current_itr = rx->target_itr;\n+\t\tq_vector->itr_countdown = ITR_COUNTDOWN_START;\n+\t} else if ((tx->target_itr < tx->current_itr) ||\n+\t\t   ((rx->target_itr - rx->current_itr) <\n+\t\t    (tx->target_itr - tx->current_itr))) {\n \t\t/* Tx ITR needs to be reduced, this is second priority\n \t\t * Tx ITR needs to be increased more than Rx, fourth priority\n \t\t */\n-\t\titr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);\n-\t\trc->current_itr = rc->target_itr;\n-\t} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {\n-\t\trc = &q_vector->rx;\n+\t\titr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);\n+\t\ttx->current_itr = tx->target_itr;\n+\t\tq_vector->itr_countdown = ITR_COUNTDOWN_START;\n+\t} else if (rx->current_itr != rx->target_itr) {\n \t\t/* Rx ITR needs to be increased, third priority */\n-\t\titr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);\n-\t\trc->current_itr = rc->target_itr;\n+\t\titr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);\n+\t\trx->current_itr = rx->target_itr;\n+\t\tq_vector->itr_countdown = 
ITR_COUNTDOWN_START;\n \t} else {\n \t\t/* Still have to re-enable the interrupts */\n \t\titr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);\n+\t\tif (q_vector->itr_countdown)\n+\t\t\tq_vector->itr_countdown--;\n \t}\n \n-\tif (!test_bit(__ICE_DOWN, vsi->state)) {\n-\t\tint vector = vsi->hw_base_vector + q_vector->v_idx;\n-\n-\t\twr32(hw, GLINT_DYN_CTL(vector), itr_val);\n-\t}\n+\tif (!test_bit(__ICE_DOWN, vsi->state))\n+\t\twr32(&vsi->back->hw,\n+\t\t     GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx),\n+\t\t     itr_val);\n }\n \n /**\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h\nindex bd446ed423e5..69625857c482 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.h\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h\n@@ -133,6 +133,12 @@ enum ice_rx_dtype {\n #define ICE_ITR_MASK\t\t0x1FFE\t/* ITR register value alignment mask */\n #define ITR_REG_ALIGN(setting)\t__ALIGN_MASK(setting, ~ICE_ITR_MASK)\n \n+#define ICE_ITR_ADAPTIVE_MIN_INC\t0x0002\n+#define ICE_ITR_ADAPTIVE_MIN_USECS\t0x0002\n+#define ICE_ITR_ADAPTIVE_MAX_USECS\t0x00FA\n+#define ICE_ITR_ADAPTIVE_LATENCY\t0x8000\n+#define ICE_ITR_ADAPTIVE_BULK\t\t0x0000\n+\n #define ICE_DFLT_INTRL\t0\n \n /* Legacy or Advanced Mode Queue */\n",
    "prefixes": [
        "S15",
        "01/14"
    ]
}