get:
Show a patch.

patch:
Partially update a patch; only the fields provided in the request are changed.

put:
Fully update a patch; all writable fields are replaced.

GET /api/patches/818356/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 818356,
    "url": "http://patchwork.ozlabs.org/api/patches/818356/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20170925215225.15616.63705.stgit@localhost.localdomain/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20170925215225.15616.63705.stgit@localhost.localdomain>",
    "list_archive_url": null,
    "date": "2017-09-25T21:55:36",
    "name": "[jkirsher/next-queue] ixgbe: Update adaptive ITR algorithm",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "3b5aed99b4e5842a5ea8fee8077e5f4121c62772",
    "submitter": {
        "id": 252,
        "url": "http://patchwork.ozlabs.org/api/people/252/?format=api",
        "name": "Alexander Duyck",
        "email": "alexander.duyck@gmail.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20170925215225.15616.63705.stgit@localhost.localdomain/mbox/",
    "series": [
        {
            "id": 5016,
            "url": "http://patchwork.ozlabs.org/api/series/5016/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=5016",
            "date": "2017-09-25T21:55:36",
            "name": "[jkirsher/next-queue] ixgbe: Update adaptive ITR algorithm",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/5016/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/818356/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/818356/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.136; helo=silver.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n\tunprotected) header.d=gmail.com header.i=@gmail.com\n\theader.b=\"WVXMkNVe\"; dkim-atps=neutral"
        ],
        "Received": [
            "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3y1Hvj3FK9z9sDB\n\tfor <incoming@patchwork.ozlabs.org>;\n\tTue, 26 Sep 2017 07:55:45 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id 0381A2F6D8;\n\tMon, 25 Sep 2017 21:55:44 +0000 (UTC)",
            "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id mTKqfqMj4Tye; Mon, 25 Sep 2017 21:55:42 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby silver.osuosl.org (Postfix) with ESMTP id 321692F6C5;\n\tMon, 25 Sep 2017 21:55:42 +0000 (UTC)",
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 826EE1BFF95\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 25 Sep 2017 21:55:40 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 72B4D8791D\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 25 Sep 2017 21:55:40 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id giK2MeKkc5Uy for <intel-wired-lan@lists.osuosl.org>;\n\tMon, 25 Sep 2017 21:55:39 +0000 (UTC)",
            "from mail-pg0-f44.google.com (mail-pg0-f44.google.com\n\t[74.125.83.44])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id 39928878ED\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 25 Sep 2017 21:55:39 +0000 (UTC)",
            "by mail-pg0-f44.google.com with SMTP id i195so4770976pgd.9\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 25 Sep 2017 14:55:39 -0700 (PDT)",
            "from localhost.localdomain ([2001:470:b:9c3:9e5c:8eff:fe4f:f2d0])\n\tby smtp.gmail.com with ESMTPSA id\n\tf9sm14187300pfe.150.2017.09.25.14.55.37\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tMon, 25 Sep 2017 14:55:37 -0700 (PDT)"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;\n\th=subject:from:to:cc:date:message-id:user-agent:mime-version\n\t:content-transfer-encoding;\n\tbh=w4oQ193UZ3V7izqrm4X7myqvZMBbsKiX5MshE3HyXRY=;\n\tb=WVXMkNVeqdNPWa7UzflcCVDs/UNjGCjSAe0MAwGsX/714nOC6TJYkIjAyAYPxL+AJ/\n\tByqNXwh/bF+nJ2MEa30O7gydLWbZZnElQOQvlghm5RMRuSv3lmiXEeL+r+XtjYfs8VYl\n\tlmKuyttQf/vvr0hlGDKrBhc79ohRR4NZxf17Z3lGqll9QvHC2bkRA2PhFJvlLzo0ORyw\n\t9sDUGctJyf4DZJ5lYTNsSjER2g8vNTNB43snWB0VolyhHLc92oqf+iHQ7VXvXt75dDJ5\n\tNloI5LmTo7CIKe4T82lytSJWvmQqbztGO/y/ht68YdED3dMs3+wMzJiLJgjNrRMUVnUZ\n\tImwg==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:subject:from:to:cc:date:message-id:user-agent\n\t:mime-version:content-transfer-encoding;\n\tbh=w4oQ193UZ3V7izqrm4X7myqvZMBbsKiX5MshE3HyXRY=;\n\tb=uc45ffpItYsrCJFekK+juvl7CMfNy4SZJ3KW8CsiLGlhz9BtjNDYqQX0c6jHsDOYDc\n\tkxqcCO+dp34ukndaNsRwwnhA4wAzUYHDfjKdW2kbAAk7Kf6KB2qAaQ1uK9HjkKnL1nHH\n\tkyU0bf0uYzoxZ0GxZLWXAUWEs5WGGfEVQ+5G9oJ6oANKvFD6G9f19jGNnUaauHgK7Z/i\n\txzWdChWG6QEk5+m/pJRG2OCNHgZLhh9H1HSKbYZHZcbEN1QhW/bTQXSZs4hCXvNI4xjK\n\tLVzzLui9qzov69I7PZreBKgKhuIlk/INrpijF70xi6wx8//RbGH6juKHaPgjzZM5voww\n\tfXXA==",
        "X-Gm-Message-State": "AHPjjUjf8AxJsG2KDhCdffWGHQ9o+jQSx3UWUSCZHfwbSxKlNOGNsYV8\n\t8tH0hezrEX+CFC+bMiJjDJE=",
        "X-Google-Smtp-Source": "AOwi7QDIhSQnqYkAM64fgRkVdW6bD6ecSkKxtriDwpGFpelI6oYgIrX7a7dvDFBZ+B9G1XdHnSImdw==",
        "X-Received": "by 10.84.238.135 with SMTP id v7mr8936666plk.276.1506376538695; \n\tMon, 25 Sep 2017 14:55:38 -0700 (PDT)",
        "From": "Alexander Duyck <alexander.duyck@gmail.com>",
        "To": "netdev@vger.kernel.org, intel-wired-lan@lists.osuosl.org",
        "Date": "Mon, 25 Sep 2017 14:55:36 -0700",
        "Message-ID": "<20170925215225.15616.63705.stgit@localhost.localdomain>",
        "User-Agent": "StGit/0.17.1-dirty",
        "MIME-Version": "1.0",
        "Cc": "brouer@redhat.com",
        "Subject": "[Intel-wired-lan] [jkirsher/next-queue PATCH] ixgbe: Update\n\tadaptive ITR algorithm",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.18-1",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "From: Alexander Duyck <alexander.h.duyck@intel.com>\n\nThe following change is meant to update the adaptive ITR algorithm to\nbetter support the needs of the network. Specifically with this change what\nI have done is make it so that our ITR algorithm will try to prevent either\nstarving a socket buffer for memory in the case of Tx, or overruing an Rx\nsocket buffer on receive.\n\nIn addition a side effect of the calculations used is that we should\nfunction better with new features such as XDP which can handle small\npackets at high rates without needing to lock us into NAPI polling mode.\n\nSigned-off-by: Alexander Duyck <alexander.h.duyck@intel.com>\n---\n\nSo I am putting this out to a wider distribution list than normal for a\npatch like this in order to get feedback on if there are any areas I may\nhave overlooked. With this patch is should address many of the performance\nlimitations seen with pktgen and XDP in terms of workloads that the old\nadaptive scheme wasn't handling.\n\n drivers/net/ethernet/intel/ixgbe/ixgbe.h      |    7 +\n drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c  |   11 +\n drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |  215 +++++++++++++++++++------\n 3 files changed, 178 insertions(+), 55 deletions(-)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h\nindex 56039d04b38d..555eb80d8a08 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h\n@@ -435,8 +435,15 @@ static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)\n }\n #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))\n \n+#define IXGBE_ITR_ADAPTIVE_MIN_INC\t2\n+#define IXGBE_ITR_ADAPTIVE_MIN_USECS\t10\n+#define IXGBE_ITR_ADAPTIVE_MAX_USECS\t126\n+#define IXGBE_ITR_ADAPTIVE_LATENCY\t0x80\n+#define IXGBE_ITR_ADAPTIVE_BULK\t\t0x00\n+\n struct ixgbe_ring_container {\n \tstruct ixgbe_ring *ring;\t/* pointer to linked list of rings */\n+\tunsigned long next_update;\t/* jiffies value of last update */\n \tunsigned int total_bytes;\t/* total bytes processed this int */\n \tunsigned int total_packets;\t/* total packets processed this int */\n \tu16 work_limit;\t\t\t/* total work allowed per interrupt */\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c\nindex f1bfae0c41d0..8e2a957aca18 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c\n@@ -806,6 +806,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,\n \tring->next = head->ring;\n \thead->ring = ring;\n \thead->count++;\n+\thead->next_update = jiffies + 1;\n }\n \n /**\n@@ -879,8 +880,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,\n \t/* initialize work limits */\n \tq_vector->tx.work_limit = adapter->tx_work_limit;\n \n-\t/* initialize pointer to rings */\n-\tring = q_vector->ring;\n+\t/* Initialize setting for adaptive ITR */\n+\tq_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |\n+\t\t\t   IXGBE_ITR_ADAPTIVE_LATENCY;\n+\tq_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |\n+\t\t\t   IXGBE_ITR_ADAPTIVE_LATENCY;\n \n \t/* intialize ITR */\n \tif (txr_count && !rxr_count) {\n@@ -897,6 
+901,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,\n \t\t\tq_vector->itr = adapter->rx_itr_setting;\n \t}\n \n+\t/* initialize pointer to rings */\n+\tring = q_vector->ring;\n+\n \twhile (txr_count) {\n \t\t/* assign generic ring traits */\n \t\tring->dev = &adapter->pdev->dev;\ndiff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\nindex 3d3739f103af..44a96878075b 100644\n--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\n+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c\n@@ -2517,50 +2517,174 @@ enum latency_range {\n static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,\n \t\t\t     struct ixgbe_ring_container *ring_container)\n {\n-\tint bytes = ring_container->total_bytes;\n-\tint packets = ring_container->total_packets;\n-\tu32 timepassed_us;\n-\tu64 bytes_perint;\n-\tu8 itr_setting = ring_container->itr;\n+\tunsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |\n+\t\t\t   IXGBE_ITR_ADAPTIVE_LATENCY;\n+\tunsigned int avg_wire_size, packets, bytes;\n+\tunsigned long next_update = jiffies;\n \n-\tif (packets == 0)\n+\t/* If we don't have any rings just leave ourselves set for maximum\n+\t * possible latency so we take ourselves out of the equation.\n+\t */\n+\tif (!ring_container->ring)\n \t\treturn;\n \n-\t/* simple throttlerate management\n-\t *   0-10MB/s   lowest (100000 ints/s)\n-\t *  10-20MB/s   low    (20000 ints/s)\n-\t *  20-1249MB/s bulk   (12000 ints/s)\n+\t/* If we didn't update within up to 1 - 2 jiffies we can assume\n+\t * that either packets are coming in so slow there hasn't been\n+\t * any work, or that there is so much work that NAPI is dealing\n+\t * with interrupt moderation and we don't need to do anything.\n \t */\n-\t/* what was last interrupt timeslice? 
*/\n-\ttimepassed_us = q_vector->itr >> 2;\n-\tif (timepassed_us == 0)\n-\t\treturn;\n+\tif (time_after(next_update, ring_container->next_update))\n+\t\tgoto clear_counts;\n \n-\tbytes_perint = bytes / timepassed_us; /* bytes/usec */\n+\tpackets = ring_container->total_packets;\n \n-\tswitch (itr_setting) {\n-\tcase lowest_latency:\n-\t\tif (bytes_perint > 10)\n-\t\t\titr_setting = low_latency;\n-\t\tbreak;\n-\tcase low_latency:\n-\t\tif (bytes_perint > 20)\n-\t\t\titr_setting = bulk_latency;\n-\t\telse if (bytes_perint <= 10)\n-\t\t\titr_setting = lowest_latency;\n+\t/* We have no packets to actually measure against. This means\n+\t * either one of the other queues on this vector is active or\n+\t * we are a Tx queue doing TSO with too high of an interrupt rate.\n+\t *\n+\t * When this occurs just tick up our delay by the minimum value\n+\t * and hope that this extra delay will prevent us from being called\n+\t * without any work on our queue.\n+\t */\n+\tif (!packets) {\n+\t\titr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;\n+\t\tif (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)\n+\t\t\titr = IXGBE_ITR_ADAPTIVE_MAX_USECS;\n+\t\titr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;\n+\t\tgoto clear_counts;\n+\t}\n+\n+\tbytes = ring_container->total_bytes;\n+\n+\t/* If packets are less than 4 or bytes are less than 9000 assume\n+\t * insufficient data to use bulk rate limiting approach. We are\n+\t * likely latency driven.\n+\t */\n+\tif (packets < 4 && bytes < 9000) {\n+\t\titr = IXGBE_ITR_ADAPTIVE_LATENCY;\n+\t\tgoto adjust_by_size;\n+\t}\n+\n+\t/* Between 4 and 48 we can assume that our current interrupt delay\n+\t * is only slightly too low. 
As such we should increase it by a small\n+\t * fixed amount.\n+\t */\n+\tif (packets < 48) {\n+\t\titr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;\n+\t\tif (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)\n+\t\t\titr = IXGBE_ITR_ADAPTIVE_MAX_USECS;\n+\t\tgoto clear_counts;\n+\t}\n+\n+\t/* Between 48 and 96 is our \"goldilocks\" zone where we are working\n+\t * out \"just right\". Just report that our current ITR is good for us.\n+\t */\n+\tif (packets < 96) {\n+\t\titr = q_vector->itr >> 2;\n+\t\tgoto clear_counts;\n+\t}\n+\n+\t/* If packet count is 96 or greater we are likely looking at a slight\n+\t * overrun of the delay we want. Try halving our delay to see if that\n+\t * will cut the number of packets in half per interrupt.\n+\t */\n+\tif (packets < 256) {\n+\t\titr = q_vector->itr >> 3;\n+\t\tif (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)\n+\t\t\titr = IXGBE_ITR_ADAPTIVE_MIN_USECS;\n+\t\tgoto clear_counts;\n+\t}\n+\n+\t/* The paths below assume we are dealing with a bulk ITR since number\n+\t * of packets is 256 or greater. We are just going to have to compute\n+\t * a value and try to bring the count under control, though for smaller\n+\t * packet sizes there isn't much we can do as NAPI polling will likely\n+\t * be kicking in sooner rather than later.\n+\t */\n+\titr = IXGBE_ITR_ADAPTIVE_BULK;\n+\n+adjust_by_size:\n+\t/* If packet counts are 256 or greater we can assume we have a gross\n+\t * overestimation of what the rate should be. 
Instead of trying to fine\n+\t * tune it just use the formula below to try and dial in an exact value\n+\t * give the current packet size of the frame.\n+\t */\n+\tavg_wire_size = bytes / packets;\n+\n+\t/* The following is a crude approximation of:\n+\t *  wmem_default / (size + overhead) = desired_pkts_per_int\n+\t *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate\n+\t *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value\n+\t *\n+\t * Assuming wmem_default is 212992 and overhead is 640 bytes per\n+\t * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the\n+\t * formula down to\n+\t *\n+\t *  (170 * (size + 24)) / (size + 640) = ITR\n+\t *\n+\t * We first do some math on the packet size and then finally bitshift\n+\t * by 8 after rounding up. We also have to account for PCIe link speed\n+\t * difference as ITR scales based on this.\n+\t */\n+\tif (avg_wire_size <= 60) {\n+\t\t/* Start at 50k ints/sec */\n+\t\tavg_wire_size = 5120;\n+\t} else if (avg_wire_size <= 316) {\n+\t\t/* 50K ints/sec to 16K ints/sec */\n+\t\tavg_wire_size *= 40;\n+\t\tavg_wire_size += 2720;\n+\t} else if (avg_wire_size <= 1084) {\n+\t\t/* 16K ints/sec to 9.2K ints/sec */\n+\t\tavg_wire_size *= 15;\n+\t\tavg_wire_size += 11452;\n+\t} else if (avg_wire_size <= 1980) {\n+\t\t/* 9.2K ints/sec to 8K ints/sec */\n+\t\tavg_wire_size *= 5;\n+\t\tavg_wire_size += 22420;\n+\t} else {\n+\t\t/* plateau at a limit of 8K ints/sec */\n+\t\tavg_wire_size = 32256;\n+\t}\n+\n+\t/* If we are in low latency mode half our delay which doubles the rate\n+\t * to somewhere between 100K to 16K ints/sec\n+\t */\n+\tif (itr & IXGBE_ITR_ADAPTIVE_LATENCY)\n+\t\tavg_wire_size >>= 1;\n+\n+\t/* Resultant value is 256 times larger than it needs to be. 
This\n+\t * gives us room to adjust the value as needed to either increase\n+\t * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.\n+\t *\n+\t * Use addition as we have already recorded the new latency flag\n+\t * for the ITR value.\n+\t */\n+\tswitch (q_vector->adapter->link_speed) {\n+\tcase IXGBE_LINK_SPEED_10GB_FULL:\n+\tcase IXGBE_LINK_SPEED_100_FULL:\n+\tdefault:\n+\t\titr += DIV_ROUND_UP(avg_wire_size,\n+\t\t\t\t    IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *\n+\t\t       IXGBE_ITR_ADAPTIVE_MIN_INC;\n \t\tbreak;\n-\tcase bulk_latency:\n-\t\tif (bytes_perint <= 20)\n-\t\t\titr_setting = low_latency;\n+\tcase IXGBE_LINK_SPEED_2_5GB_FULL:\n+\tcase IXGBE_LINK_SPEED_1GB_FULL:\n+\tcase IXGBE_LINK_SPEED_10_FULL:\n+\t\titr += DIV_ROUND_UP(avg_wire_size,\n+\t\t\t\t    IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *\n+\t\t       IXGBE_ITR_ADAPTIVE_MIN_INC;\n \t\tbreak;\n \t}\n \n-\t/* clear work counters since we have the values we need */\n+clear_counts:\n+\t/* write back value */\n+\tring_container->itr = itr;\n+\n+\t/* next update should occur within next jiffy */\n+\tring_container->next_update = next_update + 1;\n+\n \tring_container->total_bytes = 0;\n \tring_container->total_packets = 0;\n-\n-\t/* write updated itr to ring container */\n-\tring_container->itr = itr_setting;\n }\n \n /**\n@@ -2602,34 +2726,19 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)\n \n static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)\n {\n-\tu32 new_itr = q_vector->itr;\n-\tu8 current_itr;\n+\tu32 new_itr;\n \n \tixgbe_update_itr(q_vector, &q_vector->tx);\n \tixgbe_update_itr(q_vector, &q_vector->rx);\n \n-\tcurrent_itr = max(q_vector->rx.itr, q_vector->tx.itr);\n+\t/* use the smallest value of new ITR delay calculations */\n+\tnew_itr = min(q_vector->rx.itr, q_vector->tx.itr);\n \n-\tswitch (current_itr) {\n-\t/* counts and packets in update_itr are dependent on these numbers */\n-\tcase lowest_latency:\n-\t\tnew_itr = IXGBE_100K_ITR;\n-\t\tbreak;\n-\tcase 
low_latency:\n-\t\tnew_itr = IXGBE_20K_ITR;\n-\t\tbreak;\n-\tcase bulk_latency:\n-\t\tnew_itr = IXGBE_12K_ITR;\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n+\t/* Clear latency flag if set, shift into correct position */\n+\tnew_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;\n+\tnew_itr <<= 2;\n \n \tif (new_itr != q_vector->itr) {\n-\t\t/* do an exponential smoothing */\n-\t\tnew_itr = (10 * new_itr * q_vector->itr) /\n-\t\t\t  ((9 * new_itr) + q_vector->itr);\n-\n \t\t/* save the algorithm value here */\n \t\tq_vector->itr = new_itr;\n \n",
    "prefixes": [
        "jkirsher/next-queue"
    ]
}