get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Update a patch (full update of the writable fields).

GET /api/patches/887909/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 887909,
    "url": "http://patchwork.ozlabs.org/api/patches/887909/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180319215644.31978-7-jeffrey.t.kirsher@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20180319215644.31978-7-jeffrey.t.kirsher@intel.com>",
    "list_archive_url": null,
    "date": "2018-03-19T21:56:36",
    "name": "[v3,07/15] ice: Add support for VSI allocation and deallocation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "e8a2df0b312bd920e1f95e582691ebfcac15a356",
    "submitter": {
        "id": 473,
        "url": "http://patchwork.ozlabs.org/api/people/473/?format=api",
        "name": "Kirsher, Jeffrey T",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20180319215644.31978-7-jeffrey.t.kirsher@intel.com/mbox/",
    "series": [
        {
            "id": 34702,
            "url": "http://patchwork.ozlabs.org/api/series/34702/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=34702",
            "date": "2018-03-19T21:56:30",
            "name": "[v3,01/15] ice: Add basic driver framework for Intel(R) E800 Series",
            "version": 3,
            "mbox": "http://patchwork.ozlabs.org/series/34702/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/887909/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/887909/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.137; helo=fraxinus.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=none (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 404qdl6QW2z9sV3\n\tfor <incoming@patchwork.ozlabs.org>;\n\tTue, 20 Mar 2018 08:56:27 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id 3F55C86DB8;\n\tMon, 19 Mar 2018 21:56:26 +0000 (UTC)",
            "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 1OUjrET84JFy; Mon, 19 Mar 2018 21:56:19 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id 1E0108678B;\n\tMon, 19 Mar 2018 21:56:19 +0000 (UTC)",
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 0EE0C1C2272\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 19 Mar 2018 21:56:17 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 0B69988ECF\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 19 Mar 2018 21:56:17 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id pDsXyyvVN17m for <intel-wired-lan@lists.osuosl.org>;\n\tMon, 19 Mar 2018 21:56:09 +0000 (UTC)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id DE56D88F51\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tMon, 19 Mar 2018 21:56:08 +0000 (UTC)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n\tby orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t19 Mar 2018 14:56:07 -0700",
            "from jtkirshe-nuc.jf.intel.com ([134.134.177.59])\n\tby orsmga008.jf.intel.com with ESMTP; 19 Mar 2018 14:56:07 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.48,332,1517904000\"; d=\"scan'208\";a=\"26667084\"",
        "From": "Jeff Kirsher <jeffrey.t.kirsher@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Mon, 19 Mar 2018 14:56:36 -0700",
        "Message-Id": "<20180319215644.31978-7-jeffrey.t.kirsher@intel.com>",
        "X-Mailer": "git-send-email 2.14.3",
        "In-Reply-To": "<20180319215644.31978-1-jeffrey.t.kirsher@intel.com>",
        "References": "<20180319215644.31978-1-jeffrey.t.kirsher@intel.com>",
        "Subject": "[Intel-wired-lan] [PATCH v3 07/15] ice: Add support for VSI\n\tallocation and deallocation",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.24",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "From: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\n\nThis patch introduces data structures and functions to alloc/free\nVSIs. The driver represents a VSI using the ice_vsi structure.\n\nSome noteworthy points about VSI allocation:\n\n1) A VSI is allocated in the firmware using the \"add VSI\" admin queue\n   command (implemented as ice_aq_add_vsi). The firmware returns an\n   identifier for the allocated VSI. The VSI context is used to program\n   certain aspects (loopback, queue map, etc.) of the VSI's configuration.\n\n2) A VSI is deleted using the \"free VSI\" admin queue command (implemented\n   as ice_aq_free_vsi).\n\n3) The driver represents a VSI using struct ice_vsi. This is allocated\n   and initialized as part of the ice_vsi_alloc flow, and deallocated\n   as part of the ice_vsi_delete flow.\n\n4) Once the VSI is created, a netdev is allocated and associated with it.\n   The VSI's ring and vector related data structures are also allocated\n   and initialized.\n\n5) A VSI's queues can either be contiguous or scattered. To do this, the\n   driver maintains a bitmap (vsi->avail_txqs) which is kept in sync with\n   the firmware's VSI queue allocation imap. If the VSI can't get a\n   contiguous queue allocation, it will fallback to scatter. This is\n   implemented in ice_vsi_get_qs which is called as part of the VSI setup\n   flow. In the release flow, the VSI's queues are released and the bitmap\n   is updated to reflect this by ice_vsi_put_qs.\n\nCC: Shannon Nelson <shannon.nelson@oracle.com>\nSigned-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>\nAcked-by: Shannon Nelson <shannon.nelson@oracle.com>\n---\n drivers/net/ethernet/intel/ice/ice.h            |   72 ++\n drivers/net/ethernet/intel/ice/ice_adminq_cmd.h |  199 ++++\n drivers/net/ethernet/intel/ice/ice_main.c       | 1111 +++++++++++++++++++++++\n drivers/net/ethernet/intel/ice/ice_switch.c     |  115 +++\n drivers/net/ethernet/intel/ice/ice_switch.h     |   21 +\n drivers/net/ethernet/intel/ice/ice_txrx.h       |   26 +\n drivers/net/ethernet/intel/ice/ice_type.h       |    4 +\n 7 files changed, 1548 insertions(+)",
    "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h\nindex c8079c852a48..c9f59374daad 100644\n--- a/drivers/net/ethernet/intel/ice/ice.h\n+++ b/drivers/net/ethernet/intel/ice/ice.h\n@@ -25,6 +25,8 @@\n #include <linux/netdevice.h>\n #include <linux/compiler.h>\n #include <linux/etherdevice.h>\n+#include <linux/cpumask.h>\n+#include <linux/if_vlan.h>\n #include <linux/pci.h>\n #include <linux/workqueue.h>\n #include <linux/aer.h>\n@@ -32,6 +34,7 @@\n #include <linux/timer.h>\n #include <linux/delay.h>\n #include <linux/bitmap.h>\n+#include <linux/log2.h>\n #include <linux/if_bridge.h>\n #include \"ice_devids.h\"\n #include \"ice_type.h\"\n@@ -41,17 +44,43 @@\n #include \"ice_sched.h\"\n \n #define ICE_BAR0\t\t0\n+#define ICE_DFLT_NUM_DESC\t128\n+#define ICE_REQ_DESC_MULTIPLE\t32\n #define ICE_INT_NAME_STR_LEN\t(IFNAMSIZ + 16)\n #define ICE_AQ_LEN\t\t64\n #define ICE_MIN_MSIX\t\t2\n+#define ICE_NO_VSI\t\t0xffff\n #define ICE_MAX_VSI_ALLOC\t130\n #define ICE_MAX_TXQS\t\t2048\n #define ICE_MAX_RXQS\t\t2048\n+#define ICE_VSI_MAP_CONTIG\t0\n+#define ICE_VSI_MAP_SCATTER\t1\n+#define ICE_MAX_SCATTER_TXQS\t16\n+#define ICE_MAX_SCATTER_RXQS\t16\n #define ICE_RES_VALID_BIT\t0x8000\n #define ICE_RES_MISC_VEC_ID\t(ICE_RES_VALID_BIT - 1)\n+#define ICE_INVAL_Q_INDEX\t0xffff\n \n #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)\n \n+#define ICE_MAX_MTU\t(ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \\\n+\t\t\t ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)\n+\n+#define ICE_UP_TABLE_TRANSLATE(val, i) \\\n+\t\t(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \\\n+\t\t  ICE_AQ_VSI_UP_TABLE_UP##i##_M)\n+\n+struct ice_tc_info {\n+\tu16 qoffset;\n+\tu16 qcount;\n+};\n+\n+struct ice_tc_cfg {\n+\tu8 numtc; /* Total number of enabled TCs */\n+\tu8 ena_tc; /* TX map */\n+\tstruct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];\n+};\n+\n struct ice_res_tracker {\n \tu16 num_entries;\n \tu16 search_hint;\n@@ -75,8 +104,47 @@ enum ice_state {\n /* struct that defines a VSI, associated with a dev */\n struct ice_vsi {\n \tstruct net_device *netdev;\n+\tstruct ice_sw *vsw;\t\t /* switch this VSI is on */\n+\tstruct ice_pf *back;\t\t /* back pointer to PF */\n \tstruct ice_port_info *port_info; /* back pointer to port_info */\n+\tstruct ice_ring **rx_rings;\t /* rx ring array */\n+\tstruct ice_ring **tx_rings;\t /* tx ring array */\n+\tstruct ice_q_vector **q_vectors; /* q_vector array */\n+\tDECLARE_BITMAP(state, __ICE_STATE_NBITS);\n+\tint num_q_vectors;\n+\tint base_vector;\n+\tenum ice_vsi_type type;\n \tu16 vsi_num;\t\t\t /* HW (absolute) index of this VSI */\n+\tu16 idx;\t\t\t /* software index in pf->vsi[] */\n+\n+\t/* Interrupt thresholds */\n+\tu16 work_lmt;\n+\n+\tstruct ice_aqc_vsi_props info;\t /* VSI properties */\n+\n+\t/* queue information */\n+\tu8 tx_mapping_mode;\t\t /* ICE_MAP_MODE_[CONTIG|SCATTER] */\n+\tu8 rx_mapping_mode;\t\t /* ICE_MAP_MODE_[CONTIG|SCATTER] */\n+\tu16 txq_map[ICE_MAX_TXQS];\t /* index in pf->avail_txqs */\n+\tu16 rxq_map[ICE_MAX_RXQS];\t /* index in pf->avail_rxqs */\n+\tu16 alloc_txq;\t\t\t /* Allocated Tx queues */\n+\tu16 num_txq;\t\t\t /* Used Tx queues */\n+\tu16 alloc_rxq;\t\t\t /* Allocated Rx queues */\n+\tu16 num_rxq;\t\t\t /* Used Rx queues */\n+\tu16 num_desc;\n+\tstruct ice_tc_cfg tc_cfg;\n+} ____cacheline_internodealigned_in_smp;\n+\n+/* struct that defines an interrupt vector */\n+struct ice_q_vector {\n+\tstruct ice_vsi *vsi;\n+\tcpumask_t affinity_mask;\n+\tstruct napi_struct napi;\n+\tstruct ice_ring_container rx;\n+\tstruct ice_ring_container tx;\n+\tu16 v_idx;\t\t\t/* index in the vsi->q_vector array. */\n+\tu8 num_ring_tx;\t\t\t/* total number of tx rings in vector */\n+\tu8 num_ring_rx;\t\t\t/* total number of rx rings in vector */\n } ____cacheline_internodealigned_in_smp;\n \n enum ice_pf_flags {\n@@ -117,6 +185,10 @@ struct ice_pf {\n \tchar int_name[ICE_INT_NAME_STR_LEN];\n };\n \n+struct ice_netdev_priv {\n+\tstruct ice_vsi *vsi;\n+};\n+\n /**\n  * ice_irq_dynamic_ena - Enable default interrupt generation settings\n  * @hw: pointer to hw struct\ndiff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\nindex 1acd936eec49..570169c99786 100644\n--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h\n@@ -23,6 +23,7 @@\n  */\n \n #define ICE_AQC_TOPO_MAX_LEVEL_NUM\t0x9\n+#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX\t9728\n \n struct ice_aqc_generic {\n \t__le32 param0;\n@@ -204,6 +205,199 @@ struct ice_aqc_get_sw_cfg_resp {\n \tstruct ice_aqc_get_sw_cfg_resp_elem elements[1];\n };\n \n+/* Add VSI (indirect 0x0210)\n+ * Update VSI (indirect 0x0211)\n+ * Get VSI (indirect 0x0212)\n+ * Free VSI (indirect 0x0213)\n+ */\n+struct ice_aqc_add_get_update_free_vsi {\n+\t__le16 vsi_num;\n+#define ICE_AQ_VSI_NUM_S\t0\n+#define ICE_AQ_VSI_NUM_M\t(0x03FF << ICE_AQ_VSI_NUM_S)\n+#define ICE_AQ_VSI_IS_VALID\tBIT(15)\n+\t__le16 cmd_flags;\n+#define ICE_AQ_VSI_KEEP_ALLOC\t0x1\n+\tu8 vf_id;\n+\tu8 reserved;\n+\t__le16 vsi_flags;\n+#define ICE_AQ_VSI_TYPE_S\t0\n+#define ICE_AQ_VSI_TYPE_M\t(0x3 << ICE_AQ_VSI_TYPE_S)\n+#define ICE_AQ_VSI_TYPE_VF\t0x0\n+#define ICE_AQ_VSI_TYPE_VMDQ2\t0x1\n+#define ICE_AQ_VSI_TYPE_PF\t0x2\n+#define ICE_AQ_VSI_TYPE_EMP_MNG\t0x3\n+\t__le32 addr_high;\n+\t__le32 addr_low;\n+};\n+\n+/* Response descriptor for:\n+ * Add VSI (indirect 0x0210)\n+ * Update VSI (indirect 0x0211)\n+ * Free VSI (indirect 0x0213)\n+ */\n+struct ice_aqc_add_update_free_vsi_resp {\n+\t__le16 vsi_num;\n+\t__le16 ext_status;\n+\t__le16 vsi_used;\n+\t__le16 vsi_free;\n+\t__le32 
addr_high;\n+\t__le32 addr_low;\n+};\n+\n+struct ice_aqc_vsi_props {\n+\t__le16 valid_sections;\n+#define ICE_AQ_VSI_PROP_SW_VALID\t\tBIT(0)\n+#define ICE_AQ_VSI_PROP_SECURITY_VALID\t\tBIT(1)\n+#define ICE_AQ_VSI_PROP_VLAN_VALID\t\tBIT(2)\n+#define ICE_AQ_VSI_PROP_OUTER_TAG_VALID\t\tBIT(3)\n+#define ICE_AQ_VSI_PROP_INGRESS_UP_VALID\tBIT(4)\n+#define ICE_AQ_VSI_PROP_EGRESS_UP_VALID\t\tBIT(5)\n+#define ICE_AQ_VSI_PROP_RXQ_MAP_VALID\t\tBIT(6)\n+#define ICE_AQ_VSI_PROP_Q_OPT_VALID\t\tBIT(7)\n+#define ICE_AQ_VSI_PROP_OUTER_UP_VALID\t\tBIT(8)\n+#define ICE_AQ_VSI_PROP_FLOW_DIR_VALID\t\tBIT(11)\n+#define ICE_AQ_VSI_PROP_PASID_VALID\t\tBIT(12)\n+\t/* switch section */\n+\tu8 sw_id;\n+\tu8 sw_flags;\n+#define ICE_AQ_VSI_SW_FLAG_ALLOW_LB\t\tBIT(5)\n+#define ICE_AQ_VSI_SW_FLAG_LOCAL_LB\t\tBIT(6)\n+#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE\t\tBIT(7)\n+\tu8 sw_flags2;\n+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S\t0\n+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M\t\\\n+\t\t\t\t(0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)\n+#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA\tBIT(0)\n+#define ICE_AQ_VSI_SW_FLAG_LAN_ENA\t\tBIT(4)\n+\tu8 veb_stat_id;\n+#define ICE_AQ_VSI_SW_VEB_STAT_ID_S\t\t0\n+#define ICE_AQ_VSI_SW_VEB_STAT_ID_M\t(0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)\n+#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID\t\tBIT(5)\n+\t/* security section */\n+\tu8 sec_flags;\n+#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD\tBIT(0)\n+#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF\tBIT(2)\n+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S\t4\n+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M\t(0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)\n+#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA\tBIT(0)\n+\tu8 sec_reserved;\n+\t/* VLAN section */\n+\t__le16 pvid; /* VLANS include priority bits */\n+\tu8 pvlan_reserved[2];\n+\tu8 port_vlan_flags;\n+#define ICE_AQ_VSI_PVLAN_MODE_S\t0\n+#define ICE_AQ_VSI_PVLAN_MODE_M\t(0x3 << ICE_AQ_VSI_PVLAN_MODE_S)\n+#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED\t0x1\n+#define ICE_AQ_VSI_PVLAN_MODE_TAGGED\t0x2\n+#define 
ICE_AQ_VSI_PVLAN_MODE_ALL\t0x3\n+#define ICE_AQ_VSI_PVLAN_INSERT_PVID\tBIT(2)\n+#define ICE_AQ_VSI_PVLAN_EMOD_S\t3\n+#define ICE_AQ_VSI_PVLAN_EMOD_M\t(0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)\n+#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH\t(0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)\n+#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP\t(0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)\n+#define ICE_AQ_VSI_PVLAN_EMOD_STR\t(0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)\n+#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING\t(0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)\n+\tu8 pvlan_reserved2[3];\n+\t/* ingress egress up sections */\n+\t__le32 ingress_table; /* bitmap, 3 bits per up */\n+#define ICE_AQ_VSI_UP_TABLE_UP0_S\t0\n+#define ICE_AQ_VSI_UP_TABLE_UP0_M\t(0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)\n+#define ICE_AQ_VSI_UP_TABLE_UP1_S\t3\n+#define ICE_AQ_VSI_UP_TABLE_UP1_M\t(0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)\n+#define ICE_AQ_VSI_UP_TABLE_UP2_S\t6\n+#define ICE_AQ_VSI_UP_TABLE_UP2_M\t(0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)\n+#define ICE_AQ_VSI_UP_TABLE_UP3_S\t9\n+#define ICE_AQ_VSI_UP_TABLE_UP3_M\t(0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)\n+#define ICE_AQ_VSI_UP_TABLE_UP4_S\t12\n+#define ICE_AQ_VSI_UP_TABLE_UP4_M\t(0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)\n+#define ICE_AQ_VSI_UP_TABLE_UP5_S\t15\n+#define ICE_AQ_VSI_UP_TABLE_UP5_M\t(0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)\n+#define ICE_AQ_VSI_UP_TABLE_UP6_S\t18\n+#define ICE_AQ_VSI_UP_TABLE_UP6_M\t(0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)\n+#define ICE_AQ_VSI_UP_TABLE_UP7_S\t21\n+#define ICE_AQ_VSI_UP_TABLE_UP7_M\t(0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)\n+\t__le32 egress_table;   /* same defines as for ingress table */\n+\t/* outer tags section */\n+\t__le16 outer_tag;\n+\tu8 outer_tag_flags;\n+#define ICE_AQ_VSI_OUTER_TAG_MODE_S\t0\n+#define ICE_AQ_VSI_OUTER_TAG_MODE_M\t(0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S)\n+#define ICE_AQ_VSI_OUTER_TAG_NOTHING\t0x0\n+#define ICE_AQ_VSI_OUTER_TAG_REMOVE\t0x1\n+#define ICE_AQ_VSI_OUTER_TAG_COPY\t0x2\n+#define ICE_AQ_VSI_OUTER_TAG_TYPE_S\t2\n+#define ICE_AQ_VSI_OUTER_TAG_TYPE_M\t(0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)\n+#define 
ICE_AQ_VSI_OUTER_TAG_NONE\t0x0\n+#define ICE_AQ_VSI_OUTER_TAG_STAG\t0x1\n+#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100\t0x2\n+#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100\t0x3\n+#define ICE_AQ_VSI_OUTER_TAG_INSERT\tBIT(4)\n+#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6)\n+\tu8 outer_tag_reserved;\n+\t/* queue mapping section */\n+\t__le16 mapping_flags;\n+#define ICE_AQ_VSI_Q_MAP_CONTIG\t0x0\n+#define ICE_AQ_VSI_Q_MAP_NONCONTIG\tBIT(0)\n+\t__le16 q_mapping[16];\n+#define ICE_AQ_VSI_Q_S\t\t0\n+#define ICE_AQ_VSI_Q_M\t\t(0x7FF << ICE_AQ_VSI_Q_S)\n+\t__le16 tc_mapping[8];\n+#define ICE_AQ_VSI_TC_Q_OFFSET_S\t0\n+#define ICE_AQ_VSI_TC_Q_OFFSET_M\t(0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)\n+#define ICE_AQ_VSI_TC_Q_NUM_S\t\t11\n+#define ICE_AQ_VSI_TC_Q_NUM_M\t\t(0xF << ICE_AQ_VSI_TC_Q_NUM_S)\n+\t/* queueing option section */\n+\tu8 q_opt_rss;\n+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S\t0\n+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M\t(0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)\n+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI\t0x0\n+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF\t0x2\n+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL\t0x3\n+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S\t2\n+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M\t(0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)\n+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S\t6\n+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M\t(0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)\n+#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ\t(0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)\n+#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ\t(0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)\n+#define ICE_AQ_VSI_Q_OPT_RSS_XOR\t(0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)\n+#define ICE_AQ_VSI_Q_OPT_RSS_JHASH\t(0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)\n+\tu8 q_opt_tc;\n+#define ICE_AQ_VSI_Q_OPT_TC_OVR_S\t0\n+#define ICE_AQ_VSI_Q_OPT_TC_OVR_M\t(0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)\n+#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR\tBIT(7)\n+\tu8 q_opt_flags;\n+#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN\tBIT(0)\n+\tu8 q_opt_reserved[3];\n+\t/* outer up section */\n+\t__le32 outer_up_table; /* same structure and defines as ingress tbl */\n+\t/* 
section 10 */\n+\t__le16 sect_10_reserved;\n+\t/* flow director section */\n+\t__le16 fd_options;\n+#define ICE_AQ_VSI_FD_ENABLE\t\tBIT(0)\n+#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE\tBIT(1)\n+#define ICE_AQ_VSI_FD_PROG_ENABLE\tBIT(3)\n+\t__le16 max_fd_fltr_dedicated;\n+\t__le16 max_fd_fltr_shared;\n+\t__le16 fd_def_q;\n+#define ICE_AQ_VSI_FD_DEF_Q_S\t\t0\n+#define ICE_AQ_VSI_FD_DEF_Q_M\t\t(0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)\n+#define ICE_AQ_VSI_FD_DEF_GRP_S\t12\n+#define ICE_AQ_VSI_FD_DEF_GRP_M\t(0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)\n+\t__le16 fd_report_opt;\n+#define ICE_AQ_VSI_FD_REPORT_Q_S\t0\n+#define ICE_AQ_VSI_FD_REPORT_Q_M\t(0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)\n+#define ICE_AQ_VSI_FD_DEF_PRIORITY_S\t12\n+#define ICE_AQ_VSI_FD_DEF_PRIORITY_M\t(0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)\n+#define ICE_AQ_VSI_FD_DEF_DROP\t\tBIT(15)\n+\t/* PASID section */\n+\t__le32 pasid_id;\n+#define ICE_AQ_VSI_PASID_ID_S\t\t0\n+#define ICE_AQ_VSI_PASID_ID_M\t\t(0xFFFFF << ICE_AQ_VSI_PASID_ID_S)\n+#define ICE_AQ_VSI_PASID_ID_VALID\tBIT(31)\n+\tu8 reserved[24];\n+};\n+\n /* Get Default Topology (indirect 0x0400) */\n struct ice_aqc_get_topo {\n \tu8 port_num;\n@@ -590,6 +784,7 @@ struct ice_aq_desc {\n \t\tstruct ice_aqc_query_txsched_res query_sched_res;\n \t\tstruct ice_aqc_add_move_delete_elem add_move_delete_elem;\n \t\tstruct ice_aqc_nvm nvm;\n+\t\tstruct ice_aqc_add_get_update_free_vsi vsi_cmd;\n \t\tstruct ice_aqc_get_link_status get_link_status;\n \t} params;\n };\n@@ -640,6 +835,10 @@ enum ice_adminq_opc {\n \t/* internal switch commands */\n \tice_aqc_opc_get_sw_cfg\t\t\t\t= 0x0200,\n \n+\t/* VSI commands */\n+\tice_aqc_opc_add_vsi\t\t\t\t= 0x0210,\n+\tice_aqc_opc_update_vsi\t\t\t\t= 0x0211,\n+\tice_aqc_opc_free_vsi\t\t\t\t= 0x0213,\n \tice_aqc_opc_clear_pf_cfg\t\t\t= 0x02A4,\n \n \t/* transmit scheduler commands */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c\nindex 197c64ea79e8..eebc25cab244 100644\n--- 
a/drivers/net/ethernet/intel/ice/ice_main.c\n+++ b/drivers/net/ethernet/intel/ice/ice_main.c\n@@ -42,6 +42,37 @@ MODULE_PARM_DESC(debug, \"netif level (0=none,...,16=all)\");\n \n static struct workqueue_struct *ice_wq;\n \n+static int ice_vsi_release(struct ice_vsi *vsi);\n+\n+/**\n+ * ice_get_free_slot - get the next non-NULL location index in array\n+ * @array: array to search\n+ * @size: size of the array\n+ * @curr: last known occupied index to be used as a search hint\n+ *\n+ * void * is being used to keep the functionality generic. This lets us use this\n+ * function on any array of pointers.\n+ */\n+static int ice_get_free_slot(void *array, int size, int curr)\n+{\n+\tint **tmp_array = (int **)array;\n+\tint next;\n+\n+\tif (curr < (size - 1) && !tmp_array[curr + 1]) {\n+\t\tnext = curr + 1;\n+\t} else {\n+\t\tint i = 0;\n+\n+\t\twhile ((i < size) && (tmp_array[i]))\n+\t\t\ti++;\n+\t\tif (i == size)\n+\t\t\tnext = ICE_NO_VSI;\n+\t\telse\n+\t\t\tnext = i;\n+\t}\n+\treturn next;\n+}\n+\n /**\n  * ice_search_res - Search the tracker for a block of resources\n  * @res: pointer to the resource\n@@ -340,6 +371,270 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)\n \thw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;\n }\n \n+/**\n+ * ice_vsi_delete - delete a VSI from the switch\n+ * @vsi: pointer to VSI being removed\n+ */\n+static void ice_vsi_delete(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_vsi_ctx ctxt;\n+\tenum ice_status status;\n+\n+\tctxt.vsi_num = vsi->vsi_num;\n+\n+\tmemcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));\n+\n+\tstatus = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);\n+\tif (status)\n+\t\tdev_err(&pf->pdev->dev, \"Failed to delete VSI %i in FW\\n\",\n+\t\t\tvsi->vsi_num);\n+}\n+\n+/**\n+ * ice_vsi_setup_q_map - Setup a VSI queue map\n+ * @vsi: the VSI being configured\n+ * @ctxt: VSI context structure\n+ */\n+static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)\n+{\n+\tu16 
offset = 0, qmap = 0, pow = 0, qcount;\n+\tu16 qcount_tx = vsi->alloc_txq;\n+\tu16 qcount_rx = vsi->alloc_rxq;\n+\tbool ena_tc0 = false;\n+\tint i;\n+\n+\t/* at least TC0 should be enabled by default */\n+\tif (vsi->tc_cfg.numtc) {\n+\t\tif (!(vsi->tc_cfg.ena_tc & BIT(0)))\n+\t\t\tena_tc0 =  true;\n+\t} else {\n+\t\tena_tc0 =  true;\n+\t}\n+\n+\tif (ena_tc0) {\n+\t\tvsi->tc_cfg.numtc++;\n+\t\tvsi->tc_cfg.ena_tc |= 1;\n+\t}\n+\n+\tqcount = qcount_rx / vsi->tc_cfg.numtc;\n+\n+\t/* find higher power-of-2 of qcount */\n+\tpow = ilog2(qcount);\n+\n+\tif (!is_power_of_2(qcount))\n+\t\tpow++;\n+\n+\t/* TC mapping is a function of the number of Rx queues assigned to the\n+\t * VSI for each traffic class and the offset of these queues.\n+\t * The first 10 bits are for queue offset for TC0, next 4 bits for no:of\n+\t * queues allocated to TC0. No:of queues is a power-of-2.\n+\t *\n+\t * If TC is not enabled, the queue offset is set to 0, and allocate one\n+\t * queue, this way, traffic for the given TC will be sent to the default\n+\t * queue.\n+\t *\n+\t * Setup number and offset of Rx queues for all TCs for the VSI\n+\t */\n+\tfor (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {\n+\t\tif (!(vsi->tc_cfg.ena_tc & BIT(i))) {\n+\t\t\t/* TC is not enabled */\n+\t\t\tvsi->tc_cfg.tc_info[i].qoffset = 0;\n+\t\t\tvsi->tc_cfg.tc_info[i].qcount = 1;\n+\t\t\tctxt->info.tc_mapping[i] = 0;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* TC is enabled */\n+\t\tvsi->tc_cfg.tc_info[i].qoffset = offset;\n+\t\tvsi->tc_cfg.tc_info[i].qcount = qcount;\n+\n+\t\tqmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &\n+\t\t\tICE_AQ_VSI_TC_Q_OFFSET_M) |\n+\t\t\t((pow << ICE_AQ_VSI_TC_Q_NUM_S) &\n+\t\t\t ICE_AQ_VSI_TC_Q_NUM_M);\n+\t\toffset += qcount;\n+\t\tctxt->info.tc_mapping[i] = cpu_to_le16(qmap);\n+\t}\n+\n+\tvsi->num_txq = qcount_tx;\n+\tvsi->num_rxq = offset;\n+\n+\t/* Rx queue mapping */\n+\tctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);\n+\t/* q_mapping buffer holds the info for the first queue 
allocated for\n+\t * this VSI in the PF space and also the number of queues associated\n+\t * with this VSI.\n+\t */\n+\tctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);\n+\tctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);\n+}\n+\n+/**\n+ * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI\n+ * @ctxt: the VSI context being set\n+ *\n+ * This initializes a default VSI context for all sections except the Queues.\n+ */\n+static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)\n+{\n+\tu32 table = 0;\n+\n+\tmemset(&ctxt->info, 0, sizeof(ctxt->info));\n+\t/* VSI's should be allocated from shared pool */\n+\tctxt->alloc_from_pool = true;\n+\t/* Src pruning enabled by default */\n+\tctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;\n+\t/* Traffic from VSI can be sent to LAN */\n+\tctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;\n+\t/* Allow all packets untagged/tagged */\n+\tctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &\n+\t\t\t\t       ICE_AQ_VSI_PVLAN_MODE_M) >>\n+\t\t\t\t      ICE_AQ_VSI_PVLAN_MODE_S);\n+\t/* Show VLAN/UP from packets in Rx descriptors */\n+\tctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &\n+\t\t\t\t\tICE_AQ_VSI_PVLAN_EMOD_M) >>\n+\t\t\t\t       ICE_AQ_VSI_PVLAN_EMOD_S);\n+\t/* Have 1:1 UP mapping for both ingress/egress tables */\n+\ttable |= ICE_UP_TABLE_TRANSLATE(0, 0);\n+\ttable |= ICE_UP_TABLE_TRANSLATE(1, 1);\n+\ttable |= ICE_UP_TABLE_TRANSLATE(2, 2);\n+\ttable |= ICE_UP_TABLE_TRANSLATE(3, 3);\n+\ttable |= ICE_UP_TABLE_TRANSLATE(4, 4);\n+\ttable |= ICE_UP_TABLE_TRANSLATE(5, 5);\n+\ttable |= ICE_UP_TABLE_TRANSLATE(6, 6);\n+\ttable |= ICE_UP_TABLE_TRANSLATE(7, 7);\n+\tctxt->info.ingress_table = cpu_to_le32(table);\n+\tctxt->info.egress_table = cpu_to_le32(table);\n+\t/* Have 1:1 UP mapping for outer to inner UP table */\n+\tctxt->info.outer_up_table = cpu_to_le32(table);\n+\t/* No Outer tag support outer_tag_flags remains to zero */\n+}\n+\n+/**\n+ * ice_vsi_add - Create a new VSI or 
fetch preallocated VSI\n+ * @vsi: the VSI being configured\n+ *\n+ * This initializes a VSI context depending on the VSI type to be added and\n+ * passes it down to the add_vsi aq command to create a new VSI.\n+ */\n+static int ice_vsi_add(struct ice_vsi *vsi)\n+{\n+\tstruct ice_vsi_ctx ctxt = { 0 };\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_hw *hw = &pf->hw;\n+\tint ret = 0;\n+\n+\tswitch (vsi->type) {\n+\tcase ICE_VSI_PF:\n+\t\tctxt.flags = ICE_AQ_VSI_TYPE_PF;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tice_set_dflt_vsi_ctx(&ctxt);\n+\t/* if the switch is in VEB mode, allow VSI loopback */\n+\tif (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)\n+\t\tctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;\n+\n+\tctxt.info.sw_id = vsi->port_info->sw_id;\n+\tice_vsi_setup_q_map(vsi, &ctxt);\n+\n+\tret = ice_aq_add_vsi(hw, &ctxt, NULL);\n+\tif (ret) {\n+\t\tdev_err(&vsi->back->pdev->dev,\n+\t\t\t\"Add VSI AQ call failed, err %d\\n\", ret);\n+\t\treturn -EIO;\n+\t}\n+\tvsi->info = ctxt.info;\n+\tvsi->vsi_num = ctxt.vsi_num;\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI\n+ * @vsi: the VSI having rings deallocated\n+ */\n+static void ice_vsi_clear_rings(struct ice_vsi *vsi)\n+{\n+\tint i;\n+\n+\tif (vsi->tx_rings) {\n+\t\tfor (i = 0; i < vsi->alloc_txq; i++) {\n+\t\t\tif (vsi->tx_rings[i]) {\n+\t\t\t\tkfree_rcu(vsi->tx_rings[i], rcu);\n+\t\t\t\tvsi->tx_rings[i] = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+\tif (vsi->rx_rings) {\n+\t\tfor (i = 0; i < vsi->alloc_rxq; i++) {\n+\t\t\tif (vsi->rx_rings[i]) {\n+\t\t\t\tkfree_rcu(vsi->rx_rings[i], rcu);\n+\t\t\t\tvsi->rx_rings[i] = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI\n+ * @vsi: VSI which is having rings allocated\n+ */\n+static int ice_vsi_alloc_rings(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tint i;\n+\n+\t/* Allocate tx_rings */\n+\tfor (i = 0; i < vsi->alloc_txq; i++) 
{\n+\t\tstruct ice_ring *ring;\n+\n+\t\t/* allocate with kzalloc(), free with kfree_rcu() */\n+\t\tring = kzalloc(sizeof(*ring), GFP_KERNEL);\n+\n+\t\tif (!ring)\n+\t\t\tgoto err_out;\n+\n+\t\tring->q_index = i;\n+\t\tring->reg_idx = vsi->txq_map[i];\n+\t\tring->ring_active = false;\n+\t\tring->vsi = vsi;\n+\t\tring->netdev = vsi->netdev;\n+\t\tring->dev = &pf->pdev->dev;\n+\t\tring->count = vsi->num_desc;\n+\n+\t\tvsi->tx_rings[i] = ring;\n+\t}\n+\n+\t/* Allocate rx_rings */\n+\tfor (i = 0; i < vsi->alloc_rxq; i++) {\n+\t\tstruct ice_ring *ring;\n+\n+\t\t/* allocate with kzalloc(), free with kfree_rcu() */\n+\t\tring = kzalloc(sizeof(*ring), GFP_KERNEL);\n+\t\tif (!ring)\n+\t\t\tgoto err_out;\n+\n+\t\tring->q_index = i;\n+\t\tring->reg_idx = vsi->rxq_map[i];\n+\t\tring->ring_active = false;\n+\t\tring->vsi = vsi;\n+\t\tring->netdev = vsi->netdev;\n+\t\tring->dev = &pf->pdev->dev;\n+\t\tring->count = vsi->num_desc;\n+\t\tvsi->rx_rings[i] = ring;\n+\t}\n+\n+\treturn 0;\n+\n+err_out:\n+\tice_vsi_clear_rings(vsi);\n+\treturn -ENOMEM;\n+}\n+\n /**\n  * ice_ena_misc_vector - enable the non-queue interrupts\n  * @pf: board private structure\n@@ -426,6 +721,189 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)\n \treturn ret;\n }\n \n+/**\n+ * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors\n+ * @vsi: the VSI being configured\n+ *\n+ * This function maps descriptor rings to the queue-specific vectors allotted\n+ * through the MSI-X enabling code. 
On a constrained vector budget, we map Tx\n+ * and Rx rings to the vector as \"efficiently\" as possible.\n+ */\n+static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)\n+{\n+\tint q_vectors = vsi->num_q_vectors;\n+\tint tx_rings_rem, rx_rings_rem;\n+\tint v_id;\n+\n+\t/* initially assigning remaining rings count to VSIs num queue value */\n+\ttx_rings_rem = vsi->num_txq;\n+\trx_rings_rem = vsi->num_rxq;\n+\n+\tfor (v_id = 0; v_id < q_vectors; v_id++) {\n+\t\tstruct ice_q_vector *q_vector = vsi->q_vectors[v_id];\n+\t\tint tx_rings_per_v, rx_rings_per_v, q_id, q_base;\n+\n+\t\t/* Tx rings mapping to vector */\n+\t\ttx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);\n+\t\tq_vector->num_ring_tx = tx_rings_per_v;\n+\t\tq_vector->tx.ring = NULL;\n+\t\tq_base = vsi->num_txq - tx_rings_rem;\n+\n+\t\tfor (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {\n+\t\t\tstruct ice_ring *tx_ring = vsi->tx_rings[q_id];\n+\n+\t\t\ttx_ring->q_vector = q_vector;\n+\t\t\ttx_ring->next = q_vector->tx.ring;\n+\t\t\tq_vector->tx.ring = tx_ring;\n+\t\t}\n+\t\ttx_rings_rem -= tx_rings_per_v;\n+\n+\t\t/* Rx rings mapping to vector */\n+\t\trx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);\n+\t\tq_vector->num_ring_rx = rx_rings_per_v;\n+\t\tq_vector->rx.ring = NULL;\n+\t\tq_base = vsi->num_rxq - rx_rings_rem;\n+\n+\t\tfor (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {\n+\t\t\tstruct ice_ring *rx_ring = vsi->rx_rings[q_id];\n+\n+\t\t\trx_ring->q_vector = q_vector;\n+\t\t\trx_ring->next = q_vector->rx.ring;\n+\t\t\tq_vector->rx.ring = rx_ring;\n+\t\t}\n+\t\trx_rings_rem -= rx_rings_per_v;\n+\t}\n+}\n+\n+/**\n+ * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI\n+ * @vsi: the VSI being configured\n+ *\n+ * Return 0 on success and a negative value on error\n+ */\n+static void ice_vsi_set_num_qs(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\n+\tswitch (vsi->type) {\n+\tcase ICE_VSI_PF:\n+\t\tvsi->alloc_txq 
= pf->num_lan_tx;\n+\t\tvsi->alloc_rxq = pf->num_lan_rx;\n+\t\tvsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);\n+\t\tvsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);\n+\t\tbreak;\n+\tdefault:\n+\t\tdev_warn(&vsi->back->pdev->dev, \"Unknown VSI type %d\\n\",\n+\t\t\t vsi->type);\n+\t\tbreak;\n+\t}\n+}\n+\n+/**\n+ * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi\n+ * @vsi: VSI pointer\n+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.\n+ *\n+ * On error: returns error code (negative)\n+ * On success: returns 0\n+ */\n+static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\n+\t/* allocate memory for both Tx and Rx ring pointers */\n+\tvsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,\n+\t\t\t\t     sizeof(struct ice_ring *), GFP_KERNEL);\n+\tif (!vsi->tx_rings)\n+\t\tgoto err_txrings;\n+\n+\tvsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,\n+\t\t\t\t     sizeof(struct ice_ring *), GFP_KERNEL);\n+\tif (!vsi->rx_rings)\n+\t\tgoto err_rxrings;\n+\n+\tif (alloc_qvectors) {\n+\t\t/* allocate memory for q_vector pointers */\n+\t\tvsi->q_vectors = devm_kcalloc(&pf->pdev->dev,\n+\t\t\t\t\t      vsi->num_q_vectors,\n+\t\t\t\t\t      sizeof(struct ice_q_vector *),\n+\t\t\t\t\t      GFP_KERNEL);\n+\t\tif (!vsi->q_vectors)\n+\t\t\tgoto err_vectors;\n+\t}\n+\n+\treturn 0;\n+\n+err_vectors:\n+\tdevm_kfree(&pf->pdev->dev, vsi->rx_rings);\n+err_rxrings:\n+\tdevm_kfree(&pf->pdev->dev, vsi->tx_rings);\n+err_txrings:\n+\treturn -ENOMEM;\n+}\n+\n+/**\n+ * ice_vsi_alloc - Allocates the next available struct vsi in the PF\n+ * @pf: board private structure\n+ * @type: type of VSI\n+ *\n+ * returns a pointer to a VSI on success, NULL on failure.\n+ */\n+static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)\n+{\n+\tstruct ice_vsi *vsi = NULL;\n+\n+\t/* Need to protect the allocation of the VSIs at 
the PF level */\n+\tmutex_lock(&pf->sw_mutex);\n+\n+\t/* If we have already allocated our maximum number of VSIs,\n+\t * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index\n+\t * is available to be populated\n+\t */\n+\tif (pf->next_vsi == ICE_NO_VSI) {\n+\t\tdev_dbg(&pf->pdev->dev, \"out of VSI slots!\\n\");\n+\t\tgoto unlock_pf;\n+\t}\n+\n+\tvsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);\n+\tif (!vsi)\n+\t\tgoto unlock_pf;\n+\n+\tvsi->type = type;\n+\tvsi->back = pf;\n+\tset_bit(__ICE_DOWN, vsi->state);\n+\tvsi->idx = pf->next_vsi;\n+\tvsi->work_lmt = ICE_DFLT_IRQ_WORK;\n+\n+\tice_vsi_set_num_qs(vsi);\n+\n+\tswitch (vsi->type) {\n+\tcase ICE_VSI_PF:\n+\t\tif (ice_vsi_alloc_arrays(vsi, true))\n+\t\t\tgoto err_rings;\n+\n+\t\tbreak;\n+\tdefault:\n+\t\tdev_warn(&pf->pdev->dev, \"Unknown VSI type %d\\n\", vsi->type);\n+\t\tgoto unlock_pf;\n+\t}\n+\n+\t/* fill VSI slot in the PF struct */\n+\tpf->vsi[pf->next_vsi] = vsi;\n+\n+\t/* prepare pf->next_vsi for next use */\n+\tpf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,\n+\t\t\t\t\t pf->next_vsi);\n+\tgoto unlock_pf;\n+\n+err_rings:\n+\tdevm_kfree(&pf->pdev->dev, vsi);\n+\tvsi = NULL;\n+unlock_pf:\n+\tmutex_unlock(&pf->sw_mutex);\n+\treturn vsi;\n+}\n+\n /**\n  * ice_free_irq_msix_misc - Unroll misc vector setup\n  * @pf: board private structure\n@@ -507,6 +985,581 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)\n \treturn 0;\n }\n \n+/**\n+ * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI\n+ * @vsi: the VSI getting queues\n+ *\n+ * Return 0 on success and a negative value on error\n+ */\n+static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tint offset, ret = 0;\n+\n+\tmutex_lock(&pf->avail_q_mutex);\n+\t/* look for contiguous block of queues for tx */\n+\toffset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,\n+\t\t\t\t\t    0, vsi->alloc_txq, 0);\n+\tif (offset < ICE_MAX_TXQS) {\n+\t\tint 
i;\n+\n+\t\tbitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);\n+\t\tfor (i = 0; i < vsi->alloc_txq; i++)\n+\t\t\tvsi->txq_map[i] = i + offset;\n+\t} else {\n+\t\tret = -ENOMEM;\n+\t\tvsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;\n+\t}\n+\n+\t/* look for contiguous block of queues for rx */\n+\toffset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,\n+\t\t\t\t\t    0, vsi->alloc_rxq, 0);\n+\tif (offset < ICE_MAX_RXQS) {\n+\t\tint i;\n+\n+\t\tbitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);\n+\t\tfor (i = 0; i < vsi->alloc_rxq; i++)\n+\t\t\tvsi->rxq_map[i] = i + offset;\n+\t} else {\n+\t\tret = -ENOMEM;\n+\t\tvsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;\n+\t}\n+\tmutex_unlock(&pf->avail_q_mutex);\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI\n+ * @vsi: the VSI getting queues\n+ *\n+ * Return 0 on success and a negative value on error\n+ */\n+static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tint i, index = 0;\n+\n+\tmutex_lock(&pf->avail_q_mutex);\n+\n+\tif (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {\n+\t\tfor (i = 0; i < vsi->alloc_txq; i++) {\n+\t\t\tindex = find_next_zero_bit(pf->avail_txqs,\n+\t\t\t\t\t\t   ICE_MAX_TXQS, index);\n+\t\t\tif (index < ICE_MAX_TXQS) {\n+\t\t\t\tset_bit(index, pf->avail_txqs);\n+\t\t\t\tvsi->txq_map[i] = index;\n+\t\t\t} else {\n+\t\t\t\tgoto err_scatter_tx;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {\n+\t\tfor (i = 0; i < vsi->alloc_rxq; i++) {\n+\t\t\tindex = find_next_zero_bit(pf->avail_rxqs,\n+\t\t\t\t\t\t   ICE_MAX_RXQS, index);\n+\t\t\tif (index < ICE_MAX_RXQS) {\n+\t\t\t\tset_bit(index, pf->avail_rxqs);\n+\t\t\t\tvsi->rxq_map[i] = index;\n+\t\t\t} else {\n+\t\t\t\tgoto err_scatter_rx;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tmutex_unlock(&pf->avail_q_mutex);\n+\treturn 0;\n+\n+err_scatter_rx:\n+\t/* unflag any queues we have grabbed (i is failed position) */\n+\tfor (index = 0; index < i; 
index++) {\n+\t\tclear_bit(vsi->rxq_map[index], pf->avail_rxqs);\n+\t\tvsi->rxq_map[index] = 0;\n+\t}\n+\ti = vsi->alloc_txq;\n+err_scatter_tx:\n+\t/* i is either position of failed attempt or vsi->alloc_txq */\n+\tfor (index = 0; index < i; index++) {\n+\t\tclear_bit(vsi->txq_map[index], pf->avail_txqs);\n+\t\tvsi->txq_map[index] = 0;\n+\t}\n+\n+\tmutex_unlock(&pf->avail_q_mutex);\n+\treturn -ENOMEM;\n+}\n+\n+/**\n+ * ice_vsi_get_qs - Assign queues from PF to VSI\n+ * @vsi: the VSI to assign queues to\n+ *\n+ * Returns 0 on success and a negative value on error\n+ */\n+static int ice_vsi_get_qs(struct ice_vsi *vsi)\n+{\n+\tint ret = 0;\n+\n+\tvsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;\n+\tvsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;\n+\n+\t/* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping\n+\t * modes individually to scatter if assigning contiguous queues\n+\t * to rx or tx fails\n+\t */\n+\tret = ice_vsi_get_qs_contig(vsi);\n+\tif (ret < 0) {\n+\t\tif (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)\n+\t\t\tvsi->alloc_txq = max_t(u16, vsi->alloc_txq,\n+\t\t\t\t\t       ICE_MAX_SCATTER_TXQS);\n+\t\tif (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)\n+\t\t\tvsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,\n+\t\t\t\t\t       ICE_MAX_SCATTER_RXQS);\n+\t\tret = ice_vsi_get_qs_scatter(vsi);\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ice_vsi_put_qs - Release queues from VSI to PF\n+ * @vsi: the VSI thats going to release queues\n+ */\n+static void ice_vsi_put_qs(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tint i;\n+\n+\tmutex_lock(&pf->avail_q_mutex);\n+\n+\tfor (i = 0; i < vsi->alloc_txq; i++) {\n+\t\tclear_bit(vsi->txq_map[i], pf->avail_txqs);\n+\t\tvsi->txq_map[i] = ICE_INVAL_Q_INDEX;\n+\t}\n+\n+\tfor (i = 0; i < vsi->alloc_rxq; i++) {\n+\t\tclear_bit(vsi->rxq_map[i], pf->avail_rxqs);\n+\t\tvsi->rxq_map[i] = ICE_INVAL_Q_INDEX;\n+\t}\n+\n+\tmutex_unlock(&pf->avail_q_mutex);\n+}\n+\n+/**\n+ * ice_free_q_vector - Free memory allocated for a specific 
interrupt vector\n+ * @vsi: VSI having the memory freed\n+ * @v_idx: index of the vector to be freed\n+ */\n+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)\n+{\n+\tstruct ice_q_vector *q_vector;\n+\tstruct ice_ring *ring;\n+\n+\tif (!vsi->q_vectors[v_idx]) {\n+\t\tdev_dbg(&vsi->back->pdev->dev, \"Queue vector at index %d not found\\n\",\n+\t\t\tv_idx);\n+\t\treturn;\n+\t}\n+\tq_vector = vsi->q_vectors[v_idx];\n+\n+\tice_for_each_ring(ring, q_vector->tx)\n+\t\tring->q_vector = NULL;\n+\tice_for_each_ring(ring, q_vector->rx)\n+\t\tring->q_vector = NULL;\n+\n+\t/* only VSI with an associated netdev is set up with NAPI */\n+\tif (vsi->netdev)\n+\t\tnetif_napi_del(&q_vector->napi);\n+\n+\tdevm_kfree(&vsi->back->pdev->dev, q_vector);\n+\tvsi->q_vectors[v_idx] = NULL;\n+}\n+\n+/**\n+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors\n+ * @vsi: the VSI having memory freed\n+ */\n+static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)\n+{\n+\tint v_idx;\n+\n+\tfor (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)\n+\t\tice_free_q_vector(vsi, v_idx);\n+}\n+\n+/**\n+ * ice_cfg_netdev - Setup the netdev flags\n+ * @vsi: the VSI being configured\n+ *\n+ * Returns 0 on success, negative value on failure\n+ */\n+static int ice_cfg_netdev(struct ice_vsi *vsi)\n+{\n+\tstruct ice_netdev_priv *np;\n+\tstruct net_device *netdev;\n+\tu8 mac_addr[ETH_ALEN];\n+\n+\tnetdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),\n+\t\t\t\t    vsi->alloc_txq, vsi->alloc_rxq);\n+\tif (!netdev)\n+\t\treturn -ENOMEM;\n+\n+\tvsi->netdev = netdev;\n+\tnp = netdev_priv(netdev);\n+\tnp->vsi = vsi;\n+\n+\t/* set features that user can change */\n+\tnetdev->hw_features = NETIF_F_SG\t|\n+\t\t\t      NETIF_F_HIGHDMA\t|\n+\t\t\t      NETIF_F_RXHASH;\n+\n+\t/* enable features */\n+\tnetdev->features |= netdev->hw_features;\n+\n+\tif (vsi->type == ICE_VSI_PF) {\n+\t\tSET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);\n+\t\tether_addr_copy(mac_addr, 
vsi->port_info->mac.perm_addr);\n+\n+\t\tether_addr_copy(netdev->dev_addr, mac_addr);\n+\t\tether_addr_copy(netdev->perm_addr, mac_addr);\n+\t}\n+\n+\tnetdev->priv_flags |= IFF_UNICAST_FLT;\n+\n+\t/* setup watchdog timeout value to be 5 second */\n+\tnetdev->watchdog_timeo = 5 * HZ;\n+\n+\tnetdev->min_mtu = ETH_MIN_MTU;\n+\tnetdev->max_mtu = ICE_MAX_MTU;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_vsi_free_arrays - clean up vsi resources\n+ * @vsi: pointer to VSI being cleared\n+ * @free_qvectors: bool to specify if q_vectors should be deallocated\n+ */\n+static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\n+\t/* free the ring and vector containers */\n+\tif (free_qvectors && vsi->q_vectors) {\n+\t\tdevm_kfree(&pf->pdev->dev, vsi->q_vectors);\n+\t\tvsi->q_vectors = NULL;\n+\t}\n+\tif (vsi->tx_rings) {\n+\t\tdevm_kfree(&pf->pdev->dev, vsi->tx_rings);\n+\t\tvsi->tx_rings = NULL;\n+\t}\n+\tif (vsi->rx_rings) {\n+\t\tdevm_kfree(&pf->pdev->dev, vsi->rx_rings);\n+\t\tvsi->rx_rings = NULL;\n+\t}\n+}\n+\n+/**\n+ * ice_vsi_clear - clean up and deallocate the provided vsi\n+ * @vsi: pointer to VSI being cleared\n+ *\n+ * This deallocates the vsi's queue resources, removes it from the PF's\n+ * VSI array if necessary, and deallocates the VSI\n+ *\n+ * Returns 0 on success, negative on failure\n+ */\n+static int ice_vsi_clear(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = NULL;\n+\n+\tif (!vsi)\n+\t\treturn 0;\n+\n+\tif (!vsi->back)\n+\t\treturn -EINVAL;\n+\n+\tpf = vsi->back;\n+\n+\tif (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {\n+\t\tdev_dbg(&pf->pdev->dev, \"vsi does not exist at pf->vsi[%d]\\n\",\n+\t\t\tvsi->idx);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmutex_lock(&pf->sw_mutex);\n+\t/* updates the PF for this cleared vsi */\n+\n+\tpf->vsi[vsi->idx] = NULL;\n+\tif (vsi->idx < pf->next_vsi)\n+\t\tpf->next_vsi = vsi->idx;\n+\n+\tice_vsi_free_arrays(vsi, 
true);\n+\tmutex_unlock(&pf->sw_mutex);\n+\tdevm_kfree(&pf->pdev->dev, vsi);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector\n+ * @vsi: the VSI being configured\n+ * @v_idx: index of the vector in the vsi struct\n+ *\n+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.\n+ */\n+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tstruct ice_q_vector *q_vector;\n+\n+\t/* allocate q_vector */\n+\tq_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);\n+\tif (!q_vector)\n+\t\treturn -ENOMEM;\n+\n+\tq_vector->vsi = vsi;\n+\tq_vector->v_idx = v_idx;\n+\t/* only set affinity_mask if the CPU is online */\n+\tif (cpu_online(v_idx))\n+\t\tcpumask_set_cpu(v_idx, &q_vector->affinity_mask);\n+\n+\t/* tie q_vector and vsi together */\n+\tvsi->q_vectors[v_idx] = q_vector;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors\n+ * @vsi: the VSI being configured\n+ *\n+ * We allocate one q_vector per queue interrupt.  
If allocation fails we\n+ * return -ENOMEM.\n+ */\n+static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tint v_idx = 0, num_q_vectors;\n+\tint err;\n+\n+\tif (vsi->q_vectors[0]) {\n+\t\tdev_dbg(&pf->pdev->dev, \"VSI %d has existing q_vectors\\n\",\n+\t\t\tvsi->vsi_num);\n+\t\treturn -EEXIST;\n+\t}\n+\n+\tif (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {\n+\t\tnum_q_vectors = vsi->num_q_vectors;\n+\t} else {\n+\t\terr = -EINVAL;\n+\t\tgoto err_out;\n+\t}\n+\n+\tfor (v_idx = 0; v_idx < num_q_vectors; v_idx++) {\n+\t\terr = ice_vsi_alloc_q_vector(vsi, v_idx);\n+\t\tif (err)\n+\t\t\tgoto err_out;\n+\t}\n+\n+\treturn 0;\n+\n+err_out:\n+\twhile (v_idx--)\n+\t\tice_free_q_vector(vsi, v_idx);\n+\n+\tdev_err(&pf->pdev->dev,\n+\t\t\"Failed to allocate %d q_vector for VSI %d, ret=%d\\n\",\n+\t\tvsi->num_q_vectors, vsi->vsi_num, err);\n+\tvsi->num_q_vectors = 0;\n+\treturn err;\n+}\n+\n+/**\n+ * ice_vsi_setup_vector_base - Set up the base vector for the given VSI\n+ * @vsi: ptr to the VSI\n+ *\n+ * This should only be called after ice_vsi_alloc() which allocates the\n+ * corresponding SW VSI structure and initializes num_queue_pairs for the\n+ * newly allocated VSI.\n+ *\n+ * Returns 0 on success or negative on failure\n+ */\n+static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf = vsi->back;\n+\tint num_q_vectors = 0;\n+\n+\tif (vsi->base_vector) {\n+\t\tdev_dbg(&pf->pdev->dev, \"VSI %d has non-zero base vector %d\\n\",\n+\t\t\tvsi->vsi_num, vsi->base_vector);\n+\t\treturn -EEXIST;\n+\t}\n+\n+\tif (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))\n+\t\treturn -ENOENT;\n+\n+\tswitch (vsi->type) {\n+\tcase ICE_VSI_PF:\n+\t\tnum_q_vectors = vsi->num_q_vectors;\n+\t\tbreak;\n+\tdefault:\n+\t\tdev_warn(&vsi->back->pdev->dev, \"Unknown VSI type %d\\n\",\n+\t\t\t vsi->type);\n+\t\tbreak;\n+\t}\n+\n+\tif (num_q_vectors)\n+\t\tvsi->base_vector = ice_get_res(pf, pf->irq_tracker,\n+\t\t\t\t\t       num_q_vectors, 
vsi->idx);\n+\n+\tif (vsi->base_vector < 0) {\n+\t\tdev_err(&pf->pdev->dev,\n+\t\t\t\"Failed to get tracking for %d vectors for VSI %d, err=%d\\n\",\n+\t\t\tnum_q_vectors, vsi->vsi_num, vsi->base_vector);\n+\t\treturn -ENOENT;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * ice_vsi_setup - Set up a VSI by a given type\n+ * @pf: board private structure\n+ * @type: VSI type\n+ * @pi: pointer to the port_info instance\n+ *\n+ * This allocates the sw VSI structure and its queue resources.\n+ *\n+ * Returns pointer to the successfully allocated and configure VSI sw struct on\n+ * success, otherwise returns NULL on failure.\n+ */\n+static struct ice_vsi *\n+ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type,\n+\t      struct ice_port_info *pi)\n+{\n+\tstruct device *dev = &pf->pdev->dev;\n+\tstruct ice_vsi_ctx ctxt = { 0 };\n+\tstruct ice_vsi *vsi;\n+\tint ret;\n+\n+\tvsi = ice_vsi_alloc(pf, type);\n+\tif (!vsi) {\n+\t\tdev_err(dev, \"could not allocate VSI\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tvsi->port_info = pi;\n+\tvsi->vsw = pf->first_sw;\n+\n+\tif (ice_vsi_get_qs(vsi)) {\n+\t\tdev_err(dev, \"Failed to allocate queues. 
vsi->idx = %d\\n\",\n+\t\t\tvsi->idx);\n+\t\tgoto err_get_qs;\n+\t}\n+\n+\t/* create the VSI */\n+\tret = ice_vsi_add(vsi);\n+\tif (ret)\n+\t\tgoto err_vsi;\n+\n+\tctxt.vsi_num = vsi->vsi_num;\n+\n+\tswitch (vsi->type) {\n+\tcase ICE_VSI_PF:\n+\t\tret = ice_cfg_netdev(vsi);\n+\t\tif (ret)\n+\t\t\tgoto err_cfg_netdev;\n+\n+\t\tret = register_netdev(vsi->netdev);\n+\t\tif (ret)\n+\t\t\tgoto err_register_netdev;\n+\n+\t\tnetif_carrier_off(vsi->netdev);\n+\n+\t\t/* make sure transmit queues start off as stopped */\n+\t\tnetif_tx_stop_all_queues(vsi->netdev);\n+\t\tret = ice_vsi_alloc_q_vectors(vsi);\n+\t\tif (ret)\n+\t\t\tgoto err_msix;\n+\n+\t\tret = ice_vsi_setup_vector_base(vsi);\n+\t\tif (ret)\n+\t\t\tgoto err_rings;\n+\n+\t\tret = ice_vsi_alloc_rings(vsi);\n+\t\tif (ret)\n+\t\t\tgoto err_rings;\n+\n+\t\tice_vsi_map_rings_to_vectors(vsi);\n+\n+\t\tbreak;\n+\tdefault:\n+\t\t/* if vsi type is not recognized, clean up the resources and\n+\t\t * exit\n+\t\t */\n+\t\tgoto err_rings;\n+\t}\n+\treturn vsi;\n+\n+err_rings:\n+\tice_vsi_free_q_vectors(vsi);\n+err_msix:\n+\tif (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)\n+\t\tunregister_netdev(vsi->netdev);\n+err_register_netdev:\n+\tif (vsi->netdev) {\n+\t\tfree_netdev(vsi->netdev);\n+\t\tvsi->netdev = NULL;\n+\t}\n+err_cfg_netdev:\n+\tret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);\n+\tif (ret)\n+\t\tdev_err(&vsi->back->pdev->dev,\n+\t\t\t\"Free VSI AQ call failed, err %d\\n\", ret);\n+err_vsi:\n+\tice_vsi_put_qs(vsi);\n+err_get_qs:\n+\tpf->q_left_tx += vsi->alloc_txq;\n+\tpf->q_left_rx += vsi->alloc_rxq;\n+\tice_vsi_clear(vsi);\n+\n+\treturn NULL;\n+}\n+\n+/**\n+ * ice_setup_pf_sw - Setup the HW switch on startup or after reset\n+ * @pf: board private structure\n+ *\n+ * Returns 0 on success, negative value on failure\n+ */\n+static int ice_setup_pf_sw(struct ice_pf *pf)\n+{\n+\tstruct ice_vsi *vsi;\n+\tint status = 0;\n+\n+\tvsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info);\n+\tif (!vsi) 
{\n+\t\tstatus = -ENOMEM;\n+\t\tgoto error_exit;\n+\t}\n+\n+error_exit:\n+\tif (vsi) {\n+\t\tice_vsi_free_q_vectors(vsi);\n+\t\tif (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)\n+\t\t\tunregister_netdev(vsi->netdev);\n+\t\tif (vsi->netdev) {\n+\t\t\tfree_netdev(vsi->netdev);\n+\t\t\tvsi->netdev = NULL;\n+\t\t}\n+\t\tice_vsi_delete(vsi);\n+\t\tice_vsi_put_qs(vsi);\n+\t\tpf->q_left_tx += vsi->alloc_txq;\n+\t\tpf->q_left_rx += vsi->alloc_rxq;\n+\t\tice_vsi_clear(vsi);\n+\t}\n+\treturn status;\n+}\n+\n /**\n  * ice_determine_q_usage - Calculate queue distribution\n  * @pf: board private structure\n@@ -824,8 +1877,17 @@ static int ice_probe(struct pci_dev *pdev,\n \t/* record the sw_id available for later use */\n \tpf->first_sw->sw_id = hw->port_info->sw_id;\n \n+\terr = ice_setup_pf_sw(pf);\n+\tif (err) {\n+\t\tdev_err(&pdev->dev,\n+\t\t\t\"probe failed due to setup pf switch:%d\\n\", err);\n+\t\tgoto err_alloc_sw_unroll;\n+\t}\n \treturn 0;\n \n+err_alloc_sw_unroll:\n+\tset_bit(__ICE_DOWN, pf->state);\n+\tdevm_kfree(&pf->pdev->dev, pf->first_sw);\n err_msix_misc_unroll:\n \tice_free_irq_msix_misc(pf);\n err_init_interrupt_unroll:\n@@ -846,12 +1908,24 @@ static int ice_probe(struct pci_dev *pdev,\n static void ice_remove(struct pci_dev *pdev)\n {\n \tstruct ice_pf *pf = pci_get_drvdata(pdev);\n+\tint i = 0;\n+\tint err;\n \n \tif (!pf)\n \t\treturn;\n \n \tset_bit(__ICE_DOWN, pf->state);\n \n+\tfor (i = 0; i < pf->num_alloc_vsi; i++) {\n+\t\tif (!pf->vsi[i])\n+\t\t\tcontinue;\n+\n+\t\terr = ice_vsi_release(pf->vsi[i]);\n+\t\tif (err)\n+\t\t\tdev_dbg(&pf->pdev->dev, \"Failed to release VSI index %d (err %d)\\n\",\n+\t\t\t\ti, err);\n+\t}\n+\n \tice_free_irq_msix_misc(pf);\n \tice_clear_interrupt_scheme(pf);\n \tice_deinit_pf(pf);\n@@ -927,3 +2001,40 @@ static void __exit ice_module_exit(void)\n \tpr_info(\"module unloaded\\n\");\n }\n module_exit(ice_module_exit);\n+\n+/**\n+ * ice_vsi_release - Delete a VSI and free its resources\n+ * @vsi: the VSI 
being removed\n+ *\n+ * Returns 0 on success or < 0 on error\n+ */\n+static int ice_vsi_release(struct ice_vsi *vsi)\n+{\n+\tstruct ice_pf *pf;\n+\n+\tif (!vsi->back)\n+\t\treturn -ENODEV;\n+\tpf = vsi->back;\n+\n+\tif (vsi->netdev) {\n+\t\tunregister_netdev(vsi->netdev);\n+\t\tfree_netdev(vsi->netdev);\n+\t\tvsi->netdev = NULL;\n+\t}\n+\n+\t/* reclaim interrupt vectors back to PF */\n+\tice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);\n+\tpf->num_avail_msix += vsi->num_q_vectors;\n+\n+\tice_vsi_delete(vsi);\n+\tice_vsi_free_q_vectors(vsi);\n+\tice_vsi_clear_rings(vsi);\n+\n+\tice_vsi_put_qs(vsi);\n+\tpf->q_left_tx += vsi->alloc_txq;\n+\tpf->q_left_rx += vsi->alloc_rxq;\n+\n+\tice_vsi_clear(vsi);\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c\nindex 5824a1e57a17..e94bc30c20af 100644\n--- a/drivers/net/ethernet/intel/ice/ice_switch.c\n+++ b/drivers/net/ethernet/intel/ice/ice_switch.c\n@@ -64,6 +64,121 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,\n \treturn status;\n }\n \n+/**\n+ * ice_aq_add_vsi\n+ * @hw: pointer to the hw struct\n+ * @vsi_ctx: pointer to a VSI context struct\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Add a VSI context to the hardware (0x0210)\n+ */\n+enum ice_status\n+ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,\n+\t       struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_add_update_free_vsi_resp *res;\n+\tstruct ice_aqc_add_get_update_free_vsi *cmd;\n+\tenum ice_status status;\n+\tstruct ice_aq_desc desc;\n+\n+\tcmd = &desc.params.vsi_cmd;\n+\tres = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);\n+\n+\tif (!vsi_ctx->alloc_from_pool)\n+\t\tcmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |\n+\t\t\t\t\t   ICE_AQ_VSI_IS_VALID);\n+\n+\tcmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);\n+\n+\tdesc.flags |= 
cpu_to_le16(ICE_AQ_FLAG_RD);\n+\n+\tstatus = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,\n+\t\t\t\t sizeof(vsi_ctx->info), cd);\n+\n+\tif (!status) {\n+\t\tvsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;\n+\t\tvsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);\n+\t\tvsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);\n+\t}\n+\n+\treturn status;\n+}\n+\n+/**\n+ * ice_aq_update_vsi\n+ * @hw: pointer to the hw struct\n+ * @vsi_ctx: pointer to a VSI context struct\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Update VSI context in the hardware (0x0211)\n+ */\n+enum ice_status\n+ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,\n+\t\t  struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_add_update_free_vsi_resp *resp;\n+\tstruct ice_aqc_add_get_update_free_vsi *cmd;\n+\tstruct ice_aq_desc desc;\n+\tenum ice_status status;\n+\n+\tcmd = &desc.params.vsi_cmd;\n+\tresp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);\n+\n+\tcmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);\n+\n+\tdesc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);\n+\n+\tstatus = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,\n+\t\t\t\t sizeof(vsi_ctx->info), cd);\n+\n+\tif (!status) {\n+\t\tvsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);\n+\t\tvsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);\n+\t}\n+\n+\treturn status;\n+}\n+\n+/**\n+ * ice_aq_free_vsi\n+ * @hw: pointer to the hw struct\n+ * @vsi_ctx: pointer to a VSI context struct\n+ * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources\n+ * @cd: pointer to command details structure or NULL\n+ *\n+ * Get VSI context info from hardware (0x0213)\n+ */\n+enum ice_status\n+ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,\n+\t\tbool keep_vsi_alloc, struct ice_sq_cd *cd)\n+{\n+\tstruct ice_aqc_add_update_free_vsi_resp *resp;\n+\tstruct ice_aqc_add_get_update_free_vsi *cmd;\n+\tstruct 
ice_aq_desc desc;\n+\tenum ice_status status;\n+\n+\tcmd = &desc.params.vsi_cmd;\n+\tresp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;\n+\n+\tice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);\n+\n+\tcmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);\n+\tif (keep_vsi_alloc)\n+\t\tcmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);\n+\n+\tstatus = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);\n+\tif (!status) {\n+\t\tvsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);\n+\t\tvsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);\n+\t}\n+\n+\treturn status;\n+}\n+\n /* ice_init_port_info - Initialize port_info with switch configuration data\n  * @pi: pointer to port_info\n  * @vsi_port_num: VSI number or port number\ndiff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h\nindex 57d10e58e0b2..d04ff160df70 100644\n--- a/drivers/net/ethernet/intel/ice/ice_switch.h\n+++ b/drivers/net/ethernet/intel/ice/ice_switch.h\n@@ -23,6 +23,27 @@\n #define ICE_SW_CFG_MAX_BUF_LEN 2048\n #define ICE_DFLT_VSI_INVAL 0xff\n \n+/* VSI context structure for add/get/update/free operations */\n+struct ice_vsi_ctx {\n+\tu16 vsi_num;\n+\tu16 vsis_allocd;\n+\tu16 vsis_unallocated;\n+\tu16 flags;\n+\tstruct ice_aqc_vsi_props info;\n+\tbool alloc_from_pool;\n+};\n+\n+/* VSI related commands */\n+enum ice_status\n+ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,\n+\t       struct ice_sq_cd *cd);\n+enum ice_status\n+ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,\n+\t\t  struct ice_sq_cd *cd);\n+enum ice_status\n+ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,\n+\t\tbool keep_vsi_alloc, struct ice_sq_cd *cd);\n+\n enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);\n \n #endif /* _ICE_SWITCH_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h\nindex f1f872d3e09b..b5ec4fb88aa8 100644\n--- 
a/drivers/net/ethernet/intel/ice/ice_txrx.h\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h\n@@ -40,4 +40,30 @@ enum ice_dyn_idx_t {\n /* apply ITR HW granularity translation to program the HW registers */\n #define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))\n \n+/* descriptor ring, associated with a VSI */\n+struct ice_ring {\n+\tstruct ice_ring *next;\t\t/* pointer to next ring in q_vector */\n+\tstruct device *dev;\t\t/* Used for DMA mapping */\n+\tstruct net_device *netdev;\t/* netdev ring maps to */\n+\tstruct ice_vsi *vsi;\t\t/* Backreference to associated VSI */\n+\tstruct ice_q_vector *q_vector;\t/* Backreference to associated vector */\n+\tu16 q_index;\t\t\t/* Queue number of ring */\n+\tu16 count;\t\t\t/* Number of descriptors */\n+\tu16 reg_idx;\t\t\t/* HW register index of the ring */\n+\tbool ring_active;\t\t/* is ring online or not */\n+\tstruct rcu_head rcu;\t\t/* to avoid race on free */\n+} ____cacheline_internodealigned_in_smp;\n+\n+struct ice_ring_container {\n+\t/* array of pointers to rings */\n+\tstruct ice_ring *ring;\n+\tunsigned int total_bytes;\t/* total bytes processed this int */\n+\tunsigned int total_pkts;\t/* total packets processed this int */\n+\tu16 itr;\n+};\n+\n+/* iterator for handling rings in ring container */\n+#define ice_for_each_ring(pos, head) \\\n+\tfor (pos = (head).ring; pos; pos = pos->next)\n+\n #endif /* _ICE_TXRX_H_ */\ndiff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h\nindex 1ae6fa5b59ba..8bfd9ff7edda 100644\n--- a/drivers/net/ethernet/intel/ice/ice_type.h\n+++ b/drivers/net/ethernet/intel/ice/ice_type.h\n@@ -69,6 +69,10 @@ enum ice_media_type {\n \tICE_MEDIA_DA,\n };\n \n+enum ice_vsi_type {\n+\tICE_VSI_PF = 0,\n+};\n+\n struct ice_link_status {\n \t/* Refer to ice_aq_phy_type for bits definition */\n \tu64 phy_type_low;\n",
    "prefixes": [
        "v3",
        "07/15"
    ]
}