Patch Detail

GET: Show a patch.
PATCH: Update a patch.
PUT: Update a patch.
GET /api/patches/1122934/?format=api
{ "id": 1122934, "url": "http://patchwork.ozlabs.org/api/patches/1122934/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190626092027.52845-5-anthony.l.nguyen@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20190626092027.52845-5-anthony.l.nguyen@intel.com>", "list_archive_url": null, "date": "2019-06-26T09:20:16", "name": "[S22,05/16] ice: separate out control queue lock creation", "commit_ref": null, "pull_url": null, "state": "accepted", "archived": false, "hash": "b846dffcdf5ac582acdc5bbbef788562cda5e0fb", "submitter": { "id": 68875, "url": "http://patchwork.ozlabs.org/api/people/68875/?format=api", "name": "Tony Nguyen", "email": "anthony.l.nguyen@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190626092027.52845-5-anthony.l.nguyen@intel.com/mbox/", "series": [ { "id": 116295, "url": "http://patchwork.ozlabs.org/api/series/116295/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=116295", "date": "2019-06-26T09:20:14", "name": "[S22,01/16] ice: add lp_advertising flow control support", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/116295/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1122934/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1122934/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.138; helo=whitealder.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 45Yr912ZzGz9s4V\n\tfor <incoming@patchwork.ozlabs.org>;\n\tThu, 27 Jun 2019 03:48:05 +1000 (AEST)", "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id CE3BC8654D;\n\tWed, 26 Jun 2019 17:48:03 +0000 (UTC)", "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id HFbRrWgM0f0u; Wed, 26 Jun 2019 17:47:59 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 76FDC86A48;\n\tWed, 26 Jun 2019 17:47:58 +0000 (UTC)", "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id D93A01BF326\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 26 Jun 2019 17:47:54 +0000 (UTC)", "from localhost (localhost 
[127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id D1E34843BB\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 26 Jun 2019 17:47:54 +0000 (UTC)", "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id cWbu8XqYmGI3 for <intel-wired-lan@lists.osuosl.org>;\n\tWed, 26 Jun 2019 17:47:53 +0000 (UTC)", "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id 5630086A0B\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 26 Jun 2019 17:47:53 +0000 (UTC)", "from fmsmga001.fm.intel.com ([10.253.24.23])\n\tby orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t26 Jun 2019 10:47:52 -0700", "from unknown (HELO localhost.jf.intel.com) ([10.166.244.174])\n\tby fmsmga001.fm.intel.com with ESMTP; 26 Jun 2019 10:47:51 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.63,420,1557212400\"; d=\"scan'208\";a=\"183218120\"", "From": "Tony Nguyen <anthony.l.nguyen@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Wed, 26 Jun 2019 02:20:16 -0700", "Message-Id": "<20190626092027.52845-5-anthony.l.nguyen@intel.com>", "X-Mailer": "git-send-email 2.20.1", "In-Reply-To": "<20190626092027.52845-1-anthony.l.nguyen@intel.com>", "References": "<20190626092027.52845-1-anthony.l.nguyen@intel.com>", "MIME-Version": "1.0", "Subject": "[Intel-wired-lan] [PATCH S22 05/16] ice: separate out control queue\n\tlock creation", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Jacob Keller <jacob.e.keller@intel.com>\n\nThe ice_init_all_ctrlq and ice_shutdown_all_ctrlq functions create and\ndestroy the locks used to protect the send and receive process of each\ncontrol queue.\n\nThis is problematic, as the driver may use these functions to shutdown\nand re-initialize the control queues at run time. For example, it may do\nthis in response to a device reset.\n\nIf the driver failed to recover from a reset, it might leave the control\nqueues offline. 
In this case, the locks will no longer be initialized.\nA later call to ice_sq_send_cmd will then attempt to acquire a lock that\nhas been destroyed.\n\nIt is incorrect behavior to access a lock that has been destroyed.\n\nIndeed, ice_aq_send_cmd already tries to avoid accessing an offline\ncontrol queue, but the check occurs inside the lock.\n\nThe root of the problem is that the locks are destroyed at run time.\n\nModify ice_init_all_ctrlq and ice_shutdown_all_ctrlq such that they no\nlonger create or destroy the locks.\n\nIntroduce new functions, ice_create_all_ctrlq and ice_destroy_all_ctrlq.\nCall these functions in ice_init_hw and ice_deinit_hw.\n\nNow, the control queue locks will remain valid for the life of the\ndriver, and will not be destroyed until the driver unloads.\n\nThis also allows removing a duplicate check of the sq.count and\nrq.count values when shutting down the controlqs. The ice_shutdown_ctrlq\nfunction already checks this value under the lock. Previously\ncommit dec64ff10ed9 (\"ice: use [sr]q.count when checking if queue is\ninitialized\") needed this check to happen outside the lock, because it\nprevented duplicate attempts at destroying the locks.\n\nThe driver may now safely use ice_init_all_ctrlq and\nice_shutdown_all_ctrlq while handling reset events, without causing the\nlocks to be invalid.\n\nSigned-off-by: Jacob Keller <jacob.e.keller@intel.com>\nSigned-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>\n---\n drivers/net/ethernet/intel/ice/ice_common.c | 6 +-\n drivers/net/ethernet/intel/ice/ice_common.h | 2 +\n drivers/net/ethernet/intel/ice/ice_controlq.c | 112 ++++++++++++++----\n 3 files changed, 91 insertions(+), 29 deletions(-)", "diff": "diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c\nindex 01e5ecaaa322..5f9dc76699d2 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.c\n+++ b/drivers/net/ethernet/intel/ice/ice_common.c\n@@ -740,7 +740,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)\n \n \tice_get_itr_intrl_gran(hw);\n \n-\tstatus = ice_init_all_ctrlq(hw);\n+\tstatus = ice_create_all_ctrlq(hw);\n \tif (status)\n \t\tgoto err_unroll_cqinit;\n \n@@ -855,7 +855,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)\n err_unroll_alloc:\n \tdevm_kfree(ice_hw_to_dev(hw), hw->port_info);\n err_unroll_cqinit:\n-\tice_shutdown_all_ctrlq(hw);\n+\tice_destroy_all_ctrlq(hw);\n \treturn status;\n }\n \n@@ -881,7 +881,7 @@ void ice_deinit_hw(struct ice_hw *hw)\n \n \t/* Attempt to disable FW logging before shutting down control queues */\n \tice_cfg_fw_log(hw, false);\n-\tice_shutdown_all_ctrlq(hw);\n+\tice_destroy_all_ctrlq(hw);\n \n \t/* Clear VSI contexts if not already cleared */\n \tice_clear_all_vsi_ctx(hw);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h\nindex 68218e63afc2..e376d1eadba4 100644\n--- a/drivers/net/ethernet/intel/ice/ice_common.h\n+++ b/drivers/net/ethernet/intel/ice/ice_common.h\n@@ -17,8 +17,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw);\n void ice_deinit_hw(struct ice_hw *hw);\n enum ice_status ice_check_reset(struct ice_hw *hw);\n enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);\n+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);\n enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);\n void ice_shutdown_all_ctrlq(struct ice_hw *hw);\n+void ice_destroy_all_ctrlq(struct ice_hw *hw);\n enum ice_status\n ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,\n \t\t struct 
ice_rq_event_info *e, u16 *pending);\ndiff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c\nindex e91ac4df0242..2353166c654e 100644\n--- a/drivers/net/ethernet/intel/ice/ice_controlq.c\n+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c\n@@ -310,7 +310,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n * @cq: pointer to the specific Control queue\n *\n * This is the main initialization routine for the Control Send Queue\n- * Prior to calling this function, drivers *MUST* set the following fields\n+ * Prior to calling this function, the driver *MUST* set the following fields\n * in the cq->structure:\n * - cq->num_sq_entries\n * - cq->sq_buf_size\n@@ -369,7 +369,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n * @cq: pointer to the specific Control queue\n *\n * The main initialization routine for the Admin Receive (Event) Queue.\n- * Prior to calling this function, drivers *MUST* set the following fields\n+ * Prior to calling this function, the driver *MUST* set the following fields\n * in the cq->structure:\n * - cq->num_rq_entries\n * - cq->rq_buf_size\n@@ -569,14 +569,8 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)\n \treturn 0;\n \n init_ctrlq_free_rq:\n-\tif (cq->rq.count) {\n-\t\tice_shutdown_rq(hw, cq);\n-\t\tmutex_destroy(&cq->rq_lock);\n-\t}\n-\tif (cq->sq.count) {\n-\t\tice_shutdown_sq(hw, cq);\n-\t\tmutex_destroy(&cq->sq_lock);\n-\t}\n+\tice_shutdown_rq(hw, cq);\n+\tice_shutdown_sq(hw, cq);\n \treturn status;\n }\n \n@@ -585,12 +579,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)\n * @hw: pointer to the hardware structure\n * @q_type: specific Control queue type\n *\n- * Prior to calling this function, drivers *MUST* set the following fields\n+ * Prior to calling this function, the driver *MUST* set the following fields\n * in the cq->structure:\n * - cq->num_sq_entries\n * - cq->num_rq_entries\n * - cq->rq_buf_size\n * - cq->sq_buf_size\n+ *\n+ * NOTE: this function does not initialize the controlq locks\n */\n static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)\n {\n@@ -616,8 +612,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)\n \t !cq->rq_buf_size || !cq->sq_buf_size) {\n \t\treturn ICE_ERR_CFG;\n \t}\n-\tmutex_init(&cq->sq_lock);\n-\tmutex_init(&cq->rq_lock);\n \n \t/* setup SQ command write back timeout */\n \tcq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;\n@@ -625,7 +619,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)\n \t/* allocate the ATQ */\n \tret_code = ice_init_sq(hw, cq);\n \tif (ret_code)\n-\t\tgoto init_ctrlq_destroy_locks;\n+\t\treturn ret_code;\n \n \t/* allocate the ARQ */\n \tret_code = ice_init_rq(hw, cq);\n@@ -637,9 +631,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)\n \n init_ctrlq_free_sq:\n \tice_shutdown_sq(hw, cq);\n-init_ctrlq_destroy_locks:\n-\tmutex_destroy(&cq->sq_lock);\n-\tmutex_destroy(&cq->rq_lock);\n \treturn ret_code;\n }\n \n@@ -647,12 +638,14 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)\n * ice_init_all_ctrlq - main initialization routine for all control queues\n * @hw: pointer to the hardware structure\n *\n- * Prior to calling this function, drivers *MUST* set the following fields\n+ * Prior to calling this function, the driver MUST* set the following fields\n * in the cq->structure for all control queues:\n * 
- cq->num_sq_entries\n * - cq->num_rq_entries\n * - cq->rq_buf_size\n * - cq->sq_buf_size\n+ *\n+ * NOTE: this function does not initialize the controlq locks.\n */\n enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)\n {\n@@ -671,10 +664,48 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)\n \treturn ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);\n }\n \n+/**\n+ * ice_init_ctrlq_locks - Initialize locks for a control queue\n+ * @cq: pointer to the control queue\n+ *\n+ * Initializes the send and receive queue locks for a given control queue.\n+ */\n+static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)\n+{\n+\tmutex_init(&cq->sq_lock);\n+\tmutex_init(&cq->rq_lock);\n+}\n+\n+/**\n+ * ice_create_all_ctrlq - main initialization routine for all control queues\n+ * @hw: pointer to the hardware structure\n+ *\n+ * Prior to calling this function, the driver *MUST* set the following fields\n+ * in the cq->structure for all control queues:\n+ * - cq->num_sq_entries\n+ * - cq->num_rq_entries\n+ * - cq->rq_buf_size\n+ * - cq->sq_buf_size\n+ *\n+ * This function creates all the control queue locks and then calls\n+ * ice_init_all_ctrlq. It should be called once during driver load. If the\n+ * driver needs to re-initialize control queues at run time it should call\n+ * ice_init_all_ctrlq instead.\n+ */\n+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)\n+{\n+\tice_init_ctrlq_locks(&hw->adminq);\n+\tice_init_ctrlq_locks(&hw->mailboxq);\n+\n+\treturn ice_init_all_ctrlq(hw);\n+}\n+\n /**\n * ice_shutdown_ctrlq - shutdown routine for any control queue\n * @hw: pointer to the hardware structure\n * @q_type: specific Control queue type\n+ *\n+ * NOTE: this function does not destroy the control queue locks.\n */\n static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)\n {\n@@ -693,19 +724,17 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)\n \t\treturn;\n \t}\n \n-\tif (cq->sq.count) {\n-\t\tice_shutdown_sq(hw, cq);\n-\t\tmutex_destroy(&cq->sq_lock);\n-\t}\n-\tif (cq->rq.count) {\n-\t\tice_shutdown_rq(hw, cq);\n-\t\tmutex_destroy(&cq->rq_lock);\n-\t}\n+\tice_shutdown_sq(hw, cq);\n+\tice_shutdown_rq(hw, cq);\n }\n \n /**\n * ice_shutdown_all_ctrlq - shutdown routine for all control queues\n * @hw: pointer to the hardware structure\n+ *\n+ * NOTE: this function does not destroy the control queue locks. The driver\n+ * may call this at runtime to shutdown and later restart control queues, such\n+ * as in response to a reset event.\n */\n void ice_shutdown_all_ctrlq(struct ice_hw *hw)\n {\n@@ -715,6 +744,37 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)\n \tice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);\n }\n \n+/**\n+ * ice_destroy_ctrlq_locks - Destroy locks for a control queue\n+ * @cq: pointer to the control queue\n+ *\n+ * Destroys the send and receive queue locks for a given control queue.\n+ */\n+static void\n+ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)\n+{\n+\tmutex_destroy(&cq->sq_lock);\n+\tmutex_destroy(&cq->rq_lock);\n+}\n+\n+/**\n+ * ice_destroy_all_ctrlq - exit routine for all control queues\n+ * @hw: pointer to the hardware structure\n+ *\n+ * This function shuts down all the control queues and then destroys the\n+ * control queue locks. It should be called once during driver unload. 
The\n+ * driver should call ice_shutdown_all_ctrlq if it needs to shut down and\n+ * reinitialize control queues, such as in response to a reset event.\n+ */\n+void ice_destroy_all_ctrlq(struct ice_hw *hw)\n+{\n+\t/* shut down all the control queues first */\n+\tice_shutdown_all_ctrlq(hw);\n+\n+\tice_destroy_ctrlq_locks(&hw->adminq);\n+\tice_destroy_ctrlq_locks(&hw->mailboxq);\n+}\n+\n /**\n * ice_clean_sq - cleans Admin send queue (ATQ)\n * @hw: pointer to the hardware structure\n", "prefixes": [ "S22", "05/16" ] }
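The PATCH and PUT methods listed above require an authenticated request, whereas the GET shown above can typically be made anonymously on public projects. Below is a minimal sketch of updating this patch, assuming the Python requests library and a Patchwork API token with maintainer rights on the project; the token value and the submitted field values are placeholders, and the field names ("state", "archived") are taken from the response above.

# Minimal sketch (not a canonical client): update a patch via the
# Patchwork REST API using token authentication. The token below is a
# placeholder; a real token comes from the user's Patchwork profile.
import requests

PATCH_URL = "http://patchwork.ozlabs.org/api/patches/1122934/"
TOKEN = "0000000000000000000000000000000000000000"  # placeholder token

response = requests.patch(
    PATCH_URL,
    headers={"Authorization": f"Token {TOKEN}"},
    # Only send the fields being changed; unknown or read-only fields
    # are rejected or ignored by the API.
    json={"state": "accepted", "archived": False},
)
response.raise_for_status()
print(response.json()["state"], response.json()["archived"])

With an invalid or missing token the request fails with a 401/403 and raise_for_status() raises, which is the expected behavior for this sketch.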