[xenial:linux-azure,09/15] blk-mq: Rename blk_mq_request_direct_issue() into blk_mq_request_issue_directly()

Message ID 20191127201820.32174-10-marcelo.cerri@canonical.com
State New
Series Bug #1848739 - [linux-azure] Patch to prevent possible data corruption

Commit Message

Marcelo Henrique Cerri Nov. 27, 2019, 8:18 p.m. UTC
From: Bart Van Assche <bart.vanassche@wdc.com>

BugLink: https://bugs.launchpad.net/bugs/1848739

Most blk-mq functions have a name that follows the pattern blk_mq_${action}.
However, the function name blk_mq_request_direct_issue is an exception.
Hence rename this function. This patch does not change any functionality.
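
For reference, a minimal sketch of how a caller is expected to use the renamed
helper, mirroring the blk_insert_cloned_request() hunk below; the wrapper name
issue_cloned_directly() is hypothetical and only for illustration:

static blk_status_t issue_cloned_directly(struct request *rq)
{
	/*
	 * Dispatch the request straight to the driver, bypassing any
	 * I/O scheduler on the bottom device.
	 */
	return blk_mq_request_issue_directly(rq);
}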

Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit c77ff7fd03ddca8face268c4cf093c0edf4bcf1f)
Signed-off-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
---
 block/blk-core.c | 2 +-
 block/blk-mq.c   | 4 ++--
 block/blk-mq.h   | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index 30a3cb2eca5a..f885b65324c2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2508,7 +2508,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 		 * bypass a potential scheduler on the bottom device for
 		 * insert.
 		 */
-		return blk_mq_request_direct_issue(rq);
+		return blk_mq_request_issue_directly(rq);
 	}
 
 	spin_lock_irqsave(q->queue_lock, flags);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e035215b1546..3145221ca824 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1751,7 +1751,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	 * RCU or SRCU read lock is needed before checking quiesced flag.
 	 *
 	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
-	 * blk_mq_request_direct_issue(), and return BLK_STS_OK to caller,
+	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
 	 * and avoid driver to try to dispatch again.
 	 */
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
@@ -1799,7 +1799,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	hctx_unlock(hctx, srcu_idx);
 }
 
-blk_status_t blk_mq_request_direct_issue(struct request *rq)
+blk_status_t blk_mq_request_issue_directly(struct request *rq)
 {
 	blk_status_t ret;
 	int srcu_idx;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 0daa9f2c3d61..c11c627ebd6d 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -61,7 +61,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 				struct list_head *list);
 
 /* Used by blk_insert_cloned_request() to issue request directly */
-blk_status_t blk_mq_request_direct_issue(struct request *rq);
+blk_status_t blk_mq_request_issue_directly(struct request *rq);
 
 /*
  * CPU -> queue mappings