[Xenial] blk-mq: really fix plug list flushing for nomerge queues

Message ID 20170118103142.24265-1-cascardo@canonical.com
State New
Headers show

Commit Message

Thadeu Lima de Souza Cascardo Jan. 18, 2017, 10:31 a.m. UTC
From: Omar Sandoval <osandov@fb.com>

BugLink: https://bugs.launchpad.net/bugs/1657281

Commit 0809e3ac6231 ("block: fix plug list flushing for nomerge queues")
updated blk_mq_make_request() to set request_count even when
blk_queue_nomerges() returns true. However, blk_mq_make_request() only
does limited plugging and doesn't use request_count;
blk_sq_make_request() is the one that should have been fixed. Do that
and get rid of the unnecessary work in the mq version.

Fixes: 0809e3ac6231 ("block: fix plug list flushing for nomerge queues")
Signed-off-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 87c279e613f848c691111b29d49de8df3f4f56da)
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
---
 block/blk-mq.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
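
For context: request_count matters in the single-queue path because that is
where it gates flushing of the plug list.  Roughly, and only as a simplified
sketch of the blk_sq_make_request() plugging logic in this kernel series (not
the verbatim Xenial source), the tail of the function does:

	plug = current->plug;
	if (plug) {
		blk_mq_bio_to_request(rq, bio);
		if (!request_count)
			trace_block_plug(q);

		/*
		 * Flush the plug once it already holds BLK_MAX_REQUEST_COUNT
		 * requests.  For blk_queue_nomerges() queues the merge attempt
		 * earlier in the function is skipped and never updates
		 * request_count, so it has to be refreshed from
		 * blk_plug_queued_count(); otherwise this check never fires
		 * from this path.
		 */
		if (request_count >= BLK_MAX_REQUEST_COUNT) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}
		list_add_tail(&rq->queuelist, &plug->mq_list);
		return cookie;
	}

The multi-queue path, by contrast, only does limited plugging and never reads
request_count, which is why the work removed from blk_mq_make_request() below
was unnecessary.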

Comments

Luis Henriques Jan. 18, 2017, 10:47 a.m. UTC | #1
On Wed, Jan 18, 2017 at 08:31:42AM -0200, Thadeu Lima de Souza Cascardo wrote:
> From: Omar Sandoval <osandov@fb.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1657281

Clean cherry-pick, probably upstream stable material.  And it seems easy to
test.

Acked-by: Luis Henriques <luis.henriques@canonical.com>

Cheers,
--
Luís

Stefan Bader Jan. 18, 2017, 11:14 a.m. UTC | #2

Thadeu Lima de Souza Cascardo Jan. 18, 2017, 1:56 p.m. UTC | #3
Applied to xenial master-next.

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index ec732f5..1b7a905 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1304,12 +1304,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q)) {
-		if (blk_attempt_plug_merge(q, bio, &request_count,
-					   &same_queue_rq))
-			return BLK_QC_T_NONE;
-	} else
-		request_count = blk_plug_queued_count(q);
+	if (!is_flush_fua && !blk_queue_nomerges(q) &&
+	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+		return BLK_QC_T_NONE;
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
@@ -1400,9 +1397,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return BLK_QC_T_NONE;
+	if (!is_flush_fua && !blk_queue_nomerges(q)) {
+		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+			return BLK_QC_T_NONE;
+	} else
+		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))