From patchwork Fri Apr 15 10:39:44 2016
X-Patchwork-Submitter: mchristi@redhat.com
X-Patchwork-Id: 610891
From: mchristi@redhat.com
To: linux-f2fs-devel@lists.sourceforge.net, linux-ext4@vger.kernel.org,
	konrad.wilk@oracle.com, drbd-dev@lists.linbit.com,
	philipp.reisner@linbit.com, lars.ellenberg@linbit.com,
	linux-raid@vger.kernel.org, dm-devel@redhat.com,
	linux-fsdevel@vger.kernel.org, linux-bcache@vger.kernel.org,
	linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-scsi@vger.kernel.org, linux-mtd@lists.infradead.org,
	target-devel@vger.kernel.org, linux-btrfs@vger.kernel.org,
	osd-dev@open-osd.org, xfs@oss.sgi.com, ocfs2-devel@oss.oracle.com
Cc: Mike Christie
Subject: [PATCH 24/42] xen: set bi_op to REQ_OP
Date: Fri, 15 Apr 2016 05:39:44 -0500
Message-Id: <1460716802-2294-25-git-send-email-mchristi@redhat.com>
In-Reply-To: <1460716802-2294-1-git-send-email-mchristi@redhat.com>
References: <1460716802-2294-1-git-send-email-mchristi@redhat.com>

From: Mike Christie

This patch has the xen block driver use bio->bi_op for REQ_OPs and
bio->bi_rw for rq_flag_bits.
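To illustrate the convention the series moves to, here is a minimal sketch,
assuming a tree with this series applied (it is the series that adds the
bi_op field to struct bio); the helper name setup_bio_op below is made up
purely for illustration and is not part of this patch:

	#include <linux/bio.h>
	#include <linux/fs.h>

	/*
	 * Illustrative sketch only: with this series the operation
	 * (REQ_OP_*) is carried in bio->bi_op, while rq_flag_bits style
	 * hints such as WRITE_ODIRECT or WRITE_FLUSH stay in bio->bi_rw,
	 * instead of both being packed into bio->bi_rw.
	 */
	static void setup_bio_op(struct bio *bio, int op, int op_flags)
	{
		bio->bi_op = op;	/* e.g. REQ_OP_READ, REQ_OP_WRITE */
		bio->bi_rw = op_flags;	/* e.g. WRITE_ODIRECT, WRITE_FLUSH */
	}

In dispatch_rw_block_io() below the same split shows up as the separate
operation and operation_flags variables.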
Signed-off-by: Mike Christie
Reviewed-by: Christoph Hellwig
Reviewed-by: Hannes Reinecke
---
 drivers/block/xen-blkback/blkback.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 79fe493..854ecca 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -501,7 +501,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
 	struct xen_vbd *vbd = &blkif->vbd;
 	int rc = -EACCES;
 
-	if ((operation != READ) && vbd->readonly)
+	if ((operation != REQ_OP_READ) && vbd->readonly)
 		goto out;
 
 	if (likely(req->nr_sects)) {
@@ -1014,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
 	preq.sector_number = req->u.discard.sector_number;
 	preq.nr_sects = req->u.discard.nr_sectors;
 
-	err = xen_vbd_translate(&preq, blkif, WRITE);
+	err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
 	if (err) {
 		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
 			preq.sector_number,
@@ -1229,6 +1229,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	struct bio **biolist = pending_req->biolist;
 	int i, nbio = 0;
 	int operation;
+	int operation_flags = 0;
 	struct blk_plug plug;
 	bool drain = false;
 	struct grant_page **pages = pending_req->segments;
@@ -1247,17 +1248,19 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	switch (req_operation) {
 	case BLKIF_OP_READ:
 		ring->st_rd_req++;
-		operation = READ;
+		operation = REQ_OP_READ;
 		break;
 	case BLKIF_OP_WRITE:
 		ring->st_wr_req++;
-		operation = WRITE_ODIRECT;
+		operation = REQ_OP_WRITE;
+		operation_flags = WRITE_ODIRECT;
 		break;
 	case BLKIF_OP_WRITE_BARRIER:
 		drain = true;
 	case BLKIF_OP_FLUSH_DISKCACHE:
 		ring->st_f_req++;
-		operation = WRITE_FLUSH;
+		operation = REQ_OP_WRITE;
+		operation_flags = WRITE_FLUSH;
 		break;
 	default:
 		operation = 0; /* make gcc happy */
@@ -1269,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	nseg = req->operation == BLKIF_OP_INDIRECT ?
 	       req->u.indirect.nr_segments : req->u.rw.nr_segments;
 
-	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
+	if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
 	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1310,7 +1313,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
 	if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
 		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
-			 operation == READ ? "read" : "write",
+			 operation == REQ_OP_READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 ring->blkif->vbd.pdevice);
@@ -1369,7 +1372,8 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 			bio->bi_private = pending_req;
 			bio->bi_end_io = end_block_io_op;
 			bio->bi_iter.bi_sector = preq.sector_number;
-			bio->bi_rw = operation;
+			bio->bi_op = operation;
+			bio->bi_rw = operation_flags;
 		}
 
 		preq.sector_number += seg[i].nsec;
@@ -1377,7 +1381,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
 	/* This will be hit if the operation was a flush or discard. */
 	if (!bio) {
-		BUG_ON(operation != WRITE_FLUSH);
+		BUG_ON(operation_flags != WRITE_FLUSH);
 
 		bio = bio_alloc(GFP_KERNEL, 0);
 		if (unlikely(bio == NULL))
@@ -1387,7 +1391,8 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 		bio->bi_bdev = preq.bdev;
 		bio->bi_private = pending_req;
 		bio->bi_end_io = end_block_io_op;
-		bio->bi_rw = operation;
+		bio->bi_op = operation;
+		bio->bi_rw = operation_flags;
 	}
 
 	atomic_set(&pending_req->pendcnt, nbio);
@@ -1399,9 +1404,9 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	/* Let the I/Os go.. */
 	blk_finish_plug(&plug);
 
-	if (operation == READ)
+	if (operation == REQ_OP_READ)
 		ring->st_rd_sect += preq.nr_sects;
-	else if (operation & WRITE)
+	else if (operation == REQ_OP_WRITE)
 		ring->st_wr_sect += preq.nr_sects;
 
 	return 0;