Patchwork Consolidate reads and writes in nbd block device into one common routine

login
register
mail settings
Submitter Michael Tokarev
Date Feb. 28, 2012, 10:24 a.m.
Message ID <20120228102841.47ADF162@gandalf.tls.msk.ru>
Download mbox | patch
Permalink /patch/143392/
State New
Headers show

Comments

Michael Tokarev - Feb. 28, 2012, 10:24 a.m.
This removes quite some duplicated code.

Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
---
 block/nbd.c |   94 +++++++++++++++++++----------------------------------------
 1 files changed, 30 insertions(+), 64 deletions(-)

Patch

diff --git a/block/nbd.c b/block/nbd.c
index 161b299..82f2964 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -320,91 +320,57 @@  static int nbd_open(BlockDriverState *bs, const char* filename, int flags)
     return result;
 }
 
-static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
-                          int nb_sectors, QEMUIOVector *qiov,
-                          int offset)
-{
-    BDRVNBDState *s = bs->opaque;
-    struct nbd_request request;
-    struct nbd_reply reply;
-
-    request.type = NBD_CMD_READ;
-    request.from = sector_num * 512;
-    request.len = nb_sectors * 512;
-
-    nbd_coroutine_start(s, &request);
-    if (nbd_co_send_request(s, &request, NULL, 0) == -1) {
-        reply.error = errno;
-    } else {
-        nbd_co_receive_reply(s, &request, &reply, qiov->iov, offset);
-    }
-    nbd_coroutine_end(s, &request);
-    return -reply.error;
-
-}
+/* qemu-nbd has a limit of slightly less than 1M per request.  Try to
+ * remain aligned to 4K. */
+#define NBD_MAX_SECTORS 2040
 
-static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
-                           int nb_sectors, QEMUIOVector *qiov,
-                           int offset)
+static int nbd_co_rwv(BlockDriverState *bs, int64_t sector_num,
+                      int nb_sectors, QEMUIOVector *qiov, int iswrite)
 {
     BDRVNBDState *s = bs->opaque;
     struct nbd_request request;
     struct nbd_reply reply;
+    int offset = 0;
 
-    request.type = NBD_CMD_WRITE;
-    if (!bdrv_enable_write_cache(bs) && (s->nbdflags & NBD_FLAG_SEND_FUA)) {
+    request.type = iswrite ? NBD_CMD_WRITE : NBD_CMD_READ;
+    if (iswrite && !bdrv_enable_write_cache(bs) && (s->nbdflags & NBD_FLAG_SEND_FUA)) {
         request.type |= NBD_CMD_FLAG_FUA;
     }
 
-    request.from = sector_num * 512;
-    request.len = nb_sectors * 512;
+    /* we split the request into pieces of at most NBD_MAX_SECTORS size
+     * and process them in a loop... */
+    for (;;) {
+        request.from = sector_num * 512;
+        request.len = MIN(nb_sectors, NBD_MAX_SECTORS) * 512;
+
+        nbd_coroutine_start(s, &request);
+        if (nbd_co_send_request(s, &request, iswrite ? qiov->iov : NULL, offset) == -1) {
+            reply.error = errno;
+        } else {
+            nbd_co_receive_reply(s, &request, &reply, iswrite ? NULL : qiov->iov, offset);
+        }
+        nbd_coroutine_end(s, &request);
 
-    nbd_coroutine_start(s, &request);
-    if (nbd_co_send_request(s, &request, qiov->iov, offset) == -1) {
-        reply.error = errno;
-    } else {
-        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
+        offset += NBD_MAX_SECTORS * 512;
+        sector_num += NBD_MAX_SECTORS;
+        nb_sectors -= NBD_MAX_SECTORS;
+        if (reply.error != 0 || nb_sectors <= 0) {
+            /* ...until we hit an error or there's nothing more to process */
+            return -reply.error;
+        }
     }
-    nbd_coroutine_end(s, &request);
-    return -reply.error;
 }
 
-/* qemu-nbd has a limit of slightly less than 1M per request.  Try to
- * remain aligned to 4K. */
-#define NBD_MAX_SECTORS 2040
-
 static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
                         int nb_sectors, QEMUIOVector *qiov)
 {
-    int offset = 0;
-    int ret;
-    while (nb_sectors > NBD_MAX_SECTORS) {
-        ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
-        if (ret < 0) {
-            return ret;
-        }
-        offset += NBD_MAX_SECTORS * 512;
-        sector_num += NBD_MAX_SECTORS;
-        nb_sectors -= NBD_MAX_SECTORS;
-    }
-    return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
+    return nbd_co_rwv(bs, sector_num, nb_sectors, qiov, 0);
 }
 
 static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors, QEMUIOVector *qiov)
 {
-    int offset = 0;
-    int ret;
-    while (nb_sectors > NBD_MAX_SECTORS) {
-        ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
-        if (ret < 0) {
-            return ret;
-        }
-        offset += NBD_MAX_SECTORS * 512;
-        sector_num += NBD_MAX_SECTORS;
-        nb_sectors -= NBD_MAX_SECTORS;
-    }
-    return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset);
+    return nbd_co_rwv(bs, sector_num, nb_sectors, qiov, 1);
 }
 
 static int nbd_co_flush(BlockDriverState *bs)