[v5,3/9] block: Add VFIO based NVMe driver

Message ID 20180112085555.14447-4-famz@redhat.com
State New
Series
  • block: Add VFIO based driver for NVMe device

Commit Message

Fam Zheng Jan. 12, 2018, 8:55 a.m.
This is a new protocol driver that exclusively opens a host NVMe
controller through VFIO. It achieves better latency than linux-aio by
completely bypassing the host kernel vfs/block layer.

    $rw-$bs-$iodepth  linux-aio     nvme://
    ----------------------------------------
    randread-4k-1     10.5k         21.6k
    randread-512k-1   745           1591
    randwrite-4k-1    30.7k         37.0k
    randwrite-512k-1  1945          1980

    (unit: IOPS)

The driver also integrates with the iothread's polling mechanism.
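
For reference, a drive using this protocol could be attached roughly as
follows (an illustrative sketch only: 0000:44:00.0/1 is the example address
from nvme_parse_filename() below, the id= names are made up, and the
controller must already be bound to vfio-pci):

    -drive file=nvme://0000:44:00.0/1,if=none,id=nvme0 \
    -device virtio-blk-pci,drive=nvme0

The nvme://<PCI address>/<namespace> syntax is documented in
nvme_parse_filename() in the patch.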

This patch is co-authored by Paolo and me.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
Message-Id: <20180110091846.10699-4-famz@redhat.com>
---
 MAINTAINERS         |    6 +
 block/Makefile.objs |    1 +
 block/nvme.c        | 1163 +++++++++++++++++++++++++++++++++++++++++++++++++++
 block/trace-events  |   21 +
 4 files changed, 1191 insertions(+)
 create mode 100644 block/nvme.c

Comments

Stefan Hajnoczi Jan. 12, 2018, 9:44 a.m. | #1
On Fri, Jan 12, 2018 at 04:55:49PM +0800, Fam Zheng wrote:
> +    if (progress) {
> +        /* Notify the device so it can post more completions. */
> +        smp_mb_release();
> +        *q->cq.doorbell = cpu_to_le32(q->cq.head);
> +        if (!qemu_co_queue_empty(&q->free_req_queue)) {
> +            aio_bh_schedule_oneshot(s->aio_context, nvme_free_req_queue_cb, q);
> +        }

This is not thread-safe because the queue producer does:

1   qemu_mutex_unlock(&q->lock);
2   qemu_co_queue_wait(&q->free_req_queue, NULL);
3   qemu_mutex_lock(&q->lock);

We fail to call nvme_free_req_queue_cb() when the if
(!qemu_co_queue_empty(&q->free_req_queue)) check runs after 1 but before 2.
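
To spell out the lost wakeup (a hypothetical interleaving, names as in the
patch):

    producer: 1  qemu_mutex_unlock(&q->lock);
    consumer:    completes requests, sees qemu_co_queue_empty() == true,
                 so nvme_free_req_queue_cb() is never scheduled
    producer: 2  qemu_co_queue_wait(&q->free_req_queue, NULL);  /* sleeps forever */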

This is only an issue if one thread runs the queue producer and another
runs the consumer code.  I don't know if that will ever be the case,
even with multiqueue, but I wanted to point this out so you and Paolo
can decide.

> +static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
> +                          Error **errp)
> +{
> +    const char *device;
> +    QemuOpts *opts;
> +    int namespace;
> +
> +    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
> +    qemu_opts_absorb_qdict(opts, options, &error_abort);
> +    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
> +    if (!device) {
> +        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
> +        qemu_opts_del(opts);
> +        return -EINVAL;
> +    }
> +
> +    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
> +    nvme_init(bs, device, namespace, errp);
> +
> +    qemu_opts_del(opts);
> +    bs->supported_write_flags = BDRV_REQ_FUA;
> +    if (nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE), errp)) {
> +        return -EINVAL;

Everything allocated in nvme_init() is leaked.
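
One possible shape for the error path (just an untested sketch of the tail of
nvme_file_open(), assuming an added int ret local) would be to check
nvme_init()'s return value and tear down via nvme_close() before bailing out:

    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        return ret;
    }
    bs->supported_write_flags = BDRV_REQ_FUA;
    if (nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE), errp)) {
        nvme_close(bs);
        return -EINVAL;
    }
    return 0;
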
Paolo Bonzini Jan. 12, 2018, 11 a.m. | #2
On 12/01/2018 10:44, Stefan Hajnoczi wrote:
>> +    if (progress) {
>> +        /* Notify the device so it can post more completions. */
>> +        smp_mb_release();
>> +        *q->cq.doorbell = cpu_to_le32(q->cq.head);
>> +        if (!qemu_co_queue_empty(&q->free_req_queue)) {
>> +            aio_bh_schedule_oneshot(s->aio_context, nvme_free_req_queue_cb, q);
>> +        }
> This is not thread-safe because the queue producer does:
> 
> 1   qemu_mutex_unlock(&q->lock);
> 2   qemu_co_queue_wait(&q->free_req_queue, NULL);
> 3   qemu_mutex_lock(&q->lock);
> 
> We fail to call nvme_free_req_queue_cb() when the if
> (!qemu_co_queue_empty(&q->free_req_queue)) check runs after 1 but before 2.

Yes, it can happen.  The right solution would be to do what block/curl.c does
(which has more or less the same scenario):

    next = QSIMPLEQ_FIRST(&s->s->free_state_waitq);
    if (next) {
        QSIMPLEQ_REMOVE_HEAD(&s->s->free_state_waitq, next);
        qemu_mutex_unlock(&s->s->mutex);
        aio_co_wake(next->co);
        qemu_mutex_lock(&s->s->mutex);
    }

(where the "if" would be here, and the QSIMPLEQ_REMOVE_HEAD in 
nvme_free_req_queue_cb; by the way, nvme_free_req_queue_cb also needs to 
take q->lock).
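
For illustration, with the qemu_co_enter_next() variant proposed below,
nvme_free_req_queue_cb() could end up looking roughly like this (untested
sketch, assuming qemu_co_enter_next() grows the mutex argument):

    static void nvme_free_req_queue_cb(void *opaque)
    {
        NVMeQueuePair *q = opaque;

        qemu_mutex_lock(&q->lock);
        while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
            /* Retry all pending requests */
        }
        qemu_mutex_unlock(&q->lock);
    }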

I think we should have a variant of CoQueue that uses a QemuMutex.  Of 
course in C you have to choose between code duplication, lack of 
type-safety, or ugly code, while in C++ it would be an easy application 
of templates.  But maybe something like this (with QemuTypedMutex moved 
to another header file) is acceptable:

diff --git a/block/curl.c b/block/curl.c
index 35cf417..cd578d3 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -101,8 +101,6 @@ typedef struct CURLAIOCB {
 
     size_t start;
     size_t end;
-
-    QSIMPLEQ_ENTRY(CURLAIOCB) next;
 } CURLAIOCB;
 
 typedef struct CURLSocket {
@@ -138,7 +136,7 @@ typedef struct BDRVCURLState {
     bool accept_range;
     AioContext *aio_context;
     QemuMutex mutex;
-    QSIMPLEQ_HEAD(, CURLAIOCB) free_state_waitq;
+    CoQueue free_state_waitq;
     char *username;
     char *password;
     char *proxyusername;
@@ -538,7 +536,6 @@ static int curl_init_state(BDRVCURLState *s, CURLState *state)
 /* Called with s->mutex held.  */
 static void curl_clean_state(CURLState *s)
 {
-    CURLAIOCB *next;
     int j;
     for (j = 0; j < CURL_NUM_ACB; j++) {
         assert(!s->acb[j]);
@@ -556,13 +553,7 @@ static void curl_clean_state(CURLState *s)
 
     s->in_use = 0;
 
-    next = QSIMPLEQ_FIRST(&s->s->free_state_waitq);
-    if (next) {
-        QSIMPLEQ_REMOVE_HEAD(&s->s->free_state_waitq, next);
-        qemu_mutex_unlock(&s->s->mutex);
-        aio_co_wake(next->co);
-        qemu_mutex_lock(&s->s->mutex);
-    }
+    qemu_co_enter_next(&s->s->free_state_waitq, &s->s->mutex);
 }
 
 static void curl_parse_filename(const char *filename, QDict *options,
@@ -784,7 +775,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     }
 
     DPRINTF("CURL: Opening %s\n", file);
-    QSIMPLEQ_INIT(&s->free_state_waitq);
+    qemu_co_queue_init(&s->free_state_waitq);
     s->aio_context = bdrv_get_aio_context(bs);
     s->url = g_strdup(file);
     qemu_mutex_lock(&s->mutex);
@@ -888,10 +879,7 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
         if (state) {
             break;
         }
-        QSIMPLEQ_INSERT_TAIL(&s->free_state_waitq, acb, next);
-        qemu_mutex_unlock(&s->mutex);
-        qemu_coroutine_yield();
-        qemu_mutex_lock(&s->mutex);
+        qemu_co_queue_wait(&s->free_state_waitq, &s->mutex);
     }
 
     if (curl_init_state(s, state) < 0) {
diff --git a/fsdev/qemu-fsdev-throttle.c b/fsdev/qemu-fsdev-throttle.c
index 49eebb5..1dc07fb 100644
--- a/fsdev/qemu-fsdev-throttle.c
+++ b/fsdev/qemu-fsdev-throttle.c
@@ -20,13 +20,13 @@
 static void fsdev_throttle_read_timer_cb(void *opaque)
 {
     FsThrottle *fst = opaque;
-    qemu_co_enter_next(&fst->throttled_reqs[false]);
+    qemu_co_enter_next(&fst->throttled_reqs[false], NULL);
 }
 
 static void fsdev_throttle_write_timer_cb(void *opaque)
 {
     FsThrottle *fst = opaque;
-    qemu_co_enter_next(&fst->throttled_reqs[true]);
+    qemu_co_enter_next(&fst->throttled_reqs[true], NULL);
 }
 
 void fsdev_throttle_parse_opts(QemuOpts *opts, FsThrottle *fst, Error **errp)
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index ce2eb73..ec2831a 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -178,15 +178,46 @@ typedef struct CoQueue {
  */
 void qemu_co_queue_init(CoQueue *queue);
 
+typedef struct QemuTypedMutex {
+    void *mutex;
+    void (*lock)(void *);
+    void (*unlock)(void *);
+} QemuTypedMutex;
+
+extern void *qemu_unknown_mutex_type(void *);
+#define QEMU_LOCK_FUNC(mutex)                                                       \
+   ((void (*)(void *))                                                              \
+     __builtin_choose_expr(                                                         \
+        __builtin_types_compatible_p(typeof(mutex), QemuMutex *), qemu_mutex_lock,  \
+     __builtin_choose_expr(                                                         \
+        __builtin_types_compatible_p(typeof(mutex), CoMutex *), qemu_co_mutex_lock, \
+        (mutex) ? qemu_unknown_mutex_type : NULL)))
+
+#define QEMU_UNLOCK_FUNC(mutex)                                                       \
+   ((void (*)(void *))                                                                \
+     __builtin_choose_expr(                                                           \
+        __builtin_types_compatible_p(typeof(mutex), QemuMutex *), qemu_mutex_unlock,  \
+     __builtin_choose_expr(                                                           \
+        __builtin_types_compatible_p(typeof(mutex), CoMutex *), qemu_co_mutex_unlock, \
+        (mutex) ? qemu_unknown_mutex_type : NULL)))
+
+#define QEMU_TYPED_MUTEX(mutex)                                                     \
+    ((QemuTypedMutex) { mutex, QEMU_LOCK_FUNC(mutex), QEMU_UNLOCK_FUNC(mutex) })
+
+
 /**
  * Adds the current coroutine to the CoQueue and transfers control to the
  * caller of the coroutine.  The mutex is unlocked during the wait and
  * locked again afterwards.
  */
-void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex);
+void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuTypedMutex mutex);
+
+#define qemu_co_queue_wait(queue, mutex)                                            \
+	qemu_co_queue_wait_impl(queue, QEMU_TYPED_MUTEX(mutex))
 
 /**
  * Restarts the next coroutine in the CoQueue and removes it from the queue.
+ * The mutex passed to qemu_co_queue_wait must be taken.
  *
  * Returns true if a coroutine was restarted, false if the queue is empty.
  */
@@ -198,9 +229,15 @@ bool coroutine_fn qemu_co_queue_next(CoQueue *queue);
 void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
 
 /**
- * Enter the next coroutine in the queue
+ * Enter the next coroutine in the queue.
+ * The mutex passed to qemu_co_queue_wait must be taken.
+ *
+ * Returns true if a coroutine was restarted, false if the queue is empty.
  */
-bool qemu_co_enter_next(CoQueue *queue);
+bool qemu_co_enter_next_impl(CoQueue *queue, QemuTypedMutex mutex);
+
+#define qemu_co_enter_next(queue, mutex)                                            \
+	qemu_co_enter_next_impl(queue, QEMU_TYPED_MUTEX(mutex))
 
 /**
  * Checks if the CoQueue is empty.
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index 846ff91..f6bf952 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -40,13 +40,13 @@ void qemu_co_queue_init(CoQueue *queue)
     QSIMPLEQ_INIT(&queue->entries);
 }
 
-void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex)
+void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuTypedMutex mutex)
 {
     Coroutine *self = qemu_coroutine_self();
     QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);
 
-    if (mutex) {
-        qemu_co_mutex_unlock(mutex);
+    if (mutex.mutex) {
+        mutex.unlock(mutex.mutex);
     }
 
     /* There is no race condition here.  Other threads will call
@@ -61,8 +61,8 @@ void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex)
      * primitive automatically places the woken coroutine on the
      * mutex's queue.  This avoids the thundering herd effect.
      */
-    if (mutex) {
-        qemu_co_mutex_lock(mutex);
+    if (mutex.mutex) {
+        mutex.lock(mutex.mutex);
     }
 }
 
@@ -130,7 +130,7 @@ void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue)
     qemu_co_queue_do_restart(queue, false);
 }
 
-bool qemu_co_enter_next(CoQueue *queue)
+bool qemu_co_enter_next_impl(CoQueue *queue, QemuTypedMutex mutex)
 {
     Coroutine *next;
 
@@ -140,7 +140,16 @@ bool qemu_co_enter_next(CoQueue *queue)
     }
 
     QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
-    qemu_coroutine_enter(next);
+    /* The coroutine will need the mutex: release it to
+     * avoid a deadlock.
+     */
+    if (mutex.mutex) {
+        mutex.unlock(mutex.mutex);
+    }
+    aio_co_wake(next);
+    if (mutex.mutex) {
+        mutex.lock(mutex.mutex);
+    }
     return true;
 }
 

If that's okay, I can prepare a proper series for Fam next week.

Paolo

Patch

diff --git a/MAINTAINERS b/MAINTAINERS
index 4770f105d4..bd636a4bff 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1876,6 +1876,12 @@  L: qemu-block@nongnu.org
 S: Supported
 F: block/null.c
 
+NVMe Block Driver
+M: Fam Zheng <famz@redhat.com>
+L: qemu-block@nongnu.org
+S: Supported
+F: block/nvme*
+
 Bootdevice
 M: Gonglei <arei.gonglei@huawei.com>
 S: Maintained
diff --git a/block/Makefile.objs b/block/Makefile.objs
index 6eaf78a046..4c7e9d84a7 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -11,6 +11,7 @@  block-obj-$(CONFIG_POSIX) += file-posix.o
 block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
 block-obj-y += null.o mirror.o commit.o io.o
 block-obj-y += throttle-groups.o
+block-obj-$(CONFIG_LINUX) += nvme.o
 
 block-obj-y += nbd.o nbd-client.o sheepdog.o
 block-obj-$(CONFIG_LIBISCSI) += iscsi.o
diff --git a/block/nvme.c b/block/nvme.c
new file mode 100644
index 0000000000..97ab01686f
--- /dev/null
+++ b/block/nvme.c
@@ -0,0 +1,1163 @@ 
+/*
+ * NVMe block driver based on vfio
+ *
+ * Copyright 2016 - 2018 Red Hat, Inc.
+ *
+ * Authors:
+ *   Fam Zheng <famz@redhat.com>
+ *   Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include <linux/vfio.h>
+#include "qapi/error.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/qmp/qstring.h"
+#include "qemu/error-report.h"
+#include "qemu/cutils.h"
+#include "qemu/vfio-helpers.h"
+#include "block/block_int.h"
+#include "trace.h"
+
+/* TODO: Move nvme spec definitions from hw/block/nvme.h into a separate file
+ * that doesn't depend on dma/pci headers. */
+#include "sysemu/dma.h"
+#include "hw/pci/pci.h"
+#include "hw/block/block.h"
+#include "hw/block/nvme.h"
+
+#define NVME_SQ_ENTRY_BYTES 64
+#define NVME_CQ_ENTRY_BYTES 16
+#define NVME_QUEUE_SIZE 128
+#define NVME_BAR_SIZE 8192
+
+typedef struct {
+    int32_t  head, tail;
+    uint8_t  *queue;
+    uint64_t iova;
+    /* Hardware MMIO register */
+    volatile uint32_t *doorbell;
+} NVMeQueue;
+
+typedef struct {
+    BlockCompletionFunc *cb;
+    void *opaque;
+    int cid;
+    void *prp_list_page;
+    uint64_t prp_list_iova;
+    bool busy;
+} NVMeRequest;
+
+typedef struct {
+    CoQueue     free_req_queue;
+    QemuMutex   lock;
+
+    /* Fields protected by BQL */
+    int         index;
+    uint8_t     *prp_list_pages;
+
+    /* Fields protected by @lock */
+    NVMeQueue   sq, cq;
+    int         cq_phase;
+    NVMeRequest reqs[NVME_QUEUE_SIZE];
+    bool        busy;
+    int         need_kick;
+    int         inflight;
+} NVMeQueuePair;
+
+/* Memory mapped registers */
+typedef volatile struct {
+    uint64_t cap;
+    uint32_t vs;
+    uint32_t intms;
+    uint32_t intmc;
+    uint32_t cc;
+    uint32_t reserved0;
+    uint32_t csts;
+    uint32_t nssr;
+    uint32_t aqa;
+    uint64_t asq;
+    uint64_t acq;
+    uint32_t cmbloc;
+    uint32_t cmbsz;
+    uint8_t  reserved1[0xec0];
+    uint8_t  cmd_set_specfic[0x100];
+    uint32_t doorbells[];
+} QEMU_PACKED NVMeRegs;
+
+QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
+
+typedef struct {
+    AioContext *aio_context;
+    QEMUVFIOState *vfio;
+    NVMeRegs *regs;
+    /* The submission/completion queue pairs.
+     * [0]: admin queue.
+     * [1..]: io queues.
+     */
+    NVMeQueuePair **queues;
+    int nr_queues;
+    size_t page_size;
+    /* How many uint32_t elements does each doorbell entry take. */
+    size_t doorbell_scale;
+    bool write_cache_supported;
+    EventNotifier irq_notifier;
+    uint64_t nsze; /* Namespace size reported by identify command */
+    int nsid;      /* The namespace id to read/write data. */
+    uint64_t max_transfer;
+    int plugged;
+
+    CoMutex dma_map_lock;
+    CoQueue dma_flush_queue;
+
+    /* Total size of mapped qiov, accessed under dma_map_lock */
+    int dma_map_count;
+} BDRVNVMeState;
+
+#define NVME_BLOCK_OPT_DEVICE "device"
+#define NVME_BLOCK_OPT_NAMESPACE "namespace"
+
+static QemuOptsList runtime_opts = {
+    .name = "nvme",
+    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
+    .desc = {
+        {
+            .name = NVME_BLOCK_OPT_DEVICE,
+            .type = QEMU_OPT_STRING,
+            .help = "NVMe PCI device address",
+        },
+        {
+            .name = NVME_BLOCK_OPT_NAMESPACE,
+            .type = QEMU_OPT_NUMBER,
+            .help = "NVMe namespace",
+        },
+        { /* end of list */ }
+    },
+};
+
+static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
+                            int nentries, int entry_bytes, Error **errp)
+{
+    BDRVNVMeState *s = bs->opaque;
+    size_t bytes;
+    int r;
+
+    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
+    q->head = q->tail = 0;
+    q->queue = qemu_try_blockalign0(bs, bytes);
+
+    if (!q->queue) {
+        error_setg(errp, "Cannot allocate queue");
+        return;
+    }
+    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
+    if (r) {
+        error_setg(errp, "Cannot map queue");
+    }
+}
+
+static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
+{
+    qemu_vfree(q->prp_list_pages);
+    qemu_vfree(q->sq.queue);
+    qemu_vfree(q->cq.queue);
+    qemu_mutex_destroy(&q->lock);
+    g_free(q);
+}
+
+static void nvme_free_req_queue_cb(void *opaque)
+{
+    NVMeQueuePair *q = opaque;
+
+    while (qemu_co_enter_next(&q->free_req_queue)) {
+        /* Retry all pending requests */
+    }
+}
+
+static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
+                                             int idx, int size,
+                                             Error **errp)
+{
+    int i, r;
+    BDRVNVMeState *s = bs->opaque;
+    Error *local_err = NULL;
+    NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
+    uint64_t prp_list_iova;
+
+    qemu_mutex_init(&q->lock);
+    q->index = idx;
+    qemu_co_queue_init(&q->free_req_queue);
+    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_QUEUE_SIZE);
+    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
+                          s->page_size * NVME_QUEUE_SIZE,
+                          false, &prp_list_iova);
+    if (r) {
+        goto fail;
+    }
+    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
+        NVMeRequest *req = &q->reqs[i];
+        req->cid = i + 1;
+        req->prp_list_page = q->prp_list_pages + i * s->page_size;
+        req->prp_list_iova = prp_list_iova + i * s->page_size;
+    }
+    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        goto fail;
+    }
+    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];
+
+    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        goto fail;
+    }
+    q->cq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale + 1];
+
+    return q;
+fail:
+    nvme_free_queue_pair(bs, q);
+    return NULL;
+}
+
+/* With q->lock */
+static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
+{
+    if (s->plugged || !q->need_kick) {
+        return;
+    }
+    trace_nvme_kick(s, q->index);
+    assert(!(q->sq.tail & 0xFF00));
+    /* Fence the write to submission queue entry before notifying the device. */
+    smp_wmb();
+    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
+    q->inflight += q->need_kick;
+    q->need_kick = 0;
+}
+
+/* Find a free request element if any, otherwise:
+ * a) if in coroutine context, try to wait for one to become available;
+ * b) if not in coroutine, return NULL;
+ */
+static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
+{
+    int i;
+    NVMeRequest *req = NULL;
+
+    qemu_mutex_lock(&q->lock);
+    while (q->inflight + q->need_kick > NVME_QUEUE_SIZE - 2) {
+        /* We have to leave one slot empty as that is the full queue case (head
+         * == tail + 1). */
+        if (qemu_in_coroutine()) {
+            trace_nvme_free_req_queue_wait(q);
+            qemu_mutex_unlock(&q->lock);
+            qemu_co_queue_wait(&q->free_req_queue, NULL);
+            qemu_mutex_lock(&q->lock);
+        } else {
+            qemu_mutex_unlock(&q->lock);
+            return NULL;
+        }
+    }
+    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
+        if (!q->reqs[i].busy) {
+            q->reqs[i].busy = true;
+            req = &q->reqs[i];
+            break;
+        }
+    }
+    /* We have checked inflight and need_kick while holding q->lock, so one
+     * free req must be available. */
+    assert(req);
+    qemu_mutex_unlock(&q->lock);
+    return req;
+}
+
+static inline int nvme_translate_error(const NvmeCqe *c)
+{
+    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
+    if (status) {
+        trace_nvme_error(le32_to_cpu(c->result),
+                         le16_to_cpu(c->sq_head),
+                         le16_to_cpu(c->sq_id),
+                         le16_to_cpu(c->cid),
+                         le16_to_cpu(status));
+    }
+    switch (status) {
+    case 0:
+        return 0;
+    case 1:
+        return -ENOSYS;
+    case 2:
+        return -EINVAL;
+    default:
+        return -EIO;
+    }
+}
+
+/* With q->lock */
+static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
+{
+    bool progress = false;
+    NVMeRequest *preq;
+    NVMeRequest req;
+    NvmeCqe *c;
+
+    trace_nvme_process_completion(s, q->index, q->inflight);
+    if (q->busy || s->plugged) {
+        trace_nvme_process_completion_queue_busy(s, q->index);
+        return false;
+    }
+    q->busy = true;
+    assert(q->inflight >= 0);
+    while (q->inflight) {
+        int16_t cid;
+        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
+        if (!c->cid || (le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
+            break;
+        }
+        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
+        if (!q->cq.head) {
+            q->cq_phase = !q->cq_phase;
+        }
+        cid = le16_to_cpu(c->cid);
+        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
+            fprintf(stderr, "Unexpected CID in completion queue: %" PRIu32 "\n",
+                    cid);
+            continue;
+        }
+        assert(cid <= NVME_QUEUE_SIZE);
+        trace_nvme_complete_command(s, q->index, cid);
+        preq = &q->reqs[cid - 1];
+        req = *preq;
+        assert(req.cid == cid);
+        assert(req.cb);
+        preq->busy = false;
+        preq->cb = preq->opaque = NULL;
+        qemu_mutex_unlock(&q->lock);
+        req.cb(req.opaque, nvme_translate_error(c));
+        qemu_mutex_lock(&q->lock);
+        c->cid = cpu_to_le16(0);
+        q->inflight--;
+        /* Flip Phase Tag bit. */
+        c->status = cpu_to_le16(le16_to_cpu(c->status) ^ 0x1);
+        progress = true;
+    }
+    if (progress) {
+        /* Notify the device so it can post more completions. */
+        smp_mb_release();
+        *q->cq.doorbell = cpu_to_le32(q->cq.head);
+        if (!qemu_co_queue_empty(&q->free_req_queue)) {
+            aio_bh_schedule_oneshot(s->aio_context, nvme_free_req_queue_cb, q);
+        }
+    }
+    q->busy = false;
+    return progress;
+}
+
+static void nvme_trace_command(const NvmeCmd *cmd)
+{
+    int i;
+
+    for (i = 0; i < 8; ++i) {
+        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
+        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
+                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
+    }
+}
+
+static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
+                                NVMeRequest *req,
+                                NvmeCmd *cmd, BlockCompletionFunc cb,
+                                void *opaque)
+{
+    assert(!req->cb);
+    req->cb = cb;
+    req->opaque = opaque;
+    cmd->cid = cpu_to_le32(req->cid);
+
+    trace_nvme_submit_command(s, q->index, req->cid);
+    nvme_trace_command(cmd);
+    qemu_mutex_lock(&q->lock);
+    memcpy((uint8_t *)q->sq.queue +
+           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
+    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
+    q->need_kick++;
+    nvme_kick(s, q);
+    nvme_process_completion(s, q);
+    qemu_mutex_unlock(&q->lock);
+}
+
+static void nvme_cmd_sync_cb(void *opaque, int ret)
+{
+    int *pret = opaque;
+    *pret = ret;
+}
+
+static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
+                         NvmeCmd *cmd)
+{
+    NVMeRequest *req;
+    BDRVNVMeState *s = bs->opaque;
+    int ret = -EINPROGRESS;
+    req = nvme_get_free_req(q);
+    if (!req) {
+        return -EBUSY;
+    }
+    nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);
+
+    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
+    return ret;
+}
+
+static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
+{
+    BDRVNVMeState *s = bs->opaque;
+    NvmeIdCtrl *idctrl;
+    NvmeIdNs *idns;
+    uint8_t *resp;
+    int r;
+    uint64_t iova;
+    NvmeCmd cmd = {
+        .opcode = NVME_ADM_CMD_IDENTIFY,
+        .cdw10 = cpu_to_le32(0x1),
+    };
+
+    resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
+    if (!resp) {
+        error_setg(errp, "Cannot allocate buffer for identify response");
+        goto out;
+    }
+    idctrl = (NvmeIdCtrl *)resp;
+    idns = (NvmeIdNs *)resp;
+    r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
+    if (r) {
+        error_setg(errp, "Cannot map buffer for DMA");
+        goto out;
+    }
+    cmd.prp1 = cpu_to_le64(iova);
+
+    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+        error_setg(errp, "Failed to identify controller");
+        goto out;
+    }
+
+    if (le32_to_cpu(idctrl->nn) < namespace) {
+        error_setg(errp, "Invalid namespace");
+        goto out;
+    }
+    s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
+    s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
+    /* For now the page list buffer per command is one page, to hold at most
+     * s->page_size / sizeof(uint64_t) entries. */
+    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
+                          s->page_size / sizeof(uint64_t) * s->page_size);
+
+    memset(resp, 0, 4096);
+
+    cmd.cdw10 = 0;
+    cmd.nsid = cpu_to_le32(namespace);
+    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+        error_setg(errp, "Failed to identify namespace");
+        goto out;
+    }
+
+    s->nsze = le64_to_cpu(idns->nsze);
+
+out:
+    qemu_vfio_dma_unmap(s->vfio, resp);
+    qemu_vfree(resp);
+}
+
+static bool nvme_poll_queues(BDRVNVMeState *s)
+{
+    bool progress = false;
+    int i;
+
+    for (i = 0; i < s->nr_queues; i++) {
+        NVMeQueuePair *q = s->queues[i];
+        qemu_mutex_lock(&q->lock);
+        while (nvme_process_completion(s, q)) {
+            /* Keep polling */
+            progress = true;
+        }
+        qemu_mutex_unlock(&q->lock);
+    }
+    return progress;
+}
+
+static void nvme_handle_event(EventNotifier *n)
+{
+    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
+
+    trace_nvme_handle_event(s);
+    aio_context_acquire(s->aio_context);
+    event_notifier_test_and_clear(n);
+    nvme_poll_queues(s);
+    aio_context_release(s->aio_context);
+}
+
+static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
+{
+    BDRVNVMeState *s = bs->opaque;
+    int n = s->nr_queues;
+    NVMeQueuePair *q;
+    NvmeCmd cmd;
+    int queue_size = NVME_QUEUE_SIZE;
+
+    q = nvme_create_queue_pair(bs, n, queue_size, errp);
+    if (!q) {
+        return false;
+    }
+    cmd = (NvmeCmd) {
+        .opcode = NVME_ADM_CMD_CREATE_CQ,
+        .prp1 = cpu_to_le64(q->cq.iova),
+        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
+        .cdw11 = cpu_to_le32(0x3),
+    };
+    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+        error_setg(errp, "Failed to create io queue [%d]", n);
+        nvme_free_queue_pair(bs, q);
+        return false;
+    }
+    cmd = (NvmeCmd) {
+        .opcode = NVME_ADM_CMD_CREATE_SQ,
+        .prp1 = cpu_to_le64(q->sq.iova),
+        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
+        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
+    };
+    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+        error_setg(errp, "Failed to create io queue [%d]", n);
+        nvme_free_queue_pair(bs, q);
+        return false;
+    }
+    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
+    s->queues[n] = q;
+    s->nr_queues++;
+    return true;
+}
+
+static bool nvme_poll_cb(void *opaque)
+{
+    EventNotifier *e = opaque;
+    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
+    bool progress = false;
+
+    trace_nvme_poll_cb(s);
+    progress = nvme_poll_queues(s);
+    return progress;
+}
+
+static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
+                     Error **errp)
+{
+    BDRVNVMeState *s = bs->opaque;
+    int ret;
+    uint64_t cap;
+    uint64_t timeout_ms;
+    uint64_t deadline, now;
+    Error *local_err = NULL;
+
+    qemu_co_mutex_init(&s->dma_map_lock);
+    qemu_co_queue_init(&s->dma_flush_queue);
+    s->nsid = namespace;
+    s->aio_context = bdrv_get_aio_context(bs);
+    ret = event_notifier_init(&s->irq_notifier, 0);
+    if (ret) {
+        error_setg(errp, "Failed to init event notifier");
+        return ret;
+    }
+
+    s->vfio = qemu_vfio_open_pci(device, errp);
+    if (!s->vfio) {
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
+    if (!s->regs) {
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    /* Perform the initialization sequence as described in NVMe spec "7.6.1
+     * Initialization". */
+
+    cap = le64_to_cpu(s->regs->cap);
+    if (!(cap & (1ULL << 37))) {
+        error_setg(errp, "Device doesn't support NVMe command set");
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
+    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
+    bs->bl.opt_mem_alignment = s->page_size;
+    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);
+
+    /* Reset device to get a clean state. */
+    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
+    /* Wait for CSTS.RDY = 0. */
+    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
+    while (le32_to_cpu(s->regs->csts) & 0x1) {
+        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
+            error_setg(errp, "Timeout while waiting for device to reset (%ld ms)",
+                       timeout_ms);
+            ret = -ETIMEDOUT;
+            goto fail;
+        }
+    }
+
+    /* Set up admin queue. */
+    s->queues = g_new(NVMeQueuePair *, 1);
+    s->nr_queues = 1;
+    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
+    if (!s->queues[0]) {
+        ret = -EINVAL;
+        goto fail;
+    }
+    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
+    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
+    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
+    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);
+
+    /* After setting up all control registers we can enable device now. */
+    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
+                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
+                              0x1);
+    /* Wait for CSTS.RDY = 1. */
+    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+    deadline = now + timeout_ms * 1000000;
+    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
+        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
+            error_setg(errp, "Timeout while waiting for device to start (%ld ms)",
+                       timeout_ms);
+            ret = -ETIMEDOUT;
+            goto fail_queue;
+        }
+    }
+
+    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
+                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
+    if (ret) {
+        goto fail_queue;
+    }
+    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+                           false, nvme_handle_event, nvme_poll_cb);
+
+    nvme_identify(bs, namespace, errp);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        ret = -EIO;
+        goto fail_handler;
+    }
+
+    /* Set up command queues. */
+    if (!nvme_add_io_queue(bs, errp)) {
+        ret = -EIO;
+        goto fail_handler;
+    }
+    return 0;
+
+fail_handler:
+    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+                           false, NULL, NULL);
+fail_queue:
+    nvme_free_queue_pair(bs, s->queues[0]);
+fail:
+    g_free(s->queues);
+    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
+    qemu_vfio_close(s->vfio);
+    event_notifier_cleanup(&s->irq_notifier);
+    return ret;
+}
+
+/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
+ *
+ *     nvme://0000:44:00.0/1
+ *
+ * where the "nvme://" is a fixed form of the protocol prefix, the middle part
+ * is the PCI address, and the last part is the namespace number starting from
+ * 1 according to the NVMe spec. */
+static void nvme_parse_filename(const char *filename, QDict *options,
+                                Error **errp)
+{
+    int pref = strlen("nvme://");
+
+    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
+        const char *tmp = filename + pref;
+        char *device;
+        const char *namespace;
+        unsigned long ns;
+        const char *slash = strchr(tmp, '/');
+        if (!slash) {
+            qdict_put(options, NVME_BLOCK_OPT_DEVICE,
+                      qstring_from_str(tmp));
+            return;
+        }
+        device = g_strndup(tmp, slash - tmp);
+        qdict_put(options, NVME_BLOCK_OPT_DEVICE, qstring_from_str(device));
+        g_free(device);
+        namespace = slash + 1;
+        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
+            error_setg(errp, "Invalid namespace '%s', positive number expected",
+                       namespace);
+            return;
+        }
+        qdict_put(options, NVME_BLOCK_OPT_NAMESPACE,
+                  qstring_from_str(*namespace ? namespace : "1"));
+    }
+}
+
+static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
+                                           Error **errp)
+{
+    BDRVNVMeState *s = bs->opaque;
+    NvmeCmd cmd = {
+        .opcode = NVME_ADM_CMD_SET_FEATURES,
+        .nsid = cpu_to_le32(s->nsid),
+        .cdw10 = cpu_to_le32(0x06),
+        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
+    };
+
+    if (enable && !s->write_cache_supported) {
+        error_setg(errp,
+                   "NVMe controller doesn't have volatile write cache");
+        return -EINVAL;
+    }
+    return nvme_cmd_sync(bs, s->queues[0], &cmd);
+}
+
+static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
+                          Error **errp)
+{
+    const char *device;
+    QemuOpts *opts;
+    int namespace;
+
+    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
+    qemu_opts_absorb_qdict(opts, options, &error_abort);
+    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
+    if (!device) {
+        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
+        qemu_opts_del(opts);
+        return -EINVAL;
+    }
+
+    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
+    nvme_init(bs, device, namespace, errp);
+
+    qemu_opts_del(opts);
+    bs->supported_write_flags = BDRV_REQ_FUA;
+    if (nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE), errp)) {
+        return -EINVAL;
+    }
+    return 0;
+}
+
+static void nvme_close(BlockDriverState *bs)
+{
+    int i;
+    BDRVNVMeState *s = bs->opaque;
+
+    for (i = 0; i < s->nr_queues; ++i) {
+        nvme_free_queue_pair(bs, s->queues[i]);
+    }
+    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+                           false, NULL, NULL);
+    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
+    qemu_vfio_close(s->vfio);
+}
+
+static int64_t nvme_getlength(BlockDriverState *bs)
+{
+    BDRVNVMeState *s = bs->opaque;
+
+    return s->nsze << BDRV_SECTOR_BITS;
+}
+
+/* Called with s->dma_map_lock */
+static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
+                                            QEMUIOVector *qiov)
+{
+    int r = 0;
+    BDRVNVMeState *s = bs->opaque;
+
+    s->dma_map_count -= qiov->size;
+    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
+        r = qemu_vfio_dma_reset_temporary(s->vfio);
+        if (!r) {
+            qemu_co_queue_restart_all(&s->dma_flush_queue);
+        }
+    }
+    return r;
+}
+
+/* Called with s->dma_map_lock */
+static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
+                                          NVMeRequest *req, QEMUIOVector *qiov)
+{
+    BDRVNVMeState *s = bs->opaque;
+    uint64_t *pagelist = req->prp_list_page;
+    int i, j, r;
+    int entries = 0;
+
+    assert(qiov->size);
+    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
+    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
+    for (i = 0; i < qiov->niov; ++i) {
+        bool retry = true;
+        uint64_t iova;
+try_map:
+        r = qemu_vfio_dma_map(s->vfio,
+                              qiov->iov[i].iov_base,
+                              qiov->iov[i].iov_len,
+                              true, &iova);
+        if (r == -ENOMEM && retry) {
+            retry = false;
+            trace_nvme_dma_flush_queue_wait(s);
+            if (s->dma_map_count) {
+                trace_nvme_dma_map_flush(s);
+                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
+            } else {
+                r = qemu_vfio_dma_reset_temporary(s->vfio);
+                if (r) {
+                    goto fail;
+                }
+            }
+            goto try_map;
+        }
+        if (r) {
+            goto fail;
+        }
+
+        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
+            pagelist[entries++] = iova + j * s->page_size;
+        }
+        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
+                                    qiov->iov[i].iov_len / s->page_size);
+    }
+
+    s->dma_map_count += qiov->size;
+
+    assert(entries <= s->page_size / sizeof(uint64_t));
+    switch (entries) {
+    case 0:
+        abort();
+    case 1:
+        cmd->prp1 = cpu_to_le64(pagelist[0]);
+        cmd->prp2 = 0;
+        break;
+    case 2:
+        cmd->prp1 = cpu_to_le64(pagelist[0]);
+        cmd->prp2 = cpu_to_le64(pagelist[1]);
+        break;
+    default:
+        cmd->prp1 = cpu_to_le64(pagelist[0]);
+        cmd->prp2 = cpu_to_le64(req->prp_list_iova);
+        for (i = 0; i < entries - 1; ++i) {
+            pagelist[i] = cpu_to_le64(pagelist[i + 1]);
+        }
+        pagelist[entries - 1] = 0;
+        break;
+    }
+    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
+    for (i = 0; i < entries; ++i) {
+        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
+    }
+    return 0;
+fail:
+    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
+     * increment s->dma_map_count. This is okay for fixed mapping memory areas
+     * because they are already mapped before calling this function; for
+     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
+     * calling qemu_vfio_dma_reset_temporary when necessary. */
+    return r;
+}
+
+typedef struct {
+    Coroutine *co;
+    int ret;
+    AioContext *ctx;
+} NVMeCoData;
+
+static void nvme_rw_cb_bh(void *opaque)
+{
+    NVMeCoData *data = opaque;
+    qemu_coroutine_enter(data->co);
+}
+
+static void nvme_rw_cb(void *opaque, int ret)
+{
+    NVMeCoData *data = opaque;
+    data->ret = ret;
+    if (!data->co) {
+        /* The rw coroutine hasn't yielded, don't try to enter. */
+        return;
+    }
+    aio_bh_schedule_oneshot(data->ctx, nvme_rw_cb_bh, data);
+}
+
+static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
+                                            uint64_t offset, uint64_t bytes,
+                                            QEMUIOVector *qiov,
+                                            bool is_write,
+                                            int flags)
+{
+    int r;
+    BDRVNVMeState *s = bs->opaque;
+    NVMeQueuePair *ioq = s->queues[1];
+    NVMeRequest *req;
+    uint32_t cdw12 = (((bytes >> BDRV_SECTOR_BITS) - 1) & 0xFFFF) |
+                       (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
+    NvmeCmd cmd = {
+        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
+        .nsid = cpu_to_le32(s->nsid),
+        .cdw10 = cpu_to_le32((offset >> BDRV_SECTOR_BITS) & 0xFFFFFFFF),
+        .cdw11 = cpu_to_le32(((offset >> BDRV_SECTOR_BITS) >> 32) & 0xFFFFFFFF),
+        .cdw12 = cpu_to_le32(cdw12),
+    };
+    NVMeCoData data = {
+        .ctx = bdrv_get_aio_context(bs),
+        .ret = -EINPROGRESS,
+    };
+
+    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
+    assert(s->nr_queues > 1);
+    req = nvme_get_free_req(ioq);
+    assert(req);
+
+    qemu_co_mutex_lock(&s->dma_map_lock);
+    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
+    qemu_co_mutex_unlock(&s->dma_map_lock);
+    if (r) {
+        req->busy = false;
+        return r;
+    }
+    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+
+    data.co = qemu_coroutine_self();
+    while (data.ret == -EINPROGRESS) {
+        qemu_coroutine_yield();
+    }
+
+    qemu_co_mutex_lock(&s->dma_map_lock);
+    r = nvme_cmd_unmap_qiov(bs, qiov);
+    qemu_co_mutex_unlock(&s->dma_map_lock);
+    if (r) {
+        return r;
+    }
+
+    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
+    return data.ret;
+}
+
+static inline bool nvme_qiov_aligned(BlockDriverState *bs,
+                                     const QEMUIOVector *qiov)
+{
+    int i;
+    BDRVNVMeState *s = bs->opaque;
+
+    for (i = 0; i < qiov->niov; ++i) {
+        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
+            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
+            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
+                                      qiov->iov[i].iov_len, s->page_size);
+            return false;
+        }
+    }
+    return true;
+}
+
+static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+                       QEMUIOVector *qiov, bool is_write, int flags)
+{
+    BDRVNVMeState *s = bs->opaque;
+    int r;
+    uint8_t *buf = NULL;
+    QEMUIOVector local_qiov;
+
+    assert(QEMU_IS_ALIGNED(offset, s->page_size));
+    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
+    assert(bytes <= s->max_transfer);
+    if (nvme_qiov_aligned(bs, qiov)) {
+        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
+    }
+    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
+    buf = qemu_try_blockalign(bs, bytes);
+
+    if (!buf) {
+        return -ENOMEM;
+    }
+    qemu_iovec_init(&local_qiov, 1);
+    if (is_write) {
+        qemu_iovec_to_buf(qiov, 0, buf, bytes);
+    }
+    qemu_iovec_add(&local_qiov, buf, bytes);
+    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
+    qemu_iovec_destroy(&local_qiov);
+    if (!r && !is_write) {
+        qemu_iovec_from_buf(qiov, 0, buf, bytes);
+    }
+    qemu_vfree(buf);
+    return r;
+}
+
+static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
+                                       uint64_t offset, uint64_t bytes,
+                                       QEMUIOVector *qiov, int flags)
+{
+    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
+}
+
+static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
+                                        uint64_t offset, uint64_t bytes,
+                                        QEMUIOVector *qiov, int flags)
+{
+    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
+}
+
+static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
+{
+    BDRVNVMeState *s = bs->opaque;
+    NVMeQueuePair *ioq = s->queues[1];
+    NVMeRequest *req;
+    NvmeCmd cmd = {
+        .opcode = NVME_CMD_FLUSH,
+        .nsid = cpu_to_le32(s->nsid),
+    };
+    NVMeCoData data = {
+        .ctx = bdrv_get_aio_context(bs),
+        .ret = -EINPROGRESS,
+    };
+
+    assert(s->nr_queues > 1);
+    req = nvme_get_free_req(ioq);
+    assert(req);
+    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+
+    data.co = qemu_coroutine_self();
+    if (data.ret == -EINPROGRESS) {
+        qemu_coroutine_yield();
+    }
+
+    return data.ret;
+}
+
+
+static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
+                               BlockReopenQueue *queue, Error **errp)
+{
+    return 0;
+}
+
+static int64_t coroutine_fn nvme_co_get_block_status(BlockDriverState *bs,
+                                                     int64_t sector_num,
+                                                     int nb_sectors, int *pnum,
+                                                     BlockDriverState **file)
+{
+    *pnum = nb_sectors;
+    *file = bs;
+
+    return BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_OFFSET_VALID |
+           (sector_num << BDRV_SECTOR_BITS);
+}
+
+static void nvme_refresh_filename(BlockDriverState *bs, QDict *opts)
+{
+    QINCREF(opts);
+    qdict_del(opts, "filename");
+
+    if (!qdict_size(opts)) {
+        snprintf(bs->exact_filename, sizeof(bs->exact_filename), "%s://",
+                 bs->drv->format_name);
+    }
+
+    qdict_put(opts, "driver", qstring_from_str(bs->drv->format_name));
+    bs->full_open_options = opts;
+}
+
+static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+    BDRVNVMeState *s = bs->opaque;
+
+    bs->bl.opt_mem_alignment = s->page_size;
+    bs->bl.request_alignment = s->page_size;
+    bs->bl.max_transfer = s->max_transfer;
+}
+
+static void nvme_detach_aio_context(BlockDriverState *bs)
+{
+    BDRVNVMeState *s = bs->opaque;
+
+    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+                           false, NULL, NULL);
+}
+
+static void nvme_attach_aio_context(BlockDriverState *bs,
+                                    AioContext *new_context)
+{
+    BDRVNVMeState *s = bs->opaque;
+
+    s->aio_context = new_context;
+    aio_set_event_notifier(new_context, &s->irq_notifier,
+                           false, nvme_handle_event, nvme_poll_cb);
+}
+
+static void nvme_aio_plug(BlockDriverState *bs)
+{
+    BDRVNVMeState *s = bs->opaque;
+    s->plugged++;
+}
+
+static void nvme_aio_unplug(BlockDriverState *bs)
+{
+    int i;
+    BDRVNVMeState *s = bs->opaque;
+    assert(s->plugged);
+    if (!--s->plugged) {
+        for (i = 1; i < s->nr_queues; i++) {
+            NVMeQueuePair *q = s->queues[i];
+            qemu_mutex_lock(&q->lock);
+            nvme_kick(s, q);
+            nvme_process_completion(s, q);
+            qemu_mutex_unlock(&q->lock);
+        }
+    }
+}
+
+static BlockDriver bdrv_nvme = {
+    .format_name              = "nvme",
+    .protocol_name            = "nvme",
+    .instance_size            = sizeof(BDRVNVMeState),
+
+    .bdrv_parse_filename      = nvme_parse_filename,
+    .bdrv_file_open           = nvme_file_open,
+    .bdrv_close               = nvme_close,
+    .bdrv_getlength           = nvme_getlength,
+
+    .bdrv_co_preadv           = nvme_co_preadv,
+    .bdrv_co_pwritev          = nvme_co_pwritev,
+    .bdrv_co_flush_to_disk    = nvme_co_flush,
+    .bdrv_reopen_prepare      = nvme_reopen_prepare,
+
+    .bdrv_co_get_block_status = nvme_co_get_block_status,
+
+    .bdrv_refresh_filename    = nvme_refresh_filename,
+    .bdrv_refresh_limits      = nvme_refresh_limits,
+
+    .bdrv_detach_aio_context  = nvme_detach_aio_context,
+    .bdrv_attach_aio_context  = nvme_attach_aio_context,
+
+    .bdrv_io_plug             = nvme_aio_plug,
+    .bdrv_io_unplug           = nvme_aio_unplug,
+};
+
+static void bdrv_nvme_init(void)
+{
+    bdrv_register(&bdrv_nvme);
+}
+
+block_init(bdrv_nvme_init);
diff --git a/block/trace-events b/block/trace-events
index 11c8d5f590..02dd80ff0c 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -124,3 +124,24 @@  vxhs_open_iio_open(const char *host) "Failed to connect to storage agent on host
 vxhs_parse_uri_hostinfo(char *host, int port) "Host: IP %s, Port %d"
 vxhs_close(char *vdisk_guid) "Closing vdisk %s"
 vxhs_get_creds(const char *cacert, const char *client_key, const char *client_cert) "cacert %s, client_key %s, client_cert %s"
+
+# block/nvme.c
+nvme_kick(void *s, int queue) "s %p queue %d"
+nvme_dma_flush_queue_wait(void *s) "s %p"
+nvme_error(int cmd_specific, int sq_head, int sqid, int cid, int status) "cmd_specific %d sq_head %d sqid %d cid %d status 0x%x"
+nvme_process_completion(void *s, int index, int inflight) "s %p queue %d inflight %d"
+nvme_process_completion_queue_busy(void *s, int index) "s %p queue %d"
+nvme_complete_command(void *s, int index, int cid) "s %p queue %d cid %d"
+nvme_submit_command(void *s, int index, int cid) "s %p queue %d cid %d"
+nvme_submit_command_raw(int c0, int c1, int c2, int c3, int c4, int c5, int c6, int c7) "%02x %02x %02x %02x %02x %02x %02x %02x"
+nvme_handle_event(void *s) "s %p"
+nvme_poll_cb(void *s) "s %p"
+nvme_prw_aligned(void *s, int is_write, uint64_t offset, uint64_t bytes, int flags, int niov) "s %p is_write %d offset %"PRId64" bytes %"PRId64" flags %d niov %d"
+nvme_qiov_unaligned(const void *qiov, int n, void *base, size_t size, int align) "qiov %p n %d base %p size 0x%zx align 0x%x"
+nvme_prw_buffered(void *s, uint64_t offset, uint64_t bytes, int niov, int is_write) "s %p offset %"PRId64" bytes %"PRId64" niov %d is_write %d"
+nvme_rw_done(void *s, int is_write, uint64_t offset, uint64_t bytes, int ret) "s %p is_write %d offset %"PRId64" bytes %"PRId64" ret %d"
+nvme_dma_map_flush(void *s) "s %p"
+nvme_free_req_queue_wait(void *q) "q %p"
+nvme_cmd_map_qiov(void *s, void *cmd, void *req, void *qiov, int entries) "s %p cmd %p req %p qiov %p entries %d"
+nvme_cmd_map_qiov_pages(void *s, int i, uint64_t page) "s %p page[%d] 0x%"PRIx64
+nvme_cmd_map_qiov_iov(void *s, int i, void *page, int pages) "s %p iov[%d] %p pages %d"