Patchwork [V2,2/3] virtio-9p: Wait for 9p operations to complete before migration and savevm.

login
register
mail settings
Submitter Benoît Canet
Date April 11, 2013, 2:41 p.m.
Message ID <1365691268-18036-3-git-send-email-benoit@irqsave.net>
Download mbox | patch
Permalink /patch/235763/
State New
Headers show

Comments

Benoît Canet - April 11, 2013, 2:41 p.m.
The completion status is put in the virtio ring buffer which
will be sent to the guest on resume by the vring vmstate code.

This patch is a rewrite from the one written by Aneesh Kumar.

Signed-off-by: Benoit Canet <benoit@irqsave.net>
---
 hw/9pfs/virtio-9p-device.c |    2 ++
 hw/9pfs/virtio-9p.c        |   75 ++++++++++++++++++++++++++++++++++++++++++++
 hw/9pfs/virtio-9p.h        |    2 ++
 3 files changed, 79 insertions(+)
Paolo Bonzini - April 11, 2013, 2:52 p.m.
Il 11/04/2013 16:41, Benoît Canet ha scritto:
> The completion status is put in the virtio ring buffer which
> will be send to the guest on resume by the viring vmstate code
> 
> This patch is a rewrite from the one written by Aneesh Kumar.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>

Patch

diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index 2af4858..feaccd3 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -198,6 +198,8 @@  VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
     V9fsPath path;
     static int virtio_9p_id;
 
+    v9fs_init_vmstate_io_drain();
+
     s = (V9fsState *)virtio_common_init("virtio-9p",
                                     VIRTIO_ID_9P,
                                     sizeof(struct virtio_9p_config)+
diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
index 5cc4c92..66322ee 100644
--- a/hw/9pfs/virtio-9p.c
+++ b/hw/9pfs/virtio-9p.c
@@ -25,6 +25,11 @@  int open_fd_hw;
 int total_open_fd;
 static int open_fd_rc;
 
+static int32_t pending_initialized;
+static int32_t pending_requests;
+static QemuCond pending_cond;
+static QemuMutex pending_mutex;
+
 enum {
     Oread   = 0x00,
     Owrite  = 0x01,
@@ -37,6 +42,71 @@  enum {
     Oappend = 0x80,
 };
 
+static void v9fs_vm_change_state_handler(void *opaque, int running,
+                                         RunState state)
+{
+    if (!pending_initialized) {
+        return;
+    }
+
+    if (running) {
+        return;
+    }
+
+    if (state != RUN_STATE_FINISH_MIGRATE && state != RUN_STATE_SAVE_VM) {
+        return;
+    }
+
+    qemu_mutex_lock(&pending_mutex);
+    while (pending_requests) {
+        /* At this point ticks and vcpus will be stopped so we can safely
+         * release the BQL so pending 9p callbacks will be executed and the
+         * condition signaled.
+         */
+        qemu_mutex_unlock_iothread();
+        qemu_cond_wait(&pending_cond, &pending_mutex);
+        qemu_mutex_lock_iothread();
+    }
+    qemu_mutex_unlock(&pending_mutex);
+}
+
+void v9fs_init_vmstate_io_drain(void)
+{
+    if (pending_initialized) {
+        return;
+    }
+
+    pending_initialized = 1;
+    qemu_mutex_init(&pending_mutex);
+    qemu_cond_init(&pending_cond);
+    qemu_add_vm_change_state_handler(v9fs_vm_change_state_handler, NULL);
+}
+
+static void v9fs_inc_pending_requests(void)
+{
+    if (!pending_initialized) {
+        return;
+    }
+
+    qemu_mutex_lock(&pending_mutex);
+    pending_requests++;
+    qemu_mutex_unlock(&pending_mutex);
+}
+
+static void v9fs_dec_pending_requests(void)
+{
+    if (!pending_initialized) {
+        return;
+    }
+
+    qemu_mutex_lock(&pending_mutex);
+    pending_requests--;
+    if (!pending_requests) {
+        qemu_cond_signal(&pending_cond);
+    }
+    qemu_mutex_unlock(&pending_mutex);
+}
+
 static int omode_to_uflags(int8_t mode)
 {
     int ret = 0;
@@ -637,6 +707,8 @@  static void complete_pdu(V9fsState *s, V9fsPDU *pdu, ssize_t len)
     qemu_co_queue_next(&pdu->complete);
 
     free_pdu(s, pdu);
+
+    v9fs_dec_pending_requests();
 }
 
 static mode_t v9mode_to_mode(uint32_t mode, V9fsString *extension)
@@ -3240,6 +3312,9 @@  static void submit_pdu(V9fsState *s, V9fsPDU *pdu)
     if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) {
         handler = v9fs_fs_ro;
     }
+
+    v9fs_inc_pending_requests();
+
     co = qemu_coroutine_create(handler);
     qemu_coroutine_enter(co, pdu);
 }
diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h
index 52b1c69..604502a 100644
--- a/hw/9pfs/virtio-9p.h
+++ b/hw/9pfs/virtio-9p.h
@@ -401,4 +401,6 @@  extern int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
 #define pdu_unmarshal(pdu, offset, fmt, args...)  \
     v9fs_unmarshal(pdu->elem.out_sg, pdu->elem.out_num, offset, 1, fmt, ##args)
 
+void v9fs_init_vmstate_io_drain(void);
+
 #endif