[RFC,v1,3/3] migration/savevm: use qemu-file buffered mode for non-cached bdrv

Message ID 1586776334-641239-4-git-send-email-dplotnikov@virtuozzo.com
State New
Series qemu-file writing performance improving

Commit Message

Denis Plotnikov April 13, 2020, 11:12 a.m. UTC
This makes saving internal snapshots of qcow2 images that are placed
on an HDD and opened with the O_DIRECT flag 4 times faster.

The test:
   creates a 500M internal snapshot for a qcow2 image placed on an HDD
Result times:
   with the patch: ~6 sec
   without the patch: ~24 sec

This happens because saving an internal snapshot produces a lot of
pwrites: the internal buffers are flushed with non-aligned io vectors,
and qemu_fflush is called directly.

To fix this, we introduce an internal buffer that is pointer- and
size-aligned. Most of the time the buffer is flushed only when it is
full, regardless of direct calls to qemu_fflush. When the buffer is
full, it is written out asynchronously.

This gives us a couple of advantages leading to the performance
improvement:

1. because the buffers are pointer- and size-aligned, we can use an
   asynchronous OS write syscall, such as io_submit
2. while one buffer is being written, another buffer is filled with
   data (see the sketch below)
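
To make the double-buffering idea concrete, here is a minimal,
self-contained C sketch. It is not the code from this series: all
names (AlignedBuf, write_buffered, BUF_ALIGN, BUF_SIZE, ...) are
hypothetical, and POSIX aio_write stands in for io_submit. While one
aligned buffer is in flight, the other keeps accepting data, so
filling and writing overlap.

/*
 * Double-buffering sketch: two pointer- and size-aligned buffers;
 * while one is being written asynchronously, the other is filled.
 * Hypothetical names; POSIX AIO used instead of io_submit for
 * brevity (link with -lrt on glibc).
 */
#include <aio.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define BUF_ALIGN 4096                /* O_DIRECT alignment */
#define BUF_SIZE  (1 << 20)           /* 1 MiB per buffer */

typedef struct AlignedBuf {
    uint8_t *data;
    size_t used;
    struct aiocb cb;
    bool in_flight;
} AlignedBuf;

static void buf_init(AlignedBuf *b)
{
    if (posix_memalign((void **)&b->data, BUF_ALIGN, BUF_SIZE)) {
        abort();
    }
    b->used = 0;
    b->in_flight = false;
}

/* Start an asynchronous write of the full buffer. */
static void buf_submit(AlignedBuf *b, int fd, off_t off)
{
    memset(&b->cb, 0, sizeof(b->cb));
    b->cb.aio_fildes = fd;
    b->cb.aio_buf    = b->data;
    b->cb.aio_nbytes = b->used;       /* == BUF_SIZE, aligned */
    b->cb.aio_offset = off;
    aio_write(&b->cb);
    b->in_flight = true;
}

/* Wait until the buffer's previous write (if any) has completed. */
static void buf_wait(AlignedBuf *b)
{
    const struct aiocb *list[1] = { &b->cb };

    if (b->in_flight) {
        while (aio_error(&b->cb) == EINPROGRESS) {
            aio_suspend(list, 1, NULL);
        }
        aio_return(&b->cb);
        b->in_flight = false;
    }
}

/* Append len bytes; a buffer is flushed only when it becomes full. */
static void write_buffered(int fd, AlignedBuf buf[2], int *cur,
                           off_t *off, const uint8_t *p, size_t len)
{
    while (len > 0) {
        AlignedBuf *b = &buf[*cur];
        size_t n = BUF_SIZE - b->used;

        if (n > len) {
            n = len;
        }
        memcpy(b->data + b->used, p, n);
        b->used += n;
        p += n;
        len -= n;

        if (b->used == BUF_SIZE) {
            buf_submit(b, fd, *off);  /* does not block */
            *off += BUF_SIZE;
            *cur ^= 1;                /* switch to the other buffer */
            buf_wait(&buf[*cur]);     /* its old write must be done */
            buf[*cur].used = 0;
        }
    }
}

The point the sketch illustrates: because every submitted write is
exactly BUF_SIZE bytes from a BUF_ALIGN-aligned pointer, it satisfies
the O_DIRECT alignment requirements and can run asynchronously while
the other buffer is being filled. In this series the buffered mode
itself lives in QEMUFile (patches 1-2); this patch only enables it
for non-cached block backends.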

Signed-off-by: Denis Plotnikov <dplotnikov@virtuozzo.com>
---
 migration/savevm.c | 38 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)

Patch

diff --git a/migration/savevm.c b/migration/savevm.c
index c00a680..db0cac9 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -63,6 +63,7 @@ 
 #include "migration/colo.h"
 #include "qemu/bitmap.h"
 #include "net/announce.h"
+#include "block/block_int.h"
 
 const unsigned int postcopy_ram_discard_version = 0;
 
@@ -153,6 +154,12 @@  static int bdrv_fclose(void *opaque, Error **errp)
     return bdrv_flush(opaque);
 }
 
+static bool qemu_file_is_buffered(void *opaque)
+{
+    BlockDriverState *bs = (BlockDriverState *) opaque;
+    return !!(bs->open_flags & BDRV_O_NOCACHE);
+}
+
 static const QEMUFileOps bdrv_read_ops = {
     .get_buffer = block_get_buffer,
     .close =      bdrv_fclose
@@ -160,7 +167,8 @@  static const QEMUFileOps bdrv_read_ops = {
 
 static const QEMUFileOps bdrv_write_ops = {
     .writev_buffer  = block_writev_buffer,
-    .close          = bdrv_fclose
+    .close          = bdrv_fclose,
+    .enable_buffered = qemu_file_is_buffered
 };
 
 static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable)
@@ -2624,7 +2632,7 @@  int qemu_load_device_state(QEMUFile *f)
     return 0;
 }
 
-int save_snapshot(const char *name, Error **errp)
+static int coroutine_fn save_snapshot_fn(const char *name, Error **errp)
 {
     BlockDriverState *bs, *bs1;
     QEMUSnapshotInfo sn1, *sn = &sn1, old_sn1, *old_sn = &old_sn1;
@@ -2747,6 +2755,32 @@  int save_snapshot(const char *name, Error **errp)
     return ret;
 }
 
+typedef struct SaveVMParams {
+    const char *name;
+    Error **errp;
+    int ret;
+} SaveVMParams;
+
+static void coroutine_fn save_snapshot_entry(void *opaque)
+{
+    SaveVMParams *p = (SaveVMParams *) opaque;
+    p->ret = save_snapshot_fn(p->name, p->errp);
+}
+
+int save_snapshot(const char *name, Error **errp)
+{
+    SaveVMParams p = (SaveVMParams) {
+        .name = name,
+        .errp = errp,
+        .ret = -EINPROGRESS,
+    };
+
+    Coroutine *co = qemu_coroutine_create(save_snapshot_entry, &p);
+    aio_co_enter(qemu_get_aio_context(), co);
+    AIO_WAIT_WHILE(qemu_get_aio_context(), p.ret == -EINPROGRESS);
+    return p.ret;
+}
+
 void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
                                 Error **errp)
 {