
[v2,22/23] block migration: Add support for restore progress reporting

Message ID 4B152621.9020108@siemens.com

Commit Message

Jan Kiszka Dec. 1, 2009, 2:20 p.m. UTC
Inject a progress report, in percent, into the block live stream. This
can be read out and displayed easily on restore.
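
For illustration, a minimal sketch of the framing this relies on (the
helper names are hypothetical, not part of the patch): every
block-migration record starts with a big-endian 64-bit word, and since
sector addresses are chunk-aligned, the low BDRV_SECTOR_BITS bits are
free to carry flag bits. A progress record reuses the address bits for
the percentage.

    #include <stdint.h>

    #define BDRV_SECTOR_BITS        9     /* 512-byte sectors, as in QEMU */
    #define BLK_MIG_FLAG_PROGRESS   0x04  /* from the patch below */

    /* Pack a completion percentage into one stream word. */
    static uint64_t progress_word(int percent)
    {
        return ((uint64_t)percent << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS;
    }

    /* Unpack it on the restore side; the shift drops the flag bits. */
    static int progress_from_word(uint64_t word)
    {
        return (int)(word >> BDRV_SECTOR_BITS);
    }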

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---

Changes in v2:
 - Print banner only if there is really some block device to restore

 block-migration.c |   30 ++++++++++++++++++++++--------
 1 files changed, 22 insertions(+), 8 deletions(-)

Comments

Pierre Riteau Dec. 1, 2009, 5:01 p.m. UTC | #1
On Dec. 1, 2009, at 15:20, Jan Kiszka wrote:

> Inject a progress report, in percent, into the block live stream. This
> can be read out and displayed easily on restore.


I guess that this patch only reports the percentage for the initial bulk copy of the image.
I haven't tested this scenario, but the next phase, sending dirty blocks, can be quite long too if the guest does a lot of I/O.
Won't it give a wrong impression to the user when qemu says "Completed 100%" but disk migration continues catching up for a while?
Jan Kiszka Dec. 1, 2009, 5:17 p.m. UTC | #2
Pierre Riteau wrote:
> On Dec. 1, 2009, at 15:20, Jan Kiszka wrote:
> 
>> Inject a progress report, in percent, into the block live stream. This
>> can be read out and displayed easily on restore.
> 
> 
> I guess that this patch only reports the percentage for the initial bulk copy of the image.
> I haven't tested this scenario, but the next phase, sending dirty blocks, can be quite long too if the guest does a lot of I/O.
> Won't it give a wrong impression to the user when qemu says "Completed 100%" but disk migration continues catching up for a while?

It does give a wrong impression (as there is also a wrong behavior) ATM.

But the plan is to update the number of pending blocks during the sync.
Theoretically, this progress value could even go backwards if (many)
more blocks become dirty than we are able to write over a certain
period.

Effectively, the total disk size increases during the migration due to
dirty blocks being added. Instead of carrying this updated number over
to the receiving side, I want to let the sender do the calculation and
only transfer the result inside the stream (as this is only about
visualization).
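
To make the "going backwards" point concrete, a hedged sketch of such a
sender-side calculation (the function and its parameters are made up
for illustration, not taken from the patch):

    #include <stdint.h>

    /* The denominator grows while the guest keeps dirtying blocks, so
     * the reported percentage can stall, or even drop, whenever blocks
     * are dirtied faster than they are written out. */
    static int calc_progress(int64_t sectors_sent, int64_t bulk_sector_sum,
                             int64_t dirty_sectors_added)
    {
        int64_t total = bulk_sector_sum + dirty_sectors_added;

        if (total == 0) {
            return 100; /* nothing to migrate */
        }
        return (int)(sectors_sent * 100 / total);
    }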

Jan

PS: I think I just found the e1000 migration issue, which turned out to
affect all NICs.

Patch

diff --git a/block-migration.c b/block-migration.c
index 7510923..a066f19 100644
--- a/block-migration.c
+++ b/block-migration.c
@@ -23,6 +23,7 @@ 
 
 #define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
 #define BLK_MIG_FLAG_EOS                0x02
+#define BLK_MIG_FLAG_PROGRESS           0x04
 
 #define MAX_IS_ALLOCATED_SEARCH 65536
 #define MAX_BLOCKS_READ 10000
@@ -70,7 +71,7 @@  typedef struct BlkMigState {
     int read_done;
     int transferred;
     int64_t total_sector_sum;
-    int64_t print_completion;
+    int prev_progress;
 } BlkMigState;
 
 static BlkMigState block_mig_state;
@@ -226,7 +227,7 @@  static void init_blk_migration(Monitor *mon, QEMUFile *f)
     block_mig_state.read_done = 0;
     block_mig_state.transferred = 0;
     block_mig_state.total_sector_sum = 0;
-    block_mig_state.print_completion = 0;
+    block_mig_state.prev_progress = -1;
 
     for (bs = bdrv_first; bs != NULL; bs = bs->next) {
         if (bs->type == BDRV_TYPE_HD) {
@@ -257,6 +258,7 @@  static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f, int is_async)
 {
     int64_t completed_sector_sum = 0;
     BlkMigDevState *bmds;
+    int progress;
     int ret = 0;
 
     QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
@@ -273,13 +275,13 @@  static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f, int is_async)
         }
     }
 
-    if (completed_sector_sum >= block_mig_state.print_completion) {
-        monitor_printf(mon, "Completed %" PRId64 " %%\r",
-                       completed_sector_sum * 100 /
-                       block_mig_state.total_sector_sum);
+    progress = completed_sector_sum * 100 / block_mig_state.total_sector_sum;
+    if (progress != block_mig_state.prev_progress) {
+        block_mig_state.prev_progress = progress;
+        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
+                         | BLK_MIG_FLAG_PROGRESS);
+        monitor_printf(mon, "Completed %d %%\r", progress);
         monitor_flush(mon);
-        block_mig_state.print_completion +=
-            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
     }
 
     return ret;
@@ -445,6 +447,9 @@  static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         blk_mig_save_dirty_blocks(mon, f);
         blk_mig_cleanup(mon);
 
+        /* report completion */
+        qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
+
         if (qemu_file_has_error(f)) {
             return 0;
         }
@@ -459,6 +464,7 @@  static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
 
 static int block_load(QEMUFile *f, void *opaque, int version_id)
 {
+    static int banner_printed;
     int len, flags;
     char device_name[256];
     int64_t addr;
@@ -490,6 +496,14 @@  static int block_load(QEMUFile *f, void *opaque, int version_id)
             bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);
 
             qemu_free(buf);
+        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
+            if (!banner_printed) {
+                printf("Receiving block device images\n");
+                banner_printed = 1;
+            }
+            printf("Completed %d %%%c", (int)addr,
+                   (addr == 100) ? '\n' : '\r');
+            fflush(stdout);
         } else if (!(flags & BLK_MIG_FLAG_EOS)) {
             fprintf(stderr, "Unknown flags\n");
             return -EINVAL;