[21/27] migration: stop all cpus correctly

Message ID: 1343155012-26316-22-git-send-email-quintela@redhat.com
State: New

Commit Message

Juan Quintela July 24, 2012, 6:36 p.m. UTC
You can only stop all cpus from the iothread or a vcpu.  As we want
to do it from the migration_thread, we need to do this dance with the
bottom-half handlers.

This patch is a request for ideas.  I can move this function to cpus.c,
but wondered if there is an easier way of doing this (a condensed sketch
of the pattern follows the diffstat below).

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration.c |   46 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 34 insertions(+), 12 deletions(-)
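
For reference, the handoff the patch relies on is: schedule a bottom half so
that vm_stop() runs in the iothread, then block the migration thread on a
condition variable until the bottom half has fired.  Below is a minimal
sketch condensed from the diff; the helper name stop_vm_from_migration_thread
is illustrative only (in the patch the same code is open-coded in
migrate_fd_put_ready()), and it assumes the headers already included by
migration.c plus qemu-thread.h, which this patch adds for QemuCond/QemuMutex.

/* Declared in cpus.c; the patch pulls it in with an extern declaration. */
extern QemuMutex qemu_global_mutex;

static QemuCond migrate_vm_stop_cond;

/* Runs in the iothread: actually stop the VM, then wake the waiter. */
static void migrate_vm_stop(void *opaque)
{
    QEMUBH **bh = opaque;

    vm_stop(RUN_STATE_FINISH_MIGRATE);
    qemu_bh_delete(*bh);                      /* one-shot bottom half */
    qemu_cond_signal(&migrate_vm_stop_cond);
}

/* Called from the migration thread with qemu_global_mutex held.
 * &bh stays valid because we do not return until the BH has run. */
static void stop_vm_from_migration_thread(void)
{
    QEMUBH *bh;

    qemu_cond_init(&migrate_vm_stop_cond);
    bh = qemu_bh_new(migrate_vm_stop, &bh);
    qemu_bh_schedule(bh);
    /* qemu_cond_wait() drops qemu_global_mutex, which lets the iothread
     * run the bottom half, so the signal cannot be missed; the mutex is
     * reacquired before returning. */
    qemu_cond_wait(&migrate_vm_stop_cond, &qemu_global_mutex);
}

In the diff itself this handoff is only taken when old_vm_running is set;
otherwise migrate_fd_put_ready() still calls vm_stop_force_state() directly.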

Comments

Eric Blake July 26, 2012, 12:54 p.m. UTC | #1
On 07/24/2012 12:36 PM, Juan Quintela wrote:
> You can only stop all cpus from the iothread or an vcpu.  As we want
> to do it from the migration_thread, we need to do this dance with the
> botton handlers.

s/botton/bottom/ ?

> 
> This patch is a request for ideas.  I can move this function to cpus.c, but
> wondered if there is an easy way of doing this?

Sorry, I'm not able to help there.

Patch

diff --git a/migration.c b/migration.c
index cd1c11f..e3eec97 100644
--- a/migration.c
+++ b/migration.c
@@ -20,6 +20,7 @@ 
 #include "sysemu.h"
 #include "block.h"
 #include "qemu_socket.h"
+#include "qemu-thread.h"
 #include "block-migration.h"
 #include "qmp-commands.h"

@@ -326,14 +327,37 @@  ssize_t migrate_fd_put_buffer(MigrationState *s, const void *data,
     return ret;
 }

+static QemuCond migrate_vm_stop_cond;
+
+static void migrate_vm_stop(void *opaque)
+{
+    QEMUBH **bh = opaque;
+    vm_stop(RUN_STATE_FINISH_MIGRATE);
+    qemu_bh_delete(*bh);
+    qemu_cond_signal(&migrate_vm_stop_cond);
+}
+
+extern QemuMutex qemu_global_mutex;
+
 void migrate_fd_put_ready(MigrationState *s)
 {
     int ret;
+    static bool first_time = true;

     if (s->state != MIG_STATE_ACTIVE) {
         DPRINTF("put_ready returning because of non-active state\n");
         return;
     }
+    if (first_time) {
+        first_time = false;
+        DPRINTF("beginning savevm\n");
+        ret = qemu_savevm_state_begin(s->file, &s->params);
+        if (ret < 0) {
+            DPRINTF("failed, %d\n", ret);
+            migrate_fd_error(s);
+            return;
+        }
+    }

     DPRINTF("iterate\n");
     ret = qemu_savevm_state_iterate(s->file);
@@ -344,7 +368,16 @@  void migrate_fd_put_ready(MigrationState *s)

         DPRINTF("done iterating\n");
         qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
-        vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+        if (old_vm_running) {
+            QEMUBH *bh;
+
+            qemu_cond_init(&migrate_vm_stop_cond);
+            bh = qemu_bh_new(migrate_vm_stop, &bh);
+            qemu_bh_schedule(bh);
+            qemu_cond_wait(&migrate_vm_stop_cond, &qemu_global_mutex);
+        } else {
+            vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+        }

         if (qemu_savevm_state_complete(s->file) < 0) {
             migrate_fd_error(s);
@@ -430,19 +463,8 @@  bool migration_has_failed(MigrationState *s)

 void migrate_fd_connect(MigrationState *s)
 {
-    int ret;
-
     s->state = MIG_STATE_ACTIVE;
     qemu_fopen_ops_buffered(s);
-
-    DPRINTF("beginning savevm\n");
-    ret = qemu_savevm_state_begin(s->file, &s->params);
-    if (ret < 0) {
-        DPRINTF("failed, %d\n", ret);
-        migrate_fd_error(s);
-        return;
-    }
-    migrate_fd_put_ready(s);
 }

 static MigrationState *migrate_init(const MigrationParams *params)