diff mbox

[v2] migration: re-active images while migration been canceled after inactive them

Message ID 1485244792-11248-1-git-send-email-zhang.zhanghailiang@huawei.com
State New
Headers show

Commit Message

Zhanghailiang Jan. 24, 2017, 7:59 a.m. UTC
commit fe904ea8242cbae2d7e69c052c754b8f5f1ba1d6 fixed a case
which migration aborted QEMU because it didn't regain the control
of images while some errors happened.

Actually, there are another two cases that can trigger the same error report:
" bdrv_co_do_pwritev: Assertion `!(bs->open_flags & 0x0800)' failed",

Case 1, codes path:
migration_thread()
    migration_completion()
        bdrv_inactivate_all() ----------------> inactivate images
        qemu_savevm_state_complete_precopy()
            socket_writev_buffer() --------> error because destination fails
                qemu_fflush() ----------------> set error on migration stream
-> qmp_migrate_cancel() ----------------> user cancelled migration concurrently
    -> migrate_set_state() ------------------> set migrate CANCELLING
    migration_completion() -----------------> go on to fail_invalidate
	if (s->state == MIGRATION_STATUS_ACTIVE) -> Jump this branch

Case 2, codes path:
migration_thread()
    migration_completion()
        bdrv_inactivate_all() ----------------> inactivate images
    migration_completion() finished
-> qmp_migrate_cancel() ---------------> user cancelled migration concurrently
    qemu_mutex_lock_iothread();
    qemu_bh_schedule (s->cleanup_bh);

As we can see from above, qmp_migrate_cancel can slip in whenever
migration_thread does not hold the global lock. If this happens after
bdrv_inactivate_all() has been called, the above error report will appear.

To prevent this, we can call bdrv_invalidate_cache_all() in qmp_migrate_cancel()
directly if we find that the images have become inactive.

Besides, the bdrv_invalidate_cache_all() call in migration_completion() doesn't have
the protection of the big lock; fix it by adding the missing qemu_mutex_lock_iothread().

Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
---
v2:
 - Fix a bug introduced by commit fe904 which didn't take the big lock
   before calling bdrv_invalidate_cache_all. (Suggested by Dave)
---
 include/migration/migration.h |  3 +++
 migration/migration.c         | 15 +++++++++++++++
 2 files changed, 18 insertions(+)

Comments

Dr. David Alan Gilbert Jan. 24, 2017, 10:47 a.m. UTC | #1
* zhanghailiang (zhang.zhanghailiang@huawei.com) wrote:
> commit fe904ea8242cbae2d7e69c052c754b8f5f1ba1d6 fixed a case
> which migration aborted QEMU because it didn't regain the control
> of images while some errors happened.
> 
> Actually, there are another two cases can trigger the same error reports:
> " bdrv_co_do_pwritev: Assertion `!(bs->open_flags & 0x0800)' failed",
> 
> Case 1, codes path:
> migration_thread()
>     migration_completion()
>         bdrv_inactivate_all() ----------------> inactivate images
>         qemu_savevm_state_complete_precopy()
>             socket_writev_buffer() --------> error because destination fails
>                 qemu_fflush() ----------------> set error on migration stream
> -> qmp_migrate_cancel() ----------------> user cancelled migration concurrently
>     -> migrate_set_state() ------------------> set migrate CANCELLIN
>     migration_completion() -----------------> go on to fail_invalidate
> 	if (s->state == MIGRATION_STATUS_ACTIVE) -> Jump this branch
> 
> Case 2, codes path:
> migration_thread()
>     migration_completion()
>         bdrv_inactivate_all() ----------------> inactivate images
>     migreation_completion() finished
> -> qmp_migrate_cancel() ---------------> user cancelled migration concurrently
>     qemu_mutex_lock_iothread();
>     qemu_bh_schedule (s->cleanup_bh);
> 
> As we can see from above, qmp_migrate_cancel can slip in whenever
> migration_thread does not hold the global lock. If this happens after
> bdrv_inactive_all() been called, the above error reports will appear.
> 
> To prevent this, we can call bdrv_invalidate_cache_all() in qmp_migrate_cancel()
> directly if we find images become inactive.
> 
> Besides, bdrv_invalidate_cache_all() in migration_completion() doesn't have the
> protection of big lock, fix it by add the missing qemu_mutex_lock_iothread();
> 
> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>

Yes, I think that's better:

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

> ---
> v2:
>  - Fix a bug introduced by commit fe904 which didn't get big lock
>    before call bdrv_invalidate_cache_all. (Suggested by Dave)
> ---
>  include/migration/migration.h |  3 +++
>  migration/migration.c         | 15 +++++++++++++++
>  2 files changed, 18 insertions(+)
> 
> diff --git a/include/migration/migration.h b/include/migration/migration.h
> index c309d23..2d5b724 100644
> --- a/include/migration/migration.h
> +++ b/include/migration/migration.h
> @@ -177,6 +177,9 @@ struct MigrationState
>      /* Flag set once the migration thread is running (and needs joining) */
>      bool migration_thread_running;
>  
> +    /* Flag set once the migration thread called bdrv_inactivate_all */
> +    bool block_inactive;
> +
>      /* Queue of outstanding page requests from the destination */
>      QemuMutex src_page_req_mutex;
>      QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests;
> diff --git a/migration/migration.c b/migration/migration.c
> index f498ab8..5b50afe 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -1006,6 +1006,16 @@ static void migrate_fd_cancel(MigrationState *s)
>      if (s->state == MIGRATION_STATUS_CANCELLING && f) {
>          qemu_file_shutdown(f);
>      }
> +    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
> +        Error *local_err = NULL;
> +
> +        bdrv_invalidate_cache_all(&local_err);
> +        if (local_err) {
> +            error_report_err(local_err);
> +        } else {
> +            s->block_inactive = false;
> +        }
> +    }
>  }
>  
>  void add_migration_state_change_notifier(Notifier *notify)
> @@ -1705,6 +1715,7 @@ static void migration_completion(MigrationState *s, int current_active_state,
>              if (ret >= 0) {
>                  qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
>                  qemu_savevm_state_complete_precopy(s->to_dst_file, false);
> +                s->block_inactive = true;
>              }
>          }
>          qemu_mutex_unlock_iothread();
> @@ -1755,10 +1766,14 @@ fail_invalidate:
>      if (s->state == MIGRATION_STATUS_ACTIVE) {
>          Error *local_err = NULL;
>  
> +        qemu_mutex_lock_iothread();
>          bdrv_invalidate_cache_all(&local_err);
>          if (local_err) {
>              error_report_err(local_err);
> +        } else {
> +            s->block_inactive = false;
>          }
> +        qemu_mutex_unlock_iothread();
>      }
>  
>  fail:
> -- 
> 1.8.3.1
> 
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
Dr. David Alan Gilbert Jan. 24, 2017, 11:52 a.m. UTC | #2
* zhanghailiang (zhang.zhanghailiang@huawei.com) wrote:
> commit fe904ea8242cbae2d7e69c052c754b8f5f1ba1d6 fixed a case
> which migration aborted QEMU because it didn't regain the control
> of images while some errors happened.
> 
> Actually, there are another two cases can trigger the same error reports:
> " bdrv_co_do_pwritev: Assertion `!(bs->open_flags & 0x0800)' failed",

Queued.

Dave

> 
> Case 1, codes path:
> migration_thread()
>     migration_completion()
>         bdrv_inactivate_all() ----------------> inactivate images
>         qemu_savevm_state_complete_precopy()
>             socket_writev_buffer() --------> error because destination fails
>                 qemu_fflush() ----------------> set error on migration stream
> -> qmp_migrate_cancel() ----------------> user cancelled migration concurrently
>     -> migrate_set_state() ------------------> set migrate CANCELLIN
>     migration_completion() -----------------> go on to fail_invalidate
> 	if (s->state == MIGRATION_STATUS_ACTIVE) -> Jump this branch
> 
> Case 2, codes path:
> migration_thread()
>     migration_completion()
>         bdrv_inactivate_all() ----------------> inactivate images
>     migreation_completion() finished
> -> qmp_migrate_cancel() ---------------> user cancelled migration concurrently
>     qemu_mutex_lock_iothread();
>     qemu_bh_schedule (s->cleanup_bh);
> 
> As we can see from above, qmp_migrate_cancel can slip in whenever
> migration_thread does not hold the global lock. If this happens after
> bdrv_inactive_all() been called, the above error reports will appear.
> 
> To prevent this, we can call bdrv_invalidate_cache_all() in qmp_migrate_cancel()
> directly if we find images become inactive.
> 
> Besides, bdrv_invalidate_cache_all() in migration_completion() doesn't have the
> protection of big lock, fix it by add the missing qemu_mutex_lock_iothread();
> 
> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
> ---
> v2:
>  - Fix a bug introduced by commit fe904 which didn't get big lock
>    before call bdrv_invalidate_cache_all. (Suggested by Dave)
> ---
>  include/migration/migration.h |  3 +++
>  migration/migration.c         | 15 +++++++++++++++
>  2 files changed, 18 insertions(+)
> 
> diff --git a/include/migration/migration.h b/include/migration/migration.h
> index c309d23..2d5b724 100644
> --- a/include/migration/migration.h
> +++ b/include/migration/migration.h
> @@ -177,6 +177,9 @@ struct MigrationState
>      /* Flag set once the migration thread is running (and needs joining) */
>      bool migration_thread_running;
>  
> +    /* Flag set once the migration thread called bdrv_inactivate_all */
> +    bool block_inactive;
> +
>      /* Queue of outstanding page requests from the destination */
>      QemuMutex src_page_req_mutex;
>      QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests;
> diff --git a/migration/migration.c b/migration/migration.c
> index f498ab8..5b50afe 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -1006,6 +1006,16 @@ static void migrate_fd_cancel(MigrationState *s)
>      if (s->state == MIGRATION_STATUS_CANCELLING && f) {
>          qemu_file_shutdown(f);
>      }
> +    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
> +        Error *local_err = NULL;
> +
> +        bdrv_invalidate_cache_all(&local_err);
> +        if (local_err) {
> +            error_report_err(local_err);
> +        } else {
> +            s->block_inactive = false;
> +        }
> +    }
>  }
>  
>  void add_migration_state_change_notifier(Notifier *notify)
> @@ -1705,6 +1715,7 @@ static void migration_completion(MigrationState *s, int current_active_state,
>              if (ret >= 0) {
>                  qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
>                  qemu_savevm_state_complete_precopy(s->to_dst_file, false);
> +                s->block_inactive = true;
>              }
>          }
>          qemu_mutex_unlock_iothread();
> @@ -1755,10 +1766,14 @@ fail_invalidate:
>      if (s->state == MIGRATION_STATUS_ACTIVE) {
>          Error *local_err = NULL;
>  
> +        qemu_mutex_lock_iothread();
>          bdrv_invalidate_cache_all(&local_err);
>          if (local_err) {
>              error_report_err(local_err);
> +        } else {
> +            s->block_inactive = false;
>          }
> +        qemu_mutex_unlock_iothread();
>      }
>  
>  fail:
> -- 
> 1.8.3.1
> 
> 
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
Stefan Hajnoczi Jan. 24, 2017, 12:53 p.m. UTC | #3
On Tue, Jan 24, 2017 at 03:59:52PM +0800, zhanghailiang wrote:
> commit fe904ea8242cbae2d7e69c052c754b8f5f1ba1d6 fixed a case
> which migration aborted QEMU because it didn't regain the control
> of images while some errors happened.
> 
> Actually, there are another two cases can trigger the same error reports:
> " bdrv_co_do_pwritev: Assertion `!(bs->open_flags & 0x0800)' failed",
> 
> Case 1, codes path:
> migration_thread()
>     migration_completion()
>         bdrv_inactivate_all() ----------------> inactivate images
>         qemu_savevm_state_complete_precopy()
>             socket_writev_buffer() --------> error because destination fails
>                 qemu_fflush() ----------------> set error on migration stream
> -> qmp_migrate_cancel() ----------------> user cancelled migration concurrently
>     -> migrate_set_state() ------------------> set migrate CANCELLIN
>     migration_completion() -----------------> go on to fail_invalidate
> 	if (s->state == MIGRATION_STATUS_ACTIVE) -> Jump this branch
> 
> Case 2, codes path:
> migration_thread()
>     migration_completion()
>         bdrv_inactivate_all() ----------------> inactivate images
>     migreation_completion() finished
> -> qmp_migrate_cancel() ---------------> user cancelled migration concurrently
>     qemu_mutex_lock_iothread();
>     qemu_bh_schedule (s->cleanup_bh);
> 
> As we can see from above, qmp_migrate_cancel can slip in whenever
> migration_thread does not hold the global lock. If this happens after
> bdrv_inactive_all() been called, the above error reports will appear.
> 
> To prevent this, we can call bdrv_invalidate_cache_all() in qmp_migrate_cancel()
> directly if we find images become inactive.
> 
> Besides, bdrv_invalidate_cache_all() in migration_completion() doesn't have the
> protection of big lock, fix it by add the missing qemu_mutex_lock_iothread();
> 
> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
> ---
> v2:
>  - Fix a bug introduced by commit fe904 which didn't get big lock
>    before call bdrv_invalidate_cache_all. (Suggested by Dave)
> ---
>  include/migration/migration.h |  3 +++
>  migration/migration.c         | 15 +++++++++++++++
>  2 files changed, 18 insertions(+)

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
diff mbox

Patch

diff --git a/include/migration/migration.h b/include/migration/migration.h
index c309d23..2d5b724 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -177,6 +177,9 @@  struct MigrationState
     /* Flag set once the migration thread is running (and needs joining) */
     bool migration_thread_running;
 
+    /* Flag set once the migration thread called bdrv_inactivate_all */
+    bool block_inactive;
+
     /* Queue of outstanding page requests from the destination */
     QemuMutex src_page_req_mutex;
     QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests;
diff --git a/migration/migration.c b/migration/migration.c
index f498ab8..5b50afe 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1006,6 +1006,16 @@  static void migrate_fd_cancel(MigrationState *s)
     if (s->state == MIGRATION_STATUS_CANCELLING && f) {
         qemu_file_shutdown(f);
     }
+    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
+        Error *local_err = NULL;
+
+        bdrv_invalidate_cache_all(&local_err);
+        if (local_err) {
+            error_report_err(local_err);
+        } else {
+            s->block_inactive = false;
+        }
+    }
 }
 
 void add_migration_state_change_notifier(Notifier *notify)
@@ -1705,6 +1715,7 @@  static void migration_completion(MigrationState *s, int current_active_state,
             if (ret >= 0) {
                 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
                 qemu_savevm_state_complete_precopy(s->to_dst_file, false);
+                s->block_inactive = true;
             }
         }
         qemu_mutex_unlock_iothread();
@@ -1755,10 +1766,14 @@  fail_invalidate:
     if (s->state == MIGRATION_STATUS_ACTIVE) {
         Error *local_err = NULL;
 
+        qemu_mutex_lock_iothread();
         bdrv_invalidate_cache_all(&local_err);
         if (local_err) {
             error_report_err(local_err);
+        } else {
+            s->block_inactive = false;
         }
+        qemu_mutex_unlock_iothread();
     }
 
 fail: