[1/1] migration: fix deadlock

Message ID 1443440518-4384-1-git-send-email-den@openvz.org
State New

Commit Message

Denis V. Lunev Sept. 28, 2015, 11:41 a.m. UTC
Release the QEMU global mutex (QGM) before calling synchronize_rcu().
synchronize_rcu() waits for all readers to finish their critical
sections. There is at least one critical section in which we try
to take the QGM: address_space_rw() opens an RCU read-side critical
section inside which prepare_mmio_access() tries to acquire the QGM.

Both functions (migration_end() and migration_bitmap_extend())
are called from the main thread, which holds the QGM.

Thus there is a race condition that ends up in a deadlock:
main thread     working thread
Lock QGM                |
|             Call KVM_EXIT_IO handler
|                       |
|        Open RCU reader's critical section
Migration cleanup bh    |
|                       |
synchronize_rcu() is    |
waiting for readers     |
|            prepare_mmio_access() is waiting for QGM
  \                   /
         deadlock

The patch changes the bitmap freeing from a direct g_free() after
synchronize_rcu() to a deferred free inside a call_rcu() callback,
so the main thread never blocks waiting for readers.

Signed-off-by: Denis V. Lunev <den@openvz.org>
Reported-by: Igor Redko <redkoi@virtuozzo.com>
Tested-by: Igor Redko <redkoi@virtuozzo.com>
CC: Anna Melekhova <annam@virtuozzo.com>
CC: Juan Quintela <quintela@redhat.com>
CC: Amit Shah <amit.shah@redhat.com>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Wen Congyang <wency@cn.fujitsu.com>
---
 migration/ram.c | 44 +++++++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 17 deletions(-)
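
For reference, the fix is the classic RCU deferred-reclamation pattern.
Below is a minimal sketch using the names from the patch (the wrapper
function name and the include lines are illustrative, not part of the
patch itself):

    #include <glib.h>          /* g_free() */
    #include "qemu/rcu.h"      /* call_rcu(), struct rcu_head */
    #include "qemu/atomic.h"   /* atomic_rcu_set() */

    static struct BitmapRcu {
        struct rcu_head rcu;   /* linkage for call_rcu() */
        unsigned long *bmap;   /* the dirty-page bitmap itself */
    } *migration_bitmap_rcu;

    static void migration_bitmap_free(struct BitmapRcu *bmap)
    {
        g_free(bmap->bmap);
        g_free(bmap);
    }

    static void migration_end_sketch(void)   /* illustrative name */
    {
        struct BitmapRcu *bitmap = migration_bitmap_rcu;

        atomic_rcu_set(&migration_bitmap_rcu, NULL);   /* unpublish */
        if (bitmap) {
            /* The old code did: synchronize_rcu(); g_free(bitmap);
             * which blocks until every reader has left its critical
             * section, and therefore deadlocks if a reader is waiting
             * for the QGM.  call_rcu() instead queues the free to run
             * after a grace period, so the QGM holder never waits. */
            call_rcu(bitmap, migration_bitmap_free, rcu);
        }
    }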

Comments

Paolo Bonzini Sept. 28, 2015, 11:55 a.m. UTC | #1
On 28/09/2015 13:41, Denis V. Lunev wrote:
> Release the QEMU global mutex (QGM) before calling synchronize_rcu().
> [...]

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Amit Shah Sept. 29, 2015, 5:13 a.m. UTC | #2
There have been multiple versions of this patch on the list; can you
please annotate that this is v3 so that it supersedes the earlier v2?

Also, please include a changelog in the description in patch 0 so we
know what happened between the various versions.

Thanks,

On (Mon) 28 Sep 2015 [14:41:58], Denis V. Lunev wrote:
> Release the QEMU global mutex (QGM) before calling synchronize_rcu().
> [...]

		Amit
Denis V. Lunev Sept. 29, 2015, 5:43 a.m. UTC | #3
On 09/29/2015 08:13 AM, Amit Shah wrote:
> There have been multiple versions of this patch on the list; can you
> please annotate that this is v3 so that it supersedes the earlier v2?
>
> Also, please include a changelog in the description in patch 0 so we
> know what happened between the various versions.
>
> Thanks,
>
> On (Mon) 28 Sep 2015 [14:41:58], Denis V. Lunev wrote:
>> Release the QEMU global mutex (QGM) before calling synchronize_rcu().
>> [...]
> 		Amit
This one is correct. I am sorry, I missed the v3 marker in the subject.

Den
Denis V. Lunev Sept. 29, 2015, 5:46 a.m. UTC | #4
On 09/29/2015 08:13 AM, Amit Shah wrote:
> There have been multiple versions of this patch on the list; can you
> please annotate that this is v3 so that it supersedes the earlier v2?
>
> Also, please include a changelog in the description in patch 0 so we
> know what happened between the various versions.
>
> Thanks,
Changes from v2:
- switched from a single allocation with a bitmap_alloc_rcu() helper and
  g_free_rcu() to separate allocations of the bitmap and the RCU object,
  freed via call_rcu()

Changes from v1:
- releasing the mutex around synchronize_rcu() is replaced with g_free_rcu()

Den

P.S. Sorry for the inconvenience.
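
(A sketch to illustrate the changelog; the v2 layout below is reconstructed
from the description above, not taken from the list, so treat it as an
assumption. With a single allocation, QEMU's g_free_rcu() helper suffices;
once the bitmap and the RCU object are allocated separately, a dedicated
callback must free both, since g_free_rcu() would free only the wrapper
and leak the bitmap.)

    /* v2 (hypothetical reconstruction): header and bitmap in one
     * allocation, so a single g_free() via g_free_rcu() frees it all. */
    struct BitmapRcuV2 {
        struct rcu_head rcu;
        unsigned long bmap[];       /* flexible array member */
    };
    /* ... g_free_rcu(old_bitmap, rcu); */

    /* v3 (this patch): two allocations, freed by a dedicated callback:
     * ... call_rcu(old_bitmap, migration_bitmap_free, rcu); */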
Juan Quintela Sept. 30, 2015, 4:16 p.m. UTC | #5
"Denis V. Lunev" <den@openvz.org> wrote:
> Release the QEMU global mutex (QGM) before calling synchronize_rcu().
> [...]

Reviewed-by: Juan Quintela <quintela@redhat.com>

Applied to my tree.

PS: no, I still don't understand how we got so many RCU corner cases wrong.

Patch

diff --git a/migration/ram.c b/migration/ram.c
index 7f007e6..e7c5bcf 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -221,12 +221,16 @@  static RAMBlock *last_seen_block;
 /* This is the last block from where we have sent data */
 static RAMBlock *last_sent_block;
 static ram_addr_t last_offset;
-static unsigned long *migration_bitmap;
 static QemuMutex migration_bitmap_mutex;
 static uint64_t migration_dirty_pages;
 static uint32_t last_version;
 static bool ram_bulk_stage;
 
+static struct BitmapRcu {
+    struct rcu_head rcu;
+    unsigned long *bmap;
+} *migration_bitmap_rcu;
+
 struct CompressParam {
     bool start;
     bool done;
@@ -508,7 +512,7 @@  ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
 
     unsigned long next;
 
-    bitmap = atomic_rcu_read(&migration_bitmap);
+    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
     if (ram_bulk_stage && nr > base) {
         next = nr + 1;
     } else {
@@ -526,7 +530,7 @@  ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
 {
     unsigned long *bitmap;
-    bitmap = atomic_rcu_read(&migration_bitmap);
+    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
     migration_dirty_pages +=
         cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
 }
@@ -1024,17 +1028,22 @@  void free_xbzrle_decoded_buf(void)
     xbzrle_decoded_buf = NULL;
 }
 
+static void migration_bitmap_free(struct BitmapRcu *bmap)
+{
+    g_free(bmap->bmap);
+    g_free(bmap);
+}
+
 static void migration_end(void)
 {
     /* caller have hold iothread lock or is in a bh, so there is
      * no writing race against this migration_bitmap
      */
-    unsigned long *bitmap = migration_bitmap;
-    atomic_rcu_set(&migration_bitmap, NULL);
+    struct BitmapRcu *bitmap = migration_bitmap_rcu;
+    atomic_rcu_set(&migration_bitmap_rcu, NULL);
     if (bitmap) {
         memory_global_dirty_log_stop();
-        synchronize_rcu();
-        g_free(bitmap);
+        call_rcu(bitmap, migration_bitmap_free, rcu);
     }
 
     XBZRLE_cache_lock();
@@ -1070,9 +1079,10 @@  void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
     /* called in qemu main thread, so there is
      * no writing race against this migration_bitmap
      */
-    if (migration_bitmap) {
-        unsigned long *old_bitmap = migration_bitmap, *bitmap;
-        bitmap = bitmap_new(new);
+    if (migration_bitmap_rcu) {
+        struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
+        bitmap = g_new(struct BitmapRcu, 1);
+        bitmap->bmap = bitmap_new(new);
 
         /* prevent migration_bitmap content from being set bit
          * by migration_bitmap_sync_range() at the same time.
@@ -1080,13 +1090,12 @@  void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
          * at the same time.
          */
         qemu_mutex_lock(&migration_bitmap_mutex);
-        bitmap_copy(bitmap, old_bitmap, old);
-        bitmap_set(bitmap, old, new - old);
-        atomic_rcu_set(&migration_bitmap, bitmap);
+        bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
+        bitmap_set(bitmap->bmap, old, new - old);
+        atomic_rcu_set(&migration_bitmap_rcu, bitmap);
         qemu_mutex_unlock(&migration_bitmap_mutex);
         migration_dirty_pages += new - old;
-        synchronize_rcu();
-        g_free(old_bitmap);
+        call_rcu(old_bitmap, migration_bitmap_free, rcu);
     }
 }
 
@@ -1145,8 +1154,9 @@  static int ram_save_setup(QEMUFile *f, void *opaque)
     reset_ram_globals();
 
     ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
-    migration_bitmap = bitmap_new(ram_bitmap_pages);
-    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);
+    migration_bitmap_rcu = g_new(struct BitmapRcu, 1);
+    migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
+    bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
 
     /*
      * Count the total number of pages used by ram blocks not including any