
[PULL,27/28] migration: protect migration_bitmap

Message ID 1436274549-28826-28-git-send-email-quintela@redhat.com
State New

Commit Message

Juan Quintela July 7, 2015, 1:09 p.m. UTC
From: Li Zhijian <lizhijian@cn.fujitsu.com>

Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/ram.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

Comments

Kevin Wolf July 8, 2015, 7:13 p.m. UTC | #1
Am 07.07.2015 um 15:09 hat Juan Quintela geschrieben:
> [commit message and diffstat snipped]

In current master, HMP 'savevm' is broken (looks like a deadlock in RCU
code, it just hangs indefinitely). git bisect points to this patch.

The stack trace looks like this:

(gdb) thread apply all bt

Thread 3 (Thread 0x7f06febfe700 (LWP 5717)):
#0  0x00007f070e749f7d in __lll_lock_wait () from /lib64/libpthread.so.0
#1  0x00007f070e745d32 in _L_lock_791 () from /lib64/libpthread.so.0
#2  0x00007f070e745c38 in pthread_mutex_lock () from /lib64/libpthread.so.0
#3  0x00007f070fed8bc9 in qemu_mutex_lock (mutex=mutex@entry=0x7f07107e6700 <rcu_gp_lock>) at util/qemu-thread-posix.c:73
#4  0x00007f070fee7631 in synchronize_rcu () at util/rcu.c:129
#5  0x00007f070fee77d9 in call_rcu_thread (opaque=<optimized out>) at util/rcu.c:240
#6  0x00007f070e743df5 in start_thread () from /lib64/libpthread.so.0
#7  0x00007f07066ab1ad in clone () from /lib64/libc.so.6

Thread 2 (Thread 0x7f06f940f700 (LWP 5719)):
#0  0x00007f070e749f7d in __lll_lock_wait () from /lib64/libpthread.so.0
#1  0x00007f070e74c4ec in _L_cond_lock_792 () from /lib64/libpthread.so.0
#2  0x00007f070e74c3c8 in __pthread_mutex_cond_lock () from /lib64/libpthread.so.0
#3  0x00007f070e747795 in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
#4  0x00007f070fed8ca9 in qemu_cond_wait (cond=<optimized out>, mutex=mutex@entry=0x7f07103b0400 <qemu_global_mutex>) at util/qemu-thread-posix.c:132
#5  0x00007f070fc4daab in qemu_tcg_cpu_thread_fn (arg=<optimized out>) at /home/kwolf/source/qemu/cpus.c:1050
#6  0x00007f070e743df5 in start_thread () from /lib64/libpthread.so.0
#7  0x00007f07066ab1ad in clone () from /lib64/libc.so.6

Thread 1 (Thread 0x7f070fb20bc0 (LWP 5716)):
#0  0x00007f07066a5949 in syscall () from /lib64/libc.so.6
#1  0x00007f070fed8fa2 in futex_wait (val=4294967295, ev=0x7f07107e66c0 <rcu_gp_event>) at util/qemu-thread-posix.c:301
#2  qemu_event_wait (ev=ev@entry=0x7f07107e66c0 <rcu_gp_event>) at util/qemu-thread-posix.c:399
#3  0x00007f070fee7713 in wait_for_readers () at util/rcu.c:120
#4  synchronize_rcu () at util/rcu.c:149
#5  0x00007f070fc6e0c2 in migration_end () at /home/kwolf/source/qemu/migration/ram.c:1033
#6  0x00007f070fc6ef23 in ram_save_complete (f=0x7f07122f9aa0, opaque=<optimized out>) at /home/kwolf/source/qemu/migration/ram.c:1241
#7  0x00007f070fc71d75 in qemu_savevm_state_complete (f=f@entry=0x7f07122f9aa0) at /home/kwolf/source/qemu/migration/savevm.c:836
#8  0x00007f070fc7298e in qemu_savevm_state (errp=0x7ffe2a081ff8, f=0x7f07122f9aa0) at /home/kwolf/source/qemu/migration/savevm.c:945
#9  hmp_savevm (mon=0x7f071233b500, qdict=<optimized out>) at /home/kwolf/source/qemu/migration/savevm.c:1353
#10 0x00007f070fc552d0 in handle_hmp_command (mon=mon@entry=0x7f071233b500, cmdline=0x7f0712350197 "foo") at /home/kwolf/source/qemu/monitor.c:4058
#11 0x00007f070fc56467 in monitor_command_cb (opaque=0x7f071233b500, cmdline=<optimized out>, readline_opaque=<optimized out>)
    at /home/kwolf/source/qemu/monitor.c:5081
#12 0x00007f070fee6dbf in readline_handle_byte (rs=0x7f0712350190, ch=<optimized out>) at util/readline.c:391
#13 0x00007f070fc55387 in monitor_read (opaque=<optimized out>, buf=<optimized out>, size=<optimized out>) at /home/kwolf/source/qemu/monitor.c:5064
#14 0x00007f070fd17b21 in qemu_chr_be_write (len=<optimized out>, buf=0x7ffe2a082640 "\n\331\367\325\001\200\377\377\200&\b*\376\177", s=0x7f0712304670)
    at qemu-char.c:306
#15 fd_chr_read (chan=<optimized out>, cond=<optimized out>, opaque=0x7f0712304670) at qemu-char.c:1012
#16 0x00007f070e04c9ba in g_main_context_dispatch () from /lib64/libglib-2.0.so.0
#17 0x00007f070fe61678 in glib_pollfds_poll () at main-loop.c:199
#18 os_host_main_loop_wait (timeout=<optimized out>) at main-loop.c:244
#19 main_loop_wait (nonblocking=<optimized out>) at main-loop.c:493
#20 0x00007f070fc24e9e in main_loop () at vl.c:1901
#21 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:4649

Kevin

> [quoted patch snipped; the full diff appears in the Patch section below]
Paolo Bonzini July 8, 2015, 8:35 p.m. UTC | #2
On 08/07/2015 21:13, Kevin Wolf wrote:
> Am 07.07.2015 um 15:09 hat Juan Quintela geschrieben:
>> [commit message and diffstat snipped]
> 
> In current master, HMP 'savevm' is broken (looks like a deadlock in RCU
> code, it just hangs indefinitely). git bisect points to this patch.

This looks like synchronize_rcu() is being called within
rcu_read_lock()/rcu_read_unlock().

The easiest fix is to somehow use call_rcu, but I haven't looked at the
code very well.
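
Something along these lines, perhaps (an untested sketch, not the
committed fix; it assumes QEMU's call_rcu() macro from
include/qemu/rcu.h, with the bitmap wrapped in a hypothetical
BitmapRcu struct):

/* Hypothetical sketch: defer freeing the old bitmap to the RCU
 * thread instead of waiting in synchronize_rcu(), which deadlocks
 * if the caller is inside rcu_read_lock()/rcu_read_unlock(). */
typedef struct BitmapRcu {
    struct rcu_head rcu;
    unsigned long *bmap;
} BitmapRcu;

static BitmapRcu *migration_bitmap_rcu;

static void migration_bitmap_free(BitmapRcu *bitmap)
{
    g_free(bitmap->bmap);
    g_free(bitmap);
}

static void migration_end(void)
{
    BitmapRcu *bitmap = migration_bitmap_rcu;
    atomic_rcu_set(&migration_bitmap_rcu, NULL);
    if (bitmap) {
        memory_global_dirty_log_stop();
        /* Safe even under rcu_read_lock(): the free callback runs
         * only after all current readers have left their critical
         * sections. */
        call_rcu(bitmap, migration_bitmap_free, rcu);
    }
    /* ... XBZRLE teardown as in the patch ... */
}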

I found another embarrassing bug in the RCU code, but it's been there
forever and can wait for after -rc0 (and it wasn't really a problem
until BQL-less MMIO was merged a couple days ago).

Paolo

> [stack trace and quoted patch snipped]
Wen Congyang July 9, 2015, 1:19 a.m. UTC | #3
On 07/09/2015 04:35 AM, Paolo Bonzini wrote:
> 
> 
> On 08/07/2015 21:13, Kevin Wolf wrote:
>> Am 07.07.2015 um 15:09 hat Juan Quintela geschrieben:
>>> [commit message and diffstat snipped]
>>
>> In current master, HMP 'savevm' is broken (looks like a deadlock in RCU
>> code, it just hangs indefinitely). git bisect points to this patch.
> 
> This looks like synchronize_rcu() is being called within
> rcu_read_lock()/rcu_read_unlock().
> 
> The easiest fix is to somehow use call_rcu, but I haven't looked at the
> code very well.

Yes. I wonder why live migration doesn't trigger this problem. We will fix it soon.

Thanks
Wen Congyang

> [remainder of quoted message snipped]

Patch

diff --git a/migration/ram.c b/migration/ram.c
index 644f52a..9c0bcfe 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -494,6 +494,7 @@  static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
     return 1;
 }

+/* Called with rcu_read_lock() to protect migration_bitmap */
 static inline
 ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                  ram_addr_t start)
@@ -502,26 +503,31 @@  ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
     unsigned long nr = base + (start >> TARGET_PAGE_BITS);
     uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
     unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
+    unsigned long *bitmap;

     unsigned long next;

+    bitmap = atomic_rcu_read(&migration_bitmap);
     if (ram_bulk_stage && nr > base) {
         next = nr + 1;
     } else {
-        next = find_next_bit(migration_bitmap, size, nr);
+        next = find_next_bit(bitmap, size, nr);
     }

     if (next < size) {
-        clear_bit(next, migration_bitmap);
+        clear_bit(next, bitmap);
         migration_dirty_pages--;
     }
     return (next - base) << TARGET_PAGE_BITS;
 }

+/* Called with rcu_read_lock() to protect migration_bitmap */
 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
 {
+    unsigned long *bitmap;
+    bitmap = atomic_rcu_read(&migration_bitmap);
     migration_dirty_pages +=
-        cpu_physical_memory_sync_dirty_bitmap(migration_bitmap, start, length);
+        cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
 }


@@ -1017,10 +1023,15 @@  void free_xbzrle_decoded_buf(void)

 static void migration_end(void)
 {
-    if (migration_bitmap) {
+    /* The caller holds the iothread lock or is in a bottom half, so
+     * there is no write race against this migration_bitmap.
+     */
+    unsigned long *bitmap = migration_bitmap;
+    atomic_rcu_set(&migration_bitmap, NULL);
+    if (bitmap) {
         memory_global_dirty_log_stop();
-        g_free(migration_bitmap);
-        migration_bitmap = NULL;
+        synchronize_rcu();
+        g_free(bitmap);
     }

     XBZRLE_cache_lock();
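
The "Called with rcu_read_lock()" comments above describe a contract on
the callers: the RCU read lock must cover every use of the pointer
loaded with atomic_rcu_read(). A minimal illustrative caller
(hypothetical; in QEMU the lock is actually taken further up the RAM
save path) would look like this:

/* Hypothetical caller sketch: rcu_read_lock() must span the whole
 * lookup, otherwise migration_end() can publish NULL and free the
 * bitmap while it is still being scanned. */
static ram_addr_t find_dirty_page(MemoryRegion *mr, ram_addr_t start)
{
    ram_addr_t addr;

    rcu_read_lock();
    addr = migration_bitmap_find_and_reset_dirty(mr, start);
    rcu_read_unlock();

    return addr;
}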