diff mbox

[1/3] colo-compare: serialize compare thread's initialization with main thread

Message ID 1492674416-9408-2-git-send-email-zhang.zhanghailiang@huawei.com
State New
Headers show

Commit Message

Zhanghailiang April 20, 2017, 7:46 a.m. UTC
We call qemu_chr_fe_set_handlers() in colo-compare thread, it is used
to detach watched fd from default main context, so it has chance to
handle the same watched fd with main thread concurrently, which will
trigger an error report:
"qemu-char.c:918: io_watch_poll_finalize: Assertion `iwp->src == ((void *)0)' failed."

Fix it by serializing compare thread's initialization with main thread.

Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
---
 net/colo-compare.c | 6 ++++++
 1 file changed, 6 insertions(+)

Comments

Jason Wang April 24, 2017, 4:10 a.m. UTC | #1
On 2017年04月20日 15:46, zhanghailiang wrote:
> We call qemu_chr_fe_set_handlers() in colo-compare thread, it is used
> to detach watched fd from default main context, so it has chance to
> handle the same watched fd with main thread concurrently, which will
> trigger an error report:
> "qemu-char.c:918: io_watch_poll_finalize: Assertion `iwp->src == ((void *)0)' failed."

Any way to prevent the fd from being handled by the main thread before creating
the colo thread? Using a semaphore seems not elegant.

Thanks

>
> Fix it by serializing compare thread's initialization with main thread.
>
> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
> ---
>   net/colo-compare.c | 6 ++++++
>   1 file changed, 6 insertions(+)
>
> diff --git a/net/colo-compare.c b/net/colo-compare.c
> index 54e6d40..a6bf419 100644
> --- a/net/colo-compare.c
> +++ b/net/colo-compare.c
> @@ -83,6 +83,7 @@ typedef struct CompareState {
>       GHashTable *connection_track_table;
>       /* compare thread, a thread for each NIC */
>       QemuThread thread;
> +    QemuSemaphore thread_ready;
>   
>       GMainContext *worker_context;
>       GMainLoop *compare_loop;
> @@ -557,6 +558,8 @@ static void *colo_compare_thread(void *opaque)
>                             (GSourceFunc)check_old_packet_regular, s, NULL);
>       g_source_attach(timeout_source, s->worker_context);
>   
> +    qemu_sem_post(&s->thread_ready);
> +
>       g_main_loop_run(s->compare_loop);
>   
>       g_source_unref(timeout_source);
> @@ -707,12 +710,15 @@ static void colo_compare_complete(UserCreatable *uc, Error **errp)
>                                                         connection_key_equal,
>                                                         g_free,
>                                                         connection_destroy);
> +    qemu_sem_init(&s->thread_ready, 0);
>   
>       sprintf(thread_name, "colo-compare %d", compare_id);
>       qemu_thread_create(&s->thread, thread_name,
>                          colo_compare_thread, s,
>                          QEMU_THREAD_JOINABLE);
>       compare_id++;
> +    qemu_sem_wait(&s->thread_ready);
> +    qemu_sem_destroy(&s->thread_ready);
>   
>       return;
>   }
Zhanghailiang April 24, 2017, 6:03 a.m. UTC | #2
On 2017/4/24 12:10, Jason Wang wrote:
>
> On 2017年04月20日 15:46, zhanghailiang wrote:
>> We call qemu_chr_fe_set_handlers() in colo-compare thread, it is used
>> to detach watched fd from default main context, so it has chance to
>> handle the same watched fd with main thread concurrently, which will
>> trigger an error report:
>> "qemu-char.c:918: io_watch_poll_finalize: Assertion `iwp->src == ((void *)0)' failed."
> Anyway to prevent fd from being handled by main thread before creating
> colo thread? Using semaphore seems not elegant.

So how about calling qemu_mutex_lock_iothread() before qemu_chr_fe_set_handlers() ?

> Thanks
>
>> Fix it by serializing compare thread's initialization with main thread.
>>
>> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
>> ---
>>    net/colo-compare.c | 6 ++++++
>>    1 file changed, 6 insertions(+)
>>
>> diff --git a/net/colo-compare.c b/net/colo-compare.c
>> index 54e6d40..a6bf419 100644
>> --- a/net/colo-compare.c
>> +++ b/net/colo-compare.c
>> @@ -83,6 +83,7 @@ typedef struct CompareState {
>>        GHashTable *connection_track_table;
>>        /* compare thread, a thread for each NIC */
>>        QemuThread thread;
>> +    QemuSemaphore thread_ready;
>>    
>>        GMainContext *worker_context;
>>        GMainLoop *compare_loop;
>> @@ -557,6 +558,8 @@ static void *colo_compare_thread(void *opaque)
>>                              (GSourceFunc)check_old_packet_regular, s, NULL);
>>        g_source_attach(timeout_source, s->worker_context);
>>    
>> +    qemu_sem_post(&s->thread_ready);
>> +
>>        g_main_loop_run(s->compare_loop);
>>    
>>        g_source_unref(timeout_source);
>> @@ -707,12 +710,15 @@ static void colo_compare_complete(UserCreatable *uc, Error **errp)
>>                                                          connection_key_equal,
>>                                                          g_free,
>>                                                          connection_destroy);
>> +    qemu_sem_init(&s->thread_ready, 0);
>>    
>>        sprintf(thread_name, "colo-compare %d", compare_id);
>>        qemu_thread_create(&s->thread, thread_name,
>>                           colo_compare_thread, s,
>>                           QEMU_THREAD_JOINABLE);
>>        compare_id++;
>> +    qemu_sem_wait(&s->thread_ready);
>> +    qemu_sem_destroy(&s->thread_ready);
>>    
>>        return;
>>    }
>
> .
>
Jason Wang April 25, 2017, 8:41 a.m. UTC | #3
On 2017年04月24日 14:03, Hailiang Zhang wrote:
> On 2017/4/24 12:10, Jason Wang wrote:
>>
>> On 2017年04月20日 15:46, zhanghailiang wrote:
>>> We call qemu_chr_fe_set_handlers() in colo-compare thread, it is used
>>> to detach watched fd from default main context, so it has chance to
>>> handle the same watched fd with main thread concurrently, which will
>>> trigger an error report:
>>> "qemu-char.c:918: io_watch_poll_finalize: Assertion `iwp->src == 
>>> ((void *)0)' failed."
>> Anyway to prevent fd from being handled by main thread before creating
>> colo thread? Using semaphore seems not elegant.
>
> So how about calling qemu_mutex_lock_iothread() before 
> qemu_chr_fe_set_handlers() ?

Looks better, but I need more information, e.g. how can the main thread touch it?

Thanks
Zhanghailiang April 25, 2017, 9:59 a.m. UTC | #4
On 2017/4/25 16:41, Jason Wang wrote:
>
> On 2017年04月24日 14:03, Hailiang Zhang wrote:
>> On 2017/4/24 12:10, Jason Wang wrote:
>>> On 2017年04月20日 15:46, zhanghailiang wrote:
>>>> We call qemu_chr_fe_set_handlers() in colo-compare thread, it is used
>>>> to detach watched fd from default main context, so it has chance to
>>>> handle the same watched fd with main thread concurrently, which will
>>>> trigger an error report:
>>>> "qemu-char.c:918: io_watch_poll_finalize: Assertion `iwp->src ==
>>>> ((void *)0)' failed."
>>> Anyway to prevent fd from being handled by main thread before creating
>>> colo thread? Using semaphore seems not elegant.
>> So how about calling qemu_mutex_lock_iothread() before
>> qemu_chr_fe_set_handlers() ?
> Looks better, but I needs more information e.g how main thread can touch it?

Hmm, this happened quite occasionally, and we didn't catch the first place (backtrace)
where the fd was removed from being watched, but from the code's logic, we found there
should be such possible cases:
tcp_chr_write (Or tcp_chr_read/tcp_chr_sync_read/chr_disconnect)
  ->tcp_chr_disconnect (Or char_socket_finalize)
     ->tcp_chr_free_connection
       -> remove_fd_in_watch(chr);

Anyway, it needs protection from being freed twice.

Thanks,
Hailiang
> Thanks
>
> .
>
Jason Wang April 25, 2017, 11:33 a.m. UTC | #5
On 2017年04月25日 17:59, Hailiang Zhang wrote:
> On 2017/4/25 16:41, Jason Wang wrote:
>>
>> On 2017年04月24日 14:03, Hailiang Zhang wrote:
>>> On 2017/4/24 12:10, Jason Wang wrote:
>>>> On 2017年04月20日 15:46, zhanghailiang wrote:
>>>>> We call qemu_chr_fe_set_handlers() in colo-compare thread, it is used
>>>>> to detach watched fd from default main context, so it has chance to
>>>>> handle the same watched fd with main thread concurrently, which will
>>>>> trigger an error report:
>>>>> "qemu-char.c:918: io_watch_poll_finalize: Assertion `iwp->src ==
>>>>> ((void *)0)' failed."
>>>> Anyway to prevent fd from being handled by main thread before creating
>>>> colo thread? Using semaphore seems not elegant.
>>> So how about calling qemu_mutex_lock_iothread() before
>>> qemu_chr_fe_set_handlers() ?
>> Looks better, but I needs more information e.g how main thread can 
>> touch it?
>
> Hmm, this happened quite occasionally, and we didn't catch the first 
> place (backtrace)
> of removing fd from been watched, but  from the codes logic, we found 
> there should
> be such possible cases:
> tcp_chr_write (Or tcp_chr_read/tcp_chr_sync_read/chr_disconnect)
>  ->tcp_chr_disconnect (Or char_socket_finalize)
>     ->tcp_chr_free_connection
>       -> remove_fd_in_watch(chr);
>
> Anyway, it needs the protection from been freed twice.
>
> Thanks,
> Hailiang

Still a little bit confused. The question is how could main thread still 
call tcp_chr_write or other in the above case?

Thanks

>> Thanks
>>
>> .
>>
>
>
Zhanghailiang April 26, 2017, 7:51 a.m. UTC | #6
On 2017/4/25 19:33, Jason Wang wrote:
>
> On 2017年04月25日 17:59, Hailiang Zhang wrote:
>> On 2017/4/25 16:41, Jason Wang wrote:
>>> On 2017年04月24日 14:03, Hailiang Zhang wrote:
>>>> On 2017/4/24 12:10, Jason Wang wrote:
>>>>> On 2017年04月20日 15:46, zhanghailiang wrote:
>>>>>> We call qemu_chr_fe_set_handlers() in colo-compare thread, it is used
>>>>>> to detach watched fd from default main context, so it has chance to
>>>>>> handle the same watched fd with main thread concurrently, which will
>>>>>> trigger an error report:
>>>>>> "qemu-char.c:918: io_watch_poll_finalize: Assertion `iwp->src ==
>>>>>> ((void *)0)' failed."
>>>>> Anyway to prevent fd from being handled by main thread before creating
>>>>> colo thread? Using semaphore seems not elegant.
>>>> So how about calling qemu_mutex_lock_iothread() before
>>>> qemu_chr_fe_set_handlers() ?
>>> Looks better, but I needs more information e.g how main thread can
>>> touch it?
>> Hmm, this happened quite occasionally, and we didn't catch the first
>> place (backtrace)
>> of removing fd from been watched, but  from the codes logic, we found
>> there should
>> be such possible cases:
>> tcp_chr_write (Or tcp_chr_read/tcp_chr_sync_read/chr_disconnect)
>>   ->tcp_chr_disconnect (Or char_socket_finalize)
>>      ->tcp_chr_free_connection
>>        -> remove_fd_in_watch(chr);
>>
>> Anyway, it needs the protection from been freed twice.
>>
>> Thanks,
>> Hailiang
> Still a little bit confused. The question is how could main thread still
> call tcp_chr_write or other in the above case?

The 'char_socket_finalize' ? Hmm, I'd better reproduce it again to catch the first
place where the fd is removed from being watched...

> Thanks
>
>>> Thanks
>>>
>>> .
>>>
>>
>
> .
>
Zhanghailiang May 4, 2017, 2:51 a.m. UTC | #7
Hi Jason,

On 2017/4/25 19:33, Jason Wang wrote:
>
> On 2017年04月25日 17:59, Hailiang Zhang wrote:
>> On 2017/4/25 16:41, Jason Wang wrote:
>>> On 2017年04月24日 14:03, Hailiang Zhang wrote:
>>>> On 2017/4/24 12:10, Jason Wang wrote:
>>>>> On 2017年04月20日 15:46, zhanghailiang wrote:
>>>>>> We call qemu_chr_fe_set_handlers() in colo-compare thread, it is used
>>>>>> to detach watched fd from default main context, so it has chance to
>>>>>> handle the same watched fd with main thread concurrently, which will
>>>>>> trigger an error report:
>>>>>> "qemu-char.c:918: io_watch_poll_finalize: Assertion `iwp->src ==
>>>>>> ((void *)0)' failed."
>>>>> Anyway to prevent fd from being handled by main thread before creating
>>>>> colo thread? Using semaphore seems not elegant.
>>>> So how about calling qemu_mutex_lock_iothread() before
>>>> qemu_chr_fe_set_handlers() ?
>>> Looks better, but I needs more information e.g how main thread can
>>> touch it?
>> Hmm, this happened quite occasionally, and we didn't catch the first
>> place (backtrace)
>> of removing fd from been watched, but  from the codes logic, we found
>> there should
>> be such possible cases:
>> tcp_chr_write (Or tcp_chr_read/tcp_chr_sync_read/chr_disconnect)
>>   ->tcp_chr_disconnect (Or char_socket_finalize)
>>      ->tcp_chr_free_connection
>>        -> remove_fd_in_watch(chr);
>>
>> Anyway, it needs the protection from been freed twice.
>>
>> Thanks,
>> Hailiang
> Still a little bit confused. The question is how could main thread still
> call tcp_chr_write or other in the above case?

Finally, we reproduced this bug (we use QEMU 2.6), and got the following backtrace for this problem:

(gdb) thread apply all bt

Thread 7 (Thread 0x7f407a1ff700 (LWP 23144)):
#0  0x00007f41037e0db5 in _int_malloc () from /usr/lib64/libc.so.6
#1  0x00007f41037e3b96 in calloc () from /usr/lib64/libc.so.6
#2  0x00007f41041ad4d7 in g_malloc0 () from /usr/lib64/libglib-2.0.so.0
#3  0x00007f41041a5437 in g_source_new () from /usr/lib64/libglib-2.0.so.0
#4  0x00007f410a2cec9c in qio_channel_create_fd_watch (ioc=ioc@entry=0x7f410d6238c0, fd=20, condition=condition@entry=
     (G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL)) at io/channel-watch.c:259
#5  0x00007f410a2ced01 in qio_channel_create_socket_watch (ioc=ioc@entry=0x7f410d6238c0, socket=<optimized out>,
     condition=condition@entry=(G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL)) at io/channel-watch.c:311
#6  0x00007f410a2cbea7 in qio_channel_socket_create_watch (ioc=0x7f410d6238c0, condition=(G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL))
     at io/channel-socket.c:732
#7  0x00007f410a2c94d2 in qio_channel_create_watch (ioc=0x7f410d6238c0, condition=condition@entry=
     (G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL)) at io/channel.c:132
#8  0x00007f410a003cd6 in io_watch_poll_prepare (source=0x7f4070000d00, timeout_=<optimized out>) at qemu-char.c:883
#9  0x00007f41041a72ed in g_main_context_prepare () from /usr/lib64/libglib-2.0.so.0
#10 0x00007f41041a7b7b in g_main_context_iterate.isra.24 () from /usr/lib64/libglib-2.0.so.0
#11 0x00007f41041a7fba in g_main_loop_run () from /usr/lib64/libglib-2.0.so.0
#12 0x00007f410a1e528f in colo_compare_thread (opaque=0x7f410d7d6800) at net/colo-compare.c:651
#13 0x00007f4103b2bdc5 in start_thread () from /usr/lib64/libpthread.so.0
#14 0x00007f410385971d in clone () from /usr/lib64/libc.so.6

Thread 6 (Thread 0x7f40799fe700 (LWP 19368)):
#0  0x00007f4103b2f6d5 in pthread_cond_wait@@GLIBC_2.3.2 () from /usr/lib64/libpthread.so.0
#1  0x00007f410a3138d1 in qemu_cond_wait (cond=cond@entry=0x7f410cce44c0, mutex=mutex@entry=0x7f410cce44f0)
     at util/qemu-thread-posix.c:132
---Type <return> to continue, or q <return> to quit---
#2  0x00007f410a22b1a3 in vnc_worker_thread_loop (queue=queue@entry=0x7f410cce44c0) at ui/vnc-jobs.c:228
#3  0x00007f410a22b810 in vnc_worker_thread (arg=0x7f410cce44c0) at ui/vnc-jobs.c:335
#4  0x00007f4103b2bdc5 in start_thread () from /usr/lib64/libpthread.so.0
#5  0x00007f410385971d in clone () from /usr/lib64/libc.so.6

Thread 5 (Thread 0x7f407abff700 (LWP 19366)):
#0  0x00007f4103b2f6d5 in pthread_cond_wait@@GLIBC_2.3.2 () from /usr/lib64/libpthread.so.0
#1  0x00007f410a3138d1 in qemu_cond_wait (cond=cond@entry=0x7f410a9fc368 <mlock_struct+40>,
     mutex=mutex@entry=0x7f410a9fc340 <mlock_struct>) at util/qemu-thread-posix.c:132
#2  0x00007f4109e99060 in mlock_wait () at /work/zhanghailiang/qemu-kvm/exec.c:392
#3  mlock_thread (opaque=<optimized out>) at /work/zhanghailiang/qemu-kvm/exec.c:407
#4  0x00007f4103b2bdc5 in start_thread () from /usr/lib64/libpthread.so.0
#5  0x00007f410385971d in clone () from /usr/lib64/libc.so.6

Thread 4 (Thread 0x7f40fcd83700 (LWP 19364)):
#0  0x00007f4103b2f6d5 in pthread_cond_wait@@GLIBC_2.3.2 () from /usr/lib64/libpthread.so.0
#1  0x00007f410a3138d1 in qemu_cond_wait (cond=<optimized out>, mutex=mutex@entry=0x7f410aa66ca0 <qemu_global_mutex>)
     at util/qemu-thread-posix.c:132
#2  0x00007f4109ed5b3b in qemu_kvm_wait_io_event (cpu=0x7f410c2bda30) at /work/zhanghailiang/qemu-kvm/cpus.c:1087
#3  qemu_kvm_cpu_thread_fn (arg=0x7f410c2bda30) at /work/zhanghailiang/qemu-kvm/cpus.c:1126
#4  0x00007f4103b2bdc5 in start_thread () from /usr/lib64/libpthread.so.0
#5  0x00007f410385971d in clone () from /usr/lib64/libc.so.6

Thread 3 (Thread 0x7f40fd584700 (LWP 19363)):
#0  0x00007f4103b2f6d5 in pthread_cond_wait@@GLIBC_2.3.2 () from /usr/lib64/libpthread.so.0
#1  0x00007f410a3138d1 in qemu_cond_wait (cond=<optimized out>, mutex=mutex@entry=0x7f410aa66ca0 <qemu_global_mutex>)
---Type <return> to continue, or q <return> to quit---
     at util/qemu-thread-posix.c:132
#2  0x00007f4109ed5b3b in qemu_kvm_wait_io_event (cpu=0x7f410c24e690) at /work/zhanghailiang/qemu-kvm/cpus.c:1087
#3  qemu_kvm_cpu_thread_fn (arg=0x7f410c24e690) at /work/zhanghailiang/qemu-kvm/cpus.c:1126
#4  0x00007f4103b2bdc5 in start_thread () from /usr/lib64/libpthread.so.0
#5  0x00007f410385971d in clone () from /usr/lib64/libc.so.6

Thread 2 (Thread 0x7f40fde76700 (LWP 19311)):
#0  0x00007f4103853e99 in syscall () from /usr/lib64/libc.so.6
#1  0x00007f410a313d52 in futex_wait (val=4294967295, ev=0x7f410afe4fc8 <rcu_call_ready_event>) at util/qemu-thread-posix.c:301
#2  qemu_event_wait (ev=ev@entry=0x7f410afe4fc8 <rcu_call_ready_event>) at util/qemu-thread-posix.c:408
#3  0x00007f410a329846 in call_rcu_thread (opaque=<optimized out>) at util/rcu.c:250
#4  0x00007f4103b2bdc5 in start_thread () from /usr/lib64/libpthread.so.0
#5  0x00007f410385971d in clone () from /usr/lib64/libc.so.6

Thread 1 (Thread 0x7f4109b73bc0 (LWP 19310)):
#0  0x00007f41037985d7 in raise () from /usr/lib64/libc.so.6
#1  0x00007f4103799cc8 in abort () from /usr/lib64/libc.so.6
#2  0x00007f4103791546 in __assert_fail_base () from /usr/lib64/libc.so.6
#3  0x00007f41037915f2 in __assert_fail () from /usr/lib64/libc.so.6
#4  0x00007f410a003778 in io_watch_poll_finalize (source=<optimized out>) at qemu-char.c:919
#5  0x00007f41041a4de2 in g_source_unref_internal () from /usr/lib64/libglib-2.0.so.0
#6  0x00007f41041a4fee in g_source_iter_next () from /usr/lib64/libglib-2.0.so.0
#7  0x00007f41041a728b in g_main_context_prepare () from /usr/lib64/libglib-2.0.so.0
#8  0x00007f410a23fdca in glib_pollfds_fill (cur_timeout=<synthetic pointer>) at main-loop.c:196
#9  os_host_main_loop_wait (timeout=2491118968) at main-loop.c:235
#10 main_loop_wait (nonblocking=<optimized out>) at main-loop.c:517
---Type <return> to continue, or q <return> to quit---
#11 0x00007f4109e890fd in main_loop () at vl.c:2202
#12 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:5124

In this case, the main thread and the colo compare thread process the same GSource concurrently.
(I still haven't figured out why g_main_context_prepare calls the finalize callback here.)

Thanks,


> Thanks
>
>>> Thanks
>>>
>>> .
>>>
>>
>
> .
>
Jason Wang May 5, 2017, 3:03 a.m. UTC | #8
On 2017年05月04日 10:51, Hailiang Zhang wrote:
> Hi Jason,
>
> On 2017/4/25 19:33, Jason Wang wrote:
>>
>> On 2017年04月25日 17:59, Hailiang Zhang wrote:
>>> On 2017/4/25 16:41, Jason Wang wrote:
>>>> On 2017年04月24日 14:03, Hailiang Zhang wrote:
>>>>> On 2017/4/24 12:10, Jason Wang wrote:
>>>>>> On 2017年04月20日 15:46, zhanghailiang wrote:
>>>>>>> We call qemu_chr_fe_set_handlers() in colo-compare thread, it is 
>>>>>>> used
>>>>>>> to detach watched fd from default main context, so it has chance to
>>>>>>> handle the same watched fd with main thread concurrently, which 
>>>>>>> will
>>>>>>> trigger an error report:
>>>>>>> "qemu-char.c:918: io_watch_poll_finalize: Assertion `iwp->src ==
>>>>>>> ((void *)0)' failed."
>>>>>> Anyway to prevent fd from being handled by main thread before 
>>>>>> creating
>>>>>> colo thread? Using semaphore seems not elegant.
>>>>> So how about calling qemu_mutex_lock_iothread() before
>>>>> qemu_chr_fe_set_handlers() ?
>>>> Looks better, but I needs more information e.g how main thread can
>>>> touch it?
>>> Hmm, this happened quite occasionally, and we didn't catch the first
>>> place (backtrace)
>>> of removing fd from been watched, but  from the codes logic, we found
>>> there should
>>> be such possible cases:
>>> tcp_chr_write (Or tcp_chr_read/tcp_chr_sync_read/chr_disconnect)
>>>   ->tcp_chr_disconnect (Or char_socket_finalize)
>>>      ->tcp_chr_free_connection
>>>        -> remove_fd_in_watch(chr);
>>>
>>> Anyway, it needs the protection from been freed twice.
>>>
>>> Thanks,
>>> Hailiang
>> Still a little bit confused. The question is how could main thread still
>> call tcp_chr_write or other in the above case?
>
> Finally, we reproduced this bug (We use qemu 2.6), and got the follow 
> backtrace of this problem:
>
> (gdb) thread apply all bt
>
> Thread 7 (Thread 0x7f407a1ff700 (LWP 23144)):
> #0  0x00007f41037e0db5 in _int_malloc () from /usr/lib64/libc.so.6
> #1  0x00007f41037e3b96 in calloc () from /usr/lib64/libc.so.6
> #2  0x00007f41041ad4d7 in g_malloc0 () from /usr/lib64/libglib-2.0.so.0
> #3  0x00007f41041a5437 in g_source_new () from 
> /usr/lib64/libglib-2.0.so.0
> #4  0x00007f410a2cec9c in qio_channel_create_fd_watch 
> (ioc=ioc@entry=0x7f410d6238c0, fd=20, condition=condition@entry=
>     (G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL)) at 
> io/channel-watch.c:259
> #5  0x00007f410a2ced01 in qio_channel_create_socket_watch 
> (ioc=ioc@entry=0x7f410d6238c0, socket=<optimized out>,
>     condition=condition@entry=(G_IO_IN | G_IO_ERR | G_IO_HUP | 
> G_IO_NVAL)) at io/channel-watch.c:311
> #6  0x00007f410a2cbea7 in qio_channel_socket_create_watch 
> (ioc=0x7f410d6238c0, condition=(G_IO_IN | G_IO_ERR | G_IO_HUP | 
> G_IO_NVAL))
>     at io/channel-socket.c:732
> #7  0x00007f410a2c94d2 in qio_channel_create_watch 
> (ioc=0x7f410d6238c0, condition=condition@entry=
>     (G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL)) at io/channel.c:132
> #8  0x00007f410a003cd6 in io_watch_poll_prepare 
> (source=0x7f4070000d00, timeout_=<optimized out>) at qemu-char.c:883
> #9  0x00007f41041a72ed in g_main_context_prepare () from 
> /usr/lib64/libglib-2.0.so.0
> #10 0x00007f41041a7b7b in g_main_context_iterate.isra.24 () from 
> /usr/lib64/libglib-2.0.so.0
> #11 0x00007f41041a7fba in g_main_loop_run () from 
> /usr/lib64/libglib-2.0.so.0
> #12 0x00007f410a1e528f in colo_compare_thread (opaque=0x7f410d7d6800) 
> at net/colo-compare.c:651
> #13 0x00007f4103b2bdc5 in start_thread () from /usr/lib64/libpthread.so.0
> #14 0x00007f410385971d in clone () from /usr/lib64/libc.so.6 

It looks like we use the main context, which is wrong; maybe you can track
io_add_watch_poll() and its caller to get the reason.

Thanks
diff mbox

Patch

diff --git a/net/colo-compare.c b/net/colo-compare.c
index 54e6d40..a6bf419 100644
--- a/net/colo-compare.c
+++ b/net/colo-compare.c
@@ -83,6 +83,7 @@  typedef struct CompareState {
     GHashTable *connection_track_table;
     /* compare thread, a thread for each NIC */
     QemuThread thread;
+    QemuSemaphore thread_ready;
 
     GMainContext *worker_context;
     GMainLoop *compare_loop;
@@ -557,6 +558,8 @@  static void *colo_compare_thread(void *opaque)
                           (GSourceFunc)check_old_packet_regular, s, NULL);
     g_source_attach(timeout_source, s->worker_context);
 
+    qemu_sem_post(&s->thread_ready);
+
     g_main_loop_run(s->compare_loop);
 
     g_source_unref(timeout_source);
@@ -707,12 +710,15 @@  static void colo_compare_complete(UserCreatable *uc, Error **errp)
                                                       connection_key_equal,
                                                       g_free,
                                                       connection_destroy);
+    qemu_sem_init(&s->thread_ready, 0);
 
     sprintf(thread_name, "colo-compare %d", compare_id);
     qemu_thread_create(&s->thread, thread_name,
                        colo_compare_thread, s,
                        QEMU_THREAD_JOINABLE);
     compare_id++;
+    qemu_sem_wait(&s->thread_ready);
+    qemu_sem_destroy(&s->thread_ready);
 
     return;
 }