diff mbox

vnc: Fix qemu crash on vnc client disconnection

Message ID 33183CC9F5247A488A2544077AF19020815A091D@SZXEMA503-MBS.china.huawei.com
State New
Headers show

Commit Message

Gonglei (Arei) Oct. 24, 2013, 5:14 a.m. UTC
Hi,

I encountered a qemu crash when a vnc client disconnected, and I got the following log:

qemu: qemu_mutex_lock: Invalid argument

and the backtrace listed:

Core was generated by `/mnt/sdd/gonglei/kvm/qemu-unstable/x86_64-softmmu/qemu-system-x86_64 -name suse'.
Program terminated with signal 6, Aborted.
#0  0x00007fab8498ed95 in raise () from /lib64/libc.so.6
(gdb) bt
#0  0x00007fab8498ed95 in raise () from /lib64/libc.so.6
#1  0x00007fab849902ab in abort () from /lib64/libc.so.6
#2  0x00007fab87c22915 in error_exit (err=22, msg=0x7fab87c97a70 <__func__.4762> "qemu_mutex_lock") at util/qemu-thread-posix.c:28
#3  0x00007fab87c22a19 in qemu_mutex_lock (mutex=0x7fab8858e688) at util/qemu-thread-posix.c:59
#4  0x00007fab87ae52ea in vnc_lock_output (vs=0x7fab885823f0) at ui/vnc-jobs.h:63
#5  0x00007fab87ae5217 in vnc_jobs_consume_buffer (vs=0x7fab885823f0) at ui/vnc-jobs.c:166
#6  0x00007fab87ae51dd in vnc_jobs_join (vs=0x7fab885823f0) at ui/vnc-jobs.c:159
#7  0x00007fab87aea776 in vnc_update_client_sync (vs=0x7fab885823f0, has_dirty=1) at ui/vnc.c:880
#8  0x00007fab87aea007 in vnc_dpy_copy (dcl=0x7fab8088f048, src_x=746, src_y=578, dst_x=772, dst_y=578, w=22, h=22) at ui/vnc.c:753
#9  0x00007fab87ac8df6 in dpy_gfx_copy (con=0x7fab885cdb40, src_x=746, src_y=578, dst_x=772, dst_y=578, w=22, h=22)
    at ui/console.c:1455
#10 0x00007fab87ac9fd7 in qemu_console_copy (con=0x7fab885cdb40, src_x=746, src_y=578, dst_x=772, dst_y=578, w=22, h=22)
    at ui/console.c:1837
#11 0x00007fab8799bd91 in cirrus_do_copy (s=0x7fab885ff450, dst=1228339, src=1228287, w=22, h=22) at hw/display/cirrus_vga.c:738
#12 0x00007fab8799bf03 in cirrus_bitblt_videotovideo_copy (s=0x7fab885ff450) at hw/display/cirrus_vga.c:757
#13 0x00007fab8799c48c in cirrus_bitblt_videotovideo (s=0x7fab885ff450) at hw/display/cirrus_vga.c:879
#14 0x00007fab8799cc00 in cirrus_bitblt_start (s=0x7fab885ff450) at hw/display/cirrus_vga.c:1020
#15 0x00007fab8799cfbb in cirrus_write_bitblt (s=0x7fab885ff450, reg_value=2) at hw/display/cirrus_vga.c:1041
#16 0x00007fab8799dedb in cirrus_vga_write_gr (s=0x7fab885ff450, reg_index=49, reg_value=2) at hw/display/cirrus_vga.c:1536
#17 0x00007fab8799e721 in cirrus_mmio_blt_write (s=0x7fab885ff450, address=64, value=2 '\002') at hw/display/cirrus_vga.c:1890
#18 0x00007fab879a068d in cirrus_mmio_write (opaque=0x7fab885ff450, addr=320, val=2, size=1) at hw/display/cirrus_vga.c:2670
#19 0x00007fab87b77921 in memory_region_write_accessor (mr=0x7fab8860fe90, addr=320, value=0x7fab818d5cc8, size=1, shift=0, mask=
    255) at /mnt/sdd/gonglei/kvm/qemu-unstable/memory.c:440
#20 0x00007fab87b77a5d in access_with_adjusted_size (addr=320, value=0x7fab818d5cc8, size=4, access_size_min=1, access_size_max=1, 
    access=0x7fab87b77898 <memory_region_write_accessor>, mr=0x7fab8860fe90) at /mnt/sdd/gonglei/kvm/qemu-unstable/memory.c:477
#21 0x00007fab87b7a8c0 in memory_region_dispatch_write (mr=0x7fab8860fe90, addr=320, data=18446744073709551362, size=4)
    at /mnt/sdd/gonglei/kvm/qemu-unstable/memory.c:984
#22 0x00007fab87b7e176 in io_mem_write (mr=0x7fab8860fe90, addr=320, val=18446744073709551362, size=4)
    at /mnt/sdd/gonglei/kvm/qemu-unstable/memory.c:1748
#23 0x00007fab87b0e91e in address_space_rw (as=0x7fab8848b960 <address_space_memory>, addr=4273832256, buf=
    0x7fab87830028 <Address 0x7fab87830028 out of bounds>, len=4, is_write=true) at /mnt/sdd/gonglei/kvm/qemu-unstable/exec.c:1963
#24 0x00007fab87b0eec0 in cpu_physical_memory_rw (addr=4273832256, buf=0x7fab87830028 <Address 0x7fab87830028 out of bounds>, len=
    4, is_write=1) at /mnt/sdd/gonglei/kvm/qemu-unstable/exec.c:2042
#25 0x00007fab87b74b47 in kvm_cpu_exec (cpu=0x7fab88520c20) at /mnt/sdd/gonglei/kvm/qemu-unstable/kvm-all.c:1673
#26 0x00007fab87b022e9 in qemu_kvm_cpu_thread_fn (arg=0x7fab88520c20) at /mnt/sdd/gonglei/kvm/qemu-unstable/cpus.c:785
#27 0x00007fab85b07f05 in start_thread () from /lib64/libpthread.so.0
#28 0x00007fab84a3353d in clone () from /lib64/libc.so.6

When the VNC client disconnects, vs->csock is set to -1 in vnc_disconnect_start. On the next loop iteration, the following call chains can both run:
vnc_dpy_copy-->vnc_update_client_sync-->vnc_update_client-->vnc_disconnect_finish(vs)
and
vnc_dpy_copy-->vnc_update_client_sync--> vnc_jobs_consume_buffer-->vnc_lock_output(vs)--> qemu_mutex_lock(&vs->output_mutex);
Because vs has already been freed by vnc_disconnect_finish, the subsequent qemu_mutex_lock(&vs->output_mutex) operates on a destroyed mutex and makes qemu abort.

This patch fixes the bug:

when the vs object has been freed, vnc_update_client returns -1, and
vnc_update_client_sync skips the job join for that freed state, so that
qemu does not abort.
Signed-off-by: Gonglei <arei.gonglei@huawei.com>

---
 ui/vnc.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

-- 
1.8.3.4

Best regards,
-Gonglei

Comments

Gerd Hoffmann Oct. 28, 2013, 7:52 a.m. UTC | #1
Hi,

> diff --git a/ui/vnc.c b/ui/vnc.c
> index 5601cc3..2177704 100644
> --- a/ui/vnc.c
> +++ b/ui/vnc.c
> @@ -876,7 +876,8 @@ static int find_and_clear_dirty_height(struct VncState *vs,
>  static int vnc_update_client_sync(VncState *vs, int has_dirty)
>  {
>      int ret = vnc_update_client(vs, has_dirty);
> -    vnc_jobs_join(vs);
> +    if (ret >= 0)
> +        vnc_jobs_join(vs);

What happens with any running jobs if you skip the join call here?

cheers,
  Gerd
Gonglei (Arei) Oct. 28, 2013, 8:47 a.m. UTC | #2
> -----Original Message-----

> From: Gerd Hoffmann [mailto:kraxel@redhat.com]

> Sent: Monday, October 28, 2013 3:53 PM

> To: Gonglei (Arei)

> Cc: qemu-devel@nongnu.org; Stefan Hajnoczi; Yanqiangjun; Luonengjun;

> Huangweidong (Hardware)

> Subject: Re: [Qemu-devel] [PATCH] vnc: Fix qemu crash on vnc client

> disconnection

> 

>   Hi,

> 

> > diff --git a/ui/vnc.c b/ui/vnc.c

> > index 5601cc3..2177704 100644

> > --- a/ui/vnc.c

> > +++ b/ui/vnc.c

> > @@ -876,7 +876,8 @@ static int find_and_clear_dirty_height(struct

> VncState *vs,

> >  static int vnc_update_client_sync(VncState *vs, int has_dirty)

> >  {

> >      int ret = vnc_update_client(vs, has_dirty);

> > -    vnc_jobs_join(vs);

> > +    if (ret >= 0)

> > +        vnc_jobs_join(vs);

> 

> What happens with any running jobs if you skip the join call here?


Hi, Gerd. The other jobs are unaffected, and other clients still work.

Best regards,
-Gonglei
Gerd Hoffmann Nov. 4, 2013, 12:50 p.m. UTC | #3
On Mo, 2013-10-28 at 08:47 +0000, Gonglei (Arei) wrote:
> > -----Original Message-----
> > From: Gerd Hoffmann [mailto:kraxel@redhat.com]
> > Sent: Monday, October 28, 2013 3:53 PM
> > To: Gonglei (Arei)
> > Cc: qemu-devel@nongnu.org; Stefan Hajnoczi; Yanqiangjun; Luonengjun;
> > Huangweidong (Hardware)
> > Subject: Re: [Qemu-devel] [PATCH] vnc: Fix qemu crash on vnc client
> > disconnection
> > 
> >   Hi,
> > 
> > > diff --git a/ui/vnc.c b/ui/vnc.c
> > > index 5601cc3..2177704 100644
> > > --- a/ui/vnc.c
> > > +++ b/ui/vnc.c
> > > @@ -876,7 +876,8 @@ static int find_and_clear_dirty_height(struct
> > VncState *vs,
> > >  static int vnc_update_client_sync(VncState *vs, int has_dirty)
> > >  {
> > >      int ret = vnc_update_client(vs, has_dirty);
> > > -    vnc_jobs_join(vs);
> > > +    if (ret >= 0)
> > > +        vnc_jobs_join(vs);
> > 
> > What happens with any running jobs if you skip the join call here?
> 
> Hi, Gerd. The other jobs are unaffected, and other clients still work.

My concern is more that we might keep threads running which should not
run any more.

cheers,
  Gerd
diff mbox

Patch

diff --git a/ui/vnc.c b/ui/vnc.c
index 5601cc3..2177704 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -876,7 +876,8 @@  static int find_and_clear_dirty_height(struct VncState *vs,
 static int vnc_update_client_sync(VncState *vs, int has_dirty)
 {
     int ret = vnc_update_client(vs, has_dirty);
-    vnc_jobs_join(vs);
+    if (ret >= 0)
+        vnc_jobs_join(vs);
     return ret;
 }
 
@@ -939,8 +940,10 @@  static int vnc_update_client(VncState *vs, int has_dirty)
         return n;
     }
 
-    if (vs->csock == -1)
+    if (vs->csock == -1) {
         vnc_disconnect_finish(vs);
+	 return -1;
+    } 
 
     return 0;
 }
@@ -2737,6 +2740,7 @@  static void vnc_refresh(DisplayChangeListener *dcl)
     VncDisplay *vd = container_of(dcl, VncDisplay, dcl);
     VncState *vs, *vn;
     int has_dirty, rects = 0;
+    int ret;
 
     graphic_hw_update(NULL);
 
@@ -2749,8 +2753,10 @@  static void vnc_refresh(DisplayChangeListener *dcl)
     vnc_unlock_display(vd);
 
     QTAILQ_FOREACH_SAFE(vs, &vd->clients, next, vn) {
-        rects += vnc_update_client(vs, has_dirty);
+        ret = vnc_update_client(vs, has_dirty);
         /* vs might be free()ed here */
+        if (ret >= 0)
+	    rects += ret;
     }
 
     if (QTAILQ_EMPTY(&vd->clients)) {