
[2/2] vnc: don't mess up with iohandlers in the vnc thread

Message ID 1299761979-15197-2-git-send-email-corentin.chary@gmail.com
State New

Commit Message

Corentin Chary March 10, 2011, 12:59 p.m. UTC
The threaded VNC server messed with QEMU fd handlers without
any kind of locking, and that can cause some nasty race conditions.

Using qemu_mutex_lock_iothread() won't work because vnc_dpy_cpy(),
which will wait for the current job queue to finish, can be called with
the iothread lock held.

Instead, we now store the data in a temporary buffer, and use a socket
pair to notify the main thread that new data is available. The data
is not sent directly through this socket because it would make the code
more complex without any performance benefit.

vnc_[un]lock_output() is still needed to access VncState members like
abort, csock or jobs_buffer.

Thanks to Jan Kiszka for helping me solve this issue.

Signed-off-by: Corentin Chary <corentin.chary@gmail.com>
---
 ui/vnc-jobs-async.c |   50 ++++++++++++++++++++++++++++++++------------------
 ui/vnc-jobs-sync.c  |    4 ++++
 ui/vnc-jobs.h       |    1 +
 ui/vnc.c            |   16 ++++++++++++++++
 ui/vnc.h            |    2 ++
 5 files changed, 55 insertions(+), 18 deletions(-)

Comments

Paolo Bonzini March 10, 2011, 1:06 p.m. UTC | #1
On 03/10/2011 01:59 PM, Corentin Chary wrote:
> Instead, we now store the data in a temporary buffer, and use a socket
> pair to notify the main thread that new data is available.

You can use a bottom half for this instead of a special socket. 
Signaling a bottom half is async-signal- and thread-safe.

Paolo
Anthony Liguori March 10, 2011, 1:45 p.m. UTC | #2
On 03/10/2011 07:06 AM, Paolo Bonzini wrote:
> On 03/10/2011 01:59 PM, Corentin Chary wrote:
>> Instead, we now store the data in a temporary buffer, and use a socket
>> pair to notify the main thread that new data is available.
>
> You can use a bottom half for this instead of a special socket. 
> Signaling a bottom half is async-signal- and thread-safe.

Bottom halves are thread safe?

I don't think so.

They probably should be but they aren't today.

Regards,

Anthony Liguori

>
> Paolo
Peter Lieven March 10, 2011, 1:47 p.m. UTC | #3
On 10.03.2011 13:59, Corentin Chary wrote:
> The threaded VNC server messed with QEMU fd handlers without
> any kind of locking, and that can cause some nasty race conditions.
>
> Using qemu_mutex_lock_iothread() won't work because vnc_dpy_cpy(),
> which will wait for the current job queue to finish, can be called with
> the iothread lock held.
>
> Instead, we now store the data in a temporary buffer, and use a socket
> pair to notify the main thread that new data is available. The data
> is not sent directly through this socket because it would make the code
> more complex without any performance benefit.
>
> vnc_[un]lock_output() is still needed to access VncState members like
> abort, csock or jobs_buffer.
>

is this compatible with qemu-kvm?

thanks,
peter


> Thanks to Jan Kiszka for helping me solve this issue.
>
> Signed-off-by: Corentin Chary <corentin.chary@gmail.com>
> ---
>   ui/vnc-jobs-async.c |   50 ++++++++++++++++++++++++++++++++------------------
>   ui/vnc-jobs-sync.c  |    4 ++++
>   ui/vnc-jobs.h       |    1 +
>   ui/vnc.c            |   16 ++++++++++++++++
>   ui/vnc.h            |    2 ++
>   5 files changed, 55 insertions(+), 18 deletions(-)
>
> diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c
> index f596247..543b5a9 100644
> --- a/ui/vnc-jobs-async.c
> +++ b/ui/vnc-jobs-async.c
> @@ -28,6 +28,7 @@
>
>   #include "vnc.h"
>   #include "vnc-jobs.h"
> +#include "qemu_socket.h"
>
>   /*
>    * Locking:
> @@ -155,6 +156,24 @@ void vnc_jobs_join(VncState *vs)
>           qemu_cond_wait(&queue->cond, &queue->mutex);
>       }
>       vnc_unlock_queue(queue);
> +    vnc_jobs_consume_buffer(vs);
> +}
> +
> +void vnc_jobs_consume_buffer(VncState *vs)
> +{
> +    bool flush;
> +
> +    vnc_lock_output(vs);
> +    if (vs->jobs_buffer.offset) {
> +        vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset);
> +        buffer_reset(&vs->jobs_buffer);
> +    }
> +    flush = vs->csock != -1 && vs->abort != true;
> +    vnc_unlock_output(vs);
> +
> +    if (flush) {
> +      vnc_flush(vs);
> +    }
>   }
>
>   /*
> @@ -197,7 +216,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>       VncState vs;
>       int n_rectangles;
>       int saved_offset;
> -    bool flush;
>
>       vnc_lock_queue(queue);
>       while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
> @@ -213,6 +231,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>
>       vnc_lock_output(job->vs);
>       if (job->vs->csock == -1 || job->vs->abort == true) {
> +        vnc_unlock_output(job->vs);
>           goto disconnected;
>       }
>       vnc_unlock_output(job->vs);
> @@ -233,10 +252,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>
>           if (job->vs->csock == -1) {
>               vnc_unlock_display(job->vs->vd);
> -            /* output mutex must be locked before going to
> -             * disconnected:
> -             */
> -            vnc_lock_output(job->vs);
>               goto disconnected;
>           }
>
> @@ -254,24 +269,23 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>       vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
>       vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;
>
> -    /* Switch back buffers */
>       vnc_lock_output(job->vs);
> -    if (job->vs->csock == -1) {
> -        goto disconnected;
> -    }
> -
> -    vnc_write(job->vs, vs.output.buffer, vs.output.offset);
> +    if (job->vs->csock != -1) {
> +        int notify = !job->vs->jobs_buffer.offset;
>
> -disconnected:
> -    /* Copy persistent encoding data */
> -    vnc_async_encoding_end(job->vs, &vs);
> -    flush = (job->vs->csock != -1 && job->vs->abort != true);
> -    vnc_unlock_output(job->vs);
> +        buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
> +        buffer_append(&job->vs->jobs_buffer, vs.output.buffer,
> +                      vs.output.offset);
> +        /* Copy persistent encoding data */
> +        vnc_async_encoding_end(job->vs, &vs);
>
> -    if (flush) {
> -        vnc_flush(job->vs);
> +        if (notify) {
> +            send(job->vs->jobs_socks[1], (char []){1}, 1, 0);
> +        }
>       }
> +    vnc_unlock_output(job->vs);
>
> +disconnected:
>       vnc_lock_queue(queue);
>       QTAILQ_REMOVE(&queue->jobs, job, next);
>       vnc_unlock_queue(queue);
> diff --git a/ui/vnc-jobs-sync.c b/ui/vnc-jobs-sync.c
> index 49b77af..3a4d7df 100644
> --- a/ui/vnc-jobs-sync.c
> +++ b/ui/vnc-jobs-sync.c
> @@ -36,6 +36,10 @@ void vnc_jobs_join(VncState *vs)
>   {
>   }
>
> +void vnc_jobs_consume_buffer(VncState *vs)
> +{
> +}
> +
>   VncJob *vnc_job_new(VncState *vs)
>   {
>       vs->job.vs = vs;
> diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h
> index b8dab81..7c529b7 100644
> --- a/ui/vnc-jobs.h
> +++ b/ui/vnc-jobs.h
> @@ -37,6 +37,7 @@ void vnc_job_push(VncJob *job);
>   bool vnc_has_job(VncState *vs);
>   void vnc_jobs_clear(VncState *vs);
>   void vnc_jobs_join(VncState *vs);
> +void vnc_jobs_consume_buffer(VncState *vs);
>
>   #ifdef CONFIG_VNC_THREAD
>
> diff --git a/ui/vnc.c b/ui/vnc.c
> index 610f884..48a81a2 100644
> --- a/ui/vnc.c
> +++ b/ui/vnc.c
> @@ -1011,6 +1011,10 @@ static void vnc_disconnect_finish(VncState *vs)
>
>   #ifdef CONFIG_VNC_THREAD
>       qemu_mutex_destroy(&vs->output_mutex);
> +    qemu_set_fd_handler2(vs->jobs_socks[0], NULL, NULL, NULL, vs);
> +    close(vs->jobs_socks[0]);
> +    close(vs->jobs_socks[1]);
> +    buffer_free(&vs->jobs_buffer);
>   #endif
>       for (i = 0; i < VNC_STAT_ROWS; ++i) {
>           qemu_free(vs->lossy_rect[i]);
> @@ -1226,6 +1230,16 @@ static long vnc_client_read_plain(VncState *vs)
>       return ret;
>   }
>
> +#ifdef CONFIG_VNC_THREAD
> +static void vnc_jobs_read(void *opaque)
> +{
> +    VncState *vs = opaque;
> +    char dummy;
> +
> +    recv(vs->jobs_socks[0], (void *)&dummy, 1, 0);
> +    vnc_jobs_consume_buffer(vs);
> +}
> +#endif
>
>   /*
>    * First function called whenever there is more data to be read from
> @@ -2525,6 +2539,8 @@ static void vnc_connect(VncDisplay *vd, int csock)
>
>   #ifdef CONFIG_VNC_THREAD
>       qemu_mutex_init(&vs->output_mutex);
> +    qemu_socketpair(AF_UNIX, SOCK_STREAM, 0, vs->jobs_socks);
> +    qemu_set_fd_handler2(vs->jobs_socks[0], NULL, vnc_jobs_read, NULL, vs);
>   #endif
>
>       QTAILQ_INSERT_HEAD(&vd->clients, vs, next);
> diff --git a/ui/vnc.h b/ui/vnc.h
> index 8a1e7b9..0f98f2e 100644
> --- a/ui/vnc.h
> +++ b/ui/vnc.h
> @@ -283,6 +283,8 @@ struct VncState
>       VncJob job;
>   #else
>       QemuMutex output_mutex;
> +    Buffer jobs_buffer;
> +    int jobs_socks[2];
>   #endif
>
>       /* Encoding specific, if you add something here, don't forget to
Corentin Chary March 10, 2011, 1:54 p.m. UTC | #4
On Thu, Mar 10, 2011 at 1:45 PM, Anthony Liguori <aliguori@us.ibm.com> wrote:
> On 03/10/2011 07:06 AM, Paolo Bonzini wrote:
>>
>> On 03/10/2011 01:59 PM, Corentin Chary wrote:
>>>
>>> Instead, we now store the data in a temporary buffer, and use a socket
>>> pair to notify the main thread that new data is available.
>>
>> You can use a bottom half for this instead of a special socket. Signaling
>> a bottom half is async-signal- and thread-safe.
>
> Bottom halves are thread safe?
>
> I don't think so.

The bottom halves API is not thread safe, but calling
qemu_bh_schedule_idle() in another thread *seems* to be safe (here, it
would be protected from qemu_bh_delete() by vnc_lock_output()).
Paolo Bonzini March 10, 2011, 1:56 p.m. UTC | #5
On 03/10/2011 02:45 PM, Anthony Liguori wrote:
> On 03/10/2011 07:06 AM, Paolo Bonzini wrote:
>> On 03/10/2011 01:59 PM, Corentin Chary wrote:
>>> Instead, we now store the data in a temporary buffer, and use a socket
>>> pair to notify the main thread that new data is available.
>>
>> You can use a bottom half for this instead of a special socket.
>> Signaling a bottom half is async-signal- and thread-safe.
>
> Bottom halves are thread safe?
>
> I don't think so.
>
> They probably should be but they aren't today.

Creating a new bottom half is not thread-safe, but scheduling one is. 
Assuming that you never use qemu_bh_schedule_idle, qemu_bh_schedule 
boils down to:

     if (bh->scheduled)
         return;
     bh->scheduled = 1;
     /* stop the currently executing CPU to execute the BH ASAP */
     qemu_notify_event();

You may have a spurious wakeup if two threads race on the same bottom 
half (including the signaling thread racing with the IO thread), but 
overall you can safely treat them as thread-safe.

Paolo
Paolo Bonzini March 10, 2011, 1:58 p.m. UTC | #6
On 03/10/2011 02:54 PM, Corentin Chary wrote:
> > >  You can use a bottom half for this instead of a special socket. Signaling
> > >  a bottom half is async-signal- and thread-safe.
> >
> >  Bottom halves are thread safe?
> >
> >  I don't think so.
>
> The bottom halves API is not thread safe, but calling
> qemu_bh_schedule_idle()

Not _idle please.

> in another thread *seems* to be safe (here, it
> would be protected from qemu_bh_delete() by vnc_lock_output()).

If it weren't protected against qemu_bh_delete, you would already have 
the same race between writing to the socket and closing it in another 
thread.

Paolo
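
For reference, a minimal sketch of the bottom-half variant discussed above --
not the submitted patch, which uses a socketpair. It assumes the
qemu_bh_new()/qemu_bh_schedule()/qemu_bh_delete() API; the jobs_bh field and
the vnc_jobs_bh() callback are hypothetical names used only for illustration:

    /* VncState would carry a QEMUBH pointer instead of jobs_socks[2]
     * (hypothetical field, not part of the patch below): */
    QEMUBH *jobs_bh;

    /* main thread, bottom-half callback: drain the shared buffer */
    static void vnc_jobs_bh(void *opaque)
    {
        VncState *vs = opaque;

        vnc_jobs_consume_buffer(vs);
    }

    /* main thread, vnc_connect(): create the bottom half once per client */
    vs->jobs_bh = qemu_bh_new(vnc_jobs_bh, vs);

    /* worker thread, vnc_worker_thread_loop(), with vnc_lock_output() held:
     * append the encoded rectangles and signal the main thread; scheduling
     * a bottom half is the async-signal- and thread-safe part */
    buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
    buffer_append(&job->vs->jobs_buffer, vs.output.buffer, vs.output.offset);
    qemu_bh_schedule(job->vs->jobs_bh);

    /* main thread, vnc_disconnect_finish(): delete under vnc_lock_output(),
     * as discussed above, so it cannot race with the worker scheduling it */
    qemu_bh_delete(vs->jobs_bh);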

Patch

diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c
index f596247..543b5a9 100644
--- a/ui/vnc-jobs-async.c
+++ b/ui/vnc-jobs-async.c
@@ -28,6 +28,7 @@ 
 
 #include "vnc.h"
 #include "vnc-jobs.h"
+#include "qemu_socket.h"
 
 /*
  * Locking:
@@ -155,6 +156,24 @@  void vnc_jobs_join(VncState *vs)
         qemu_cond_wait(&queue->cond, &queue->mutex);
     }
     vnc_unlock_queue(queue);
+    vnc_jobs_consume_buffer(vs);
+}
+
+void vnc_jobs_consume_buffer(VncState *vs)
+{
+    bool flush;
+
+    vnc_lock_output(vs);
+    if (vs->jobs_buffer.offset) {
+        vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset);
+        buffer_reset(&vs->jobs_buffer);
+    }
+    flush = vs->csock != -1 && vs->abort != true;
+    vnc_unlock_output(vs);
+
+    if (flush) {
+      vnc_flush(vs);
+    }
 }
 
 /*
@@ -197,7 +216,6 @@  static int vnc_worker_thread_loop(VncJobQueue *queue)
     VncState vs;
     int n_rectangles;
     int saved_offset;
-    bool flush;
 
     vnc_lock_queue(queue);
     while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
@@ -213,6 +231,7 @@  static int vnc_worker_thread_loop(VncJobQueue *queue)
 
     vnc_lock_output(job->vs);
     if (job->vs->csock == -1 || job->vs->abort == true) {
+        vnc_unlock_output(job->vs);
         goto disconnected;
     }
     vnc_unlock_output(job->vs);
@@ -233,10 +252,6 @@  static int vnc_worker_thread_loop(VncJobQueue *queue)
 
         if (job->vs->csock == -1) {
             vnc_unlock_display(job->vs->vd);
-            /* output mutex must be locked before going to
-             * disconnected:
-             */
-            vnc_lock_output(job->vs);
             goto disconnected;
         }
 
@@ -254,24 +269,23 @@  static int vnc_worker_thread_loop(VncJobQueue *queue)
     vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
     vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;
 
-    /* Switch back buffers */
     vnc_lock_output(job->vs);
-    if (job->vs->csock == -1) {
-        goto disconnected;
-    }
-
-    vnc_write(job->vs, vs.output.buffer, vs.output.offset);
+    if (job->vs->csock != -1) {
+        int notify = !job->vs->jobs_buffer.offset;
 
-disconnected:
-    /* Copy persistent encoding data */
-    vnc_async_encoding_end(job->vs, &vs);
-    flush = (job->vs->csock != -1 && job->vs->abort != true);
-    vnc_unlock_output(job->vs);
+        buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
+        buffer_append(&job->vs->jobs_buffer, vs.output.buffer,
+                      vs.output.offset);
+        /* Copy persistent encoding data */
+        vnc_async_encoding_end(job->vs, &vs);
 
-    if (flush) {
-        vnc_flush(job->vs);
+        if (notify) {
+            send(job->vs->jobs_socks[1], (char []){1}, 1, 0);
+        }
     }
+    vnc_unlock_output(job->vs);
 
+disconnected:
     vnc_lock_queue(queue);
     QTAILQ_REMOVE(&queue->jobs, job, next);
     vnc_unlock_queue(queue);
diff --git a/ui/vnc-jobs-sync.c b/ui/vnc-jobs-sync.c
index 49b77af..3a4d7df 100644
--- a/ui/vnc-jobs-sync.c
+++ b/ui/vnc-jobs-sync.c
@@ -36,6 +36,10 @@  void vnc_jobs_join(VncState *vs)
 {
 }
 
+void vnc_jobs_consume_buffer(VncState *vs)
+{
+}
+
 VncJob *vnc_job_new(VncState *vs)
 {
     vs->job.vs = vs;
diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h
index b8dab81..7c529b7 100644
--- a/ui/vnc-jobs.h
+++ b/ui/vnc-jobs.h
@@ -37,6 +37,7 @@  void vnc_job_push(VncJob *job);
 bool vnc_has_job(VncState *vs);
 void vnc_jobs_clear(VncState *vs);
 void vnc_jobs_join(VncState *vs);
+void vnc_jobs_consume_buffer(VncState *vs);
 
 #ifdef CONFIG_VNC_THREAD
 
diff --git a/ui/vnc.c b/ui/vnc.c
index 610f884..48a81a2 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -1011,6 +1011,10 @@  static void vnc_disconnect_finish(VncState *vs)
 
 #ifdef CONFIG_VNC_THREAD
     qemu_mutex_destroy(&vs->output_mutex);
+    qemu_set_fd_handler2(vs->jobs_socks[0], NULL, NULL, NULL, vs);
+    close(vs->jobs_socks[0]);
+    close(vs->jobs_socks[1]);
+    buffer_free(&vs->jobs_buffer);
 #endif
     for (i = 0; i < VNC_STAT_ROWS; ++i) {
         qemu_free(vs->lossy_rect[i]);
@@ -1226,6 +1230,16 @@  static long vnc_client_read_plain(VncState *vs)
     return ret;
 }
 
+#ifdef CONFIG_VNC_THREAD
+static void vnc_jobs_read(void *opaque)
+{
+    VncState *vs = opaque;
+    char dummy;
+
+    recv(vs->jobs_socks[0], (void *)&dummy, 1, 0);
+    vnc_jobs_consume_buffer(vs);
+}
+#endif
 
 /*
  * First function called whenever there is more data to be read from
@@ -2525,6 +2539,8 @@  static void vnc_connect(VncDisplay *vd, int csock)
 
 #ifdef CONFIG_VNC_THREAD
     qemu_mutex_init(&vs->output_mutex);
+    qemu_socketpair(AF_UNIX, SOCK_STREAM, 0, vs->jobs_socks);
+    qemu_set_fd_handler2(vs->jobs_socks[0], NULL, vnc_jobs_read, NULL, vs);
 #endif
 
     QTAILQ_INSERT_HEAD(&vd->clients, vs, next);
diff --git a/ui/vnc.h b/ui/vnc.h
index 8a1e7b9..0f98f2e 100644
--- a/ui/vnc.h
+++ b/ui/vnc.h
@@ -283,6 +283,8 @@  struct VncState
     VncJob job;
 #else
     QemuMutex output_mutex;
+    Buffer jobs_buffer;
+    int jobs_socks[2];
 #endif
 
     /* Encoding specific, if you add something here, don't forget to