From patchwork Wed Mar 14 06:58:47 2012
From: Corentin Chary <corentincj@iksaif.net>
To: Anthony Liguori
Cc: Peter Lieven, Corentin Chary, qemu-devel@nongnu.org,
    kvm@vger.kernel.org, weil@mail.berlios.de
Date: Wed, 14 Mar 2012 07:58:47 +0100
Message-Id: <1331708328-21414-2-git-send-email-corentincj@iksaif.net>
In-Reply-To: <1331708328-21414-1-git-send-email-corentincj@iksaif.net>
References: <1331708328-21414-1-git-send-email-corentincj@iksaif.net>
Subject: [Qemu-devel] [PATCH 1/2] vnc: don't mess up with iohandlers in the
    vnc thread
X-Patchwork-Id: 146561

The threaded VNC server messes with the QEMU fd handlers without any
kind of locking, which can cause nasty race conditions. Taking
qemu_mutex_lock_iothread() does not help, because vnc_dpy_copy(), which
waits for the current job queue to finish, can be called with the
iothread lock held.

Instead, store the encoded data in a temporary buffer and use a bottom
half to notify the main thread that new data is available.
vnc_[un]lock_output() is still needed to protect access to VncState
members such as abort, csock and jobs_buffer.
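To make the hand-off concrete, below is a minimal standalone sketch of
the pattern this patch adopts: the worker appends its output to a shared
buffer under a lock, then wakes the main loop, which alone writes to the
client socket. Plain pthreads and a pipe stand in for QEMU's worker
thread and bottom half here; every name in the sketch is illustrative,
not QEMU API.

/* Standalone sketch of the worker -> main-loop hand-off; names are
 * illustrative stand-ins for the QEMU primitives noted in comments. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static pthread_mutex_t output_lock = PTHREAD_MUTEX_INITIALIZER;
static char jobs_buf[4096];
static size_t jobs_len;
static int wake_fd[2];                         /* [0] read end, [1] write end */

static void *worker(void *arg)
{
    const char *data = arg;
    size_t len = strlen(data);

    pthread_mutex_lock(&output_lock);          /* vnc_lock_output()   */
    memcpy(jobs_buf + jobs_len, data, len);    /* buffer_append()     */
    jobs_len += len;
    pthread_mutex_unlock(&output_lock);        /* vnc_unlock_output() */

    (void)write(wake_fd[1], "x", 1);           /* qemu_bh_schedule(): wake main loop */
    return NULL;
}

int main(void)
{
    pthread_t tid;
    char token, out[4096];
    size_t len;

    if (pipe(wake_fd) != 0) {
        return 1;
    }
    pthread_create(&tid, NULL, worker, (void *)"encoded framebuffer update");

    (void)read(wake_fd[0], &token, 1);         /* main loop wakes: the "bottom half" runs */
    pthread_mutex_lock(&output_lock);
    len = jobs_len;                            /* vnc_jobs_consume_buffer(): */
    memcpy(out, jobs_buf, len);                /* drain the shared buffer... */
    jobs_len = 0;                              /* ...and reset it (buffer_reset()) */
    pthread_mutex_unlock(&output_lock);

    printf("main thread flushes %zu bytes to the client socket\n", len);
    pthread_join(tid, NULL);
    return 0;
}

The key property is that the worker never touches the socket or the fd
handlers itself; it only hands data to the thread that owns them.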
Signed-off-by: Corentin Chary <corentincj@iksaif.net>
---
 ui/vnc-jobs-async.c |   48 +++++++++++++++++++++++++++++-------------------
 ui/vnc-jobs.h       |    1 +
 ui/vnc.c            |   12 ++++++++++++
 ui/vnc.h            |    2 ++
 4 files changed, 44 insertions(+), 19 deletions(-)

diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c
index 9b3016c..087b84d 100644
--- a/ui/vnc-jobs-async.c
+++ b/ui/vnc-jobs-async.c
@@ -28,6 +28,7 @@
 
 #include "vnc.h"
 #include "vnc-jobs.h"
+#include "qemu_socket.h"
 
 /*
  * Locking:
@@ -155,6 +156,24 @@ void vnc_jobs_join(VncState *vs)
         qemu_cond_wait(&queue->cond, &queue->mutex);
     }
     vnc_unlock_queue(queue);
+    vnc_jobs_consume_buffer(vs);
+}
+
+void vnc_jobs_consume_buffer(VncState *vs)
+{
+    bool flush;
+
+    vnc_lock_output(vs);
+    if (vs->jobs_buffer.offset) {
+        vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset);
+        buffer_reset(&vs->jobs_buffer);
+    }
+    flush = vs->csock != -1 && vs->abort != true;
+    vnc_unlock_output(vs);
+
+    if (flush) {
+        vnc_flush(vs);
+    }
 }
 
 /*
@@ -197,7 +216,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
     VncState vs;
     int n_rectangles;
     int saved_offset;
-    bool flush;
 
     vnc_lock_queue(queue);
     while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
@@ -213,6 +231,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
 
     vnc_lock_output(job->vs);
    if (job->vs->csock == -1 || job->vs->abort == true) {
+        vnc_unlock_output(job->vs);
         goto disconnected;
     }
     vnc_unlock_output(job->vs);
@@ -233,10 +252,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
 
         if (job->vs->csock == -1) {
             vnc_unlock_display(job->vs->vd);
-            /* output mutex must be locked before going to
-             * disconnected:
-             */
-            vnc_lock_output(job->vs);
             goto disconnected;
         }
 
@@ -254,24 +269,19 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
     vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
     vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;
 
-    /* Switch back buffers */
     vnc_lock_output(job->vs);
-    if (job->vs->csock == -1) {
-        goto disconnected;
+    if (job->vs->csock != -1) {
+        buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
+        buffer_append(&job->vs->jobs_buffer, vs.output.buffer,
+                      vs.output.offset);
+        /* Copy persistent encoding data */
+        vnc_async_encoding_end(job->vs, &vs);
+
+        qemu_bh_schedule(job->vs->bh);
     }
-
-    vnc_write(job->vs, vs.output.buffer, vs.output.offset);
-
-disconnected:
-    /* Copy persistent encoding data */
-    vnc_async_encoding_end(job->vs, &vs);
-    flush = (job->vs->csock != -1 && job->vs->abort != true);
     vnc_unlock_output(job->vs);
 
-    if (flush) {
-        vnc_flush(job->vs);
-    }
-
+disconnected:
     vnc_lock_queue(queue);
     QTAILQ_REMOVE(&queue->jobs, job, next);
     vnc_unlock_queue(queue);
diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h
index b8dab81..4c661f9 100644
--- a/ui/vnc-jobs.h
+++ b/ui/vnc-jobs.h
@@ -40,6 +40,7 @@ void vnc_jobs_join(VncState *vs);
 
 #ifdef CONFIG_VNC_THREAD
 
+void vnc_jobs_consume_buffer(VncState *vs);
 void vnc_start_worker_thread(void);
 bool vnc_worker_thread_running(void);
 void vnc_stop_worker_thread(void);
diff --git a/ui/vnc.c b/ui/vnc.c
index bdec33a..aef6d3a 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -1068,7 +1068,10 @@ static void vnc_disconnect_finish(VncState *vs)
 
 #ifdef CONFIG_VNC_THREAD
     qemu_mutex_destroy(&vs->output_mutex);
+    qemu_bh_delete(vs->bh);
+    buffer_free(&vs->jobs_buffer);
 #endif
+
     for (i = 0; i < VNC_STAT_ROWS; ++i) {
         g_free(vs->lossy_rect[i]);
     }
@@ -1283,6 +1286,14 @@ static long vnc_client_read_plain(VncState *vs)
     return ret;
 }
 
+#ifdef CONFIG_VNC_THREAD
+static void vnc_jobs_bh(void *opaque)
+{
+    VncState *vs = opaque;
+
+    vnc_jobs_consume_buffer(vs);
+}
+#endif
 
 /*
  * First function called whenever there is more data to be read from
@@ -2687,6 +2698,7 @@ static void vnc_connect(VncDisplay *vd, int csock, int skipauth)
 
 #ifdef CONFIG_VNC_THREAD
     qemu_mutex_init(&vs->output_mutex);
+    vs->bh = qemu_bh_new(vnc_jobs_bh, vs);
 #endif
 
     QTAILQ_INSERT_HEAD(&vd->clients, vs, next);
diff --git a/ui/vnc.h b/ui/vnc.h
index 0bd1fc6..a851ebd 100644
--- a/ui/vnc.h
+++ b/ui/vnc.h
@@ -304,6 +304,8 @@ struct VncState
     VncJob job;
 #else
     QemuMutex output_mutex;
+    QEMUBH *bh;
+    Buffer jobs_buffer;
 #endif
 
     /* Encoding specific, if you add something here, don't forget to
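A note on the design: with this change, every socket write and every fd
handler update happens on the main thread, so the worker thread no
longer races with the iohandler machinery. The bottom half's lifetime is
tied to the connection: vnc_connect() creates vs->bh, and
vnc_disconnect_finish() deletes it and frees jobs_buffer, so a scheduled
but not-yet-run bottom half can never fire against a destroyed VncState.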