get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (replaces the resource).

GET /api/1.1/patches/2222099/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2222099,
    "url": "http://patchwork.ozlabs.org/api/1.1/patches/2222099/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20260410-qemu-vnc-v2-46-231416f76dc3@redhat.com/",
    "project": {
        "id": 14,
        "url": "http://patchwork.ozlabs.org/api/1.1/projects/14/?format=api",
        "name": "QEMU Development",
        "link_name": "qemu-devel",
        "list_id": "qemu-devel.nongnu.org",
        "list_email": "qemu-devel@nongnu.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": ""
    },
    "msgid": "<20260410-qemu-vnc-v2-46-231416f76dc3@redhat.com>",
    "date": "2026-04-10T19:19:08",
    "name": "[v2,46/67] ui/vnc: make the worker thread per-VncDisplay",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "e599ffbf1e6cca93940738fc44b71eb0481e1130",
    "submitter": {
        "id": 66774,
        "url": "http://patchwork.ozlabs.org/api/1.1/people/66774/?format=api",
        "name": "Marc-André Lureau",
        "email": "marcandre.lureau@redhat.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20260410-qemu-vnc-v2-46-231416f76dc3@redhat.com/mbox/",
    "series": [
        {
            "id": 499494,
            "url": "http://patchwork.ozlabs.org/api/1.1/series/499494/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/list/?series=499494",
            "date": "2026-04-10T19:18:23",
            "name": "ui: add standalone VNC server over D-Bus",
            "version": 2,
            "mbox": "http://patchwork.ozlabs.org/series/499494/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2222099/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2222099/checks/",
    "tags": {},
    "headers": {
        "Return-Path": "<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>",
        "X-Original-To": "incoming@patchwork.ozlabs.org",
        "Delivered-To": "patchwork-incoming@legolas.ozlabs.org",
        "Authentication-Results": [
            "legolas.ozlabs.org;\n\tdkim=pass (1024-bit key;\n unprotected) header.d=redhat.com header.i=@redhat.com header.a=rsa-sha256\n header.s=mimecast20190719 header.b=C7ohMJoH;\n\tdkim-atps=neutral",
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=nongnu.org\n (client-ip=209.51.188.17; helo=lists.gnu.org;\n envelope-from=qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org;\n receiver=patchwork.ozlabs.org)"
        ],
        "Received": [
            "from lists.gnu.org (lists1p.gnu.org [209.51.188.17])\n\t(using TLSv1.2 with cipher ECDHE-ECDSA-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4fsmw51XCWz1y2d\n\tfor <incoming@patchwork.ozlabs.org>; Sat, 11 Apr 2026 05:24:53 +1000 (AEST)",
            "from localhost ([::1] helo=lists1p.gnu.org)\n\tby lists.gnu.org with esmtp (Exim 4.90_1)\n\t(envelope-from <qemu-devel-bounces@nongnu.org>)\n\tid 1wBHS5-00066d-Iy; Fri, 10 Apr 2026 15:23:37 -0400",
            "from eggs.gnu.org ([2001:470:142:3::10])\n by lists1p.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256)\n (Exim 4.90_1) (envelope-from <marcandre.lureau@redhat.com>)\n id 1wBHS4-00066S-KL\n for qemu-devel@nongnu.org; Fri, 10 Apr 2026 15:23:36 -0400",
            "from us-smtp-delivery-124.mimecast.com ([170.10.133.124])\n by eggs.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256)\n (Exim 4.90_1) (envelope-from <marcandre.lureau@redhat.com>)\n id 1wBHS2-000269-06\n for qemu-devel@nongnu.org; Fri, 10 Apr 2026 15:23:36 -0400",
            "from mx-prod-mc-06.mail-002.prod.us-west-2.aws.redhat.com\n (ec2-35-165-154-97.us-west-2.compute.amazonaws.com [35.165.154.97]) by\n relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.3,\n cipher=TLS_AES_256_GCM_SHA384) id us-mta-520-bBjnj4wFMuSlttqBDi6Sjw-1; Fri,\n 10 Apr 2026 15:23:32 -0400",
            "from mx-prod-int-08.mail-002.prod.us-west-2.aws.redhat.com\n (mx-prod-int-08.mail-002.prod.us-west-2.aws.redhat.com [10.30.177.111])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n key-exchange X25519 server-signature RSA-PSS (2048 bits) server-digest\n SHA256)\n (No client certificate requested)\n by mx-prod-mc-06.mail-002.prod.us-west-2.aws.redhat.com (Postfix) with ESMTPS\n id 251C118007F2\n for <qemu-devel@nongnu.org>; Fri, 10 Apr 2026 19:23:31 +0000 (UTC)",
            "from localhost (unknown [10.44.22.4])\n by mx-prod-int-08.mail-002.prod.us-west-2.aws.redhat.com (Postfix) with ESMTP\n id E272C18002A6; Fri, 10 Apr 2026 19:23:29 +0000 (UTC)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com;\n s=mimecast20190719; t=1775849013;\n h=from:from:reply-to:subject:subject:date:date:message-id:message-id:\n to:to:cc:cc:mime-version:mime-version:content-type:content-type:\n content-transfer-encoding:content-transfer-encoding:\n in-reply-to:in-reply-to:references:references;\n bh=RjANnCQtzhl6inA3QcoNqtJZnUi7lbkVyXloCs2jogQ=;\n b=C7ohMJoHE2wLFKxLKoxCO1kjHfNJFkq6KMA1lbrW0+rCAUFGipnzmyq9K2LSerJ6NyuAxz\n ReEzsbfU7SuT75HtdCOLlmcrNqYoJ5j0Ala5yjRh+ZCzeTTe/AsiLEhFs+7Iq+kUB+8gKz\n W0Ovs6ObEb7Jga5dfr4md2jp50EQ1u0=",
        "X-MC-Unique": "bBjnj4wFMuSlttqBDi6Sjw-1",
        "X-Mimecast-MFC-AGG-ID": "bBjnj4wFMuSlttqBDi6Sjw_1775849011",
        "From": "=?utf-8?q?Marc-Andr=C3=A9_Lureau?= <marcandre.lureau@redhat.com>",
        "Date": "Fri, 10 Apr 2026 23:19:08 +0400",
        "Subject": "[PATCH v2 46/67] ui/vnc: make the worker thread per-VncDisplay",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"utf-8\"",
        "Content-Transfer-Encoding": "8bit",
        "Message-Id": "<20260410-qemu-vnc-v2-46-231416f76dc3@redhat.com>",
        "References": "<20260410-qemu-vnc-v2-0-231416f76dc3@redhat.com>",
        "In-Reply-To": "<20260410-qemu-vnc-v2-0-231416f76dc3@redhat.com>",
        "To": "qemu-devel@nongnu.org",
        "Cc": "=?utf-8?q?Marc-Andr=C3=A9_Lureau?= <marcandre.lureau@redhat.com>",
        "X-Developer-Signature": "v=1; a=openpgp-sha256; l=6611;\n i=marcandre.lureau@redhat.com; h=from:subject:message-id;\n bh=wV7SjAq/U+a/ra5a7o/iGvyaoNptnoDE1TurKaPvPMg=;\n b=owEBbQKS/ZANAwAKAdro4Ql1lpzlAcsmYgBp2U0WEzI16+jx9n6AMtIuctj1KHkUtFoap7DHq\n D1N8ORn8oaJAjMEAAEKAB0WIQSHqb2TP4fGBtJ29i3a6OEJdZac5QUCadlNFgAKCRDa6OEJdZac\n 5aI4D/9tnb8M7lqML2GQunDfZM0ZxVFYOOury+BWxjW1+oiP+YEosFr4ouxXIgGzITbinRt+3g2\n +IyE76wchhxheb0ccjr0CojnbCM2DSrof8FCr2mDpQJ7T1tViAJkwdQVXW1P5rCictnjw8bXmfe\n O5HxSPWKQmf4ujqZ1KirNwuMaVA5CBtASR3SW5WPcFOkGK/G/4j4IaAdkGYFy6DwII89i1vv5HU\n 7GBIFV97NByWRbQsqFWuB+HVMFqN+W+9f/AS92G61OUstQDd9c8cpvcusyBv12hlCRgr1rGpi2H\n EOSbmJHSnVgkYJyWWp4/PuuExxvk6xEZ2NX+WBLmLzSoyXAMvqhmxP1FK2sd0+5hEGtTztkqds6\n 3vAxBYvrqorN4hGJVbsKborpl3V1J3D2MQjUlkHqb2aF7es6CUeJf0t7PgAUhyI2tRlt3XZiO+c\n r7LK8gZdD9cVFda7RtWThRJ79Y6NCeUIB92RxZArfiPhohYfCPb2A7GxA0oqXq/gY50Zc23d/7l\n L7sfywr6UbZkU0wxYn0SWY8Fct+yETBW4Zidob7qWbtHAz96bbhwwvUnxgevJKIIRXesvYTt67e\n Iy9XmNKvGI/z9PSqQyH8qOM8dS1VpMKRGlbz9jUy/VBXcRtMiV5ZSKHbrZCtFPBbMglFtBS49tp\n RHuIzaNaOVizRLw==",
        "X-Developer-Key": "i=marcandre.lureau@redhat.com; a=openpgp;\n fpr=87A9BD933F87C606D276F62DDAE8E10975969CE5",
        "X-Scanned-By": "MIMEDefang 3.4.1 on 10.30.177.111",
        "Received-SPF": "pass client-ip=170.10.133.124;\n envelope-from=marcandre.lureau@redhat.com;\n helo=us-smtp-delivery-124.mimecast.com",
        "X-Spam_score_int": "-25",
        "X-Spam_score": "-2.6",
        "X-Spam_bar": "--",
        "X-Spam_report": "(-2.6 / 5.0 requ) BAYES_00=-1.9, DKIMWL_WL_HIGH=-0.54,\n DKIM_SIGNED=0.1, DKIM_VALID=-0.1, DKIM_VALID_AU=-0.1, DKIM_VALID_EF=-0.1,\n RCVD_IN_DNSWL_NONE=-0.0001, RCVD_IN_MSPIKE_H2=0.001,\n RCVD_IN_VALIDITY_RPBL_BLOCKED=0.001, RCVD_IN_VALIDITY_SAFE_BLOCKED=0.001,\n SPF_HELO_PASS=-0.001, SPF_PASS=-0.001 autolearn=ham autolearn_force=no",
        "X-Spam_action": "no action",
        "X-BeenThere": "qemu-devel@nongnu.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "qemu development <qemu-devel.nongnu.org>",
        "List-Unsubscribe": "<https://lists.nongnu.org/mailman/options/qemu-devel>,\n <mailto:qemu-devel-request@nongnu.org?subject=unsubscribe>",
        "List-Archive": "<https://lists.nongnu.org/archive/html/qemu-devel>",
        "List-Post": "<mailto:qemu-devel@nongnu.org>",
        "List-Help": "<mailto:qemu-devel-request@nongnu.org?subject=help>",
        "List-Subscribe": "<https://lists.nongnu.org/mailman/listinfo/qemu-devel>,\n <mailto:qemu-devel-request@nongnu.org?subject=subscribe>",
        "Errors-To": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org",
        "Sender": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org"
    },
    "content": "The VNC encoding worker thread was using a single global queue shared\nacross all VNC displays, with no way to stop it. This made it impossible\nto properly clean up resources when a VncDisplay is freed.\n\nMove the VncJobQueue from a file-scoped global to a per-VncDisplay\nmember, so each display owns its worker thread and queue. Add\nvnc_stop_worker_thread() to perform an orderly shutdown: signal the\nthread to exit, join it, and destroy the queue. The thread is now\ncreated as QEMU_THREAD_JOINABLE instead of QEMU_THREAD_DETACHED.\n\nSigned-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>\n---\n ui/vnc-jobs.h |  3 ++-\n ui/vnc.h      |  2 ++\n ui/vnc-jobs.c | 76 +++++++++++++++++++++++++++++++++--------------------------\n ui/vnc.c      |  3 ++-\n 4 files changed, 49 insertions(+), 35 deletions(-)",
    "diff": "diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h\nindex 59f66bcc353..e5ab55c1da6 100644\n--- a/ui/vnc-jobs.h\n+++ b/ui/vnc-jobs.h\n@@ -37,7 +37,8 @@ void vnc_job_push(VncJob *job);\n void vnc_jobs_join(VncState *vs);\n \n void vnc_jobs_consume_buffer(VncState *vs);\n-void vnc_start_worker_thread(void);\n+void vnc_start_worker_thread(VncDisplay *vd);\n+void vnc_stop_worker_thread(VncDisplay *vd);\n \n /* Locks */\n static inline int vnc_trylock_display(VncDisplay *vd)\ndiff --git a/ui/vnc.h b/ui/vnc.h\nindex 110c2bd4600..9a09fcdad8b 100644\n--- a/ui/vnc.h\n+++ b/ui/vnc.h\n@@ -62,6 +62,7 @@\n \n typedef struct VncState VncState;\n typedef struct VncJob VncJob;\n+typedef struct VncJobQueue VncJobQueue;\n typedef struct VncRect VncRect;\n typedef struct VncRectEntry VncRectEntry;\n \n@@ -158,6 +159,7 @@ struct VncDisplay\n     int ledstate;\n     QKbdState *kbd;\n     QemuMutex mutex;\n+    VncJobQueue *queue;\n \n     int cursor_msize;\n     uint8_t *cursor_mask;\ndiff --git a/ui/vnc-jobs.c b/ui/vnc-jobs.c\nindex 5b17ef54091..c809287dd3a 100644\n--- a/ui/vnc-jobs.c\n+++ b/ui/vnc-jobs.c\n@@ -29,8 +29,6 @@\n #include \"qemu/osdep.h\"\n #include \"vnc.h\"\n #include \"vnc-jobs.h\"\n-#include \"qemu/sockets.h\"\n-#include \"qemu/main-loop.h\"\n #include \"trace.h\"\n \n /*\n@@ -56,17 +54,10 @@ struct VncJobQueue {\n     QemuCond cond;\n     QemuMutex mutex;\n     QemuThread thread;\n+    bool exit;\n     QTAILQ_HEAD(, VncJob) jobs;\n };\n \n-typedef struct VncJobQueue VncJobQueue;\n-\n-/*\n- * We use a single global queue, but most of the functions are\n- * already reentrant, so we can easily add more than one encoding thread\n- */\n-static VncJobQueue *queue;\n-\n static void vnc_lock_queue(VncJobQueue *queue)\n {\n     qemu_mutex_lock(&queue->mutex);\n@@ -125,19 +116,22 @@ static void vnc_job_free(VncJob *job)\n  */\n void vnc_job_push(VncJob *job)\n {\n+    VncJobQueue *queue = job->vs->vd->queue;\n+\n     assert(!QTAILQ_IN_USE(job, next));\n \n     if 
(QLIST_EMPTY(&job->rectangles)) {\n         vnc_job_free(job);\n     } else {\n         vnc_lock_queue(queue);\n+        assert(!queue->exit);\n         QTAILQ_INSERT_TAIL(&queue->jobs, job, next);\n         qemu_cond_broadcast(&queue->cond);\n         vnc_unlock_queue(queue);\n     }\n }\n \n-static bool vnc_has_job_locked(VncState *vs)\n+static bool vnc_has_job_locked(VncJobQueue *queue, VncState *vs)\n {\n     VncJob *job;\n \n@@ -151,8 +145,10 @@ static bool vnc_has_job_locked(VncState *vs)\n \n void vnc_jobs_join(VncState *vs)\n {\n+    VncJobQueue *queue = vs->vd->queue;\n+\n     vnc_lock_queue(queue);\n-    while (vnc_has_job_locked(vs)) {\n+    while (vnc_has_job_locked(queue, vs)) {\n         qemu_cond_wait(&queue->cond, &queue->mutex);\n     }\n     vnc_unlock_queue(queue);\n@@ -252,9 +248,13 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)\n     int saved_offset;\n \n     vnc_lock_queue(queue);\n-    while (QTAILQ_EMPTY(&queue->jobs)) {\n+    while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {\n         qemu_cond_wait(&queue->cond, &queue->mutex);\n     }\n+    if (queue->exit) {\n+        vnc_unlock_queue(queue);\n+        return 1;\n+    }\n     job = QTAILQ_FIRST(&queue->jobs);\n     vnc_unlock_queue(queue);\n \n@@ -340,39 +340,49 @@ disconnected:\n     return 0;\n }\n \n-static VncJobQueue *vnc_queue_init(void)\n-{\n-    VncJobQueue *queue = g_new0(VncJobQueue, 1);\n-\n-    qemu_cond_init(&queue->cond);\n-    qemu_mutex_init(&queue->mutex);\n-    QTAILQ_INIT(&queue->jobs);\n-    return queue;\n-}\n-\n static void *vnc_worker_thread(void *arg)\n {\n     VncJobQueue *queue = arg;\n \n     while (!vnc_worker_thread_loop(queue)) ;\n-    g_assert_not_reached();\n+\n     return NULL;\n }\n \n-static bool vnc_worker_thread_running(void)\n+void vnc_start_worker_thread(VncDisplay *vd)\n {\n-    return queue; /* Check global queue */\n+    VncJobQueue *queue;\n+\n+    assert(vd->queue == NULL);\n+\n+    queue = g_new0(VncJobQueue, 1);\n+    
qemu_cond_init(&queue->cond);\n+    qemu_mutex_init(&queue->mutex);\n+    QTAILQ_INIT(&queue->jobs);\n+    vd->queue = queue;\n+\n+    qemu_thread_create(&queue->thread, \"vnc_worker\", vnc_worker_thread, queue,\n+                       QEMU_THREAD_JOINABLE);\n }\n \n-void vnc_start_worker_thread(void)\n+void vnc_stop_worker_thread(VncDisplay *vd)\n {\n-    VncJobQueue *q;\n+    VncJobQueue *queue = vd->queue;\n \n-    if (vnc_worker_thread_running())\n+    if (!queue) {\n         return;\n+    }\n+\n+    /* all VNC clients must have finished before we can stop the worker thread */\n+    vnc_lock_queue(queue);\n+    assert(QTAILQ_EMPTY(&queue->jobs));\n+    queue->exit = true;\n+    qemu_cond_broadcast(&queue->cond);\n+    vnc_unlock_queue(queue);\n \n-    q = vnc_queue_init();\n-    qemu_thread_create(&q->thread, \"vnc_worker\", vnc_worker_thread, q,\n-                       QEMU_THREAD_DETACHED);\n-    queue = q; /* Set global queue */\n+    qemu_thread_join(&queue->thread);\n+    qemu_cond_destroy(&queue->cond);\n+    qemu_mutex_destroy(&queue->mutex);\n+    g_free(queue);\n+    vd->queue = NULL;\n }\ndiff --git a/ui/vnc.c b/ui/vnc.c\nindex ba7376360e6..4e5a9ee0341 100644\n--- a/ui/vnc.c\n+++ b/ui/vnc.c\n@@ -3457,7 +3457,7 @@ void vnc_display_init(const char *id, Error **errp)\n     vd->share_policy = VNC_SHARE_POLICY_ALLOW_EXCLUSIVE;\n     vd->connections_limit = 32;\n \n-    vnc_start_worker_thread();\n+    vnc_start_worker_thread(vd);\n \n     register_displaychangelistener(&vd->dcl);\n     vd->kbd = qkbd_state_init(vd->dcl.con);\n@@ -3517,6 +3517,7 @@ static void vnc_display_free(VncDisplay *vd)\n \n     assert(QTAILQ_EMPTY(&vd->clients));\n \n+    vnc_stop_worker_thread(vd);\n     vnc_display_close(vd);\n     unregister_displaychangelistener(&vd->dcl);\n     qkbd_state_free(vd->kbd);\n",
    "prefixes": [
        "v2",
        "46/67"
    ]
}