get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/2026279/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2026279,
    "url": "http://patchwork.ozlabs.org/api/patches/2026279/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20241220104220.2007786-4-npiggin@gmail.com/",
    "project": {
        "id": 14,
        "url": "http://patchwork.ozlabs.org/api/projects/14/?format=api",
        "name": "QEMU Development",
        "link_name": "qemu-devel",
        "list_id": "qemu-devel.nongnu.org",
        "list_email": "qemu-devel@nongnu.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20241220104220.2007786-4-npiggin@gmail.com>",
    "list_archive_url": null,
    "date": "2024-12-20T10:42:05",
    "name": "[03/17] async: rework async event API for replay",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "c82d12dd49c9dd9ba4069fae7b7f92394431b6c3",
    "submitter": {
        "id": 69518,
        "url": "http://patchwork.ozlabs.org/api/people/69518/?format=api",
        "name": "Nicholas Piggin",
        "email": "npiggin@gmail.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20241220104220.2007786-4-npiggin@gmail.com/mbox/",
    "series": [
        {
            "id": 437728,
            "url": "http://patchwork.ozlabs.org/api/series/437728/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/list/?series=437728",
            "date": "2024-12-20T10:42:02",
            "name": "replay: Fixes and avocado test updates",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/437728/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2026279/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2026279/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>",
        "X-Original-To": "incoming@patchwork.ozlabs.org",
        "Delivered-To": "patchwork-incoming@legolas.ozlabs.org",
        "Authentication-Results": [
            "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=gmail.com header.i=@gmail.com header.a=rsa-sha256\n header.s=20230601 header.b=eAfZ8oj1;\n\tdkim-atps=neutral",
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=nongnu.org\n (client-ip=209.51.188.17; helo=lists.gnu.org;\n envelope-from=qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org;\n receiver=patchwork.ozlabs.org)"
        ],
        "Received": [
            "from lists.gnu.org (lists.gnu.org [209.51.188.17])\n\t(using TLSv1.2 with cipher ECDHE-ECDSA-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4YF3t33J53z1yT0\n\tfor <incoming@patchwork.ozlabs.org>; Fri, 20 Dec 2024 21:43:23 +1100 (AEDT)",
            "from localhost ([::1] helo=lists1p.gnu.org)\n\tby lists.gnu.org with esmtp (Exim 4.90_1)\n\t(envelope-from <qemu-devel-bounces@nongnu.org>)\n\tid 1tOaTL-000260-VS; Fri, 20 Dec 2024 05:43:08 -0500",
            "from eggs.gnu.org ([2001:470:142:3::10])\n by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256)\n (Exim 4.90_1) (envelope-from <npiggin@gmail.com>)\n id 1tOaTE-0001xf-Gp; Fri, 20 Dec 2024 05:43:00 -0500",
            "from mail-pj1-x1031.google.com ([2607:f8b0:4864:20::1031])\n by eggs.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_128_GCM_SHA256:128)\n (Exim 4.90_1) (envelope-from <npiggin@gmail.com>)\n id 1tOaTC-0005E7-3B; Fri, 20 Dec 2024 05:43:00 -0500",
            "by mail-pj1-x1031.google.com with SMTP id\n 98e67ed59e1d1-2f43da61ba9so1387630a91.2;\n Fri, 20 Dec 2024 02:42:57 -0800 (PST)",
            "from wheely.local0.net (14-200-18-130.tpgi.com.au. [14.200.18.130])\n by smtp.gmail.com with ESMTPSA id\n 98e67ed59e1d1-2f2ee06dfd3sm5376942a91.32.2024.12.20.02.42.48\n (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n Fri, 20 Dec 2024 02:42:55 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=gmail.com; s=20230601; t=1734691376; x=1735296176; darn=nongnu.org;\n h=content-transfer-encoding:mime-version:references:in-reply-to\n :message-id:date:subject:cc:to:from:from:to:cc:subject:date\n :message-id:reply-to;\n bh=L7UjIHOVv+OKh4Jj/WPmNrW7ybF61f5r2ui4Jn5w6rc=;\n b=eAfZ8oj1JI5QOaKdSP9jZA+pU4ZK2fm5QZIVdel+4sPtM9pDxuw+ClN3CFLQRuLwWJ\n VfQngWOyligW03Vq+xp0754O94JOH0xYzWZmf1+x4aZh6QmAjldheVVEHsoQfEcv2Goz\n Xu2loBFjLNqSG5aZM2KXfwMM9jBqRQKKe7rXHP49FJQYrX9Gl6jQl0lEfmjpL7F9j3fG\n 2K/7zUpYQErNGFQQOH45KDUa+yJq1vRj2L0YKgljgKcm1jHlb6K9Ygl80s2zd+nvqJfi\n 1qxtrADroIEXvgIqClFs5id7lVJ0vnoOZTFCbqOsa5gFV3Fdi3PgEIxJI/LjFPxeVXkJ\n /trw==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20230601; t=1734691376; x=1735296176;\n h=content-transfer-encoding:mime-version:references:in-reply-to\n :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc\n :subject:date:message-id:reply-to;\n bh=L7UjIHOVv+OKh4Jj/WPmNrW7ybF61f5r2ui4Jn5w6rc=;\n b=r/sVQTPWEE3XbjGKAZPb/dHFwhGU3YNEm5HuRiUP5SaHJxM1NCIQi/K70iGd2Z0Iy/\n e8rXcJGBZg97rVgBGSbTgRC5wqbvc8ngfnG9xBPUXH7Mk3HI2dLfwyhBQkjjsgm9IsiH\n KhEGkOvkouiW3OnRpuRwjyn0G8oWQJfCzvGH70jr1Mq9IzVJFhqQXMxJ+Igvk0Z2Qt2B\n MyVC4Q3RQnn7zCwFio2DsK1MXvVPXZvR3z9f36jPueez0Xh/sqC9/ZbRkaDLOrleebwT\n 1Kb8Xuu9ofMGH7TqD5rNUMuFltfl8Uq5SrzYHYae5djJJzD89oiyFzd9yA14aLrUIKvi\n +zPQ==",
        "X-Forwarded-Encrypted": "i=1;\n AJvYcCWzxBagxVmck8t+TG+rZRbOoVAqupXqnXvYATDPhcc7uWsFIzYgkcHE1hBOZWvPBo0SxHVjIz8cey3V@nongnu.org",
        "X-Gm-Message-State": "AOJu0Yx64PfN0nh95Z6SgA+rrJ42UudzrPsWjrLmWibBfYZgGek/giMX\n v1pcJ+RY0t4TwiUBTfUbLu2qjpDnnz+t5end4JPGDKlJhOGdW7AgtQyccg==",
        "X-Gm-Gg": "ASbGncsK8DN2sgfxYwBDcE1YwzU6tsDkeXfV+2pBAF4+1tivaM61Ux9Muts9Tn89/Yk\n aGcIYqPrHA2ZT0PxcDlcGxmr2j606FcGsowVtcAmg8UvqJioIEWmiwGg4YALhB0JUuR+TkeXYgX\n epJzF+quaG0BrGGP+L0K69aZEVFfkOdSVWuB4E+daojkKLvf6QOtPXrK8jLjWelT9xXUhEFmYDH\n Inn4CHkfRk8OiZGZok2qSWK40wS8vomLpafVxcxS0O5pd1ai6Ocpn1cVIA4AGEmcF2/3nAy0hEL\n cTtjQAQZ+Q==",
        "X-Google-Smtp-Source": "\n AGHT+IFpEO2mU3YazmwaSvwTXh5RjLvdXAy/gg0immkL7bGmAtb+tnPhxDRh5ieZbEDB5u8m3z1bpg==",
        "X-Received": "by 2002:a17:90b:2cc3:b0:2ee:693e:ed7c with SMTP id\n 98e67ed59e1d1-2f452eeb5ffmr3909648a91.33.1734691376160;\n Fri, 20 Dec 2024 02:42:56 -0800 (PST)",
        "From": "Nicholas Piggin <npiggin@gmail.com>",
        "To": "qemu-devel@nongnu.org",
        "Cc": "Nicholas Piggin <npiggin@gmail.com>, qemu-block@nongnu.org, =?utf-8?q?Al?=\n\t=?utf-8?q?ex_Benn=C3=A9e?= <alex.bennee@linaro.org>,\n Kevin Wolf <kwolf@redhat.com>, Hanna Reitz <hreitz@redhat.com>,\n Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>,\n Paolo Bonzini <pbonzini@redhat.com>, Stefan Hajnoczi <stefanha@redhat.com>,\n Fam Zheng <fam@euphon.net>, Ronnie Sahlberg <ronniesahlberg@gmail.com>,\n John Snow <jsnow@redhat.com>, \"Michael S. Tsirkin\" <mst@redhat.com>,\n Jason Wang <jasowang@redhat.com>,\n Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>,\n Peter Xu <peterx@redhat.com>, Fabiano Rosas <farosas@suse.de>,\n \"Dr. David Alan Gilbert\" <dave@treblig.org>,\n Markus Armbruster <armbru@redhat.com>, Michael Roth <michael.roth@amd.com>,\n Wainer dos Santos Moschetta <wainersm@redhat.com>",
        "Subject": "[PATCH 03/17] async: rework async event API for replay",
        "Date": "Fri, 20 Dec 2024 20:42:05 +1000",
        "Message-ID": "<20241220104220.2007786-4-npiggin@gmail.com>",
        "X-Mailer": "git-send-email 2.45.2",
        "In-Reply-To": "<20241220104220.2007786-1-npiggin@gmail.com>",
        "References": "<20241220104220.2007786-1-npiggin@gmail.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Received-SPF": "pass client-ip=2607:f8b0:4864:20::1031;\n envelope-from=npiggin@gmail.com; helo=mail-pj1-x1031.google.com",
        "X-Spam_score_int": "-20",
        "X-Spam_score": "-2.1",
        "X-Spam_bar": "--",
        "X-Spam_report": "(-2.1 / 5.0 requ) BAYES_00=-1.9, DKIM_SIGNED=0.1,\n DKIM_VALID=-0.1, DKIM_VALID_AU=-0.1, DKIM_VALID_EF=-0.1, FREEMAIL_FROM=0.001,\n RCVD_IN_DNSWL_NONE=-0.0001, SPF_HELO_NONE=0.001,\n SPF_PASS=-0.001 autolearn=ham autolearn_force=no",
        "X-Spam_action": "no action",
        "X-BeenThere": "qemu-devel@nongnu.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "<qemu-devel.nongnu.org>",
        "List-Unsubscribe": "<https://lists.nongnu.org/mailman/options/qemu-devel>,\n <mailto:qemu-devel-request@nongnu.org?subject=unsubscribe>",
        "List-Archive": "<https://lists.nongnu.org/archive/html/qemu-devel>",
        "List-Post": "<mailto:qemu-devel@nongnu.org>",
        "List-Help": "<mailto:qemu-devel-request@nongnu.org?subject=help>",
        "List-Subscribe": "<https://lists.nongnu.org/mailman/listinfo/qemu-devel>,\n <mailto:qemu-devel-request@nongnu.org?subject=subscribe>",
        "Errors-To": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org",
        "Sender": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org"
    },
    "content": "Replace the functions replay_bh_schedule_event() and\nreplay_bh_schedule_oneshot_event() with qemu_bh_schedule_event() and\naio_bh_schedule_oneshot_event(), respectively.\n\nDirect API conversions use QEMU_CLOCK_VIRTUAL, because the virtual\nclocks then go via the replay path when replay is enabled.\n\nSigned-off-by: Nicholas Piggin <npiggin@gmail.com>\n---\n docs/devel/replay.rst   |  7 ++++---\n include/block/aio.h     | 35 +++++++++++++++++++++++++++++++----\n include/sysemu/replay.h |  2 +-\n backends/rng-builtin.c  |  2 +-\n block/block-backend.c   | 24 ++++++++++++++----------\n block/io.c              |  5 +++--\n block/iscsi.c           |  5 +++--\n block/nfs.c             | 10 ++++++----\n block/null.c            |  4 ++--\n block/nvme.c            |  8 +++++---\n hw/ide/core.c           |  7 ++++---\n hw/net/virtio-net.c     | 14 +++++++-------\n replay/replay-events.c  |  2 +-\n stubs/replay-tools.c    |  2 +-\n util/async.c            | 39 +++++++++++++++++++++++++++++++++++++--\n 15 files changed, 120 insertions(+), 46 deletions(-)",
    "diff": "diff --git a/docs/devel/replay.rst b/docs/devel/replay.rst\nindex 40f58d9d4fc..ca816d87587 100644\n--- a/docs/devel/replay.rst\n+++ b/docs/devel/replay.rst\n@@ -171,9 +171,10 @@ Bottom halves\n -------------\n \n Bottom half callbacks, that affect the guest state, should be invoked through\n-``replay_bh_schedule_event`` or ``replay_bh_schedule_oneshot_event`` functions.\n-Their invocations are saved in record mode and synchronized with the existing\n-log in replay mode.\n+``qemu_bh_schedule_event`` or ``aio_bh_schedule_oneshot_event`` functions\n+the with the appropriate clock type (e.g., QEMU_CLOCK_VIRTUAL. Their\n+invocations are saved in record mode and synchronized with the existing log in\n+replay mode.\n \n Disk I/O events are completely deterministic in our model, because\n in both record and replay modes we start virtual machine from the same\ndiff --git a/include/block/aio.h b/include/block/aio.h\nindex 43883a8a33a..bc323b0d936 100644\n--- a/include/block/aio.h\n+++ b/include/block/aio.h\n@@ -286,17 +286,30 @@ void aio_context_unref(AioContext *ctx);\n  * @name: A human-readable identifier for debugging purposes.\n  */\n void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,\n-                                  const char *name);\n+                                  const char *name, QEMUClockType clock_type);\n \n /**\n- * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run\n- * only once and as soon as possible.\n+ * aio_bh_schedule_oneshot_event: Allocate a new bottom half structure that\n+ * will run only once and as soon as possible.\n  *\n  * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the\n  * name string.\n  */\n+#define aio_bh_schedule_oneshot_event(ctx, cb, opaque, clock_type) \\\n+    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)), \\\n+                                 clock_type)\n+\n+/**\n+ * aio_bh_schedule_oneshot: Allocate a new 
bottom half structure that will run\n+ * only once and as soon as possible.\n+ *\n+ * A legacy wrapper for aio_bh_schedule_oneshot_event() that uses realtime\n+ * as the clock type. Callers should specify the clock time in order to be\n+ * compatible with record/replay.\n+ */\n #define aio_bh_schedule_oneshot(ctx, cb, opaque) \\\n-    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))\n+    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)), \\\n+                                 QEMU_CLOCK_REALTIME)\n \n /**\n  * aio_bh_new_full: Allocate a new bottom half structure.\n@@ -378,6 +391,20 @@ void aio_bh_call(QEMUBH *bh);\n  */\n int aio_bh_poll(AioContext *ctx);\n \n+/**\n+ * qemu_bh_schedule_event: Schedule a bottom half.\n+ *\n+ * Scheduling a bottom half interrupts the main loop and causes the\n+ * execution of the callback that was passed to qemu_bh_new.\n+ *\n+ * Bottom halves that are scheduled from a bottom half handler are instantly\n+ * invoked.  This can create an infinite loop if a bottom half handler\n+ * schedules itself.\n+ *\n+ * @bh: The bottom half to be scheduled.\n+ */\n+void qemu_bh_schedule_event(QEMUBH *bh, QEMUClockType clock_type);\n+\n /**\n  * qemu_bh_schedule: Schedule a bottom half.\n  *\ndiff --git a/include/sysemu/replay.h b/include/sysemu/replay.h\nindex cba74fa9bce..30a7ae47518 100644\n--- a/include/sysemu/replay.h\n+++ b/include/sysemu/replay.h\n@@ -126,7 +126,7 @@ void replay_flush_events(void);\n /*! Adds bottom half event to the queue */\n void replay_bh_schedule_event(QEMUBH *bh);\n /* Adds oneshot bottom half event to the queue */\n-void replay_bh_schedule_oneshot_event(AioContext *ctx,\n+void replay_bh_oneshot_event(AioContext *ctx,\n     QEMUBHFunc *cb, void *opaque);\n /*! 
Adds input event to the queue */\n void replay_input_event(QemuConsole *src, InputEvent *evt);\ndiff --git a/backends/rng-builtin.c b/backends/rng-builtin.c\nindex f367eb665cf..eef5c61f4f5 100644\n--- a/backends/rng-builtin.c\n+++ b/backends/rng-builtin.c\n@@ -38,7 +38,7 @@ static void rng_builtin_request_entropy(RngBackend *b, RngRequest *req)\n {\n     RngBuiltin *s = RNG_BUILTIN(b);\n \n-    replay_bh_schedule_event(s->bh);\n+    qemu_bh_schedule_event(s->bh, QEMU_CLOCK_VIRTUAL);\n }\n \n static void rng_builtin_init(Object *obj)\ndiff --git a/block/block-backend.c b/block/block-backend.c\nindex 85bcdedcef6..2168729340d 100644\n--- a/block/block-backend.c\n+++ b/block/block-backend.c\n@@ -1530,8 +1530,8 @@ BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,\n     acb->blk = blk;\n     acb->ret = ret;\n \n-    replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),\n-                                     error_callback_bh, acb);\n+    aio_bh_schedule_oneshot_event(qemu_get_current_aio_context(),\n+                                  error_callback_bh, acb, QEMU_CLOCK_VIRTUAL);\n     return &acb->common;\n }\n \n@@ -1588,8 +1588,9 @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,\n \n     acb->has_returned = true;\n     if (acb->rwco.ret != NOT_DONE) {\n-        replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),\n-                                         blk_aio_complete_bh, acb);\n+        aio_bh_schedule_oneshot_event(qemu_get_current_aio_context(),\n+                                      blk_aio_complete_bh, acb,\n+                                      QEMU_CLOCK_VIRTUAL);\n     }\n \n     return &acb->common;\n@@ -1894,8 +1895,9 @@ BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset,\n \n     acb->has_returned = true;\n     if (acb->rwco.ret != NOT_DONE) {\n-        replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),\n-                                         blk_aio_complete_bh, acb);\n+      
  aio_bh_schedule_oneshot_event(qemu_get_current_aio_context(),\n+                                      blk_aio_complete_bh, acb,\n+                                      QEMU_CLOCK_VIRTUAL);\n     }\n \n     return &acb->common;\n@@ -1935,8 +1937,9 @@ BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op,\n \n     acb->has_returned = true;\n     if (acb->rwco.ret != NOT_DONE) {\n-        replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),\n-                                         blk_aio_complete_bh, acb);\n+        aio_bh_schedule_oneshot_event(qemu_get_current_aio_context(),\n+                                      blk_aio_complete_bh, acb,\n+                                      QEMU_CLOCK_VIRTUAL);\n     }\n \n     return &acb->common;\n@@ -1974,8 +1977,9 @@ BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset,\n     aio_co_enter(qemu_get_current_aio_context(), co);\n     acb->has_returned = true;\n     if (acb->rwco.ret != NOT_DONE) {\n-        replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),\n-                                         blk_aio_complete_bh, acb);\n+        aio_bh_schedule_oneshot_event(qemu_get_current_aio_context(),\n+                                      blk_aio_complete_bh, acb,\n+                                      QEMU_CLOCK_VIRTUAL);\n     }\n \n     return &acb->common;\ndiff --git a/block/io.c b/block/io.c\nindex 301514c8808..fcce0710824 100644\n--- a/block/io.c\n+++ b/block/io.c\n@@ -335,8 +335,9 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,\n         bdrv_inc_in_flight(bs);\n     }\n \n-    replay_bh_schedule_oneshot_event(qemu_get_aio_context(),\n-                                     bdrv_co_drain_bh_cb, &data);\n+    aio_bh_schedule_oneshot_event(qemu_get_aio_context(),\n+                                  bdrv_co_drain_bh_cb, &data,\n+                                  QEMU_CLOCK_VIRTUAL);\n \n     qemu_coroutine_yield();\n     /* If we are resumed from 
some other event (such as an aio completion or a\ndiff --git a/block/iscsi.c b/block/iscsi.c\nindex 979bf90cb79..98ed86b9831 100644\n--- a/block/iscsi.c\n+++ b/block/iscsi.c\n@@ -285,8 +285,9 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,\n     }\n \n     if (iTask->co) {\n-        replay_bh_schedule_oneshot_event(iTask->iscsilun->aio_context,\n-                                         iscsi_co_generic_bh_cb, iTask);\n+        aio_bh_schedule_oneshot_event(iTask->iscsilun->aio_context,\n+                                      iscsi_co_generic_bh_cb, iTask,\n+                                      QEMU_CLOCK_VIRTUAL);\n     } else {\n         iTask->complete = 1;\n     }\ndiff --git a/block/nfs.c b/block/nfs.c\nindex 0500f60c08f..66d9df0d89b 100644\n--- a/block/nfs.c\n+++ b/block/nfs.c\n@@ -256,8 +256,9 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,\n     if (task->ret < 0) {\n         error_report(\"NFS Error: %s\", nfs_get_error(nfs));\n     }\n-    replay_bh_schedule_oneshot_event(task->client->aio_context,\n-                                     nfs_co_generic_bh_cb, task);\n+    aio_bh_schedule_oneshot_event(task->client->aio_context,\n+                                  nfs_co_generic_bh_cb, task,\n+                                  QEMU_CLOCK_VIRTUAL);\n }\n \n static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, int64_t offset,\n@@ -723,8 +724,9 @@ nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,\n     if (task->ret < 0) {\n         error_report(\"NFS Error: %s\", nfs_get_error(nfs));\n     }\n-    replay_bh_schedule_oneshot_event(task->client->aio_context,\n-                                     nfs_co_generic_bh_cb, task);\n+    aio_bh_schedule_oneshot_event(task->client->aio_context,\n+                                  nfs_co_generic_bh_cb, task,\n+                                  QEMU_CLOCK_VIRTUAL);\n }\n \n static int64_t coroutine_fn nfs_co_get_allocated_file_size(BlockDriverState 
*bs)\ndiff --git a/block/null.c b/block/null.c\nindex 4730acc1eb2..d35b42cbb44 100644\n--- a/block/null.c\n+++ b/block/null.c\n@@ -183,8 +183,8 @@ static inline BlockAIOCB *null_aio_common(BlockDriverState *bs,\n         timer_mod_ns(&acb->timer,\n                      qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + s->latency_ns);\n     } else {\n-        replay_bh_schedule_oneshot_event(bdrv_get_aio_context(bs),\n-                                         null_bh_cb, acb);\n+        aio_bh_schedule_oneshot_event(bdrv_get_aio_context(bs),\n+                                      null_bh_cb, acb, QEMU_CLOCK_VIRTUAL);\n     }\n     return &acb->common;\n }\ndiff --git a/block/nvme.c b/block/nvme.c\nindex 3b588b139f6..4069639dc7a 100644\n--- a/block/nvme.c\n+++ b/block/nvme.c\n@@ -344,8 +344,9 @@ static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)\n static void nvme_wake_free_req_locked(NVMeQueuePair *q)\n {\n     if (!qemu_co_queue_empty(&q->free_req_queue)) {\n-        replay_bh_schedule_oneshot_event(q->s->aio_context,\n-                nvme_free_req_queue_cb, q);\n+        aio_bh_schedule_oneshot_event(q->s->aio_context,\n+                                      nvme_free_req_queue_cb, q,\n+                                      QEMU_CLOCK_VIRTUAL);\n     }\n }\n \n@@ -1177,7 +1178,8 @@ static void nvme_rw_cb(void *opaque, int ret)\n         /* The rw coroutine hasn't yielded, don't try to enter. 
*/\n         return;\n     }\n-    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);\n+    aio_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data,\n+                                  QEMU_CLOCK_VIRTUAL);\n }\n \n static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,\ndiff --git a/hw/ide/core.c b/hw/ide/core.c\nindex 08d92184554..c527caf3d69 100644\n--- a/hw/ide/core.c\n+++ b/hw/ide/core.c\n@@ -516,7 +516,7 @@ static void ide_issue_trim_cb(void *opaque, int ret)\n done:\n     iocb->aiocb = NULL;\n     if (iocb->bh) {\n-        replay_bh_schedule_event(iocb->bh);\n+        qemu_bh_schedule_event(iocb->bh, QEMU_CLOCK_VIRTUAL);\n     }\n }\n \n@@ -2368,8 +2368,9 @@ void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)\n             s = &bus->ifs[i];\n             s->status |= BUSY_STAT;\n         }\n-        replay_bh_schedule_oneshot_event(qemu_get_aio_context(),\n-                                         ide_bus_perform_srst, bus);\n+        aio_bh_schedule_oneshot_event(qemu_get_aio_context(),\n+                                      ide_bus_perform_srst, bus,\n+                                      QEMU_CLOCK_VIRTUAL);\n     }\n \n     bus->cmd = val;\ndiff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c\nindex 4fd1f9accab..ecbf274cb04 100644\n--- a/hw/net/virtio-net.c\n+++ b/hw/net/virtio-net.c\n@@ -415,10 +415,10 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)\n \n         if (queue_started) {\n             if (q->tx_timer) {\n-                timer_mod(q->tx_timer,\n-                               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);\n+                timer_mod(q->tx_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +\n+                                       n->tx_timeout);\n             } else {\n-                replay_bh_schedule_event(q->tx_bh);\n+                qemu_bh_schedule_event(q->tx_bh, QEMU_CLOCK_VIRTUAL);\n             }\n         } else {\n             if 
(q->tx_timer) {\n@@ -2705,7 +2705,7 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)\n          */\n         virtio_queue_set_notification(q->tx_vq, 0);\n         if (q->tx_bh) {\n-            replay_bh_schedule_event(q->tx_bh);\n+            qemu_bh_schedule_event(q->tx_bh, QEMU_CLOCK_VIRTUAL);\n         } else {\n             timer_mod(q->tx_timer,\n                       qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);\n@@ -2871,7 +2871,7 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)\n         return;\n     }\n     virtio_queue_set_notification(vq, 0);\n-    replay_bh_schedule_event(q->tx_bh);\n+    qemu_bh_schedule_event(q->tx_bh, QEMU_CLOCK_VIRTUAL);\n }\n \n static void virtio_net_tx_timer(void *opaque)\n@@ -2954,7 +2954,7 @@ static void virtio_net_tx_bh(void *opaque)\n     /* If we flush a full burst of packets, assume there are\n      * more coming and immediately reschedule */\n     if (ret >= n->tx_burst) {\n-        replay_bh_schedule_event(q->tx_bh);\n+        qemu_bh_schedule_event(q->tx_bh, QEMU_CLOCK_VIRTUAL);\n         q->tx_waiting = 1;\n         return;\n     }\n@@ -2968,7 +2968,7 @@ static void virtio_net_tx_bh(void *opaque)\n         return;\n     } else if (ret > 0) {\n         virtio_queue_set_notification(q->tx_vq, 0);\n-        replay_bh_schedule_event(q->tx_bh);\n+        qemu_bh_schedule_event(q->tx_bh, QEMU_CLOCK_VIRTUAL);\n         q->tx_waiting = 1;\n     }\n }\ndiff --git a/replay/replay-events.c b/replay/replay-events.c\nindex 2e46eda6bf8..d4b095b2097 100644\n--- a/replay/replay-events.c\n+++ b/replay/replay-events.c\n@@ -131,7 +131,7 @@ void replay_bh_schedule_event(QEMUBH *bh)\n     }\n }\n \n-void replay_bh_schedule_oneshot_event(AioContext *ctx,\n+void replay_bh_oneshot_event(AioContext *ctx,\n     QEMUBHFunc *cb, void *opaque)\n {\n     if (events_enabled) {\ndiff --git a/stubs/replay-tools.c b/stubs/replay-tools.c\nindex 3e8ca3212d9..31985af35f7 100644\n--- 
a/stubs/replay-tools.c\n+++ b/stubs/replay-tools.c\n@@ -30,7 +30,7 @@ void replay_bh_schedule_event(QEMUBH *bh)\n     qemu_bh_schedule(bh);\n }\n \n-void replay_bh_schedule_oneshot_event(AioContext *ctx,\n+void replay_bh_oneshot_event(AioContext *ctx,\n      QEMUBHFunc *cb, void *opaque)\n {\n     aio_bh_schedule_oneshot(ctx, cb, opaque);\ndiff --git a/util/async.c b/util/async.c\nindex 99db28389f6..b88083ecbe7 100644\n--- a/util/async.c\n+++ b/util/async.c\n@@ -36,6 +36,7 @@\n #include \"qemu/coroutine_int.h\"\n #include \"qemu/coroutine-tls.h\"\n #include \"sysemu/cpu-timers.h\"\n+#include \"sysemu/replay.h\"\n #include \"trace.h\"\n \n /***********************************************************/\n@@ -126,8 +127,8 @@ static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)\n     return bh;\n }\n \n-void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,\n-                                  void *opaque, const char *name)\n+static void do_aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,\n+                                            void *opaque, const char *name)\n {\n     QEMUBH *bh;\n     bh = g_new(QEMUBH, 1);\n@@ -140,6 +141,24 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,\n     aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);\n }\n \n+void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,\n+                                  void *opaque, const char *name,\n+                                  QEMUClockType clock_type)\n+{\n+    switch (clock_type) {\n+    case QEMU_CLOCK_VIRTUAL:\n+    case QEMU_CLOCK_VIRTUAL_RT:\n+        if (replay_mode != REPLAY_MODE_NONE) {\n+            /* Record/replay must intercept bh events */\n+            replay_bh_oneshot_event(ctx, cb, opaque);\n+            break;\n+        }\n+        /* fallthrough */\n+    default:\n+        do_aio_bh_schedule_oneshot_full(ctx, cb, opaque, name);\n+    }\n+}\n+\n QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void 
*opaque,\n                         const char *name, MemReentrancyGuard *reentrancy_guard)\n {\n@@ -226,6 +245,22 @@ int aio_bh_poll(AioContext *ctx)\n     return ret;\n }\n \n+void qemu_bh_schedule_event(QEMUBH *bh, QEMUClockType clock_type)\n+{\n+    switch (clock_type) {\n+    case QEMU_CLOCK_VIRTUAL:\n+    case QEMU_CLOCK_VIRTUAL_RT:\n+        if (replay_mode != REPLAY_MODE_NONE) {\n+            /* Record/replay must intercept bh events */\n+            replay_bh_schedule_event(bh);\n+            break;\n+        }\n+        /* fallthrough */\n+    default:\n+        aio_bh_enqueue(bh, BH_SCHEDULED);\n+    }\n+}\n+\n void qemu_bh_schedule_idle(QEMUBH *bh)\n {\n     aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);\n",
    "prefixes": [
        "03/17"
    ]
}