Patch Detail

GET: Show a patch.
PATCH: Update a patch (partial update: only the fields supplied in the request body are changed).
PUT: Update a patch (full update).
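These endpoints follow the usual Patchwork REST API conventions: reads are anonymous, while updates require token authentication and maintainer rights on the project. Below is a minimal Python sketch of both paths; the requests library, the token value (a placeholder), and the choice of "under-review" (one of Patchwork's default state slugs) are assumptions, not part of this page.

import requests

BASE = "http://patchwork.ozlabs.org/api"
PATCH_ID = 2194219

# GET: anonymous read access is sufficient.
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "-", patch["state"])

# PATCH: partial update of mutable fields such as state or delegate.
# Requires an API token (placeholder below) for a maintainer account.
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": "Token 0123456789abcdef"},
    json={"state": "under-review"},
)
resp.raise_for_status()

The raw HTTP exchange for the read path is reproduced below.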
GET /api/patches/2194219/?format=api
{ "id": 2194219, "url": "http://patchwork.ozlabs.org/api/patches/2194219/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20260207134728.1495932-1-lulu@redhat.com/", "project": { "id": 14, "url": "http://patchwork.ozlabs.org/api/projects/14/?format=api", "name": "QEMU Development", "link_name": "qemu-devel", "list_id": "qemu-devel.nongnu.org", "list_email": "qemu-devel@nongnu.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20260207134728.1495932-1-lulu@redhat.com>", "list_archive_url": null, "date": "2026-02-07T13:47:17", "name": "[01/12] net: introduce read_poll/write_poll", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "1c81f8c848458c1d85cf791da865d344faebf0df", "submitter": { "id": 78960, "url": "http://patchwork.ozlabs.org/api/people/78960/?format=api", "name": "Cindy Lu", "email": "lulu@redhat.com" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20260207134728.1495932-1-lulu@redhat.com/mbox/", "series": [ { "id": 491379, "url": "http://patchwork.ozlabs.org/api/series/491379/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/list/?series=491379", "date": "2026-02-07T13:47:17", "name": "[01/12] net: introduce read_poll/write_poll", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/491379/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/2194219/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/2194219/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>", "X-Original-To": "incoming@patchwork.ozlabs.org", "Delivered-To": "patchwork-incoming@legolas.ozlabs.org", "Authentication-Results": [ "legolas.ozlabs.org;\n\tdkim=pass (1024-bit key;\n unprotected) header.d=redhat.com header.i=@redhat.com header.a=rsa-sha256\n header.s=mimecast20190719 header.b=AfMPSglm;\n\tdkim-atps=neutral", "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=nongnu.org\n (client-ip=209.51.188.17; helo=lists.gnu.org;\n envelope-from=qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org;\n receiver=patchwork.ozlabs.org)" ], "Received": [ "from lists.gnu.org (lists.gnu.org [209.51.188.17])\n\t(using TLSv1.2 with cipher ECDHE-ECDSA-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4f7XNJ0qK2z1xtV\n\tfor <incoming@patchwork.ozlabs.org>; Sun, 08 Feb 2026 00:48:16 +1100 (AEDT)", "from localhost ([::1] helo=lists1p.gnu.org)\n\tby lists.gnu.org with esmtp (Exim 4.90_1)\n\t(envelope-from <qemu-devel-bounces@nongnu.org>)\n\tid 1voifJ-00030H-FG; Sat, 07 Feb 2026 08:48:03 -0500", "from eggs.gnu.org ([2001:470:142:3::10])\n by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256)\n (Exim 4.90_1) (envelope-from <lulu@redhat.com>) id 1voifE-000305-0S\n for qemu-devel@nongnu.org; Sat, 07 Feb 2026 08:47:56 -0500", "from us-smtp-delivery-124.mimecast.com ([170.10.133.124])\n by eggs.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256)\n (Exim 4.90_1) (envelope-from <lulu@redhat.com>) id 1voif9-0003Ca-BT\n for qemu-devel@nongnu.org; Sat, 07 Feb 2026 08:47:53 -0500", "from mx-prod-mc-03.mail-002.prod.us-west-2.aws.redhat.com\n (ec2-54-186-198-63.us-west-2.compute.amazonaws.com [54.186.198.63]) by\n relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.3,\n 
cipher=TLS_AES_256_GCM_SHA384) id us-mta-60-0naedmn0ObaYECvfR7nX7g-1; Sat,\n 07 Feb 2026 08:47:47 -0500", "from mx-prod-int-06.mail-002.prod.us-west-2.aws.redhat.com\n (mx-prod-int-06.mail-002.prod.us-west-2.aws.redhat.com [10.30.177.93])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n key-exchange X25519 server-signature RSA-PSS (2048 bits) server-digest\n SHA256)\n (No client certificate requested)\n by mx-prod-mc-03.mail-002.prod.us-west-2.aws.redhat.com (Postfix) with ESMTPS\n id DA49D1955F12; Sat, 7 Feb 2026 13:47:45 +0000 (UTC)", "from S2.redhat.com (unknown [10.72.112.15])\n by mx-prod-int-06.mail-002.prod.us-west-2.aws.redhat.com (Postfix) with ESMTP\n id 8556718004AD; Sat, 7 Feb 2026 13:47:42 +0000 (UTC)" ], "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com;\n s=mimecast20190719; t=1770472070;\n h=from:from:reply-to:subject:subject:date:date:message-id:message-id:\n to:to:cc:mime-version:mime-version:\n content-transfer-encoding:content-transfer-encoding;\n bh=+/kZUQ2TG1BpuGHqKkikBty9u1J/HcAWH6IWltMHcy4=;\n b=AfMPSglmmZTWbRo+GlAGQW+fI5TEsMYhSmJJPQxC9uMv3XxhlDhodxYeSDx1l5dPOQTuEg\n Xrjtunzd7WL9RMwD4/49ppxPJeYhx8l7QnbE6rZyhKwBbPn0uR6dGm3COeZXJga4lsiPSA\n prcjGDdKTHrRUb8FBDuuJIcxXD5imuM=", "X-MC-Unique": "0naedmn0ObaYECvfR7nX7g-1", "X-Mimecast-MFC-AGG-ID": "0naedmn0ObaYECvfR7nX7g_1770472066", "From": "Cindy Lu <lulu@redhat.com>", "To": "lulu@redhat.com, mst@redhat.com, jasowang@redhat.com, zhangckid@gmail.com,\n lizhijian@fujitsu.com, qemu-devel@nongnu.org", "Subject": "[PATCH 01/12] net: introduce read_poll/write_poll", "Date": "Sat, 7 Feb 2026 21:47:17 +0800", "Message-ID": "<20260207134728.1495932-1-lulu@redhat.com>", "MIME-Version": "1.0", "Content-Transfer-Encoding": "8bit", "X-Scanned-By": "MIMEDefang 3.4.1 on 10.30.177.93", "Received-SPF": "pass client-ip=170.10.133.124; envelope-from=lulu@redhat.com;\n helo=us-smtp-delivery-124.mimecast.com", "X-Spam_score_int": "-20", "X-Spam_score": "-2.1", "X-Spam_bar": "--", "X-Spam_report": "(-2.1 / 5.0 requ) BAYES_00=-1.9, DKIMWL_WL_HIGH=-0.001,\n DKIM_SIGNED=0.1, DKIM_VALID=-0.1, DKIM_VALID_AU=-0.1, DKIM_VALID_EF=-0.1,\n RCVD_IN_DNSWL_NONE=-0.0001, RCVD_IN_MSPIKE_H2=0.001,\n RCVD_IN_VALIDITY_RPBL_BLOCKED=0.001, RCVD_IN_VALIDITY_SAFE_BLOCKED=0.001,\n SPF_HELO_PASS=-0.001, SPF_PASS=-0.001 autolearn=ham autolearn_force=no", "X-Spam_action": "no action", "X-BeenThere": "qemu-devel@nongnu.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "qemu development <qemu-devel.nongnu.org>", "List-Unsubscribe": "<https://lists.nongnu.org/mailman/options/qemu-devel>,\n <mailto:qemu-devel-request@nongnu.org?subject=unsubscribe>", "List-Archive": "<https://lists.nongnu.org/archive/html/qemu-devel>", "List-Post": "<mailto:qemu-devel@nongnu.org>", "List-Help": "<mailto:qemu-devel-request@nongnu.org?subject=help>", "List-Subscribe": "<https://lists.nongnu.org/mailman/listinfo/qemu-devel>,\n <mailto:qemu-devel-request@nongnu.org?subject=subscribe>", "Errors-To": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org", "Sender": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org" }, "content": "From: Jason Wang <jasowang@redhat.com>\n\nThe existing NetClientInfo::poll() callback enables or disables both read\nand write polling together. 
However, some use cases (such as filter-redirector\nwith enable_when_stopped) need finer control to enable only read polling\nwhile the VM is stopped, without affecting write polling.\n\nThis patch adds separate read_poll() and write_poll() callbacks to\nNetClientInfo, allowing independent control of read and write fd handlers.\n\nChanges:\n- Add NetReadPoll and NetWritePoll typedefs to net/net.h\n- Add read_poll and write_poll fields to NetClientInfo\n- Rename internal functions (e.g., tap_read_poll -> tap_update_read_poll)\n to avoid name collision with the new callbacks\n- Implement read_poll/write_poll callbacks for tap, af-xdp, l2tpv3, and\n netmap backends\n\nThis infrastructure will be used by filter-redirector to selectively enable\nbackend read polling when the VM is stopped, allowing network traffic to be\ndrained into the netfilter chain during migration pre-switchover.\n\nSigned-off-by: Jason Wang <jasowang@redhat.com>\nSigned-off-by: Cindy Lu <lulu@redhat.com>\n---\n include/net/net.h | 4 ++++\n net/af-xdp.c | 32 +++++++++++++++++++++++---------\n net/l2tpv3.c | 38 ++++++++++++++++++++++++++------------\n net/netmap.c | 30 ++++++++++++++++++++++--------\n net/tap.c | 36 +++++++++++++++++++++++++-----------\n 5 files changed, 100 insertions(+), 40 deletions(-)", "diff": "diff --git a/include/net/net.h b/include/net/net.h\nindex 72b476ee1d..d2b2e6fc44 100644\n--- a/include/net/net.h\n+++ b/include/net/net.h\n@@ -55,6 +55,8 @@ typedef struct NetOffloads {\n /* Net clients */\n \n typedef void (NetPoll)(NetClientState *, bool enable);\n+typedef void (NetReadPoll)(NetClientState *, bool enable);\n+typedef void (NetWritePoll)(NetClientState *, bool enable);\n typedef bool (NetCanReceive)(NetClientState *);\n typedef int (NetStart)(NetClientState *);\n typedef int (NetLoad)(NetClientState *);\n@@ -96,6 +98,8 @@ typedef struct NetClientInfo {\n LinkStatusChanged *link_status_changed;\n QueryRxFilter *query_rx_filter;\n NetPoll *poll;\n+ NetReadPoll *read_poll;\n+ NetWritePoll *write_poll;\n HasUfo *has_ufo;\n HasUso *has_uso;\n HasTunnel *has_tunnel;\ndiff --git a/net/af-xdp.c b/net/af-xdp.c\nindex 14f302ea21..b6e50b698f 100644\n--- a/net/af-xdp.c\n+++ b/net/af-xdp.c\n@@ -72,7 +72,7 @@ static void af_xdp_update_fd_handler(AFXDPState *s)\n }\n \n /* Update the read handler. */\n-static void af_xdp_read_poll(AFXDPState *s, bool enable)\n+static void af_xdp_update_read_poll(AFXDPState *s, bool enable)\n {\n if (s->read_poll != enable) {\n s->read_poll = enable;\n@@ -81,7 +81,7 @@ static void af_xdp_read_poll(AFXDPState *s, bool enable)\n }\n \n /* Update the write handler. 
*/\n-static void af_xdp_write_poll(AFXDPState *s, bool enable)\n+static void af_xdp_update_write_poll(AFXDPState *s, bool enable)\n {\n if (s->write_poll != enable) {\n s->write_poll = enable;\n@@ -100,6 +100,18 @@ static void af_xdp_poll(NetClientState *nc, bool enable)\n }\n }\n \n+static void af_xdp_read_poll(NetClientState *nc, bool enable)\n+{\n+ AFXDPState *s = DO_UPCAST(AFXDPState, nc, nc);\n+ af_xdp_update_read_poll(s, enable);\n+}\n+\n+static void af_xdp_write_poll(NetClientState *nc, bool enable)\n+{\n+ AFXDPState *s = DO_UPCAST(AFXDPState, nc, nc);\n+ af_xdp_update_write_poll(s, enable);\n+}\n+\n static void af_xdp_complete_tx(AFXDPState *s)\n {\n uint32_t idx = 0;\n@@ -135,7 +147,7 @@ static void af_xdp_writable(void *opaque)\n * and kernel needs a wake up.\n */\n if (!s->outstanding_tx || !xsk_ring_prod__needs_wakeup(&s->tx)) {\n- af_xdp_write_poll(s, false);\n+ af_xdp_update_write_poll(s, false);\n }\n \n /* Flush any buffered packets. */\n@@ -163,7 +175,7 @@ static ssize_t af_xdp_receive(NetClientState *nc,\n * Out of buffers or space in tx ring. Poll until we can write.\n * This will also kick the Tx, if it was waiting on CQ.\n */\n- af_xdp_write_poll(s, true);\n+ af_xdp_update_write_poll(s, true);\n return 0;\n }\n \n@@ -178,7 +190,7 @@ static ssize_t af_xdp_receive(NetClientState *nc,\n s->outstanding_tx++;\n \n if (xsk_ring_prod__needs_wakeup(&s->tx)) {\n- af_xdp_write_poll(s, true);\n+ af_xdp_update_write_poll(s, true);\n }\n \n return size;\n@@ -192,7 +204,7 @@ static void af_xdp_send_completed(NetClientState *nc, ssize_t len)\n {\n AFXDPState *s = DO_UPCAST(AFXDPState, nc, nc);\n \n- af_xdp_read_poll(s, true);\n+ af_xdp_update_read_poll(s, true);\n }\n \n static void af_xdp_fq_refill(AFXDPState *s, uint32_t n)\n@@ -215,7 +227,7 @@ static void af_xdp_fq_refill(AFXDPState *s, uint32_t n)\n \n if (xsk_ring_prod__needs_wakeup(&s->fq)) {\n /* Receive was blocked by not having enough buffers. Wake it up. */\n- af_xdp_read_poll(s, true);\n+ af_xdp_update_read_poll(s, true);\n }\n }\n \n@@ -246,7 +258,7 @@ static void af_xdp_send(void *opaque)\n * The peer does not receive anymore. Packet is queued, stop\n * reading from the backend until af_xdp_send_completed().\n */\n- af_xdp_read_poll(s, false);\n+ af_xdp_update_read_poll(s, false);\n \n /* Return unused descriptors to not break the ring cache. */\n xsk_ring_cons__cancel(&s->rx, n_rx - i - 1);\n@@ -438,6 +450,8 @@ static NetClientInfo net_af_xdp_info = {\n .size = sizeof(AFXDPState),\n .receive = af_xdp_receive,\n .poll = af_xdp_poll,\n+ .read_poll = af_xdp_read_poll,\n+ .write_poll = af_xdp_write_poll,\n .cleanup = af_xdp_cleanup,\n };\n \n@@ -571,7 +585,7 @@ int net_init_af_xdp(const Netdev *netdev,\n }\n }\n \n- af_xdp_read_poll(s, true); /* Initially only poll for reads. */\n+ af_xdp_update_read_poll(s, true); /* Initially only poll for reads. 
*/\n \n return 0;\n \ndiff --git a/net/l2tpv3.c b/net/l2tpv3.c\nindex cdfc641aa6..9f24982a94 100644\n--- a/net/l2tpv3.c\n+++ b/net/l2tpv3.c\n@@ -143,7 +143,7 @@ static void l2tpv3_update_fd_handler(NetL2TPV3State *s)\n s);\n }\n \n-static void l2tpv3_read_poll(NetL2TPV3State *s, bool enable)\n+static void l2tpv3_update_read_poll(NetL2TPV3State *s, bool enable)\n {\n if (s->read_poll != enable) {\n s->read_poll = enable;\n@@ -151,7 +151,7 @@ static void l2tpv3_read_poll(NetL2TPV3State *s, bool enable)\n }\n }\n \n-static void l2tpv3_write_poll(NetL2TPV3State *s, bool enable)\n+static void l2tpv3_update_write_poll(NetL2TPV3State *s, bool enable)\n {\n if (s->write_poll != enable) {\n s->write_poll = enable;\n@@ -162,21 +162,33 @@ static void l2tpv3_write_poll(NetL2TPV3State *s, bool enable)\n static void l2tpv3_writable(void *opaque)\n {\n NetL2TPV3State *s = opaque;\n- l2tpv3_write_poll(s, false);\n+ l2tpv3_update_write_poll(s, false);\n qemu_flush_queued_packets(&s->nc);\n }\n \n static void l2tpv3_send_completed(NetClientState *nc, ssize_t len)\n {\n NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);\n- l2tpv3_read_poll(s, true);\n+ l2tpv3_update_read_poll(s, true);\n }\n \n static void l2tpv3_poll(NetClientState *nc, bool enable)\n {\n NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);\n- l2tpv3_write_poll(s, enable);\n- l2tpv3_read_poll(s, enable);\n+ l2tpv3_update_write_poll(s, enable);\n+ l2tpv3_update_read_poll(s, enable);\n+}\n+\n+static void l2tpv3_read_poll(NetClientState *nc, bool enable)\n+{\n+ NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);\n+ l2tpv3_update_read_poll(s, enable);\n+}\n+\n+static void l2tpv3_write_poll(NetClientState *nc, bool enable)\n+{\n+ NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);\n+ l2tpv3_update_write_poll(s, enable);\n }\n \n static void l2tpv3_form_header(NetL2TPV3State *s)\n@@ -252,7 +264,7 @@ static ssize_t net_l2tpv3_receive_dgram_iov(NetClientState *nc,\n /* signal upper layer that socket buffer is full */\n ret = -errno;\n if (ret == -EAGAIN || ret == -ENOBUFS) {\n- l2tpv3_write_poll(s, true);\n+ l2tpv3_update_write_poll(s, true);\n ret = 0;\n }\n }\n@@ -295,7 +307,7 @@ static ssize_t net_l2tpv3_receive_dgram(NetClientState *nc,\n ret = -errno;\n if (ret == -EAGAIN || ret == -ENOBUFS) {\n /* signal upper layer that socket buffer is full */\n- l2tpv3_write_poll(s, true);\n+ l2tpv3_update_write_poll(s, true);\n ret = 0;\n }\n }\n@@ -369,7 +381,7 @@ static void net_l2tpv3_process_queue(NetL2TPV3State *s)\n l2tpv3_send_completed\n );\n if (size == 0) {\n- l2tpv3_read_poll(s, false);\n+ l2tpv3_update_read_poll(s, false);\n }\n bad_read = false;\n } else {\n@@ -497,8 +509,8 @@ static void net_l2tpv3_cleanup(NetClientState *nc)\n {\n NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);\n qemu_purge_queued_packets(nc);\n- l2tpv3_read_poll(s, false);\n- l2tpv3_write_poll(s, false);\n+ l2tpv3_update_read_poll(s, false);\n+ l2tpv3_update_write_poll(s, false);\n if (s->fd >= 0) {\n close(s->fd);\n }\n@@ -514,6 +526,8 @@ static NetClientInfo net_l2tpv3_info = {\n .receive = net_l2tpv3_receive_dgram,\n .receive_iov = net_l2tpv3_receive_dgram_iov,\n .poll = l2tpv3_poll,\n+ .read_poll = l2tpv3_read_poll,\n+ .write_poll = l2tpv3_write_poll,\n .cleanup = net_l2tpv3_cleanup,\n };\n \n@@ -715,7 +729,7 @@ int net_init_l2tpv3(const Netdev *netdev,\n s->fd = fd;\n s->counter = 0;\n \n- l2tpv3_read_poll(s, true);\n+ l2tpv3_update_read_poll(s, true);\n \n qemu_set_info_str(&s->nc, \"l2tpv3: connected\");\n return 0;\ndiff --git a/net/netmap.c 
b/net/netmap.c\nindex 6cd8f2bdc5..2e2a95a256 100644\n--- a/net/netmap.c\n+++ b/net/netmap.c\n@@ -113,7 +113,7 @@ static void netmap_update_fd_handler(NetmapState *s)\n }\n \n /* Update the read handler. */\n-static void netmap_read_poll(NetmapState *s, bool enable)\n+static void netmap_update_read_poll(NetmapState *s, bool enable)\n {\n if (s->read_poll != enable) { /* Do nothing if not changed. */\n s->read_poll = enable;\n@@ -122,7 +122,7 @@ static void netmap_read_poll(NetmapState *s, bool enable)\n }\n \n /* Update the write handler. */\n-static void netmap_write_poll(NetmapState *s, bool enable)\n+static void netmap_update_write_poll(NetmapState *s, bool enable)\n {\n if (s->write_poll != enable) {\n s->write_poll = enable;\n@@ -141,6 +141,18 @@ static void netmap_poll(NetClientState *nc, bool enable)\n }\n }\n \n+static void netmap_read_poll(NetClientState *nc, bool enable)\n+{\n+ NetmapState *s = DO_UPCAST(NetmapState, nc, nc);\n+ netmap_update_read_poll(s, enable);\n+}\n+\n+static void netmap_write_poll(NetClientState *nc, bool enable)\n+{\n+ NetmapState *s = DO_UPCAST(NetmapState, nc, nc);\n+ netmap_update_write_poll(s, enable);\n+}\n+\n /*\n * The fd_write() callback, invoked if the fd is marked as\n * writable after a poll. Unregister the handler and flush any\n@@ -150,7 +162,7 @@ static void netmap_writable(void *opaque)\n {\n NetmapState *s = opaque;\n \n- netmap_write_poll(s, false);\n+ netmap_update_write_poll(s, false);\n qemu_flush_queued_packets(&s->nc);\n }\n \n@@ -175,7 +187,7 @@ static ssize_t netmap_receive_iov(NetClientState *nc,\n * ones), but without publishing any new slots to be processed\n * (e.g., we don't advance ring->head). */\n ring->cur = tail;\n- netmap_write_poll(s, true);\n+ netmap_update_write_poll(s, true);\n return 0;\n }\n \n@@ -195,7 +207,7 @@ static ssize_t netmap_receive_iov(NetClientState *nc,\n /* We ran out of netmap slots while splitting the\n iovec fragments. */\n ring->cur = tail;\n- netmap_write_poll(s, true);\n+ netmap_update_write_poll(s, true);\n return 0;\n }\n \n@@ -242,7 +254,7 @@ static void netmap_send_completed(NetClientState *nc, ssize_t len)\n {\n NetmapState *s = DO_UPCAST(NetmapState, nc, nc);\n \n- netmap_read_poll(s, true);\n+ netmap_update_read_poll(s, true);\n }\n \n static void netmap_send(void *opaque)\n@@ -289,7 +301,7 @@ static void netmap_send(void *opaque)\n if (iovsize == 0) {\n /* The peer does not receive anymore. Packet is queued, stop\n * reading from the backend until netmap_send_completed(). */\n- netmap_read_poll(s, false);\n+ netmap_update_read_poll(s, false);\n break;\n }\n }\n@@ -384,6 +396,8 @@ static NetClientInfo net_netmap_info = {\n .receive = netmap_receive,\n .receive_iov = netmap_receive_iov,\n .poll = netmap_poll,\n+ .read_poll = netmap_read_poll,\n+ .write_poll = netmap_write_poll,\n .cleanup = netmap_cleanup,\n .has_ufo = netmap_has_vnet_hdr,\n .has_vnet_hdr = netmap_has_vnet_hdr,\n@@ -418,7 +432,7 @@ int net_init_netmap(const Netdev *netdev,\n s->rx = NETMAP_RXRING(nmd->nifp, 0);\n s->vnet_hdr_len = 0;\n pstrcpy(s->ifname, sizeof(s->ifname), netmap_opts->ifname);\n- netmap_read_poll(s, true); /* Initially only poll for reads. */\n+ netmap_update_read_poll(s, true); /* Initially only poll for reads. 
*/\n \n return 0;\n }\ndiff --git a/net/tap.c b/net/tap.c\nindex abe3b2d036..c2e7e4d1d8 100644\n--- a/net/tap.c\n+++ b/net/tap.c\n@@ -99,13 +99,13 @@ static void tap_update_fd_handler(TAPState *s)\n s);\n }\n \n-static void tap_read_poll(TAPState *s, bool enable)\n+static void tap_update_read_poll(TAPState *s, bool enable)\n {\n s->read_poll = enable;\n tap_update_fd_handler(s);\n }\n \n-static void tap_write_poll(TAPState *s, bool enable)\n+static void tap_update_write_poll(TAPState *s, bool enable)\n {\n s->write_poll = enable;\n tap_update_fd_handler(s);\n@@ -115,7 +115,7 @@ static void tap_writable(void *opaque)\n {\n TAPState *s = opaque;\n \n- tap_write_poll(s, false);\n+ tap_update_write_poll(s, false);\n \n qemu_flush_queued_packets(&s->nc);\n }\n@@ -127,7 +127,7 @@ static ssize_t tap_write_packet(TAPState *s, const struct iovec *iov, int iovcnt\n len = RETRY_ON_EINTR(writev(s->fd, iov, iovcnt));\n \n if (len == -1 && errno == EAGAIN) {\n- tap_write_poll(s, true);\n+ tap_update_write_poll(s, true);\n return 0;\n }\n \n@@ -174,7 +174,7 @@ ssize_t tap_read_packet(int tapfd, uint8_t *buf, int maxlen)\n static void tap_send_completed(NetClientState *nc, ssize_t len)\n {\n TAPState *s = DO_UPCAST(TAPState, nc, nc);\n- tap_read_poll(s, true);\n+ tap_update_read_poll(s, true);\n }\n \n static void tap_send(void *opaque)\n@@ -212,7 +212,7 @@ static void tap_send(void *opaque)\n \n size = qemu_send_packet_async(&s->nc, buf, size, tap_send_completed);\n if (size == 0) {\n- tap_read_poll(s, false);\n+ tap_update_read_poll(s, false);\n break;\n } else if (size < 0) {\n break;\n@@ -334,8 +334,8 @@ static void tap_cleanup(NetClientState *nc)\n tap_exit_notify(&s->exit, NULL);\n qemu_remove_exit_notifier(&s->exit);\n \n- tap_read_poll(s, false);\n- tap_write_poll(s, false);\n+ tap_update_read_poll(s, false);\n+ tap_update_write_poll(s, false);\n close(s->fd);\n s->fd = -1;\n }\n@@ -343,8 +343,20 @@ static void tap_cleanup(NetClientState *nc)\n static void tap_poll(NetClientState *nc, bool enable)\n {\n TAPState *s = DO_UPCAST(TAPState, nc, nc);\n- tap_read_poll(s, enable);\n- tap_write_poll(s, enable);\n+ tap_update_read_poll(s, enable);\n+ tap_update_write_poll(s, enable);\n+}\n+\n+static void tap_read_poll(NetClientState *nc, bool enable)\n+{\n+ TAPState *s = DO_UPCAST(TAPState, nc, nc);\n+ tap_update_read_poll(s, enable);\n+}\n+\n+static void tap_write_poll(NetClientState *nc, bool enable)\n+{\n+ TAPState *s = DO_UPCAST(TAPState, nc, nc);\n+ tap_update_write_poll(s, enable);\n }\n \n static bool tap_set_steering_ebpf(NetClientState *nc, int prog_fd)\n@@ -382,6 +394,8 @@ static NetClientInfo net_tap_info = {\n .receive = tap_receive,\n .receive_iov = tap_receive_iov,\n .poll = tap_poll,\n+ .read_poll = tap_read_poll,\n+ .write_poll = tap_write_poll,\n .cleanup = tap_cleanup,\n .has_ufo = tap_has_ufo,\n .has_uso = tap_has_uso,\n@@ -425,7 +439,7 @@ static TAPState *net_tap_fd_init(NetClientState *peer,\n if (vnet_hdr) {\n tap_fd_set_vnet_hdr_len(s->fd, s->host_vnet_hdr_len);\n }\n- tap_read_poll(s, true);\n+ tap_update_read_poll(s, true);\n s->vhost_net = NULL;\n \n s->exit.notify = tap_exit_notify;\n", "prefixes": [ "01/12" ] }
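The JSON response above carries everything needed to retrieve and apply the patch locally: the mbox field serves the message in mailbox format, and the series entry links the remaining patches of the 12-patch set. A small follow-on sketch, again assuming the requests library; the output filename is arbitrary, and the git am step assumes a QEMU checkout as the working directory.

import requests
import subprocess

resp = requests.get("http://patchwork.ozlabs.org/api/patches/2194219/")
resp.raise_for_status()
patch = resp.json()

# The mbox endpoint serves the patch as a mailbox file that
# git-am can consume directly.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("0001-net-introduce-read_poll-write_poll.mbox", "wb") as f:
    f.write(mbox.content)

# Apply on top of the current branch of the local checkout.
subprocess.run(
    ["git", "am", "0001-net-introduce-read_poll-write_poll.mbox"],
    check=True,
)

# "series" points at the full set, should the whole series be wanted.
print("Full series mbox:", patch["series"][0]["mbox"])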