Patch Detail
get: Show a patch.
patch: Update a patch.
put: Update a patch.
GET /api/patches/2194964/?format=api
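For reference, the same resource can also be fetched programmatically; the API serves plain JSON and needs no authentication for reads. Below is a minimal sketch using libcurl (the choice of libcurl is an assumption for illustration; any HTTP client works):

/* Minimal sketch: fetch this patch as JSON with libcurl (assumption:
 * libcurl is installed; build with `cc fetch.c -lcurl`). libcurl's
 * default write callback prints the response body to stdout. */
#include <stdio.h>
#include <curl/curl.h>

int
main(void)
{
    CURLcode res;
    CURL *curl;

    curl_global_init(CURL_GLOBAL_DEFAULT);
    curl = curl_easy_init();
    if (!curl) {
        curl_global_cleanup();
        return 1;
    }

    /* Same resource as the GET request above. */
    curl_easy_setopt(curl, CURLOPT_URL,
                     "http://patchwork.ozlabs.org/api/patches/2194964/");
    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

    res = curl_easy_perform(curl);
    if (res != CURLE_OK) {
        fprintf(stderr, "request failed: %s\n", curl_easy_strerror(res));
    }

    curl_easy_cleanup(curl);
    curl_global_cleanup();
    return res == CURLE_OK ? 0 : 1;
}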
{ "id": 2194964, "url": "http://patchwork.ozlabs.org/api/patches/2194964/?format=api", "web_url": "http://patchwork.ozlabs.org/project/openvswitch/patch/e3c7ac7fe2cc772bfde5a13dd180e79aee385f99.1770716110.git.echaudro@redhat.com/", "project": { "id": 47, "url": "http://patchwork.ozlabs.org/api/projects/47/?format=api", "name": "Open vSwitch", "link_name": "openvswitch", "list_id": "ovs-dev.openvswitch.org", "list_email": "ovs-dev@openvswitch.org", "web_url": "http://openvswitch.org/", "scm_url": "git@github.com:openvswitch/ovs.git", "webscm_url": "https://github.com/openvswitch/ovs", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<e3c7ac7fe2cc772bfde5a13dd180e79aee385f99.1770716110.git.echaudro@redhat.com>", "list_archive_url": null, "date": "2026-02-10T09:56:08", "name": "[ovs-dev,1/2] dpif-offload: Add infrastructure for offload provider PMD helpers.", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "84e32a036e0dc1c63aa9d5c36bb87be84ebd5dd7", "submitter": { "id": 70613, "url": "http://patchwork.ozlabs.org/api/people/70613/?format=api", "name": "Eelco Chaudron", "email": "echaudro@redhat.com" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/openvswitch/patch/e3c7ac7fe2cc772bfde5a13dd180e79aee385f99.1770716110.git.echaudro@redhat.com/mbox/", "series": [ { "id": 491639, "url": "http://patchwork.ozlabs.org/api/series/491639/?format=api", "web_url": "http://patchwork.ozlabs.org/project/openvswitch/list/?series=491639", "date": "2026-02-10T09:56:07", "name": "dpif-offload: Add PMD thread helpers and hardware offload simulation", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/491639/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/2194964/comments/", "check": "success", "checks": "http://patchwork.ozlabs.org/api/patches/2194964/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<ovs-dev-bounces@openvswitch.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "dev@openvswitch.org" ], "Delivered-To": [ "patchwork-incoming@legolas.ozlabs.org", "ovs-dev@lists.linuxfoundation.org" ], "Authentication-Results": [ "legolas.ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (1024-bit key;\n unprotected) header.d=redhat.com header.i=@redhat.com header.a=rsa-sha256\n header.s=mimecast20190719 header.b=bQe6QXrl;\n\tdkim-atps=neutral", "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org\n (client-ip=140.211.166.136; helo=smtp3.osuosl.org;\n envelope-from=ovs-dev-bounces@openvswitch.org; receiver=patchwork.ozlabs.org)", "smtp3.osuosl.org;\n\tdkim=fail reason=\"signature verification failed\" (1024-bit key)\n header.d=redhat.com header.i=@redhat.com header.a=rsa-sha256\n header.s=mimecast20190719 header.b=bQe6QXrl", "smtp2.osuosl.org; dmarc=pass (p=quarantine dis=none)\n header.from=redhat.com", "smtp2.osuosl.org;\n dkim=pass (1024-bit key) header.d=redhat.com header.i=@redhat.com\n header.a=rsa-sha256 header.s=mimecast20190719 header.b=bQe6QXrl" ], "Received": [ "from smtp3.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature ECDSA (secp384r1) server-digest SHA384)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4f9H5X4G6Tz1xtV\n\tfor <incoming@patchwork.ozlabs.org>; Tue, 10 Feb 2026 20:56:32 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby smtp3.osuosl.org (Postfix) 
with ESMTP id 95B92612BB;\n\tTue, 10 Feb 2026 09:56:28 +0000 (UTC)", "from smtp3.osuosl.org ([127.0.0.1])\n by localhost (smtp3.osuosl.org [127.0.0.1]) (amavis, port 10024) with ESMTP\n id Yh36YH_2HWhJ; Tue, 10 Feb 2026 09:56:26 +0000 (UTC)", "from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])\n\tby smtp3.osuosl.org (Postfix) with ESMTPS id 99789610CC;\n\tTue, 10 Feb 2026 09:56:26 +0000 (UTC)", "from lf-lists.osuosl.org (localhost [127.0.0.1])\n\tby lists.linuxfoundation.org (Postfix) with ESMTP id 738DAC077F;\n\tTue, 10 Feb 2026 09:56:26 +0000 (UTC)", "from smtp2.osuosl.org (smtp2.osuosl.org [IPv6:2605:bc80:3010::133])\n by lists.linuxfoundation.org (Postfix) with ESMTP id 20326C077F\n for <dev@openvswitch.org>; Tue, 10 Feb 2026 09:56:25 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n by smtp2.osuosl.org (Postfix) with ESMTP id EBDA740701\n for <dev@openvswitch.org>; Tue, 10 Feb 2026 09:56:24 +0000 (UTC)", "from smtp2.osuosl.org ([127.0.0.1])\n by localhost (smtp2.osuosl.org [127.0.0.1]) (amavis, port 10024) with ESMTP\n id ngkTbOieI96y for <dev@openvswitch.org>;\n Tue, 10 Feb 2026 09:56:23 +0000 (UTC)", "from us-smtp-delivery-124.mimecast.com\n (us-smtp-delivery-124.mimecast.com [170.10.129.124])\n by smtp2.osuosl.org (Postfix) with ESMTPS id 8E2A7402A8\n for <dev@openvswitch.org>; Tue, 10 Feb 2026 09:56:23 +0000 (UTC)", "from mx-prod-mc-06.mail-002.prod.us-west-2.aws.redhat.com\n (ec2-35-165-154-97.us-west-2.compute.amazonaws.com [35.165.154.97]) by\n relay.mimecast.com with ESMTP with STARTTLS (version=TLSv1.3,\n cipher=TLS_AES_256_GCM_SHA384) id us-mta-589-mnL22G1uO9WE2vlRAn_TsA-1; Tue,\n 10 Feb 2026 04:56:18 -0500", "from mx-prod-int-08.mail-002.prod.us-west-2.aws.redhat.com\n (mx-prod-int-08.mail-002.prod.us-west-2.aws.redhat.com [10.30.177.111])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n key-exchange X25519 server-signature RSA-PSS (2048 bits) server-digest\n SHA256)\n (No client certificate requested)\n by mx-prod-mc-06.mail-002.prod.us-west-2.aws.redhat.com (Postfix) with ESMTPS\n id 1FFE51800365; Tue, 10 Feb 2026 09:56:17 +0000 (UTC)", "from ebuild.chome (unknown [10.44.34.212])\n by mx-prod-int-08.mail-002.prod.us-west-2.aws.redhat.com (Postfix) with ESMTP\n id A741D18003F5; Tue, 10 Feb 2026 09:56:15 +0000 (UTC)" ], "X-Virus-Scanned": [ "amavis at osuosl.org", "amavis at osuosl.org" ], "X-Comment": "SPF check N/A for local connections - client-ip=140.211.9.56;\n helo=lists.linuxfoundation.org;\n envelope-from=ovs-dev-bounces@openvswitch.org; receiver=<UNKNOWN> ", "DKIM-Filter": [ "OpenDKIM Filter v2.11.0 smtp3.osuosl.org 99789610CC", "OpenDKIM Filter v2.11.0 smtp2.osuosl.org 8E2A7402A8" ], "Received-SPF": "Pass (mailfrom) identity=mailfrom; client-ip=170.10.129.124;\n helo=us-smtp-delivery-124.mimecast.com; envelope-from=echaudro@redhat.com;\n receiver=<UNKNOWN>", "DMARC-Filter": "OpenDMARC Filter v1.4.2 smtp2.osuosl.org 8E2A7402A8", "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com;\n s=mimecast20190719; t=1770717382;\n h=from:from:reply-to:subject:subject:date:date:message-id:message-id:\n to:to:cc:cc:mime-version:mime-version:content-type:content-type:\n content-transfer-encoding:content-transfer-encoding:\n in-reply-to:in-reply-to:references:references;\n bh=E688PVV5px8eVO1LLtZJ0lPEstxo8lTK34jys3mjWWQ=;\n b=bQe6QXrl/fqf9MMstzWUbNqyeJ9XB6z5p6X3Tqfzu2dpFVElplykRlRqzLbfEdemBy+xlS\n Ahc6Trb/B5BbxoNnxBkckGQ+IrHr25XyOrkc5ISn4uXfkM+VYgZUnRnAgkWb2yyOIUKHbv\n sape2VX1vdmahTbWGWyc7K6HSi2vInQ=", "X-MC-Unique": 
"mnL22G1uO9WE2vlRAn_TsA-1", "X-Mimecast-MFC-AGG-ID": "mnL22G1uO9WE2vlRAn_TsA_1770717377", "To": "dev@openvswitch.org", "Date": "Tue, 10 Feb 2026 10:56:08 +0100", "Message-ID": "\n <e3c7ac7fe2cc772bfde5a13dd180e79aee385f99.1770716110.git.echaudro@redhat.com>", "In-Reply-To": "<cover.1770716110.git.echaudro@redhat.com>", "References": "<cover.1770716110.git.echaudro@redhat.com>", "MIME-Version": "1.0", "X-Scanned-By": "MIMEDefang 3.4.1 on 10.30.177.111", "X-Mimecast-Spam-Score": "0", "X-Mimecast-MFC-PROC-ID": "7CUTGAJAukAYigb6J4KTLmd14TBEfn_vz-vydxk2EU4_1770717377", "X-Mimecast-Originator": "redhat.com", "Subject": "[ovs-dev] [PATCH 1/2] dpif-offload: Add infrastructure for offload\n provider PMD helpers.", "X-BeenThere": "ovs-dev@openvswitch.org", "X-Mailman-Version": "2.1.30", "Precedence": "list", "List-Id": "<ovs-dev.openvswitch.org>", "List-Unsubscribe": "<https://mail.openvswitch.org/mailman/options/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=unsubscribe>", "List-Archive": "<http://mail.openvswitch.org/pipermail/ovs-dev/>", "List-Post": "<mailto:ovs-dev@openvswitch.org>", "List-Help": "<mailto:ovs-dev-request@openvswitch.org?subject=help>", "List-Subscribe": "<https://mail.openvswitch.org/mailman/listinfo/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=subscribe>", "From": "Eelco Chaudron via dev <ovs-dev@openvswitch.org>", "Reply-To": "Eelco Chaudron <echaudro@redhat.com>", "Cc": "elibr@nvidia.com", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "ovs-dev-bounces@openvswitch.org", "Sender": "\"dev\" <ovs-dev-bounces@openvswitch.org>" }, "content": "This patch adds support for specific PMD thread initialization,\ndeinitialization, and a callback execution to perform work as\npart of the PMD thread loop. This allows hardware offload\nproviders to handle any specific asynchronous or batching work.\n\nThis patch also adds cycle statistics for the provider-specific\ncallbacks to the 'ovs-appctl dpif-netdev/pmd-perf-show' command.\n\nSigned-off-by: Eelco Chaudron <echaudro@redhat.com>\n---\n lib/dpif-netdev-perf.c | 19 ++++-\n lib/dpif-netdev-perf.h | 3 +-\n lib/dpif-netdev.c | 42 ++++++++++-\n lib/dpif-offload-dummy.c | 38 ++++++++++\n lib/dpif-offload-provider.h | 26 +++++++\n lib/dpif-offload.c | 133 ++++++++++++++++++++++++++++++++++\n lib/dpif-offload.h | 11 +++\n tests/pmd.at | 32 ++++++++\n utilities/checkpatch_dict.txt | 2 +\n 9 files changed, 298 insertions(+), 8 deletions(-)", "diff": "diff --git a/lib/dpif-netdev-perf.c b/lib/dpif-netdev-perf.c\nindex 1cd4ee0842..39465ba819 100644\n--- a/lib/dpif-netdev-perf.c\n+++ b/lib/dpif-netdev-perf.c\n@@ -232,6 +232,7 @@ pmd_perf_format_overall_stats(struct ds *str, struct pmd_perf_stats *s,\n uint64_t busy_iter = tot_iter >= idle_iter ? tot_iter - idle_iter : 0;\n uint64_t sleep_iter = stats[PMD_SLEEP_ITER];\n uint64_t tot_sleep_cycles = stats[PMD_CYCLES_SLEEP];\n+ uint64_t offload_cycles = stats[PMD_CYCLES_OFFLOAD];\n \n ds_put_format(str,\n \" Iterations: %12\"PRIu64\" (%.2f us/it)\\n\"\n@@ -242,7 +243,8 @@ pmd_perf_format_overall_stats(struct ds *str, struct pmd_perf_stats *s,\n \" Sleep time (us): %12.0f (%3.0f us/iteration avg.)\\n\",\n tot_iter,\n tot_iter\n- ? (tot_cycles + tot_sleep_cycles) * us_per_cycle / tot_iter\n+ ? 
(tot_cycles + tot_sleep_cycles + offload_cycles)\n+ * us_per_cycle / tot_iter\n : 0,\n tot_cycles, 100.0 * (tot_cycles / duration) / tsc_hz,\n idle_iter,\n@@ -252,6 +254,13 @@ pmd_perf_format_overall_stats(struct ds *str, struct pmd_perf_stats *s,\n sleep_iter, tot_iter ? 100.0 * sleep_iter / tot_iter : 0,\n tot_sleep_cycles * us_per_cycle,\n sleep_iter ? (tot_sleep_cycles * us_per_cycle) / sleep_iter : 0);\n+ if (offload_cycles > 0) {\n+ ds_put_format(str,\n+ \" Offload cycles: %12\" PRIu64 \" (%5.1f %% of used cycles)\\n\",\n+ offload_cycles,\n+ 100.0 * offload_cycles / (tot_cycles + tot_sleep_cycles\n+ + offload_cycles));\n+ }\n if (rx_packets > 0) {\n ds_put_format(str,\n \" Rx packets: %12\"PRIu64\" (%.0f Kpps, %.0f cycles/pkt)\\n\"\n@@ -532,14 +541,14 @@ OVS_REQUIRES(s->stats_mutex)\n void\n pmd_perf_end_iteration(struct pmd_perf_stats *s, int rx_packets,\n int tx_packets, uint64_t sleep_cycles,\n- bool full_metrics)\n+ uint64_t offload_cycles, bool full_metrics)\n {\n uint64_t now_tsc = cycles_counter_update(s);\n struct iter_stats *cum_ms;\n uint64_t cycles, cycles_per_pkt = 0;\n char *reason = NULL;\n \n- cycles = now_tsc - s->start_tsc - sleep_cycles;\n+ cycles = now_tsc - s->start_tsc - sleep_cycles - offload_cycles;\n s->current.timestamp = s->iteration_cnt;\n s->current.cycles = cycles;\n s->current.pkts = rx_packets;\n@@ -558,6 +567,10 @@ pmd_perf_end_iteration(struct pmd_perf_stats *s, int rx_packets,\n pmd_perf_update_counter(s, PMD_CYCLES_SLEEP, sleep_cycles);\n }\n \n+ if (offload_cycles) {\n+ pmd_perf_update_counter(s, PMD_CYCLES_OFFLOAD, offload_cycles);\n+ }\n+\n if (!full_metrics) {\n return;\n }\ndiff --git a/lib/dpif-netdev-perf.h b/lib/dpif-netdev-perf.h\nindex 84beced151..2a055dacdd 100644\n--- a/lib/dpif-netdev-perf.h\n+++ b/lib/dpif-netdev-perf.h\n@@ -82,6 +82,7 @@ enum pmd_stat_type {\n PMD_CYCLES_UPCALL, /* Cycles spent processing upcalls. */\n PMD_SLEEP_ITER, /* Iterations where a sleep has taken place. */\n PMD_CYCLES_SLEEP, /* Total cycles slept to save power. */\n+ PMD_CYCLES_OFFLOAD, /* Total cycles spend handling offload. */\n PMD_N_STATS\n };\n \n@@ -411,7 +412,7 @@ pmd_perf_start_iteration(struct pmd_perf_stats *s);\n void\n pmd_perf_end_iteration(struct pmd_perf_stats *s, int rx_packets,\n int tx_packets, uint64_t sleep_cycles,\n- bool full_metrics);\n+ uint64_t offload_cycles, bool full_metrics);\n \n /* Formatting the output of commands. */\n \ndiff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c\nindex 9df05c4c28..64531a02c0 100644\n--- a/lib/dpif-netdev.c\n+++ b/lib/dpif-netdev.c\n@@ -329,6 +329,9 @@ struct dp_netdev {\n uint64_t last_reconfigure_seq;\n struct ovsthread_once once_set_config;\n \n+ /* When a reconfigure is requested, forcefully reload all PMDs. */\n+ bool force_pmd_reload;\n+\n /* Cpu mask for pin of pmd threads. */\n char *pmd_cmask;\n \n@@ -339,6 +342,7 @@ struct dp_netdev {\n \n struct conntrack *conntrack;\n struct pmd_auto_lb pmd_alb;\n+ bool offload_enabled;\n \n /* Bonds. */\n struct ovs_mutex bond_mutex; /* Protects updates of 'tx_bonds'. 
*/\n@@ -4556,6 +4560,14 @@ dpif_netdev_set_config(struct dpif *dpif, const struct smap *other_config)\n log_all_pmd_sleeps(dp);\n }\n \n+ if (!dp->offload_enabled) {\n+ dp->offload_enabled = dpif_offload_enabled();\n+ if (dp->offload_enabled) {\n+ dp->force_pmd_reload = true;\n+ dp_netdev_request_reconfigure(dp);\n+ }\n+ }\n+\n return 0;\n }\n \n@@ -6216,6 +6228,14 @@ reconfigure_datapath(struct dp_netdev *dp)\n ovs_mutex_unlock(&pmd->port_mutex);\n }\n \n+ /* Do we need to forcefully reload all threads? */\n+ if (dp->force_pmd_reload) {\n+ CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {\n+ pmd->need_reload = true;\n+ }\n+ dp->force_pmd_reload = false;\n+ }\n+\n /* Reload affected pmd threads. */\n reload_affected_pmds(dp);\n \n@@ -6516,6 +6536,7 @@ pmd_thread_main(void *f_)\n {\n struct dp_netdev_pmd_thread *pmd = f_;\n struct pmd_perf_stats *s = &pmd->perf_stats;\n+ struct dpif_offload_pmd_ctx *offload_ctx = NULL;\n unsigned int lc = 0;\n struct polled_queue *poll_list;\n bool wait_for_reload = false;\n@@ -6549,6 +6570,9 @@ reload:\n dpdk_attached = dpdk_attach_thread(pmd->core_id);\n }\n \n+ dpif_offload_pmd_thread_reload(pmd->dp->full_name, pmd->core_id,\n+ pmd->numa_id, &offload_ctx);\n+\n /* List port/core affinity */\n for (i = 0; i < poll_cnt; i++) {\n VLOG_DBG(\"Core %d processing port \\'%s\\' with queue-id %d\\n\",\n@@ -6588,7 +6612,7 @@ reload:\n ovs_mutex_lock(&pmd->perf_stats.stats_mutex);\n for (;;) {\n uint64_t rx_packets = 0, tx_packets = 0;\n- uint64_t time_slept = 0;\n+ uint64_t time_slept = 0, offload_cycles = 0;\n uint64_t max_sleep;\n \n pmd_perf_start_iteration(s);\n@@ -6628,6 +6652,10 @@ reload:\n ? true : false);\n }\n \n+ /* Do work required by any of the hardware offload providers. */\n+ offload_cycles = dpif_offload_pmd_thread_do_work(offload_ctx,\n+ &pmd->perf_stats);\n+\n if (max_sleep) {\n /* Check if a sleep should happen on this iteration. 
*/\n if (sleep_time) {\n@@ -6687,7 +6715,7 @@ reload:\n }\n \n pmd_perf_end_iteration(s, rx_packets, tx_packets, time_slept,\n- pmd_perf_metrics_enabled(pmd));\n+ offload_cycles, pmd_perf_metrics_enabled(pmd));\n }\n ovs_mutex_unlock(&pmd->perf_stats.stats_mutex);\n \n@@ -6708,6 +6736,7 @@ reload:\n goto reload;\n }\n \n+ dpif_offload_pmd_thread_exit(offload_ctx);\n pmd_free_static_tx_qid(pmd);\n dfc_cache_uninit(&pmd->flow_cache);\n free(poll_list);\n@@ -9623,7 +9652,7 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,\n struct polled_queue *poll_list, int poll_cnt)\n {\n struct dpcls *cls;\n- uint64_t tot_idle = 0, tot_proc = 0, tot_sleep = 0;\n+ uint64_t tot_idle = 0, tot_proc = 0, tot_sleep = 0, tot_offload = 0;\n unsigned int pmd_load = 0;\n \n if (pmd->ctx.now > pmd->next_cycle_store) {\n@@ -9642,11 +9671,14 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,\n pmd->prev_stats[PMD_CYCLES_ITER_BUSY];\n tot_sleep = pmd->perf_stats.counters.n[PMD_CYCLES_SLEEP] -\n pmd->prev_stats[PMD_CYCLES_SLEEP];\n+ tot_offload = pmd->perf_stats.counters.n[PMD_CYCLES_OFFLOAD] -\n+ pmd->prev_stats[PMD_CYCLES_OFFLOAD];\n \n if (pmd_alb->is_enabled && !pmd->isolated) {\n if (tot_proc) {\n pmd_load = ((tot_proc * 100) /\n- (tot_idle + tot_proc + tot_sleep));\n+ (tot_idle + tot_proc + tot_sleep\n+ + tot_offload));\n }\n \n atomic_read_relaxed(&pmd_alb->rebalance_load_thresh,\n@@ -9665,6 +9697,8 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,\n pmd->perf_stats.counters.n[PMD_CYCLES_ITER_BUSY];\n pmd->prev_stats[PMD_CYCLES_SLEEP] =\n pmd->perf_stats.counters.n[PMD_CYCLES_SLEEP];\n+ pmd->prev_stats[PMD_CYCLES_OFFLOAD] =\n+ pmd->perf_stats.counters.n[PMD_CYCLES_OFFLOAD];\n \n /* Get the cycles that were used to process each queue and store. */\n for (unsigned i = 0; i < poll_cnt; i++) {\ndiff --git a/lib/dpif-offload-dummy.c b/lib/dpif-offload-dummy.c\nindex b7b24d6064..a096e40d6d 100644\n--- a/lib/dpif-offload-dummy.c\n+++ b/lib/dpif-offload-dummy.c\n@@ -17,6 +17,7 @@\n #include <config.h>\n #include <errno.h>\n \n+#include \"coverage.h\"\n #include \"dpif.h\"\n #include \"dpif-offload.h\"\n #include \"dpif-offload-provider.h\"\n@@ -33,6 +34,8 @@\n \n VLOG_DEFINE_THIS_MODULE(dpif_offload_dummy);\n \n+COVERAGE_DEFINE(dummy_offload_do_work);\n+\n struct pmd_id_data {\n struct hmap_node node;\n void *flow_reference;\n@@ -784,6 +787,40 @@ dummy_netdev_simulate_offload(struct netdev *netdev, struct dp_packet *packet,\n ovs_mutex_unlock(&port->port_mutex);\n }\n \n+static void\n+dummy_pmd_thread_work_cb(unsigned core_id OVS_UNUSED, int numa_id OVS_UNUSED,\n+ void *ctx OVS_UNUSED)\n+{\n+ COVERAGE_INC(dummy_offload_do_work);\n+}\n+\n+static void\n+dummy_pmd_thread_lifecycle(const struct dpif_offload *dpif_offload,\n+ bool exit, unsigned core_id, int numa_id,\n+ dpif_offload_pmd_thread_work_cb **callback,\n+ void **ctx)\n+{\n+ /* Only do this for the 'dummy' class, not for 'dummy_x'. */\n+ if (strcmp(dpif_offload_type(dpif_offload), \"dummy\")) {\n+ *callback = NULL;\n+ *ctx = NULL;\n+ return;\n+ }\n+\n+ VLOG_DBG(\n+ \"pmd_thread_lifecycle; exit=%s, core=%u, numa=%d, cb=%p, ctx=%p\",\n+ exit ? \"true\" : \"false\", core_id, numa_id, *callback, *ctx);\n+\n+ ovs_assert(!*callback || *callback == dummy_pmd_thread_work_cb);\n+\n+ if (exit) {\n+ free(*ctx);\n+ } else {\n+ *ctx = *ctx ? 
*ctx : xstrdup(\"DUMMY_OFFLOAD_WORK\");\n+ *callback = dummy_pmd_thread_work_cb;\n+ }\n+}\n+\n #define DEFINE_DPIF_DUMMY_CLASS(NAME, TYPE_STR) \\\n struct dpif_offload_class NAME = { \\\n .type = TYPE_STR, \\\n@@ -803,6 +840,7 @@ dummy_netdev_simulate_offload(struct netdev *netdev, struct dp_packet *packet,\n .netdev_flow_del = dummy_flow_del, \\\n .netdev_flow_stats = dummy_flow_stats, \\\n .register_flow_unreference_cb = dummy_register_flow_unreference_cb, \\\n+ .pmd_thread_lifecycle = dummy_pmd_thread_lifecycle \\\n }\n \n DEFINE_DPIF_DUMMY_CLASS(dpif_offload_dummy_class, \"dummy\");\ndiff --git a/lib/dpif-offload-provider.h b/lib/dpif-offload-provider.h\nindex 02ef46cb08..259de2c299 100644\n--- a/lib/dpif-offload-provider.h\n+++ b/lib/dpif-offload-provider.h\n@@ -87,6 +87,10 @@ dpif_offload_flow_dump_thread_init(\n }\n \n \n+/* Offload Provider specific PMD thread work callback definition. */\n+typedef void dpif_offload_pmd_thread_work_cb(unsigned core_id, int numa_id,\n+ void *ctx);\n+\n struct dpif_offload_class {\n /* Type of DPIF offload provider in this class, e.g., \"tc\", \"dpdk\",\n * \"dummy\", etc. */\n@@ -305,6 +309,28 @@ struct dpif_offload_class {\n * to netdev_flow_put() is no longer held by the offload provider. */\n void (*register_flow_unreference_cb)(const struct dpif_offload *,\n dpif_offload_flow_unreference_cb *);\n+\n+\n+ /* The API below is specific to PMD (userspace) thread lifecycle handling.\n+ *\n+ * This API allows a provider to supply a callback function\n+ * (via `*callback`) and an optional context pointer (via `*ctx`) for a\n+ * PMD thread.\n+ *\n+ * The lifecycle hook may be invoked multiple times for the same PMD\n+ * thread. For example, when the thread is reinitialized, this function\n+ * will be called again and the previous `callback` and `ctx` values will\n+ * be passed back in. It is the provider's responsibility to decide\n+ * whether those should be reused, replaced, or cleaned up before storing\n+ * new values.\n+ *\n+ * When the PMD thread is terminating, this API is called with\n+ * `exit == true`. At that point, the provider must release any resources\n+ * associated with the previously returned `callback` and `ctx`. 
*/\n+ void (*pmd_thread_lifecycle)(const struct dpif_offload *, bool exit,\n+ unsigned core_id, int numa_id,\n+ dpif_offload_pmd_thread_work_cb **callback,\n+ void **ctx);\n };\n \n extern struct dpif_offload_class dpif_offload_dummy_class;\ndiff --git a/lib/dpif-offload.c b/lib/dpif-offload.c\nindex bb2feced9e..cbf1f6c704 100644\n--- a/lib/dpif-offload.c\n+++ b/lib/dpif-offload.c\n@@ -17,6 +17,7 @@\n #include <config.h>\n #include <errno.h>\n \n+#include \"dpif-netdev-perf.h\"\n #include \"dpif-offload.h\"\n #include \"dpif-offload-provider.h\"\n #include \"dpif-provider.h\"\n@@ -54,6 +55,7 @@ static const struct dpif_offload_class *base_dpif_offload_classes[] = {\n &dpif_offload_dummy_x_class,\n };\n \n+#define TOTAL_PROVIDERS ARRAY_SIZE(base_dpif_offload_classes)\n #define DEFAULT_PROVIDER_PRIORITY_LIST \"tc,dpdk,dummy,dummy_x\"\n \n static char *dpif_offload_provider_priority_list = NULL;\n@@ -1665,3 +1667,134 @@ dpif_offload_port_mgr_port_count(const struct dpif_offload *offload)\n \n return cmap_count(&offload->ports->odp_port_to_port);\n }\n+\n+struct dpif_offload_pmd_ctx_node {\n+ const struct dpif_offload *offload;\n+ dpif_offload_pmd_thread_work_cb *callback;\n+ void *provider_ctx;\n+};\n+\n+struct dpif_offload_pmd_ctx {\n+ unsigned core_id;\n+ int numa_id;\n+ size_t n_nodes;\n+ struct dpif_offload_pmd_ctx_node nodes[TOTAL_PROVIDERS];\n+};\n+\n+void\n+dpif_offload_pmd_thread_reload(const char *dpif_name, unsigned core_id,\n+ int numa_id, struct dpif_offload_pmd_ctx **ctx_)\n+{\n+ struct dpif_offload_pmd_ctx_node old_nodes[TOTAL_PROVIDERS];\n+ struct dpif_offload_provider_collection *collection;\n+ struct dpif_offload_pmd_ctx *ctx;\n+ struct dpif_offload *offload;\n+ size_t old_n_nodes = 0;\n+\n+ if (!dpif_offload_enabled()) {\n+ ovs_assert(!*ctx_);\n+ return;\n+ }\n+\n+ ovs_mutex_lock(&dpif_offload_mutex);\n+ collection = shash_find_data(&dpif_offload_providers, dpif_name);\n+ ovs_mutex_unlock(&dpif_offload_mutex);\n+\n+ if (OVS_UNLIKELY(!collection)) {\n+ ovs_assert(!*ctx_);\n+ return;\n+ }\n+\n+ if (!*ctx_) {\n+ /* Would be nice if we have a numa specific xzalloc(). */\n+ ctx = xzalloc(sizeof *ctx);\n+ ctx->core_id = core_id;\n+ ctx->numa_id = numa_id;\n+ *ctx_ = ctx;\n+ } else {\n+ ctx = *ctx_;\n+ old_n_nodes = ctx->n_nodes;\n+\n+ if (old_n_nodes) {\n+ memcpy(old_nodes, ctx->nodes, old_n_nodes * sizeof old_nodes[0]);\n+ }\n+\n+ /* Reset active nodes array. */\n+ memset(ctx->nodes, 0, sizeof ctx->nodes);\n+ ctx->n_nodes = 0;\n+ }\n+\n+ LIST_FOR_EACH (offload, dpif_list_node, &collection->list) {\n+\n+ ovs_assert(ctx->n_nodes < TOTAL_PROVIDERS);\n+\n+ if (!offload->class->pmd_thread_lifecycle) {\n+ continue;\n+ }\n+\n+ if (old_n_nodes) {\n+ /* If this is a reload, try to find previous callback and ctx. 
*/\n+ for (size_t i = 0; i < old_n_nodes; i++) {\n+ struct dpif_offload_pmd_ctx_node *node = &old_nodes[i];\n+\n+ if (offload == node->offload) {\n+ ctx->nodes[ctx->n_nodes].callback = node->callback;\n+ ctx->nodes[ctx->n_nodes].provider_ctx = node->provider_ctx;\n+ break;\n+ }\n+ }\n+ }\n+\n+ offload->class->pmd_thread_lifecycle(\n+ offload, false, core_id, numa_id,\n+ &ctx->nodes[ctx->n_nodes].callback,\n+ &ctx->nodes[ctx->n_nodes].provider_ctx);\n+\n+ if (ctx->nodes[ctx->n_nodes].callback) {\n+ ctx->nodes[ctx->n_nodes].offload = offload;\n+ ctx->n_nodes++;\n+ } else {\n+ memset(&ctx->nodes[ctx->n_nodes], 0,\n+ sizeof ctx->nodes[ctx->n_nodes]);\n+ }\n+ }\n+}\n+\n+uint64_t\n+dpif_offload_pmd_thread_do_work(struct dpif_offload_pmd_ctx *ctx,\n+ struct pmd_perf_stats *stats)\n+{\n+ struct cycle_timer offload_work_timer;\n+\n+ if (!ctx || !ctx->n_nodes) {\n+ return 0;\n+ }\n+\n+ cycle_timer_start(stats, &offload_work_timer);\n+\n+ for (size_t i = 0; i < ctx->n_nodes; i++) {\n+ ctx->nodes[i].callback(ctx->core_id, ctx->numa_id,\n+ ctx->nodes[i].provider_ctx);\n+ }\n+\n+ return cycle_timer_stop(stats, &offload_work_timer);\n+}\n+\n+void\n+dpif_offload_pmd_thread_exit(struct dpif_offload_pmd_ctx *ctx)\n+{\n+ if (!ctx) {\n+ return;\n+ }\n+\n+ for (size_t i = 0; i < ctx->n_nodes; i++) {\n+ struct dpif_offload_pmd_ctx_node *node = &ctx->nodes[i];\n+\n+ node->offload->class->pmd_thread_lifecycle(node->offload, true,\n+ ctx->core_id, ctx->numa_id,\n+ &node->callback,\n+ &node->provider_ctx);\n+ }\n+\n+ free(ctx);\n+}\ndiff --git a/lib/dpif-offload.h b/lib/dpif-offload.h\nindex 7fad3ebee3..0f66d8cd8e 100644\n--- a/lib/dpif-offload.h\n+++ b/lib/dpif-offload.h\n@@ -22,6 +22,7 @@\n /* Forward declarations of private structures. */\n struct dpif_offload_class;\n struct dpif_offload;\n+struct pmd_perf_stats;\n \n /* Definition of the DPIF offload implementation type.\n *\n@@ -186,4 +187,14 @@ dpif_offload_datapath_flow_op_continue(struct dpif_offload_flow_cb_data *cb,\n }\n }\n \n+/* PMD Thread helper functions. */\n+struct dpif_offload_pmd_ctx;\n+\n+void dpif_offload_pmd_thread_reload(const char *dpif_name,\n+ unsigned core_id, int numa_id,\n+ struct dpif_offload_pmd_ctx **);\n+uint64_t dpif_offload_pmd_thread_do_work(struct dpif_offload_pmd_ctx *,\n+ struct pmd_perf_stats *);\n+void dpif_offload_pmd_thread_exit(struct dpif_offload_pmd_ctx *);\n+\n #endif /* DPIF_OFFLOAD_H */\ndiff --git a/tests/pmd.at b/tests/pmd.at\nindex 8254ac3b0f..54184d8c92 100644\n--- a/tests/pmd.at\n+++ b/tests/pmd.at\n@@ -1689,3 +1689,35 @@ recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(dst=10.1.2.\n \n OVS_VSWITCHD_STOP\n AT_CLEANUP\n+\n+AT_SETUP([PMD - offload work])\n+OVS_VSWITCHD_START([], [], [], [DUMMY_NUMA],\n+ [-- set Open_vSwitch . 
other_config:hw-offload=true])\n+\n+AT_CHECK([ovs-appctl vlog/set dpif_offload_dummy:dbg])\n+AT_CHECK([ovs-vsctl add-port br0 p0 -- set Interface p0 type=dummy-pmd])\n+\n+CHECK_CPU_DISCOVERED()\n+CHECK_PMD_THREADS_CREATED()\n+\n+OVS_WAIT_UNTIL(\n+ [test $(ovs-appctl coverage/read-counter dummy_offload_do_work) -gt 0])\n+\n+AT_CHECK([ovs-appctl dpif-netdev/pmd-perf-show \\\n+ | grep -Eq 'Offload cycles: +[[0-9]]+ \\( *[[0-9.]]+ % of used cycles\\)'])\n+\n+OVS_VSWITCHD_STOP\n+\n+LOG=\"$(sed -n 's/.*\\(pmd_thread_lifecycle.*\\)/\\1/p' ovs-vswitchd.log)\"\n+CB=$(echo \"$LOG\" | sed -n '2p' | sed -n 's/.*cb=\\([[^,]]*\\).*/\\1/p')\n+CTX=$(echo \"$LOG\" | sed -n '2p' | sed -n 's/.*ctx=\\(.*\\)$/\\1/p')\n+\n+AT_CHECK([echo \"$LOG\" | sed -n '1p' | sed 's/(nil)/0x0/g'], [0], [dnl\n+pmd_thread_lifecycle; exit=false, core=0, numa=0, cb=0x0, ctx=0x0\n+])\n+AT_CHECK([echo \"$LOG\" | sed -n '2p' \\\n+ | grep -q \"exit=false, core=0, numa=0, cb=$CB, ctx=$CTX\"])\n+AT_CHECK([echo \"$LOG\" | sed -n '$p' \\\n+ | grep -q \"exit=true, core=0, numa=0, cb=$CB, ctx=$CTX\"])\n+\n+AT_CLEANUP\ndiff --git a/utilities/checkpatch_dict.txt b/utilities/checkpatch_dict.txt\nindex 13f107246b..0972cba603 100644\n--- a/utilities/checkpatch_dict.txt\n+++ b/utilities/checkpatch_dict.txt\n@@ -35,6 +35,7 @@ cpu\n cpus\n cstime\n csum\n+ctx\n cutime\n cvlan\n datapath\n@@ -45,6 +46,7 @@ decap\n decapsulation\n defrag\n defragment\n+deinitialization\n deref\n dereference\n dest\n", "prefixes": [ "ovs-dev", "1/2" ] }
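To make the provider contract from dpif-offload-provider.h above concrete: a hypothetical offload provider could implement the new pmd_thread_lifecycle hook roughly as follows. Only the hook and work-callback signatures and the reuse/exit rules come from the patch; the struct my_pmd_state, both function names, and the per-iteration work described in the comments are illustrative assumptions.

/* Hedged sketch of a provider-side pmd_thread_lifecycle implementation.
 * 'my_pmd_state' and both function names are hypothetical; the
 * signatures match the typedef and class member added by this patch. */
#include "dpif-offload-provider.h"
#include "util.h"

struct my_pmd_state {
    unsigned core_id;   /* PMD core this state belongs to. */
    int numa_id;        /* NUMA node, e.g., for node-local allocations. */
    /* Provider-specific state: completion queues, batches, etc. */
};

static void
my_pmd_thread_work_cb(unsigned core_id OVS_UNUSED, int numa_id OVS_UNUSED,
                      void *ctx_)
{
    struct my_pmd_state *state = ctx_;

    /* Called once per PMD iteration from the thread's main loop; e.g.,
     * poll for asynchronous offload completions or flush batched flow
     * operations here. */
    (void) state;
}

static void
my_pmd_thread_lifecycle(const struct dpif_offload *offload OVS_UNUSED,
                        bool exit, unsigned core_id, int numa_id,
                        dpif_offload_pmd_thread_work_cb **callback,
                        void **ctx)
{
    if (exit) {
        /* PMD thread is terminating; release what we handed out. */
        free(*ctx);
        *ctx = NULL;
        *callback = NULL;
        return;
    }

    if (!*ctx) {
        /* First call for this PMD thread; on a reload the previous
         * ctx is passed back in and can simply be reused. */
        struct my_pmd_state *state = xzalloc(sizeof *state);

        state->core_id = core_id;
        state->numa_id = numa_id;
        *ctx = state;
    }
    *callback = my_pmd_thread_work_cb;
}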
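On the consumer side, the patch wires three helpers into pmd_thread_main() in lib/dpif-netdev.c; the condensed view below is for illustration only (the real loop body and error paths are elided, and the helper names come from lib/dpif-offload.h in the diff above):

/* Condensed view of the datapath-side helper sequence added by this
 * patch. With offload disabled or no providers registered, offload_ctx
 * stays NULL and dpif_offload_pmd_thread_do_work() returns 0. */
struct dpif_offload_pmd_ctx *offload_ctx = NULL;

/* At thread (re)start: collect each provider's callback/ctx pair. */
dpif_offload_pmd_thread_reload(pmd->dp->full_name, pmd->core_id,
                               pmd->numa_id, &offload_ctx);

for (;;) {
    uint64_t offload_cycles;

    /* ... receive and process packets ... */

    /* Run provider callbacks; the returned TSC cycle count feeds the
     * new PMD_CYCLES_OFFLOAD counter shown by pmd-perf-show. */
    offload_cycles = dpif_offload_pmd_thread_do_work(offload_ctx,
                                                     &pmd->perf_stats);
    /* ... pmd_perf_end_iteration(..., offload_cycles, ...); ... */
}

/* At thread exit: let each provider free its callback/ctx state. */
dpif_offload_pmd_thread_exit(offload_ctx);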