diff mbox series

[ovs-dev,22/41] dpif-offload: Add operate implementation to dpif-offload.

Message ID ada031dbdeae3eb0470e6ccea2b6eb0a1e318b87.1762950453.git.echaudro@redhat.com
State Changes Requested
Headers show
Series Architectural refactoring of hardware offload infrastructure | expand

Checks

Context Check Description
ovsrobot/apply-robot success apply and check: success
ovsrobot/github-robot-_Build_and_Test success github build: passed
ovsrobot/github-robot-_Build_and_Test success github build: passed

Commit Message

Eelco Chaudron Nov. 12, 2025, 3:35 p.m. UTC
Add the APIs that allow the dpif operate function to propagate flow
manipulation and query operations down to the dpif-offload layer.

Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
---
 lib/dpif-offload-provider.h | 14 +++++++
 lib/dpif-offload.c          | 84 ++++++++++++++++++++++++++++++++++++-
 lib/dpif.c                  | 20 ++++++++-
 3 files changed, 116 insertions(+), 2 deletions(-)

Comments

0-day Robot Nov. 14, 2025, 2:17 p.m. UTC | #1
Git SHA: ada031dbdeae3eb0470e6ccea2b6eb0a1e318b87
Author: Eelco Chaudron <echaudro@redhat.com>
Subject: dpif-offload: Add operate implementation to dpif-offload.

The patch adds APIs to support the dpif operate function to propagate to 
the dpif-offload layer for flow manipulation and querying.

> diff --git a/lib/dpif-offload-provider.h b/lib/dpif-offload-provider.h
> index 1519ab5ed..64bb3242d 100644
> --- a/lib/dpif-offload-provider.h
> +++ b/lib/dpif-offload-provider.h

[ ... ]

> diff --git a/lib/dpif-offload.c b/lib/dpif-offload.c
> index 62eef398b..da61a7c0e 100644
> --- a/lib/dpif-offload.c
> +++ b/lib/dpif-offload.c
> @@ -31,7 +31,7 @@
>  
>  VLOG_DEFINE_THIS_MODULE(dpif_offload);
>  
> -static struct vlog_rate_limit rl_dbg = VLOG_RATE_LIMIT_INIT(1, 5);
> +static struct vlog_rate_limit rl_dbg = VLOG_RATE_LIMIT_INIT(100, 100);

Was this rate limit change intentional in this commit? The commit message 
doesn't mention adjusting logging rates, and this seems like it might 
belong in a separate change or should be explained.

> @@ -1116,6 +1116,88 @@ dpif_offload_get_netdev_by_port_id(struct dpif *dpif,
>      return netdev;
>  }
>  
> +/* This function tries to offload the operations to the dpif-offload
> + * providers. It will return the number of operations not handled, whose
> + * pointers are re-arranged and available in **ops. */
> +size_t
> +dpif_offload_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
> +                     enum dpif_offload_type offload_type)
> +{

[ ... ]

> +    for (size_t i = 0; i < n_ops; i++) {
> +        struct dpif_op *op = ops[i];
> +
> +        if (offload_type == DPIF_OFFLOAD_ALWAYS) {
> +          /* For DPIF_OFFLOAD_ALWAYS, we should keep the error values,
> +           * and mark the unprocessed ones as EOPNOTSUPP. This way, they
> +           * will not be processed by the dpif layer. */

The comment block above is mis-indented: it sits two columns shallower
than the 4-space-indented code it describes (the following `if` body),
which does not match the file's usual 4-space indentation style. Please
re-align the comment with the statement it precedes.

> +            if (op->error < 0) {
> +                op->error = EOPNOTSUPP;
> +            }
> +            continue;
> +        }
> +
> +        /* For the other offload types, operations that were not handled or
> +         * failed to offload should be processed by the dpif layer. */
> +        if (op->error != 0 && op->error != EEXIST) {
> +            op->error = 0;
> +            ops[n_ops_left++] = op;
> +        }
> +    }

> diff --git a/lib/dpif.c b/lib/dpif.c
> index 9efc38082..e8bbd65b7 100644
> --- a/lib/dpif.c
> +++ b/lib/dpif.c
> @@ -1401,7 +1401,25 @@ dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
>               * handle itself, without help. */
>              size_t i;
>  
> -            dpif->dpif_class->operate(dpif, ops, chunk, offload_type);
> +            /* See if we can handle some flow add/del/get through the
> +             * dpif-offload provider. */
> +            if (offload_type != DPIF_OFFLOAD_NEVER
> +                && dpif_offload_is_offload_enabled()) {
> +                struct dpif_op **ops_copy;
> +                size_t ops_left;
> +
> +                ops_copy = xmemdup(ops, sizeof(*ops_copy) * chunk);
> +
> +                ops_left = dpif_offload_operate(dpif, ops_copy, chunk,
> +                                                offload_type);
> +                if (ops_left) {
> +                    dpif->dpif_class->operate(dpif, ops_copy, ops_left,
> +                                              offload_type);
> +                }
> +                free(ops_copy);

Does this code leak the ops_copy array when dpif_offload_operate fails? 
The function doesn't appear to handle error returns from 
dpif_offload_operate itself, only the individual operation errors. What 
happens if dpif_offload_operate encounters an internal error and can't 
process the operations?

Also, could this create issues with original operation results? The code 
operates on ops_copy but the results from the copied operations aren't 
being copied back to the original ops array.

> +            } else {
> +                dpif->dpif_class->operate(dpif, ops, chunk, offload_type);
> +            }
diff mbox series

Patch

diff --git a/lib/dpif-offload-provider.h b/lib/dpif-offload-provider.h
index 1519ab5ed..64bb3242d 100644
--- a/lib/dpif-offload-provider.h
+++ b/lib/dpif-offload-provider.h
@@ -199,6 +199,18 @@  struct dpif_offload_class {
     void (*flow_dump_thread_destroy)(
         struct dpif_offload_flow_dump_thread *);
 
+    /* Executes each of the 'n_ops' operations in 'ops' in order if their
+     * 'error' field is negative, placing each operation's results in the
+     * 'output' members documented in comments and the 'error' member of each
+     * dpif_op. Operations with a non-negative 'error' value have already been
+     * processed by a higher priority offload provider.
+     *
+     * Note that only the DPIF_OP_FLOW_PUT/DEL/GET operations should be
+     * handled, and this is only needed for the DPIF_OFFLOAD_IMPL_HW_ONLY type
+     * of offload providers. */
+    void (*operate)(struct dpif *, const struct dpif_offload *,
+                    struct dpif_op **, size_t n_ops);
+
     /* Returns the number of flows offloaded by the offload provider. */
     uint64_t (*flow_get_n_offloaded)(const struct dpif_offload *);
 
@@ -320,6 +332,8 @@  int dpif_offload_flow_dump_next(struct dpif_flow_dump_thread *,
 void dpif_offload_flow_dump_thread_create(struct dpif_flow_dump_thread *,
                                           struct dpif_flow_dump *);
 void dpif_offload_flow_dump_thread_destroy(struct dpif_flow_dump_thread *);
+size_t dpif_offload_operate(struct dpif *, struct dpif_op **, size_t n_ops,
+                            enum dpif_offload_type offload_type);
 
 static inline void dpif_offload_assert_class(
     const struct dpif_offload *dpif_offload,
diff --git a/lib/dpif-offload.c b/lib/dpif-offload.c
index 62eef398b..da61a7c0e 100644
--- a/lib/dpif-offload.c
+++ b/lib/dpif-offload.c
@@ -31,7 +31,7 @@ 
 
 VLOG_DEFINE_THIS_MODULE(dpif_offload);
 
-static struct vlog_rate_limit rl_dbg = VLOG_RATE_LIMIT_INIT(1, 5);
+static struct vlog_rate_limit rl_dbg = VLOG_RATE_LIMIT_INIT(100, 100);
 
 static struct ovs_mutex dpif_offload_mutex = OVS_MUTEX_INITIALIZER;
 static struct shash dpif_offload_classes \
@@ -1116,6 +1116,88 @@  dpif_offload_get_netdev_by_port_id(struct dpif *dpif,
     return netdev;
 }
 
+/* This function tries to offload the operations to the dpif-offload
+ * providers. It will return the number of operations not handled, whose
+ * pointers are re-arranged and available in **ops. */
+size_t
+dpif_offload_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
+                     enum dpif_offload_type offload_type)
+{
+    struct dp_offload *dp_offload = dpif_offload_get_dp_offload(dpif);
+    const struct dpif_offload *offload;
+    size_t n_ops_left = 0;
+
+    if (!dp_offload || !dpif_offload_is_offload_enabled()) {
+        return n_ops;
+    }
+
+    for (size_t i = 0; i < n_ops; i++) {
+        ops[i]->error = -1;
+    }
+
+    LIST_FOR_EACH (offload, dpif_list_node, &dp_offload->offload_providers) {
+        if (offload->class->operate
+            && offload->class->impl_type == DPIF_OFFLOAD_IMPL_HW_ONLY) {
+
+            offload->class->operate(dpif, offload, ops, n_ops);
+
+            for (size_t i = 0; i < n_ops; i++) {
+                struct dpif_op *op = ops[i];
+
+                if (op->error == EOPNOTSUPP) {
+                    /* Not supported by this offload provider, try next one. */
+                    op->error = -1;
+                } else {
+                    VLOG_DBG("Tried offloading %d to dpif-offload provider "
+                             "%s, error %d",
+                             op->type,
+                             dpif_offload_name(offload), op->error);
+
+                    switch (op->type) {
+                    case DPIF_OP_FLOW_PUT:
+                        log_flow_put_message(dpif, &this_module,
+                                             &op->flow_put, 0);
+                        break;
+                    case DPIF_OP_FLOW_DEL:
+                        log_flow_del_message(dpif, &this_module,
+                                             &op->flow_del, 0);
+                        break;
+                    case DPIF_OP_FLOW_GET:
+                        log_flow_get_message(dpif, &this_module,
+                                             &op->flow_get, 0);
+                        break;
+                    case DPIF_OP_EXECUTE:
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+    for (size_t i = 0; i < n_ops; i++) {
+        struct dpif_op *op = ops[i];
+
+        if (offload_type == DPIF_OFFLOAD_ALWAYS) {
+          /* For DPIF_OFFLOAD_ALWAYS, we should keep the error values,
+           * and mark the unprocessed ones as EOPNOTSUPP. This way, they
+           * will not be processed by the dpif layer. */
+            if (op->error < 0) {
+                op->error = EOPNOTSUPP;
+            }
+            continue;
+        }
+
+        /* For the other offload types, operations that were not handled or
+         * failed to offload should be processed by the dpif layer. */
+        if (op->error != 0 && op->error != EEXIST) {
+            op->error = 0;
+            ops[n_ops_left++] = op;
+        }
+    }
+
+    return n_ops_left;
+}
+
 
 int
 dpif_offload_netdev_flush_flows(struct netdev *netdev)
diff --git a/lib/dpif.c b/lib/dpif.c
index 9efc38082..e8bbd65b7 100644
--- a/lib/dpif.c
+++ b/lib/dpif.c
@@ -1401,7 +1401,25 @@  dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
              * handle itself, without help. */
             size_t i;
 
-            dpif->dpif_class->operate(dpif, ops, chunk, offload_type);
+            /* See if we can handle some flow add/del/get through the
+             * dpif-offload provider. */
+            if (offload_type != DPIF_OFFLOAD_NEVER
+                && dpif_offload_is_offload_enabled()) {
+                struct dpif_op **ops_copy;
+                size_t ops_left;
+
+                ops_copy = xmemdup(ops, sizeof(*ops_copy) * chunk);
+
+                ops_left = dpif_offload_operate(dpif, ops_copy, chunk,
+                                                offload_type);
+                if (ops_left) {
+                    dpif->dpif_class->operate(dpif, ops_copy, ops_left,
+                                              offload_type);
+                }
+                free(ops_copy);
+            } else {
+                dpif->dpif_class->operate(dpif, ops, chunk, offload_type);
+            }
 
             for (i = 0; i < chunk; i++) {
                 struct dpif_op *op = ops[i];