@@ -48,6 +48,7 @@ enum OVS_PACKED_ENUM dp_packet_source {
};
#define DP_PACKET_CONTEXT_SIZE 64
+#define INVALID_FLOW_MARK 0
/* Bit masks for the 'offloads' member of the 'dp_packet' structure. */
enum OVS_PACKED_ENUM dp_packet_offload_mask {
@@ -288,8 +288,32 @@ struct dpif_offload_class {
int (*netdev_hw_miss_packet_postprocess)(const struct dpif_offload *,
struct netdev *,
struct dp_packet *);
-};
+ /* Add or modify the specified flow directly in the offload datapath.
+ * The actual implementation may choose to handle the offload
+ * asynchronously by returning EINPROGRESS and invoking the supplied
+ * 'callback' once completed. For successful synchronous handling, the
+ * callback must not be called, and 0 should be returned. If this call is
+ * not successful, a positive errno value should be returned. */
+ int (*netdev_flow_put)(const struct dpif_offload *, struct netdev *,
+ struct dpif_offload_flow_put *,
+ uint32_t *flow_mark);
+
+ /* Delete the specified flow directly from the offloaded datapath. See the
+ * above 'netdev_flow_put' for implementation details. */
+ int (*netdev_flow_del)(const struct dpif_offload *, struct netdev *,
+ struct dpif_offload_flow_del *,
+ uint32_t *flow_mark);
+
+    /* Get offload statistics based on the flow's 'ufid'. Note that this API
+ * does NOT support asynchronous handling. Returns 'true' if the flow was
+ * offloaded, 'false' if not. In the latter case, 'stats' and 'attrs'
+ * are not valid. */
+ bool (*netdev_flow_stats)(const struct dpif_offload *, struct netdev *,
+ const ovs_u128 *ufid,
+ struct dpif_flow_stats *stats,
+ struct dpif_flow_attrs *attrs);
+};
extern struct dpif_offload_class dpif_offload_dummy_class;
extern struct dpif_offload_class dpif_offload_dummy_x_class;
@@ -1186,19 +1186,14 @@ dpif_offload_offload_get_netdev_by_port_id(struct dpif_offload *offload,
return offload->class->get_netdev(offload, port_no);
}
-struct netdev *
-dpif_offload_get_netdev_by_port_id(struct dpif *dpif,
- struct dpif_offload **offload,
- odp_port_t port_no)
+static struct netdev *
+dpif_offload_get_netdev_by_port_id_(struct dp_offload *dp_offload,
+ struct dpif_offload **offload,
+ odp_port_t port_no)
{
- struct dp_offload *dp_offload = dpif_offload_get_dp_offload(dpif);
struct dpif_offload *tmp_offload;
struct netdev *netdev = NULL;
- if (!dp_offload || !dpif_offload_is_offload_enabled()) {
- return NULL;
- }
-
LIST_FOR_EACH (tmp_offload, dpif_list_node,
&dp_offload->offload_providers) {
netdev = tmp_offload->class->get_netdev(tmp_offload, port_no);
@@ -1209,10 +1204,23 @@ dpif_offload_get_netdev_by_port_id(struct dpif *dpif,
break;
}
}
-
return netdev;
}
+struct netdev *
+dpif_offload_get_netdev_by_port_id(struct dpif *dpif,
+ struct dpif_offload **offload,
+ odp_port_t port_no)
+{
+ struct dp_offload *dp_offload = dpif_offload_get_dp_offload(dpif);
+
+ if (!dp_offload || !dpif_offload_is_offload_enabled()) {
+ return NULL;
+ }
+
+ return dpif_offload_get_netdev_by_port_id_(dp_offload, offload, port_no);
+}
+
bool
dpif_offload_netdevs_out_of_resources(struct dpif *dpif)
{
@@ -1344,6 +1352,109 @@ dpif_offload_netdev_flush_flows(struct netdev *netdev)
return EOPNOTSUPP;
}
+int
+dpif_offload_datapath_flow_put(const char *dpif_name,
+ struct dpif_offload_flow_put *put,
+ uint32_t *flow_mark)
+{
+ struct dpif_offload *offload;
+ struct dp_offload *dp_offload;
+ struct netdev *netdev;
+
+ ovs_mutex_lock(&dpif_offload_mutex);
+ /* XXX: Implement a faster solution than the current dpif_name lookup. */
+ dp_offload = shash_find_data(&dpif_offload_providers, dpif_name);
+ ovs_mutex_unlock(&dpif_offload_mutex);
+
+ if (OVS_UNLIKELY(!dp_offload)) {
+ if (flow_mark) {
+ *flow_mark = INVALID_FLOW_MARK;
+ }
+ return 0;
+ }
+
+ netdev = dpif_offload_get_netdev_by_port_id_(dp_offload, &offload,
+ put->in_port);
+
+ if (OVS_LIKELY(netdev && offload->class->netdev_flow_put)) {
+ return offload->class->netdev_flow_put(offload, netdev, put,
+ flow_mark);
+ }
+
+ if (flow_mark) {
+ *flow_mark = INVALID_FLOW_MARK;
+ }
+ return 0;
+}
+
+int
+dpif_offload_datapath_flow_del(const char *dpif_name,
+ struct dpif_offload_flow_del *del,
+ uint32_t *flow_mark)
+{
+ struct dpif_offload *offload;
+ struct dp_offload *dp_offload;
+ struct netdev *netdev;
+
+ ovs_mutex_lock(&dpif_offload_mutex);
+ /* XXX: Implement a faster solution than the current dpif_name lookup. */
+ dp_offload = shash_find_data(&dpif_offload_providers, dpif_name);
+ ovs_mutex_unlock(&dpif_offload_mutex);
+
+ if (OVS_UNLIKELY(!dp_offload)) {
+ if (flow_mark) {
+ *flow_mark = INVALID_FLOW_MARK;
+ }
+ return 0;
+ }
+
+ netdev = dpif_offload_get_netdev_by_port_id_(dp_offload, &offload,
+ del->in_port);
+
+ if (OVS_LIKELY(netdev && offload->class->netdev_flow_del)) {
+ return offload->class->netdev_flow_del(offload, netdev, del,
+ flow_mark);
+ }
+
+ if (flow_mark) {
+ *flow_mark = INVALID_FLOW_MARK;
+ }
+ return 0;
+}
+
+bool
+dpif_offload_datapath_flow_stats(const char *dpif_name, odp_port_t in_port,
+ const ovs_u128 *ufid,
+ struct dpif_flow_stats *stats,
+ struct dpif_flow_attrs *attrs)
+{
+ struct dpif_offload *offload;
+ struct dp_offload *dp_offload;
+ struct netdev *netdev;
+
+ if (!dpif_offload_is_offload_enabled()) {
+ return false;
+ }
+
+ ovs_mutex_lock(&dpif_offload_mutex);
+ /* XXX: Implement a faster solution than the current dpif_name lookup. */
+ dp_offload = shash_find_data(&dpif_offload_providers, dpif_name);
+ ovs_mutex_unlock(&dpif_offload_mutex);
+
+ if (OVS_UNLIKELY(!dp_offload)) {
+ return false;
+ }
+
+ netdev = dpif_offload_get_netdev_by_port_id_(dp_offload, &offload,
+ in_port);
+
+ if (OVS_LIKELY(netdev && offload->class->netdev_flow_stats)) {
+ return offload->class->netdev_flow_stats(offload, netdev, ufid, stats,
+ attrs);
+ }
+ return false;
+}
+
int
dpif_offload_netdev_hw_miss_packet_postprocess(struct netdev *netdev,
struct dp_packet *packet)
@@ -142,4 +142,58 @@ int dpif_offload_netdev_flush_flows(struct netdev *);
int dpif_offload_netdev_hw_miss_packet_postprocess(struct netdev *,
struct dp_packet *);
+
+/* Flow modification callback definitions. */
+typedef void dpif_offload_flow_op_cb(void *aux_dp, void *aux_flow,
+ struct dpif_flow_stats *stats,
+ uint32_t flow_mark, int error);
+
+/* Supporting structures for flow modification functions. */
+struct dpif_offload_flow_cb_data {
+ dpif_offload_flow_op_cb *callback;
+ void *callback_aux_dp;
+ void *callback_aux_flow;
+};
+
+struct dpif_offload_flow_put {
+ bool modify;
+ odp_port_t in_port;
+ odp_port_t orig_in_port; /* Originating in_port for tunneled packets. */
+ const ovs_u128 *ufid;
+ struct match *match;
+ const struct nlattr *actions;
+ size_t actions_len;
+ struct dpif_flow_stats *stats;
+ struct dpif_offload_flow_cb_data cb_data;
+};
+
+struct dpif_offload_flow_del {
+ odp_port_t in_port;
+ const ovs_u128 *ufid;
+ struct dpif_flow_stats *stats;
+ struct dpif_offload_flow_cb_data cb_data;
+};
+
+/* Flow modification functions, which can be used in the fast path. */
+int dpif_offload_datapath_flow_put(const char *dpif_name,
+ struct dpif_offload_flow_put *,
+ uint32_t *flow_mark);
+int dpif_offload_datapath_flow_del(const char *dpif_name,
+ struct dpif_offload_flow_del *,
+ uint32_t *flow_mark);
+bool dpif_offload_datapath_flow_stats(const char *dpif_name,
+ odp_port_t in_port, const ovs_u128 *ufid,
+ struct dpif_flow_stats *,
+ struct dpif_flow_attrs *);
+
+static inline void dpif_offload_datapath_flow_op_continue(
+ struct dpif_offload_flow_cb_data *cb, struct dpif_flow_stats *stats,
+ uint32_t flow_mark, int error)
+{
+ if (cb && cb->callback) {
+ cb->callback(cb->callback_aux_dp, cb->callback_aux_flow,
+ stats, flow_mark, error);
+ }
+}
+
#endif /* DPIF_OFFLOAD_H */
This patch introduces new APIs in dpif-offload to allow userspace datapaths to directly manage flows in the offloaded datapath, providing efficient fast-path operations. Signed-off-by: Eelco Chaudron <echaudro@redhat.com> --- lib/dp-packet.h | 1 + lib/dpif-offload-provider.h | 26 ++++++- lib/dpif-offload.c | 131 +++++++++++++++++++++++++++++++++--- lib/dpif-offload.h | 54 +++++++++++++++ 4 files changed, 201 insertions(+), 11 deletions(-)