@@ -3008,10 +3008,49 @@ dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
return NULL;
}
+/* Fetches stats and attributes of the hardware-offloaded instance of
+ * 'netdev_flow', if one exists.
+ *
+ * Returns true and fills 'stats' and 'attrs' on success.  Returns false if
+ * flow offloading is disabled, if no netdev is registered for the flow's
+ * input port, or if the netdev has no offloaded flow matching the mega
+ * ufid. */
+static bool
+dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
+                                    const struct dp_netdev_flow *netdev_flow,
+                                    struct dpif_flow_stats *stats,
+                                    struct dpif_flow_attrs *attrs)
+{
+    uint64_t act_buf[1024 / 8];
+    struct nlattr *actions;
+    struct ofpbuf wbuffer;
+    struct netdev *netdev;
+    struct match match;
+
+    int ret = 0;
+
+    if (!netdev_is_flow_api_enabled()) {
+        return false;
+    }
+
+    netdev = netdev_ports_get(netdev_flow->flow.in_port.odp_port, dp->class);
+    if (!netdev) {
+        return false;
+    }
+    /* 'wbuffer' must be initialized before netdev_flow_get() writes the
+     * dumped actions into it; an uninitialized ofpbuf is undefined
+     * behavior.  The actions themselves are not used here. */
+    ofpbuf_use_stack(&wbuffer, &act_buf, sizeof act_buf);
+    /* Taking a global 'port_mutex' to fulfill thread safety
+     * restrictions for the netdev-offload-dpdk module. */
+    ovs_mutex_lock(&dp->port_mutex);
+    ret = netdev_flow_get(netdev, &match, &actions, &netdev_flow->mega_ufid,
+                          stats, attrs, &wbuffer);
+    ovs_mutex_unlock(&dp->port_mutex);
+    netdev_close(netdev);
+    if (ret) {
+        return false;
+    }
+
+    return true;
+}
+
static void
-get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
- struct dpif_flow_stats *stats)
+get_dpif_flow_status(const struct dp_netdev *dp,
+ const struct dp_netdev_flow *netdev_flow_,
+ struct dpif_flow_stats *stats,
+ struct dpif_flow_attrs *attrs)
{
+ struct dpif_flow_stats offload_stats;
+ struct dpif_flow_attrs offload_attrs;
struct dp_netdev_flow *netdev_flow;
unsigned long long n;
long long used;
@@ -3027,6 +3066,21 @@ get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
stats->used = used;
atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
stats->tcp_flags = flags;
+
+ if (dpif_netdev_get_flow_offload_status(dp, netdev_flow,
+ &offload_stats, &offload_attrs)) {
+ stats->n_packets += offload_stats.n_packets;
+ stats->n_bytes += offload_stats.n_bytes;
+ stats->used = MAX(stats->used, offload_stats.used);
+ stats->tcp_flags |= offload_stats.tcp_flags;
+ if (attrs) {
+ attrs->offloaded = offload_attrs.offloaded;
+ attrs->dp_layer = offload_attrs.dp_layer;
+ }
+ } else if (attrs) {
+ attrs->offloaded = false;
+ attrs->dp_layer = "ovs";
+ }
}
/* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
@@ -3034,7 +3088,8 @@ get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
* 'mask_buf'. Actions will be returned without copying, by relying on RCU to
* protect them. */
static void
-dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
+dp_netdev_flow_to_dpif_flow(const struct dp_netdev *dp,
+ const struct dp_netdev_flow *netdev_flow,
struct ofpbuf *key_buf, struct ofpbuf *mask_buf,
struct dpif_flow *flow, bool terse)
{
@@ -3077,10 +3132,8 @@ dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
flow->ufid = netdev_flow->ufid;
flow->ufid_present = true;
flow->pmd_id = netdev_flow->pmd_id;
- get_dpif_flow_stats(netdev_flow, &flow->stats);
- flow->attrs.offloaded = false;
- flow->attrs.dp_layer = "ovs";
+ get_dpif_flow_status(dp, netdev_flow, &flow->stats, &flow->attrs);
}
static int
@@ -3183,8 +3236,8 @@ dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key,
get->key_len);
if (netdev_flow) {
- dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer,
- get->flow, false);
+ dp_netdev_flow_to_dpif_flow(dp, netdev_flow, get->buffer,
+ get->buffer, get->flow, false);
error = 0;
break;
} else {
@@ -3359,7 +3412,7 @@ flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
put->actions, put->actions_len);
if (stats) {
- get_dpif_flow_stats(netdev_flow, stats);
+ get_dpif_flow_status(pmd->dp, netdev_flow, stats, NULL);
}
if (put->flags & DPIF_FP_ZERO_STATS) {
/* XXX: The userspace datapath uses thread local statistics
@@ -3478,7 +3531,7 @@ flow_del_on_pmd(struct dp_netdev_pmd_thread *pmd,
del->key_len);
if (netdev_flow) {
if (stats) {
- get_dpif_flow_stats(netdev_flow, stats);
+ get_dpif_flow_status(pmd->dp, netdev_flow, stats, NULL);
}
dp_netdev_pmd_remove_flow(pmd, netdev_flow);
} else {
@@ -3612,13 +3665,13 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
= dpif_netdev_flow_dump_thread_cast(thread_);
struct dpif_netdev_flow_dump *dump = thread->dump;
struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
+ struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
+ struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
int n_flows = 0;
int i;
ovs_mutex_lock(&dump->mutex);
if (!dump->status) {
- struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
- struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);
@@ -3675,7 +3728,7 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
- dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
+ dp_netdev_flow_to_dpif_flow(dp, netdev_flow, &key, &mask, f,
dump->up.terse);
}