@@ -435,12 +435,14 @@ struct dp_flow_offload_item {
 struct dp_flow_offload {
     struct ovs_mutex mutex;
     struct ovs_list list;
+    uint64_t enqueued_item;
     pthread_cond_t cond;
 };
 
 static struct dp_flow_offload dp_flow_offload = {
     .mutex = OVS_MUTEX_INITIALIZER,
     .list = OVS_LIST_INITIALIZER(&dp_flow_offload.list),
+    .enqueued_item = 0,
 };
 
 static struct ovsthread_once offload_thread_once
@@ -2627,6 +2629,7 @@ dp_netdev_append_flow_offload(struct dp_flow_offload_item *offload)
 {
     ovs_mutex_lock(&dp_flow_offload.mutex);
     ovs_list_push_back(&dp_flow_offload.list, &offload->node);
+    dp_flow_offload.enqueued_item++;
     xpthread_cond_signal(&dp_flow_offload.cond);
     ovs_mutex_unlock(&dp_flow_offload.mutex);
 }
@@ -2743,6 +2746,7 @@ dp_netdev_flow_offload_main(void *data OVS_UNUSED)
             ovsrcu_quiesce_end();
         }
         list = ovs_list_pop_front(&dp_flow_offload.list);
+        dp_flow_offload.enqueued_item--;
         offload = CONTAINER_OF(list, struct dp_flow_offload_item, node);
         ovs_mutex_unlock(&dp_flow_offload.mutex);
 
@@ -4197,6 +4201,55 @@ dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
     }
 }
 
+static int
+dpif_netdev_offload_stats_get(struct dpif *dpif,
+                              struct netdev_custom_stats *stats)
+{
+    enum {
+        DP_NETDEV_HW_OFFLOADS_STATS_ENQUEUED,
+        DP_NETDEV_HW_OFFLOADS_STATS_INSERTED,
+    };
+    const char *names[] = {
+        [DP_NETDEV_HW_OFFLOADS_STATS_ENQUEUED] = "Enqueued offloads",
+        [DP_NETDEV_HW_OFFLOADS_STATS_INSERTED] = "Inserted offloads",
+    };
+    struct dp_netdev *dp = get_dp_netdev(dpif);
+    struct dp_netdev_port *port;
+    uint64_t nb_offloads;
+    size_t i;
+
+    if (!netdev_is_flow_api_enabled()) {
+        return EINVAL;
+    }
+
+    stats->size = ARRAY_SIZE(names);
+    stats->counters = xcalloc(stats->size, sizeof *stats->counters);
+
+    nb_offloads = 0;
+
+    ovs_mutex_lock(&dp->port_mutex);
+    HMAP_FOR_EACH (port, node, &dp->ports) {
+        uint64_t port_nb_offloads = 0;
+
+        /* Do not abort on read error from a port, just report 0. */
+        if (!netdev_hw_offload_stats_get(port->netdev, &port_nb_offloads)) {
+            nb_offloads += port_nb_offloads;
+        }
+    }
+    ovs_mutex_unlock(&dp->port_mutex);
+
+    stats->counters[DP_NETDEV_HW_OFFLOADS_STATS_ENQUEUED].value =
+        dp_flow_offload.enqueued_item;
+    stats->counters[DP_NETDEV_HW_OFFLOADS_STATS_INSERTED].value = nb_offloads;
+
+    for (i = 0; i < ARRAY_SIZE(names); i++) {
+        snprintf(stats->counters[i].name, sizeof(stats->counters[i].name),
+                 "%s", names[i]);
+    }
+
+    return 0;
+}
+
 /* Enable or Disable PMD auto load balancing. */
 static void
 set_pmd_auto_lb(struct dp_netdev *dp)
@@ -8415,7 +8468,7 @@ const struct dpif_class dpif_netdev_class = {
     dpif_netdev_flow_dump_thread_destroy,
     dpif_netdev_flow_dump_next,
     dpif_netdev_operate,
-    NULL,                       /* offload_stats_get */
+    dpif_netdev_offload_stats_get,
     NULL,                       /* recv_set */
     NULL,                       /* handlers_set */
     dpif_netdev_set_config,
In the netdev datapath, keep track of the enqueued offloads between the
PMDs and the offload thread. Additionally, query each netdev for its
hardware offload counters.

Signed-off-by: Gaetan Rivet <grive@u256.net>
---
 lib/dpif-netdev.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 54 insertions(+), 1 deletion(-)
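
Usage note (illustrative, not part of the patch): the counters are
reported through the standard struct netdev_custom_stats, so a caller
can print them generically without knowing their names in advance. The
sketch below assumes dpif_offload_stats_get() is the generic dpif
wrapper around dpif_class->offload_stats_get added elsewhere in this
series; with the companion dpctl change, the same data should also be
reachable via 'ovs-appctl dpctl/offload-stats-show'.

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #include "dpif.h"
    #include "netdev.h"

    /* Fetch and print the datapath offload counters ("Enqueued
     * offloads" and "Inserted offloads"), then free the counters
     * array that the callback allocated with xcalloc(). */
    static void
    print_offload_stats(struct dpif *dpif)
    {
        struct netdev_custom_stats stats;
        size_t i;

        memset(&stats, 0, sizeof stats);
        if (dpif_offload_stats_get(dpif, &stats)) {
            return;  /* Offloads disabled or unsupported. */
        }
        for (i = 0; i < stats.size; i++) {
            printf("%s: %" PRIu64 "\n",
                   stats.counters[i].name, stats.counters[i].value);
        }
        free(stats.counters);
    }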