@@ -1173,3 +1173,26 @@ conntrack_dump_done(struct conntrack_dump *dump OVS_UNUSED)
{
return 0;
}
+
+int
+conntrack_flush(struct conntrack *ct, const uint16_t *zone) /* Flushes conntrack entries; NULL 'zone' matches every zone. Returns 0. */
+{
+ unsigned i;
+
+ for (i = 0; i < CONNTRACK_BUCKETS; i++) { /* Walk every hash bucket. */
+ struct conn *conn, *next;
+
+ ct_lock_lock(&ct->buckets[i].lock); /* One bucket locked at a time; no global lock held. */
+ HMAP_FOR_EACH_SAFE(conn, next, node, &ct->buckets[i].connections) { /* SAFE variant: removal during iteration. */
+ if (!zone || *zone == conn->key.zone) { /* NULL zone means "flush all zones". */
+ ovs_list_remove(&conn->exp_node); /* Unlink from expiration list — presumably covered by the same bucket lock; verify in conntrack.c. */
+ hmap_remove(&ct->buckets[i].connections, &conn->node);
+ atomic_count_dec(&ct->n_conn); /* Keep the global connection count in sync. */
+ delete_conn(conn); /* Frees the connection; safe here thanks to HMAP_FOR_EACH_SAFE. */
+ }
+ }
+ ct_lock_unlock(&ct->buckets[i].lock);
+ }
+
+ return 0;
+}
@@ -83,6 +83,8 @@ int conntrack_dump_start(struct conntrack *, struct conntrack_dump *,
const uint16_t *pzone);
int conntrack_dump_next(struct conntrack_dump *, struct ct_dpif_entry *);
int conntrack_dump_done(struct conntrack_dump *);
+
+int conntrack_flush(struct conntrack *, const uint16_t *zone);
/* 'struct ct_lock' is a wrapper for an adaptive mutex. It's useful to try
* different types of locks (e.g. spinlocks) */
@@ -4309,6 +4309,14 @@ dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED,
return err;
}
+static int
+dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone) /* dpif_class ct_flush hook for the userspace datapath. */
+{
+ struct dp_netdev *dp = get_dp_netdev(dpif);
+
+ return conntrack_flush(&dp->conntrack, zone); /* Thin wrapper: NULL 'zone' flushes all zones. */
+}
+
const struct dpif_class dpif_netdev_class = {
"netdev",
dpif_netdev_init,
@@ -4352,7 +4360,7 @@ const struct dpif_class dpif_netdev_class = {
dpif_netdev_ct_dump_start,
dpif_netdev_ct_dump_next,
dpif_netdev_ct_dump_done,
- NULL, /* ct_flush */
+ dpif_netdev_ct_flush,
};
static void