@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -59,6 +59,9 @@ static struct hmap *const all_bonds OVS_GUARDED_BY(rwlock) = &all_bonds__;
#define BOND_MASK 0xff
#define BOND_BUCKETS (BOND_MASK + 1)
+/* Priority for internal rules created to handle recirculation */
+#define RECIRC_RULE_PRIORITY 20
+
/* A hash bucket for mapping a flow to a slave.
* "struct bond" has an array of BOND_BUCKETS of these. */
struct bond_entry {
@@ -1131,7 +1134,7 @@ bond_rebalance(struct bond *bond)
}
bond->next_rebalance = time_msec() + bond->rebalance_interval;
- use_recirc = ofproto_dpif_get_support(bond->ofproto)->odp.recirc &&
+ use_recirc = bond->ofproto->backer->support.odp.recirc &&
bond_may_recirc(bond, NULL, NULL);
if (use_recirc) {
@@ -285,7 +285,7 @@ recirc_alloc_id(struct ofproto_dpif *ofproto)
tunnel.ipv6_dst = in6addr_any;
struct frozen_state state = {
.table_id = TBL_INTERNAL,
- .ofproto_uuid = *ofproto_dpif_get_uuid(ofproto),
+ .ofproto_uuid = ofproto->uuid,
.metadata = { .tunnel = &tunnel, .in_port = OFPP_NONE },
};
return recirc_alloc_id__(&state, frozen_state_hash(&state))->id;
@@ -338,9 +338,8 @@ recirc_free_ofproto(struct ofproto_dpif *ofproto, const char *ofproto_name)
{
struct recirc_id_node *n;
- const struct uuid *ofproto_uuid = ofproto_dpif_get_uuid(ofproto);
CMAP_FOR_EACH (n, metadata_node, &metadata_map) {
- if (uuid_equals(&n->state.ofproto_uuid, ofproto_uuid)) {
+ if (uuid_equals(&n->state.ofproto_uuid, &ofproto->uuid)) {
VLOG_ERR("recirc_id %"PRIu32
" left allocated when ofproto (%s)"
" is destructed", n->id, ofproto_name);
@@ -541,7 +541,7 @@ udpif_start_threads(struct udpif *udpif, size_t n_handlers,
"handler", udpif_upcall_handler, handler);
}
- enable_ufid = ofproto_dpif_get_enable_ufid(udpif->backer);
+ enable_ufid = udpif->backer->support.ufid;
atomic_init(&udpif->enable_ufid, enable_ufid);
dpif_enable_upcall(udpif->dpif);
@@ -567,7 +567,7 @@ udpif_start_threads(struct udpif *udpif, size_t n_handlers,
static void
udpif_pause_revalidators(struct udpif *udpif)
{
- if (ofproto_dpif_backer_enabled(udpif->backer)) {
+ if (udpif->backer->recv_set_enable) {
latch_set(&udpif->pause_latch);
ovs_barrier_block(&udpif->pause_barrier);
}
@@ -578,7 +578,7 @@ udpif_pause_revalidators(struct udpif *udpif)
static void
udpif_resume_revalidators(struct udpif *udpif)
{
- if (ofproto_dpif_backer_enabled(udpif->backer)) {
+ if (udpif->backer->recv_set_enable) {
latch_poll(&udpif->pause_latch);
ovs_barrier_block(&udpif->pause_barrier);
}
@@ -700,7 +700,7 @@ udpif_use_ufid(struct udpif *udpif)
bool enable;
atomic_read_relaxed(&enable_ufid, &enable);
- return enable && ofproto_dpif_get_enable_ufid(udpif->backer);
+ return enable && udpif->backer->support.ufid;
}
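
For context, a hedged sketch of the direct feature-check pattern that replaces the removed accessors (the helper below is hypothetical and assumes the usual VLOG module setup; the 'support' fields are the ones used elsewhere in this patch):

static void
log_backer_support(const struct ofproto_dpif *ofproto)
{
    const struct dpif_backer_support *support = &ofproto->backer->support;

    /* Feature flags are now read straight off the backer's support struct. */
    VLOG_INFO("ufid: %s, recirculation: %s",
              support->ufid ? "yes" : "no",
              support->odp.recirc ? "yes" : "no");
}
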
@@ -1507,7 +1507,7 @@ ukey_create_from_upcall(struct upcall *upcall, struct flow_wildcards *wc)
.mask = wc ? &wc->masks : NULL,
};
- odp_parms.support = ofproto_dpif_get_support(upcall->ofproto)->odp;
+ odp_parms.support = upcall->ofproto->backer->support.odp;
if (upcall->key_len) {
ofpbuf_use_const(&keybuf, upcall->key, upcall->key_len);
} else {
@@ -139,8 +139,8 @@ xlate_push_stats_entry(struct xc_entry *entry,
break;
case XC_FIN_TIMEOUT:
if (stats->tcp_flags & (TCP_FIN | TCP_RST)) {
- rule_dpif_reduce_timeouts(entry->fin.rule, entry->fin.idle,
- entry->fin.hard);
+ ofproto_rule_reduce_timeouts(&entry->fin.rule->up, entry->fin.idle,
+ entry->fin.hard);
}
break;
case XC_GROUP:
@@ -208,7 +208,7 @@ xlate_cache_clear_entry(struct xc_entry *entry)
case XC_TABLE:
break;
case XC_RULE:
- rule_dpif_unref(entry->rule);
+ ofproto_rule_unref(&entry->rule->up);
break;
case XC_BOND:
free(entry->bond.flow);
@@ -234,7 +234,7 @@ xlate_cache_clear_entry(struct xc_entry *entry)
* has already released it's reference above. */
break;
case XC_GROUP:
- group_dpif_unref(entry->group.group);
+ ofproto_group_unref(&entry->group.group->up);
break;
case XC_TNL_NEIGH:
break;
@@ -1246,7 +1246,7 @@ xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
struct xbridge *xbridge;
HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
- if (uuid_equals(ofproto_dpif_get_uuid(xbridge->ofproto), uuid)) {
+ if (uuid_equals(&xbridge->ofproto->uuid, uuid)) {
return xbridge;
}
}
@@ -1485,10 +1485,7 @@ group_first_live_bucket(const struct xlate_ctx *ctx,
const struct group_dpif *group, int depth)
{
struct ofputil_bucket *bucket;
- const struct ovs_list *buckets;
-
- buckets = group_dpif_get_buckets(group, NULL);
- LIST_FOR_EACH (bucket, list_node, buckets) {
+ LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
if (bucket_is_alive(ctx, bucket, depth)) {
return bucket;
}
@@ -1506,10 +1503,7 @@ group_best_live_bucket(const struct xlate_ctx *ctx,
uint32_t best_score = 0;
struct ofputil_bucket *bucket;
- const struct ovs_list *buckets;
-
- buckets = group_dpif_get_buckets(group, NULL);
- LIST_FOR_EACH (bucket, list_node, buckets) {
+ LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
if (bucket_is_alive(ctx, bucket, 0)) {
uint32_t score =
(hash_int(bucket->bucket_id, basis) & 0xffff) * bucket->weight;
@@ -2968,7 +2962,8 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
flow->in_port.ofp_port = peer->ofp_port;
flow->metadata = htonll(0);
memset(&flow->tunnel, 0, sizeof flow->tunnel);
- flow->tunnel.metadata.tab = ofproto_dpif_get_tun_tab(peer->xbridge->ofproto);
+ flow->tunnel.metadata.tab = ofproto_get_tun_tab(
+ &peer->xbridge->ofproto->up);
ctx->wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
memset(flow->regs, 0, sizeof flow->regs);
flow->actset_output = OFPP_UNSET;
@@ -3238,8 +3233,8 @@ xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule, bool deepens)
ctx->indentation++;
ctx->depth += deepens;
ctx->rule = rule;
- ctx->rule_cookie = rule_dpif_get_flow_cookie(rule);
- actions = rule_dpif_get_actions(rule);
+ ctx->rule_cookie = rule->up.flow_cookie;
+ actions = rule_get_actions(&rule->up);
do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
ctx->rule_cookie = old_cookie;
ctx->rule = old_rule;
@@ -3306,7 +3301,7 @@ xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
entry->rule = rule;
- rule_dpif_ref(rule);
+ ofproto_rule_ref(&rule->up);
}
xlate_recursively(ctx, rule, table_id <= old_table_id);
}
@@ -3388,10 +3383,7 @@ static void
xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
struct ofputil_bucket *bucket;
- const struct ovs_list *buckets;
-
- buckets = group_dpif_get_buckets(group, NULL);
- LIST_FOR_EACH (bucket, list_node, buckets) {
+ LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
xlate_group_bucket(ctx, bucket);
}
xlate_group_stats(ctx, group, NULL);
@@ -3407,7 +3399,7 @@ xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
xlate_group_bucket(ctx, bucket);
xlate_group_stats(ctx, group, bucket);
} else if (ctx->xin->xcache) {
- group_dpif_unref(group);
+ ofproto_group_unref(&group->up);
}
}
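
A minimal sketch (not taken from the patch) of the direct bucket iteration these group hunks switch to; 'count_live_buckets' is a hypothetical helper, and 'ctx'/'group' are assumed to be a translation context and a referenced group, as in the surrounding functions:

static size_t
count_live_buckets(const struct xlate_ctx *ctx, const struct group_dpif *group)
{
    struct ofputil_bucket *bucket;
    size_t n = 0;

    /* Buckets are read straight off the embedded ofgroup now that
     * group_dpif_get_buckets() is gone. */
    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
        if (bucket_is_alive(ctx, bucket, 0)) {
            n++;
        }
    }
    return n;
}

When a group reference is held on behalf of an xlate cache, it is likewise dropped through the generic helper, ofproto_group_unref(&group->up), as the hunks above do.
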
@@ -3425,23 +3417,18 @@ xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
xlate_group_bucket(ctx, bucket);
xlate_group_stats(ctx, group, bucket);
} else if (ctx->xin->xcache) {
- group_dpif_unref(group);
+ ofproto_group_unref(&group->up);
}
}
static void
xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
- const struct field_array *fields;
- struct ofputil_bucket *bucket;
- const uint8_t *mask_values;
- uint32_t basis;
- size_t i;
-
- fields = group_dpif_get_fields(group);
- mask_values = fields->values;
- basis = hash_uint64(group_dpif_get_selection_method_param(group));
+ const struct field_array *fields = &group->up.props.fields;
+ const uint8_t *mask_values = fields->values;
+ uint32_t basis = hash_uint64(group->up.props.selection_method_param);
+ size_t i;
BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->used.bm) {
const struct mf_field *mf = mf_from_id(i);
@@ -3471,12 +3458,12 @@ xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
mf_mask_field_masked(mf, &mask, ctx->wc);
}
- bucket = group_best_live_bucket(ctx, group, basis);
+ struct ofputil_bucket *bucket = group_best_live_bucket(ctx, group, basis);
if (bucket) {
xlate_group_bucket(ctx, bucket);
xlate_group_stats(ctx, group, bucket);
} else if (ctx->xin->xcache) {
- group_dpif_unref(group);
+ ofproto_group_unref(&group->up);
}
}
@@ -3490,13 +3477,11 @@ xlate_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
* compare to zero can be used to decide if the dp_hash value is valid
* without masking the dp_hash field. */
if (!ctx->xin->flow.dp_hash) {
- uint64_t param = group_dpif_get_selection_method_param(group);
+ uint64_t param = group->up.props.selection_method_param;
ctx_trigger_recirculate_with_hash(ctx, param >> 32, (uint32_t)param);
} else {
- uint32_t n_buckets;
-
- group_dpif_get_buckets(group, &n_buckets);
+ uint32_t n_buckets = group->up.n_buckets;
if (n_buckets) {
/* Minimal mask to cover the number of buckets. */
uint32_t mask = (1 << log_2_ceil(n_buckets)) - 1;
@@ -3517,7 +3502,7 @@ xlate_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
static void
xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
- const char *selection_method = group_dpif_get_selection_method(group);
+ const char *selection_method = group->up.props.selection_method;
/* Select groups may access flow keys beyond L2 in order to
* select a bucket. Recirculate as appropriate to make this possible.
@@ -3544,7 +3529,7 @@ xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
bool was_in_group = ctx->in_group;
ctx->in_group = true;
- switch (group_dpif_get_type(group)) {
+ switch (group->up.type) {
case OFPGT11_ALL:
case OFPGT11_INDIRECT:
xlate_all_group(ctx, group);
@@ -3659,7 +3644,7 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
* explicit table miss. OpenFlow before 1.3 doesn't have that concept so
* it will get translated back to OFPR_ACTION for those versions. */
if (reason == OFPR_ACTION
- && ctx->rule && rule_dpif_is_table_miss(ctx->rule)) {
+ && ctx->rule && rule_is_table_miss(&ctx->rule->up)) {
reason = OFPR_EXPLICIT_MISS;
}
@@ -3725,7 +3710,7 @@ emit_continuation(struct xlate_ctx *ctx, const struct frozen_state *state)
.packet_len = dp_packet_size(ctx->xin->packet),
.reason = ctx->pause->reason,
},
- .bridge = *ofproto_dpif_get_uuid(ctx->xbridge->ofproto),
+ .bridge = ctx->xbridge->ofproto->uuid,
.stack = xmemdup(state->stack,
state->n_stack * sizeof *state->stack),
.n_stack = state->n_stack,
@@ -3762,7 +3747,7 @@ finish_freezing__(struct xlate_ctx *ctx, uint8_t table)
struct frozen_state state = {
.table_id = table,
- .ofproto_uuid = *ofproto_dpif_get_uuid(ctx->xbridge->ofproto),
+ .ofproto_uuid = ctx->xbridge->ofproto->uuid,
.stack = ctx->stack.data,
.n_stack = ctx->stack.size / sizeof(union mf_subvalue),
.mirrors = ctx->mirrors,
@@ -4204,7 +4189,7 @@ xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
uint16_t idle_timeout, uint16_t hard_timeout)
{
if (tcp_flags & (TCP_FIN | TCP_RST)) {
- rule_dpif_reduce_timeouts(rule, idle_timeout, hard_timeout);
+ ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
}
}
@@ -5461,8 +5446,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
}
/* Set the bridge for post-recirculation processing if needed. */
- if (!uuid_equals(ofproto_dpif_get_uuid(ctx.xbridge->ofproto),
- &state->ofproto_uuid)) {
+ if (!uuid_equals(&ctx.xbridge->ofproto->uuid, &state->ofproto_uuid)) {
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
const struct xbridge *new_bridge
= xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
@@ -5532,7 +5516,8 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
/* Tunnel metadata in udpif format must be normalized before translation. */
if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
- const struct tun_table *tun_tab = ofproto_dpif_get_tun_tab(xin->ofproto);
+ const struct tun_table *tun_tab
+ = ofproto_get_tun_tab(&xin->ofproto->up);
int err;
err = tun_metadata_from_geneve_udpif(tun_tab, &xin->upcall_flow->tunnel,
@@ -5547,7 +5532,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
/* If the original flow did not come in on a tunnel, then it won't have
* FLOW_TNL_F_UDPIF set. However, we still need to have a metadata
* table in case we generate tunnel actions. */
- flow->tunnel.metadata.tab = ofproto_dpif_get_tun_tab(xin->ofproto);
+ flow->tunnel.metadata.tab = ofproto_get_tun_tab(&xin->ofproto->up);
}
ctx.wc->masks.tunnel.metadata.tab = flow->tunnel.metadata.tab;
@@ -5564,7 +5549,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
entry->rule = ctx.rule;
- rule_dpif_ref(ctx.rule);
+ ofproto_rule_ref(&ctx.rule->up);
}
if (OVS_UNLIKELY(ctx.xin->resubmit_hook)) {
@@ -5626,10 +5611,10 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
ofpacts_len = xin->ofpacts_len;
} else if (ctx.rule) {
const struct rule_actions *actions
- = rule_dpif_get_actions(ctx.rule);
+ = rule_get_actions(&ctx.rule->up);
ofpacts = actions->ofpacts;
ofpacts_len = actions->ofpacts_len;
- ctx.rule_cookie = rule_dpif_get_flow_cookie(ctx.rule);
+ ctx.rule_cookie = ctx.rule->up.flow_cookie;
} else {
OVS_NOT_REACHED();
}
@@ -26,7 +26,6 @@
#include "coverage.h"
#include "cfm.h"
#include "ct-dpif.h"
-#include "dpif.h"
#include "fail-open.h"
#include "guarded-list.h"
#include "hmapx.h"
@@ -80,54 +79,11 @@ COVERAGE_DEFINE(packet_in_overflow);
struct flow_miss;
-struct rule_dpif {
- struct rule up;
-
- /* These statistics:
- *
- * - Do include packets and bytes from datapath flows which have not
- * recently been processed by a revalidator. */
- struct ovs_mutex stats_mutex;
- struct dpif_flow_stats stats OVS_GUARDED;
-
- /* In non-NULL, will point to a new rule (for which a reference is held) to
- * which all the stats updates should be forwarded. This exists only
- * transitionally when flows are replaced.
- *
- * Protected by stats_mutex. If both 'rule->stats_mutex' and
- * 'rule->new_rule->stats_mutex' must be held together, acquire them in that
- * order, */
- struct rule_dpif *new_rule OVS_GUARDED;
- bool forward_counts OVS_GUARDED; /* Forward counts? 'used' time will be
- * forwarded in all cases. */
-
- /* If non-zero then the recirculation id that has
- * been allocated for use with this rule.
- * The recirculation id and associated internal flow should
- * be freed when the rule is freed */
- uint32_t recirc_id;
-};
-
-/* RULE_CAST() depends on this. */
-BUILD_ASSERT_DECL(offsetof(struct rule_dpif, up) == 0);
-
static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
long long int *used);
static struct rule_dpif *rule_dpif_cast(const struct rule *);
static void rule_expire(struct rule_dpif *, long long now);
-struct group_dpif {
- struct ofgroup up;
-
- /* These statistics:
- *
- * - Do include packets and bytes from datapath flows which have not
- * recently been processed by a revalidator. */
- struct ovs_mutex stats_mutex;
- uint64_t packet_count OVS_GUARDED; /* Number of packets received. */
- uint64_t byte_count OVS_GUARDED; /* Number of bytes received. */
-};
-
struct ofbundle {
struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
struct ofproto_dpif *ofproto; /* Owning ofproto. */
@@ -216,24 +172,6 @@ static int set_cfm(struct ofport *, const struct cfm_settings *);
static int set_lldp(struct ofport *ofport_, const struct smap *cfg);
static void ofport_update_peer(struct ofport_dpif *);
-/* Reasons that we might need to revalidate every datapath flow, and
- * corresponding coverage counters.
- *
- * A value of 0 means that there is no need to revalidate.
- *
- * It would be nice to have some cleaner way to integrate with coverage
- * counters, but with only a few reasons I guess this is good enough for
- * now. */
-enum revalidate_reason {
- REV_RECONFIGURE = 1, /* Switch configuration changed. */
- REV_STP, /* Spanning tree protocol port status change. */
- REV_RSTP, /* RSTP port status change. */
- REV_BOND, /* Bonding changed. */
- REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ...*/
- REV_FLOW_TABLE, /* Flow table changed. */
- REV_MAC_LEARNING, /* Mac learning changed. */
- REV_MCAST_SNOOPING, /* Multicast snooping changed. */
-};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_rstp);
@@ -243,89 +181,11 @@ COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_mcast_snooping);
-/* All datapaths of a given type share a single dpif backer instance. */
-struct dpif_backer {
- char *type;
- int refcount;
- struct dpif *dpif;
- struct udpif *udpif;
-
- struct ovs_rwlock odp_to_ofport_lock;
- struct hmap odp_to_ofport_map OVS_GUARDED; /* Contains "struct ofport"s. */
-
- struct simap tnl_backers; /* Set of dpif ports backing tunnels. */
-
- enum revalidate_reason need_revalidate; /* Revalidate all flows. */
-
- bool recv_set_enable; /* Enables or disables receiving packets. */
-
- /* Version string of the datapath stored in OVSDB. */
- char *dp_version_string;
-
- /* Datapath feature support. */
- struct dpif_backer_support support;
- struct atomic_count tnl_count;
-};
-
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
-static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);
-
-struct ofproto_dpif {
- struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
- struct ofproto up;
- struct dpif_backer *backer;
-
- /* Unique identifier for this instantiation of this bridge in this running
- * process. */
- struct uuid uuid;
-
- ATOMIC(ovs_version_t) tables_version; /* For classifier lookups. */
-
- uint64_t dump_seq; /* Last read of udpif_dump_seq(). */
-
- /* Special OpenFlow rules. */
- struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
- struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
- struct rule_dpif *drop_frags_rule; /* Used in OFPUTIL_FRAG_DROP mode. */
-
- /* Bridging. */
- struct netflow *netflow;
- struct dpif_sflow *sflow;
- struct dpif_ipfix *ipfix;
- struct hmap bundles; /* Contains "struct ofbundle"s. */
- struct mac_learning *ml;
- struct mcast_snooping *ms;
- bool has_bonded_bundles;
- bool lacp_enabled;
- struct mbridge *mbridge;
-
- struct ovs_mutex stats_mutex;
- struct netdev_stats stats OVS_GUARDED; /* To account packets generated and
- * consumed in userspace. */
-
- /* Spanning tree. */
- struct stp *stp;
- long long int stp_last_tick;
-
- /* Rapid Spanning Tree. */
- struct rstp *rstp;
- long long int rstp_last_tick;
-
- /* Ports. */
- struct sset ports; /* Set of standard port names. */
- struct sset ghost_ports; /* Ports with no datapath port. */
- struct sset port_poll_set; /* Queued names for port_poll() reply. */
- int port_poll_errno; /* Last errno for port_poll() reply. */
- uint64_t change_seq; /* Connectivity status changes. */
-
- /* Work queues. */
- struct guarded_list ams; /* Contains "struct ofproto_async_msgs"s. */
- struct seq *ams_seq; /* For notifying 'ams' reception. */
- uint64_t ams_seqno;
-};
+struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);
/* All existing ofproto_dpif instances, indexed by ->up.name. */
-static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);
+struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);
static bool ofproto_use_tnl_push_pop = true;
static void ofproto_unixctl_init(void);
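
A hedged sketch of what dropping 'static' enables: other dpif-layer files that include ofproto-dpif.h can now walk the bridge map directly (the loop body is illustrative only):

struct ofproto_dpif *ofproto;

HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
    /* e.g. inspect ofproto->backer->support or ofproto->up.name. */
}
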
@@ -337,18 +197,6 @@ ofproto_dpif_cast(const struct ofproto *ofproto)
return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}
-bool
-ofproto_dpif_get_enable_ufid(const struct dpif_backer *backer)
-{
- return backer->support.ufid;
-}
-
-struct dpif_backer_support *
-ofproto_dpif_get_support(const struct ofproto_dpif *ofproto)
-{
- return &ofproto->backer->support;
-}
-
static void ofproto_trace(struct ofproto_dpif *, struct flow *,
const struct dp_packet *packet,
const struct ofpact[], size_t ofpacts_len,
@@ -360,12 +208,6 @@ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Initial mappings of port to bridge mappings. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);
-const struct tun_table *
-ofproto_dpif_get_tun_tab(const struct ofproto_dpif *ofproto)
-{
- return ofproto_get_tun_tab(&ofproto->up);
-}
-
/* Initialize 'ofm' for a learn action. If the rule already existed, reference
* to that rule is taken, otherwise a new rule is created. 'ofm' keeps the
* rule reference in both cases. */
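
A hedged usage sketch for the helper this comment describes; 'ofproto', 'fm' (a struct ofputil_flow_mod) and 'ofm' (a struct ofproto_flow_mod) are assumed to be set up by the caller, and the error handling is illustrative:

enum ofperr error = ofproto_dpif_flow_mod_init_for_learn(ofproto, &fm, &ofm);
if (!error) {
    /* 'ofm' now keeps the rule reference described in the comment above. */
}
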
@@ -480,12 +322,6 @@ lookup_ofproto_dpif_by_port_name(const char *name)
return NULL;
}
-bool
-ofproto_dpif_backer_enabled(struct dpif_backer* backer)
-{
- return backer->recv_set_enable;
-}
-
static int
type_run(const char *type)
{
@@ -3862,28 +3698,6 @@ rule_dpif_credit_stats(struct rule_dpif *rule,
ovs_mutex_unlock(&rule->stats_mutex);
}
-ovs_be64
-rule_dpif_get_flow_cookie(const struct rule_dpif *rule)
- OVS_REQUIRES(rule->up.mutex)
-{
- return rule->up.flow_cookie;
-}
-
-void
-rule_dpif_reduce_timeouts(struct rule_dpif *rule, uint16_t idle_timeout,
- uint16_t hard_timeout)
-{
- ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
-}
-
-/* Returns 'rule''s actions. The returned actions are RCU-protected, and can
- * be read until the calling thread quiesces. */
-const struct rule_actions *
-rule_dpif_get_actions(const struct rule_dpif *rule)
-{
- return rule_get_actions(&rule->up);
-}
-
/* Sets 'rule''s recirculation id. */
static void
rule_dpif_set_recirc_id(struct rule_dpif *rule, uint32_t id)
@@ -4120,7 +3934,7 @@ check_mask(struct ofproto_dpif *ofproto, const struct miniflow *flow)
ovs_u128 ct_label;
uint32_t ct_mark;
- support = &ofproto_dpif_get_support(ofproto)->odp;
+ support = &ofproto->backer->support.odp;
ct_state = MINIFLOW_GET_U16(flow, ct_state);
if (support->ct_state && support->ct_zone && support->ct_mark
&& support->ct_label && support->ct_state_nat) {
@@ -4169,7 +3983,7 @@ check_actions(const struct ofproto_dpif *ofproto,
}
ct = CONTAINER_OF(ofpact, struct ofpact_conntrack, ofpact);
- support = &ofproto_dpif_get_support(ofproto)->odp;
+ support = &ofproto->backer->support.odp;
if (!support->ct_state) {
report_unsupported_ct(NULL);
@@ -4250,7 +4064,7 @@ rule_insert(struct rule *rule_, struct rule *old_rule_, bool forward_counts)
/* Take a reference to the new rule, and refer all stats updates from
* the old rule to the new rule. */
- rule_dpif_ref(rule);
+ ofproto_rule_ref(&rule->up);
ovs_mutex_lock(&old_rule->stats_mutex);
ovs_mutex_lock(&rule->stats_mutex);
@@ -4278,7 +4092,7 @@ rule_destruct(struct rule *rule_)
ovs_mutex_destroy(&rule->stats_mutex);
/* Release reference to the new rule, if any. */
if (rule->new_rule) {
- rule_dpif_unref(rule->new_rule);
+ ofproto_rule_unref(&rule->new_rule->up);
}
if (rule->recirc_id) {
recirc_free_id(rule->recirc_id);
@@ -4524,14 +4338,11 @@ static void
group_construct_stats(struct group_dpif *group)
OVS_REQUIRES(group->stats_mutex)
{
- struct ofputil_bucket *bucket;
- const struct ovs_list *buckets;
-
group->packet_count = 0;
group->byte_count = 0;
- buckets = group_dpif_get_buckets(group, NULL);
- LIST_FOR_EACH (bucket, list_node, buckets) {
+ struct ofputil_bucket *bucket;
+ LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
bucket->stats.packet_count = 0;
bucket->stats.byte_count = 0;
}
@@ -4549,10 +4360,8 @@ group_dpif_credit_stats(struct group_dpif *group,
bucket->stats.packet_count += stats->n_packets;
bucket->stats.byte_count += stats->n_bytes;
} else { /* Credit to all buckets */
- const struct ovs_list *buckets;
-
- buckets = group_dpif_get_buckets(group, NULL);
- LIST_FOR_EACH (bucket, list_node, buckets) {
+ struct ofputil_bucket *bucket;
+ LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
bucket->stats.packet_count += stats->n_packets;
bucket->stats.byte_count += stats->n_bytes;
}
@@ -4583,17 +4392,14 @@ static enum ofperr
group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
{
struct group_dpif *group = group_dpif_cast(group_);
- struct ofputil_bucket *bucket;
- const struct ovs_list *buckets;
- struct bucket_counter *bucket_stats;
ovs_mutex_lock(&group->stats_mutex);
ogs->packet_count = group->packet_count;
ogs->byte_count = group->byte_count;
- buckets = group_dpif_get_buckets(group, NULL);
- bucket_stats = ogs->bucket_stats;
- LIST_FOR_EACH (bucket, list_node, buckets) {
+ struct bucket_counter *bucket_stats = ogs->bucket_stats;
+ struct ofputil_bucket *bucket;
+ LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
bucket_stats->packet_count = bucket->stats.packet_count;
bucket_stats->byte_count = bucket->stats.byte_count;
bucket_stats++;
@@ -4605,7 +4411,7 @@ group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
/* If the group exists, this function increments the groups's reference count.
*
- * Make sure to call group_dpif_unref() after no longer needing to maintain
+ * Make sure to call ofproto_group_unref() after no longer needing to maintain
* a reference to the group. */
struct group_dpif *
group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
@@ -4615,27 +4421,6 @@ group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
version, take_ref);
return ofgroup ? group_dpif_cast(ofgroup) : NULL;
}
-
-const struct ovs_list *
-group_dpif_get_buckets(const struct group_dpif *group, uint32_t *n_buckets)
-{
- if (n_buckets) {
- *n_buckets = group->up.n_buckets;
- }
- return &group->up.buckets;
-}
-
-enum ofp11_group_type
-group_dpif_get_type(const struct group_dpif *group)
-{
- return group->up.type;
-}
-
-const char *
-group_dpif_get_selection_method(const struct group_dpif *group)
-{
- return group->up.props.selection_method;
-}
/* Sends 'packet' out 'ofport'. If 'port' is a tunnel and that tunnel type
* supports a notion of an OAM flag, sets it if 'oam' is true.
@@ -4656,18 +4441,6 @@ ofproto_dpif_send_packet(const struct ofport_dpif *ofport, bool oam,
ovs_mutex_unlock(&ofproto->stats_mutex);
return error;
}
-
-uint64_t
-group_dpif_get_selection_method_param(const struct group_dpif *group)
-{
- return group->up.props.selection_method_param;
-}
-
-const struct field_array *
-group_dpif_get_fields(const struct group_dpif *group)
-{
- return &group->up.props.fields;
-}
/* Return the version string of the datapath that backs up
* this 'ofproto'.
@@ -4973,7 +4746,7 @@ trace_format_rule(struct ofproto *ofproto, struct ds *result, int level,
cls_rule_format(&rule->up.cr, ofproto_get_tun_tab(ofproto), result);
ds_put_char(result, '\n');
- actions = rule_dpif_get_actions(rule);
+ actions = rule->up.actions;
ds_put_char_multiple(result, '\t', level);
ds_put_cstr(result, "OpenFlow actions=");
@@ -5190,7 +4963,7 @@ parse_flow_and_packet(int argc, const char *argv[],
goto exit;
}
- flow->tunnel.metadata.tab = ofproto_dpif_get_tun_tab(*ofprotop);
+ flow->tunnel.metadata.tab = ofproto_get_tun_tab(&(*ofprotop)->up);
/* Convert Geneve options to OpenFlow format now. This isn't actually
* required in order to get the right results since the ofproto xlate
@@ -5224,7 +4997,7 @@ parse_flow_and_packet(int argc, const char *argv[],
}
err = parse_ofp_exact_flow(flow, NULL,
- ofproto_dpif_get_tun_tab(*ofprotop),
+ ofproto_get_tun_tab(&(*ofprotop)->up),
argv[argc - 1], NULL);
if (err) {
m_err = xasprintf("Bad openflow flow syntax: %s", err);
@@ -5734,16 +5507,7 @@ ofproto_unixctl_init(void)
unixctl_command_register("dpif/disable-truncate", "", 0, 0,
disable_datapath_truncate, NULL);
}
-
-/* Returns true if 'table' is the table used for internal rules,
- * false otherwise. */
-bool
-table_is_internal(uint8_t table_id)
-{
- return table_id == TBL_INTERNAL;
-}
-
static odp_port_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
{
@@ -5851,12 +5615,6 @@ ofproto_dpif_delete_internal_flow(struct ofproto_dpif *ofproto,
return 0;
}
-const struct uuid *
-ofproto_dpif_get_uuid(const struct ofproto_dpif *ofproto)
-{
- return &ofproto->uuid;
-}
-
const struct ofproto_class ofproto_dpif_class = {
init,
enumerate_types,
@@ -15,45 +15,15 @@
#ifndef OFPROTO_DPIF_H
#define OFPROTO_DPIF_H 1
-#include <stdint.h>
-
-#include "fail-open.h"
-#include "hmapx.h"
-#include "odp-util.h"
-#include "openvswitch/ofp-util.h"
-#include "ovs-thread.h"
-#include "ofproto-provider.h"
-#include "timer.h"
-#include "util.h"
-#include "ovs-thread.h"
-
-/* Priority for internal rules created to handle recirculation */
-#define RECIRC_RULE_PRIORITY 20
-
-union user_action_cookie;
-struct dpif_flow_stats;
-struct ofproto;
-struct ofproto_async_msg;
-struct ofproto_dpif;
-struct ofport_dpif;
-struct dpif_backer;
-struct OVS_LOCKABLE rule_dpif;
-struct OVS_LOCKABLE group_dpif;
-
-/* Number of implemented OpenFlow tables. */
-enum { N_TABLES = 255 };
-enum { TBL_INTERNAL = N_TABLES - 1 }; /* Used for internal hidden rules. */
-BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);
-
-/* Ofproto-dpif -- DPIF based ofproto implementation.
+/* ofproto-dpif -- DPIF based ofproto implementation.
*
- * Ofproto-dpif provides an ofproto implementation for those platforms which
+ * ofproto-dpif provides an ofproto implementation for those platforms which
* implement the netdev and dpif interface defined in netdev.h and dpif.h. The
* most important of which is the Linux Kernel Module (dpif-linux), but
* alternatives are supported such as a userspace only implementation
* (dpif-netdev), and a dummy implementation used for unit testing.
*
- * Ofproto-dpif is divided into three major chunks.
+ * ofproto-dpif is divided into three major chunks.
*
* - ofproto-dpif.c
* The main ofproto-dpif module is responsible for implementing the
@@ -62,7 +32,7 @@ BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);
* configuring relevant submodules.
*
* - ofproto-dpif-upcall.c
- * Ofproto-dpif-upcall is responsible for retrieving upcalls from the kernel,
+ * ofproto-dpif-upcall is responsible for retrieving upcalls from the kernel,
* processing miss upcalls, and handing more complex ones up to the main
* ofproto-dpif module. Miss upcall processing boils down to figuring out
* what each packet's actions are, executing them (i.e. asking the kernel to
@@ -70,8 +40,108 @@ BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);
* to install a kernel flow.
*
* - ofproto-dpif-xlate.c
- * Ofproto-dpif-xlate is responsible for translating OpenFlow actions into
- * datapath actions. */
+ * ofproto-dpif-xlate is responsible for translating OpenFlow actions into
+ * datapath actions.
+ */
+
+#include <stdint.h>
+
+#include "dpif.h"
+#include "fail-open.h"
+#include "hmapx.h"
+#include "odp-util.h"
+#include "openvswitch/ofp-util.h"
+#include "ovs-thread.h"
+#include "ofproto-provider.h"
+#include "util.h"
+#include "ovs-thread.h"
+
+struct dpif_flow_stats;
+struct ofproto_async_msg;
+struct ofproto_dpif;
+struct xlate_cache;
+
+/* Number of implemented OpenFlow tables. */
+enum { N_TABLES = 255 };
+enum { TBL_INTERNAL = N_TABLES - 1 }; /* Used for internal hidden rules. */
+BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);
+
+struct rule_dpif {
+ struct rule up;
+
+ /* These statistics:
+ *
+ * - Do include packets and bytes from datapath flows which have not
+ * recently been processed by a revalidator. */
+ struct ovs_mutex stats_mutex;
+ struct dpif_flow_stats stats OVS_GUARDED;
+
+ /* If non-NULL, will point to a new rule (for which a reference is held) to
+ * which all the stats updates should be forwarded. This exists only
+ * transitionally when flows are replaced.
+ *
+ * Protected by stats_mutex. If both 'rule->stats_mutex' and
+ * 'rule->new_rule->stats_mutex' must be held together, acquire them in that
+ * order. */
+ struct rule_dpif *new_rule OVS_GUARDED;
+ bool forward_counts OVS_GUARDED; /* Forward counts? 'used' time will be
+ * forwarded in all cases. */
+
+ /* If non-zero then the recirculation id that has
+ * been allocated for use with this rule.
+ * The recirculation id and associated internal flow should
+ * be freed when the rule is freed. */
+ uint32_t recirc_id;
+};
+
+struct rule_dpif *rule_dpif_lookup_from_table(struct ofproto_dpif *,
+ ovs_version_t, struct flow *,
+ struct flow_wildcards *,
+ const struct dpif_flow_stats *,
+ uint8_t *table_id,
+ ofp_port_t in_port,
+ bool may_packet_in,
+ bool honor_table_miss,
+ struct xlate_cache *);
+
+void rule_dpif_credit_stats(struct rule_dpif *,
+ const struct dpif_flow_stats *);
+
+void rule_set_recirc_id(struct rule *, uint32_t id);
+
+/* Returns true if 'rule' is an internal rule, false otherwise. */
+static inline bool
+rule_dpif_is_internal(const struct rule_dpif *rule)
+{
+ return rule->up.table_id == TBL_INTERNAL;
+}
+
+/* Groups. */
+
+struct group_dpif {
+ struct ofgroup up;
+
+ /* These statistics:
+ *
+ * - Do include packets and bytes from datapath flows which have not
+ * recently been processed by a revalidator. */
+ struct ovs_mutex stats_mutex;
+ uint64_t packet_count OVS_GUARDED; /* Number of packets received. */
+ uint64_t byte_count OVS_GUARDED; /* Number of bytes received. */
+};
+
+void group_dpif_credit_stats(struct group_dpif *,
+ struct ofputil_bucket *,
+ const struct dpif_flow_stats *);
+struct group_dpif *group_dpif_lookup(struct ofproto_dpif *,
+ uint32_t group_id, ovs_version_t version,
+ bool take_ref);
+
+/* Backers.
+ *
+ * A "backer" is the datapath (dpif) on which an dpif-based bridge (an
+ * ofproto-dpif) resides. A backer can host several bridges, but a bridge is
+ * backed by only a single dpif. */
/* Stores the various features which the corresponding backer supports. */
struct dpif_backer_support {
@@ -97,60 +167,117 @@ struct dpif_backer_support {
struct odp_support odp;
};
-bool ofproto_dpif_get_enable_ufid(const struct dpif_backer *backer);
-struct dpif_backer_support *ofproto_dpif_get_support(const struct ofproto_dpif *);
-
-ovs_version_t ofproto_dpif_get_tables_version(struct ofproto_dpif *);
+/* Reasons that we might need to revalidate every datapath flow, and
+ * corresponding coverage counters.
+ *
+ * A value of 0 means that there is no need to revalidate.
+ *
+ * It would be nice to have some cleaner way to integrate with coverage
+ * counters, but with only a few reasons I guess this is good enough for
+ * now. */
+enum revalidate_reason {
+ REV_RECONFIGURE = 1, /* Switch configuration changed. */
+ REV_STP, /* Spanning tree protocol port status change. */
+ REV_RSTP, /* RSTP port status change. */
+ REV_BOND, /* Bonding changed. */
+ REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ...*/
+ REV_FLOW_TABLE, /* Flow table changed. */
+ REV_MAC_LEARNING, /* Mac learning changed. */
+ REV_MCAST_SNOOPING, /* Multicast snooping changed. */
+};
-void ofproto_dpif_credit_table_stats(struct ofproto_dpif *, uint8_t table_id,
- uint64_t n_matches, uint64_t n_misses);
+/* All datapaths of a given type share a single dpif backer instance. */
+struct dpif_backer {
+ char *type;
+ int refcount;
+ struct dpif *dpif;
+ struct udpif *udpif;
-struct xlate_cache;
+ struct ovs_rwlock odp_to_ofport_lock;
+ struct hmap odp_to_ofport_map OVS_GUARDED; /* Contains "struct ofport"s. */
-struct rule_dpif *rule_dpif_lookup_from_table(struct ofproto_dpif *,
- ovs_version_t, struct flow *,
- struct flow_wildcards *,
- const struct dpif_flow_stats *,
- uint8_t *table_id,
- ofp_port_t in_port,
- bool may_packet_in,
- bool honor_table_miss,
- struct xlate_cache *xcache);
+ struct simap tnl_backers; /* Set of dpif ports backing tunnels. */
-static inline void rule_dpif_ref(struct rule_dpif *);
-static inline void rule_dpif_unref(struct rule_dpif *);
+ enum revalidate_reason need_revalidate; /* Revalidate all flows. */
-void rule_dpif_credit_stats(struct rule_dpif *rule ,
- const struct dpif_flow_stats *);
+ bool recv_set_enable; /* Enables or disables receiving packets. */
-static inline bool rule_dpif_is_fail_open(const struct rule_dpif *);
-static inline bool rule_dpif_is_table_miss(const struct rule_dpif *);
-static inline bool rule_dpif_is_internal(const struct rule_dpif *);
+ /* Version string of the datapath stored in OVSDB. */
+ char *dp_version_string;
-uint8_t rule_dpif_get_table(const struct rule_dpif *);
+ /* Datapath feature support. */
+ struct dpif_backer_support support;
+ struct atomic_count tnl_count;
+};
-bool table_is_internal(uint8_t table_id);
+/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
+extern struct shash all_dpif_backers;
-const struct rule_actions *rule_dpif_get_actions(const struct rule_dpif *);
-void rule_set_recirc_id(struct rule *, uint32_t id);
+struct ofport_dpif *odp_port_to_ofport(const struct dpif_backer *, odp_port_t);
+
+/* A bridge based on a "dpif" datapath. */
+
+struct ofproto_dpif {
+ struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
+ struct ofproto up;
+ struct dpif_backer *backer;
+
+ /* Unique identifier for this instantiation of this bridge in this running
+ * process. */
+ struct uuid uuid;
+
+ ATOMIC(ovs_version_t) tables_version; /* For classifier lookups. */
+
+ uint64_t dump_seq; /* Last read of udpif_dump_seq(). */
+
+ /* Special OpenFlow rules. */
+ struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
+ struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
+ struct rule_dpif *drop_frags_rule; /* Used in OFPUTIL_FRAG_DROP mode. */
+
+ /* Bridging. */
+ struct netflow *netflow;
+ struct dpif_sflow *sflow;
+ struct dpif_ipfix *ipfix;
+ struct hmap bundles; /* Contains "struct ofbundle"s. */
+ struct mac_learning *ml;
+ struct mcast_snooping *ms;
+ bool has_bonded_bundles;
+ bool lacp_enabled;
+ struct mbridge *mbridge;
+
+ struct ovs_mutex stats_mutex;
+ struct netdev_stats stats OVS_GUARDED; /* To account packets generated and
+ * consumed in userspace. */
+
+ /* Spanning tree. */
+ struct stp *stp;
+ long long int stp_last_tick;
+
+ /* Rapid Spanning Tree. */
+ struct rstp *rstp;
+ long long int rstp_last_tick;
+
+ /* Ports. */
+ struct sset ports; /* Set of standard port names. */
+ struct sset ghost_ports; /* Ports with no datapath port. */
+ struct sset port_poll_set; /* Queued names for port_poll() reply. */
+ int port_poll_errno; /* Last errno for port_poll() reply. */
+ uint64_t change_seq; /* Connectivity status changes. */
+
+ /* Work queues. */
+ struct guarded_list ams; /* Contains "struct ofproto_async_msgs"s. */
+ struct seq *ams_seq; /* For notifying 'ams' reception. */
+ uint64_t ams_seqno;
+};
-ovs_be64 rule_dpif_get_flow_cookie(const struct rule_dpif *rule);
+/* All existing ofproto_dpif instances, indexed by ->up.name. */
+extern struct hmap all_ofproto_dpifs;
-void rule_dpif_reduce_timeouts(struct rule_dpif *rule, uint16_t idle_timeout,
- uint16_t hard_timeout);
+ovs_version_t ofproto_dpif_get_tables_version(struct ofproto_dpif *);
-void group_dpif_credit_stats(struct group_dpif *,
- struct ofputil_bucket *,
- const struct dpif_flow_stats *);
-struct group_dpif *group_dpif_lookup(struct ofproto_dpif *ofproto,
- uint32_t group_id, ovs_version_t version,
- bool take_ref);
-const struct ovs_list *group_dpif_get_buckets(const struct group_dpif *group,
- uint32_t *n_buckets);
-enum ofp11_group_type group_dpif_get_type(const struct group_dpif *group);
-const char *group_dpif_get_selection_method(const struct group_dpif *group);
-uint64_t group_dpif_get_selection_method_param(const struct group_dpif *group);
-const struct field_array *group_dpif_get_fields(const struct group_dpif *group);
+void ofproto_dpif_credit_table_stats(struct ofproto_dpif *, uint8_t table_id,
+ uint64_t n_matches, uint64_t n_misses);
int ofproto_dpif_execute_actions(struct ofproto_dpif *, ovs_version_t,
const struct flow *, struct rule_dpif *,
@@ -165,16 +292,13 @@ void ofproto_dpif_send_async_msg(struct ofproto_dpif *,
struct ofproto_async_msg *);
int ofproto_dpif_send_packet(const struct ofport_dpif *, bool oam,
struct dp_packet *);
-enum ofperr ofproto_dpif_flow_mod_init_for_learn(struct ofproto_dpif *,
- const struct ofputil_flow_mod *,
- struct ofproto_flow_mod *);
+enum ofperr ofproto_dpif_flow_mod_init_for_learn(
+ struct ofproto_dpif *, const struct ofputil_flow_mod *,
+ struct ofproto_flow_mod *);
-struct ofport_dpif *odp_port_to_ofport(const struct dpif_backer *, odp_port_t);
struct ofport_dpif *ofp_port_to_ofport(const struct ofproto_dpif *,
ofp_port_t);
-bool ofproto_dpif_backer_enabled(struct dpif_backer* backer);
-
int ofproto_dpif_add_internal_flow(struct ofproto_dpif *,
const struct match *, int priority,
uint16_t idle_timeout,
@@ -183,59 +307,6 @@ int ofproto_dpif_add_internal_flow(struct ofproto_dpif *,
int ofproto_dpif_delete_internal_flow(struct ofproto_dpif *, struct match *,
int priority);
-const struct uuid *ofproto_dpif_get_uuid(const struct ofproto_dpif *);
-const struct tun_table *ofproto_dpif_get_tun_tab(const struct ofproto_dpif *);
-
-/* struct rule_dpif has struct rule as it's first member. */
-#define RULE_CAST(RULE) ((struct rule *)RULE)
-#define GROUP_CAST(GROUP) ((struct ofgroup *)GROUP)
-
-static inline struct group_dpif* group_dpif_ref(struct group_dpif *group)
-{
- if (group) {
- ofproto_group_ref(GROUP_CAST(group));
- }
- return group;
-}
-
-static inline void group_dpif_unref(struct group_dpif *group)
-{
- if (group) {
- ofproto_group_unref(GROUP_CAST(group));
- }
-}
-
-static inline void rule_dpif_ref(struct rule_dpif *rule)
-{
- if (rule) {
- ofproto_rule_ref(RULE_CAST(rule));
- }
-}
-
-static inline void rule_dpif_unref(struct rule_dpif *rule)
-{
- if (rule) {
- ofproto_rule_unref(RULE_CAST(rule));
- }
-}
-
-static inline bool rule_dpif_is_fail_open(const struct rule_dpif *rule)
-{
- return is_fail_open_rule(RULE_CAST(rule));
-}
-
-static inline bool rule_dpif_is_table_miss(const struct rule_dpif *rule)
-{
- return rule_is_table_miss(RULE_CAST(rule));
-}
-
-/* Returns true if 'rule' is an internal rule, false otherwise. */
-static inline bool rule_dpif_is_internal(const struct rule_dpif *rule)
-{
- return RULE_CAST(rule)->table_id == TBL_INTERNAL;
-}
-
-#undef RULE_CAST
+bool ovs_native_tunneling_is_on(struct ofproto_dpif *);
-bool ovs_native_tunneling_is_on(struct ofproto_dpif *ofproto);
#endif /* ofproto-dpif.h */
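
Finally, a hedged sketch of the rule access pattern once the rule_dpif wrappers are gone; the helper name is hypothetical, and every call mirrors one made elsewhere in this patch:

static void
use_rule_example(struct rule_dpif *rule)
{
    /* Reference counting goes through the generic ofproto helpers... */
    ofproto_rule_ref(&rule->up);

    /* ...and the cookie and actions are read via the embedded 'struct rule'. */
    ovs_be64 cookie = rule->up.flow_cookie;
    const struct rule_actions *actions = rule_get_actions(&rule->up);

    /* Translation of 'actions->ofpacts' / use of 'cookie' would go here. */
    (void) cookie;
    (void) actions;

    ofproto_rule_unref(&rule->up);
}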