diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 44ebf96..81fcbc1 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -405,6 +405,7 @@ enum dpdk_hw_ol_features {
NETDEV_RX_HW_SCATTER = 1 << 2,
NETDEV_TX_TSO_OFFLOAD = 1 << 3,
NETDEV_TX_SCTP_CHECKSUM_OFFLOAD = 1 << 4,
+ NETDEV_TX_VLAN_INSERT = 1 << 5,
};
/*
@@ -488,7 +489,10 @@ struct netdev_dpdk {
struct netdev_dpdk_sw_stats *sw_stats;
/* Protects stats */
rte_spinlock_t stats_lock;
- /* 36 pad bytes here. */
+ rte_spinlock_t tx_thd_lock;
+ uint16_t (* tx_burst)(uint16_t, uint16_t, struct rte_mbuf **,
+ uint16_t, rte_spinlock_t *);
+ /* 24 pad bytes here. */
);
PADDED_MEMBERS(CACHE_LINE_SIZE,
@@ -954,6 +958,28 @@ dpdk_watchdog(void *dummy OVS_UNUSED)
return NULL;
}
+/* Thread-safe TX burst: serializes access to the device TX queue with
+ * 'lock'.  Installed as dev->tx_burst when a TX queue can be used by
+ * more than one thread (e.g. a mirror-offload callback transmitting on
+ * the same port).  Returns the number of packets actually sent. */
+static inline uint16_t
+dpdk_eth_dev_tx_burst_ts(uint16_t port_id, uint16_t queue_id,
+                         struct rte_mbuf **tx_pkts, uint16_t nb_pkts,
+                         rte_spinlock_t *lock)
+{
+    uint16_t ret;
+
+    rte_spinlock_lock(lock);
+    ret = rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts);
+    rte_spinlock_unlock(lock);
+
+    return ret;
+}
+
+/* Lock-free TX burst: direct pass-through to rte_eth_tx_burst().
+ * 'lock' is unused; the signature matches dpdk_eth_dev_tx_burst_ts()
+ * so either function can be installed as dev->tx_burst. */
+static inline uint16_t
+dpdk_eth_dev_tx_burst_native(uint16_t port_id, uint16_t queue_id,
+                             struct rte_mbuf **tx_pkts, uint16_t nb_pkts,
+                             rte_spinlock_t *lock OVS_UNUSED)
+{
+    return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts);
+}
+
static int
dpdk_eth_dev_port_config(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
@@ -986,14 +1012,21 @@ dpdk_eth_dev_port_config(struct netdev_dpdk *dev, int n_rxq, int n_txq)
conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
}
- if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
- conf.txmode.offloads |= DPDK_TX_TSO_OFFLOAD_FLAGS;
- if (dev->hw_ol_features & NETDEV_TX_SCTP_CHECKSUM_OFFLOAD) {
- conf.txmode.offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
- }
- }
+ if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
+ conf.txmode.offloads |= DPDK_TX_TSO_OFFLOAD_FLAGS;
+ if (dev->hw_ol_features & NETDEV_TX_SCTP_CHECKSUM_OFFLOAD) {
+ conf.txmode.offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+ }
+ }
+
+ dev->tx_burst = dpdk_eth_dev_tx_burst_native;
+ if (dev->hw_ol_features & NETDEV_TX_VLAN_INSERT) {
+ conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ rte_spinlock_init(&dev->tx_thd_lock);
+ dev->tx_burst = dpdk_eth_dev_tx_burst_ts;
+ }
- /* Limit configured rss hash functions to only those supported
+ /* Limit configured rss hash functions to only those supported
* by the eth device. */
conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;
@@ -1912,6 +1945,10 @@ netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args,
NIC_PORT_DEFAULT_TXQ_SIZE,
&dev->requested_txq_size);
+ if (smap_get_bool(args, "tx_vlan_insert", false)) {
+ dev->hw_ol_features |= NETDEV_TX_VLAN_INSERT;
+ }
+
new_devargs = smap_get(args, "dpdk-devargs");
if (dev->devargs && new_devargs && strcmp(new_devargs, dev->devargs)) {
@@ -2215,8 +2252,8 @@ netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
while (nb_tx != nb_tx_prep) {
uint32_t ret;
- ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx,
- nb_tx_prep - nb_tx);
+ ret = dev->tx_burst(dev->port_id, qid, pkts + nb_tx,
+ nb_tx_prep - nb_tx, &dev->tx_thd_lock);
if (!ret) {
break;
}
@@ -5228,6 +5265,54 @@ netdev_dpdk_rte_flow_query_count(struct netdev *netdev,
return ret;
}
+/* Adds or removes a port/flow mirror between 'src' and 'dst'.
+ *
+ * If 'flow_addr' is nonnull, only packets whose MAC address matches it
+ * are mirrored (flow mirror); otherwise all traffic on the source port
+ * is mirrored (port mirror).  Mirrored packets are tagged with
+ * 'vlan_id' when transmitted on 'dst'.  'tx_cb' selects the direction:
+ * true hooks the source port's TX path (ingress mirror), false hooks
+ * its RX path (egress mirror).
+ *
+ * Returns 0 on success, nonzero on failure. */
+static int
+netdev_dpdk_mirror_offload(struct netdev *src, struct eth_addr *flow_addr,
+                           struct netdev *dst, uint16_t vlan_id,
+                           bool add_mirror, bool tx_cb)
+{
+    struct netdev_dpdk *src_dev = netdev_dpdk_cast(src);
+    struct netdev_dpdk *dst_dev = netdev_dpdk_cast(dst);
+    int status = 0;
+
+    if (add_mirror) {
+        struct mirror_param data;
+
+        memset(&data, 0, sizeof data);
+        if (flow_addr) {
+            uint64_t mac_addr = 0;
+            int i;
+
+            /* Pack the MAC with byte 0 in the least significant byte so
+             * it can be compared against a raw 64-bit load of a frame's
+             * MAC field (see the mirror offload callbacks). */
+            for (i = 0; i < 6; i++) {
+                mac_addr <<= 8;
+                mac_addr |= flow_addr->ea[6 - i - 1];
+            }
+            data.target_addr = mac_addr;
+        }
+
+        data.dst_port_id = dst_dev->port_id;
+        data.dst_vlan_id = vlan_id;
+        data.n_src_queue = tx_cb ? src->n_txq : src->n_rxq;
+        data.n_dst_queue = dst->n_txq;
+        data.lock = &dst_dev->tx_thd_lock;
+        data.max_burst_size = NETDEV_MAX_BURST;
+
+        /* "0x" prefix requires a hex conversion: PRIx64, not PRId64. */
+        VLOG_INFO("register %s mirror-offload with src-port:%d (%s) and "
+                  "output-port:%d (%s) vlan-id=%d flow-mac="
+                  "0x%" PRIx64 "\n",
+                  tx_cb ? "ingress" : "egress", src_dev->port_id,
+                  src->name, dst_dev->port_id, dst->name, vlan_id,
+                  (uint64_t) __builtin_bswap64(data.target_addr));
+
+        status = netdev_register_mirror(src_dev->port_id, &data, tx_cb);
+    } else {
+        VLOG_INFO("unregister %s mirror-offload with src-port:%d(%s)\n",
+                  tx_cb ? "ingress" : "egress", src_dev->port_id,
+                  src->name);
+        status = netdev_unregister_mirror(src_dev->port_id, tx_cb);
+    }
+
+    return status;
+}
+
#define NETDEV_DPDK_CLASS_COMMON \
.is_pmd = true, \
.alloc = netdev_dpdk_alloc, \
@@ -5277,6 +5362,7 @@ static const struct netdev_class dpdk_class = {
.construct = netdev_dpdk_construct,
.set_config = netdev_dpdk_set_config,
.send = netdev_dpdk_eth_send,
+ .mirror_offload = netdev_dpdk_mirror_offload,
};
static const struct netdev_class dpdk_vhost_class = {
@@ -5285,6 +5371,7 @@ static const struct netdev_class dpdk_vhost_class = {
.construct = netdev_dpdk_vhost_construct,
.destruct = netdev_dpdk_vhost_destruct,
.send = netdev_dpdk_vhost_send,
+ .mirror_offload = netdev_dpdk_mirror_offload,
.get_carrier = netdev_dpdk_vhost_get_carrier,
.get_stats = netdev_dpdk_vhost_get_stats,
.get_custom_stats = netdev_dpdk_get_sw_custom_stats,
@@ -5301,6 +5388,7 @@ static const struct netdev_class dpdk_vhost_client_class = {
.destruct = netdev_dpdk_vhost_destruct,
.set_config = netdev_dpdk_vhost_client_set_config,
.send = netdev_dpdk_vhost_send,
+ .mirror_offload = netdev_dpdk_mirror_offload,
.get_carrier = netdev_dpdk_vhost_get_carrier,
.get_stats = netdev_dpdk_vhost_get_stats,
.get_custom_stats = netdev_dpdk_get_sw_custom_stats,
diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index de6101e..1717a9e 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -29,6 +29,11 @@
#include "packets.h"
#include "uuid.h"
+/* Mask selecting the low 48 bits (a MAC address) of a 64-bit load. */
+#define MAC_ADDR_MAP 0x0000FFFFFFFFFFFFULL
+/* True if the MAC-address bits of 'a' and 'b' are equal.  Arguments are
+ * parenthesized so callers may pass arbitrary expressions safely. */
+#define is_mac_addr_match(a, b) ((((a) ^ (b)) & MAC_ADDR_MAP) == 0)
+#define INIT_MIRROR_DB_SIZE 8
+#define INVALID_PORT_ID 0xFFFF
+
VLOG_DEFINE_THIS_MODULE(netdev_offload_dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(100, 5);
@@ -1581,3 +1586,425 @@ const struct netdev_flow_api netdev_offload_dpdk = {
.init_flow_api = netdev_offload_dpdk_init_flow_api,
.flow_get = netdev_offload_dpdk_flow_get,
};
+
+/*
+ * The below API is for port/flow mirror offloading which uses a different DPDK
+ * interface as rte-flow.
+ */
+static int mirror_port_db_size = 0;    /* Allocated entries in the db. */
+static int mirror_port_used = 0;       /* Entries currently in use. */
+/* Lazily allocated array of per-port mirror state; NULL until the first
+ * mirror is registered, freed again when the last one is removed. */
+static struct mirror_offload_port *mirror_port_db = NULL;
+
+/* Returns the db entry for 'port_id', or NULL if none is recorded. */
+static struct mirror_offload_port*
+netdev_mirror_data_find(uint16_t port_id)
+{
+    if (mirror_port_db) {
+        int idx;
+
+        for (idx = 0; idx < mirror_port_db_size; idx++) {
+            struct mirror_offload_port *entry = &mirror_port_db[idx];
+
+            if (entry->port_id == port_id) {
+                return entry;
+            }
+        }
+    }
+    return NULL;
+}
+
+/* Marks 'size' entries starting at 'db' as unused. */
+static void
+netdev_mirror_db_init(struct mirror_offload_port *db, int size)
+{
+    int idx;
+
+    for (idx = 0; idx < size; idx++) {
+        struct mirror_offload_port *entry = &db[idx];
+
+        entry->port_id = INVALID_PORT_ID;
+        memset(&entry->rx, 0, sizeof entry->rx);
+        memset(&entry->tx, 0, sizeof entry->tx);
+    }
+}
+
+/* Double the db size when it runs out of space */
+/* Doubles the db capacity, preserving existing entries and marking the
+ * new upper half unused.  Returns 0 on success. */
+static int
+netdev_mirror_db_resize(void)
+{
+    int new_size = 2 * mirror_port_db_size;
+    struct mirror_offload_port *new_db;
+
+    /* xrealloc() keeps the old entries and releases the old array,
+     * unlike the previous malloc()+memcpy() which leaked it; like the
+     * rest of OVS it aborts on allocation failure, so no NULL check. */
+    new_db = xrealloc(mirror_port_db,
+                      sizeof(struct mirror_offload_port) * new_size);
+
+    netdev_mirror_db_init(&new_db[mirror_port_db_size], mirror_port_db_size);
+    mirror_port_db = new_db;
+    mirror_port_db_size = new_size;
+
+    return 0;
+}
+
+/* Clears the rx ('tx' zero) or tx ('tx' nonzero) mirror state recorded
+ * for 'port_id', and releases the whole db once no port has any mirror
+ * configuration left. */
+static void
+netdev_mirror_data_remove(uint16_t port_id, int tx)
+{
+    struct mirror_offload_port *target = netdev_mirror_data_find(port_id);
+
+    if (!target) {
+        VLOG_ERR("Attempt to remove unsaved port, %d, %s callback\n",
+                 port_id, tx ? "tx" : "rx");
+        /* Must return here: falling through dereferenced a null
+         * pointer in the original code. */
+        return;
+    }
+
+    memset(tx ? &target->tx : &target->rx, 0, sizeof(struct mirror_param));
+
+    if (!target->rx.mirror_cb && !target->tx.mirror_cb) {
+        target->port_id = INVALID_PORT_ID;
+        mirror_port_used--;
+        /* Release the db memory when there is no outstanding port
+         * mirror offloading configuration. */
+        if (mirror_port_used == 0) {
+            free(mirror_port_db);
+            mirror_port_db = NULL;
+            mirror_port_db_size = 0;
+        }
+    }
+}
+
+/* Records '*new_param' as the rx ('tx' zero) or tx ('tx' nonzero)
+ * mirror configuration for 'port_id', allocating or growing the db as
+ * needed.
+ *
+ * Returns the db entry on success, or NULL on failure -- including
+ * when a mirror of the same direction is already outstanding on the
+ * port.  (The original code returned the existing entry in that case,
+ * which let the caller silently clobber the live configuration.) */
+static struct mirror_offload_port*
+netdev_mirror_data_add(uint16_t port_id, int tx,
+                       struct mirror_param *new_param)
+{
+    struct mirror_offload_port *target;
+    struct mirror_param *param;
+    int i;
+
+    if (!mirror_port_db) {
+        mirror_port_db_size = INIT_MIRROR_DB_SIZE;
+        /* xmalloc() aborts on failure; a NULL check would be dead code. */
+        mirror_port_db = xmalloc(sizeof(struct mirror_offload_port) *
+                                 mirror_port_db_size);
+        netdev_mirror_db_init(mirror_port_db, mirror_port_db_size);
+    }
+
+    target = netdev_mirror_data_find(port_id);
+    if (target) {
+        param = tx ? &target->tx : &target->rx;
+        if (param->mirror_cb) {
+            VLOG_ERR("Attempt to add %s mirror offloading"
+                     " on port, %d, while one is outstanding\n",
+                     tx ? "ingress" : "egress", port_id);
+            return NULL;
+        }
+        memcpy(param, new_param, sizeof *new_param);
+    } else {
+        /* Find an unused slot in the db, growing it if full; after a
+         * resize, 'i' indexes the first slot of the new upper half. */
+        for (i = 0; i < mirror_port_db_size; i++) {
+            if (mirror_port_db[i].port_id == INVALID_PORT_ID) {
+                break;
+            }
+        }
+        if (i == mirror_port_db_size && netdev_mirror_db_resize()) {
+            return NULL;
+        }
+
+        param = tx ? &mirror_port_db[i].tx : &mirror_port_db[i].rx;
+        memcpy(param, new_param, sizeof *new_param);
+
+        target = &mirror_port_db[i];
+        target->port_id = port_id;
+        mirror_port_used++;
+    }
+    return target;
+}
+
+/* RX-queue callback implementing a flow mirror: packets whose
+ * destination MAC matches data->target_addr are VLAN-tagged and cloned
+ * to the mirror output port.  Runs inside rte_eth_rx_burst() on the
+ * source port; always returns 'nb_pkts' so the whole burst continues
+ * through normal RX processing. */
+static inline uint16_t
+netdev_rx_flow_mirror_offload_cb(uint16_t port_id OVS_UNUSED,
+ uint16_t qidx, struct rte_mbuf **pkts, uint16_t nb_pkts,
+ uint16_t maxi_pkts OVS_UNUSED, void *user_params)
+{
+ struct mirror_param *data = user_params;
+ uint16_t i, dst_qidx, match_count = 0;
+ uint16_t pkt_trans;
+ uint16_t dst_port_id = data->dst_port_id;
+ uint16_t dst_vlan_id = data->dst_vlan_id;
+ uint64_t target_addr = data->target_addr;
+ /* Per-source-queue slice of the pre-allocated staging buffer. */
+ struct rte_mbuf **pkt_buf = &data->pkt_buf[qidx * data->max_burst_size];
+
+ if (nb_pkts == 0) {
+ return 0;
+ }
+
+ /* The staging buffer holds at most max_burst_size mbufs per queue. */
+ if (nb_pkts > data->max_burst_size) {
+ VLOG_ERR("Per-flow batch size, %d, exceeds maximum limit\n", nb_pkts);
+ return 0;
+ }
+
+ for (i = 0; i < nb_pkts; i++) {
+ /* Loads 8 bytes at offset 0; is_mac_addr_match() compares only
+ * the low 48 bits, i.e. the destination MAC.  NOTE(review):
+ * assumes frame data is readable as an unaligned uint64_t --
+ * confirm for the target architecture. */
+ uint64_t *dst_mac_addr =
+ rte_pktmbuf_mtod_offset(pkts[i], uint64_t *, 0);
+ if (is_mac_addr_match(target_addr, (*dst_mac_addr))) {
+ pkt_buf[match_count] = pkts[i];
+ pkt_buf[match_count]->ol_flags |= PKT_TX_VLAN_PKT;
+ pkt_buf[match_count]->vlan_tci = dst_vlan_id;
+ /* Extra reference keeps the mbuf alive for normal RX
+ * processing after the mirror transmit consumes one ref. */
+ rte_mbuf_refcnt_update(pkt_buf[match_count], 1);
+ match_count ++;
+ }
+ }
+
+ /* Clamp to the mirror port's last TX queue if it has fewer queues
+ * than the source port. */
+ dst_qidx = (data->n_dst_queue > qidx)?qidx:(data->n_dst_queue -1);
+
+ /* data->lock serializes with other threads transmitting on the
+ * mirror port (see dpdk_eth_dev_tx_burst_ts()). */
+ rte_spinlock_lock(data->lock);
+ pkt_trans = rte_eth_tx_burst(dst_port_id, dst_qidx, pkt_buf, match_count);
+ rte_spinlock_unlock(data->lock);
+
+ /* Clear the mirror VLAN flag so it does not leak into the original
+ * packet's onward processing. */
+ for (i = 0; i < match_count; i++) {
+ pkt_buf[i]->ol_flags &= ~PKT_TX_VLAN_PKT;
+ }
+
+ /* Drop the extra reference on packets the mirror port rejected. */
+ while (unlikely (pkt_trans < match_count)) {
+ rte_pktmbuf_free(pkt_buf[pkt_trans]);
+ pkt_trans++;
+ }
+
+ return nb_pkts;
+}
+
+/* TX-queue callback implementing a flow mirror: packets whose source
+ * MAC matches data->target_addr are VLAN-tagged and cloned to the
+ * mirror output port.  Runs inside rte_eth_tx_burst() on the source
+ * port; always returns 'nb_pkts' so the whole burst is transmitted
+ * normally. */
+static inline uint16_t
+netdev_tx_flow_mirror_offload_cb(uint16_t port_id OVS_UNUSED,
+ uint16_t qidx, struct rte_mbuf **pkts, uint16_t nb_pkts,
+ void *user_params)
+{
+ struct mirror_param *data = user_params;
+ uint16_t i, dst_qidx, match_count = 0;
+ uint16_t pkt_trans;
+ uint16_t dst_port_id = data->dst_port_id;
+ uint16_t dst_vlan_id = data->dst_vlan_id;
+ uint64_t target_addr = data->target_addr;
+ /* Per-source-queue slice of the pre-allocated staging buffer. */
+ struct rte_mbuf **pkt_buf = &data->pkt_buf[qidx * data->max_burst_size];
+
+ if (nb_pkts == 0) {
+ return 0;
+ }
+
+ /* The staging buffer holds at most max_burst_size mbufs per queue. */
+ if (nb_pkts > data->max_burst_size) {
+ VLOG_ERR("Per-flow batch size, %d, exceeds maximum limit\n", nb_pkts);
+ return 0;
+ }
+
+ for (i = 0; i < nb_pkts; i++) {
+ /* Loads 8 bytes at offset 6; is_mac_addr_match() compares only
+ * the low 48 bits, i.e. the source MAC.  NOTE(review): assumes
+ * frame data is readable as an unaligned uint64_t. */
+ uint64_t *src_mac_addr =
+ rte_pktmbuf_mtod_offset(pkts[i], uint64_t *, 6);
+ if (is_mac_addr_match(target_addr, (*src_mac_addr))) {
+ pkt_buf[match_count] = pkts[i];
+ pkt_buf[match_count]->ol_flags |= PKT_TX_VLAN_PKT;
+ pkt_buf[match_count]->vlan_tci = dst_vlan_id;
+ /* Extra reference keeps the mbuf alive for the original
+ * transmit after the mirror transmit consumes one ref. */
+ rte_mbuf_refcnt_update(pkt_buf[match_count], 1);
+ match_count++;
+ }
+ }
+
+ /* Clamp to the mirror port's last TX queue if it has fewer queues
+ * than the source port. */
+ dst_qidx = (data->n_dst_queue > qidx)?qidx:(data->n_dst_queue -1);
+
+ /* data->lock serializes with other threads transmitting on the
+ * mirror port (see dpdk_eth_dev_tx_burst_ts()). */
+ rte_spinlock_lock(data->lock);
+ pkt_trans = rte_eth_tx_burst(dst_port_id, dst_qidx, pkt_buf, match_count);
+ rte_spinlock_unlock(data->lock);
+
+ /* Clear the mirror VLAN flag so it does not affect the original
+ * packet's transmission. */
+ for (i = 0; i < match_count; i++) {
+ pkt_buf[i]->ol_flags &= ~PKT_TX_VLAN_PKT;
+ }
+
+ /* Drop the extra reference on packets the mirror port rejected. */
+ while (unlikely (pkt_trans < match_count)) {
+ rte_pktmbuf_free(pkt_buf[pkt_trans]);
+ pkt_trans++;
+ }
+
+ return nb_pkts;
+}
+
+/* RX-queue callback implementing a port mirror: every received packet
+ * is VLAN-tagged and cloned to the mirror output port.  Runs inside
+ * rte_eth_rx_burst() on the source port; always returns 'nb_pkts' so
+ * the whole burst continues through normal RX processing. */
+static inline uint16_t
+netdev_rx_port_mirror_offload_cb(uint16_t port_id OVS_UNUSED,
+ uint16_t qidx, struct rte_mbuf **pkts, uint16_t nb_pkts,
+ uint16_t max_pkts OVS_UNUSED, void *user_params)
+{
+ struct mirror_param *data = user_params;
+ uint16_t i, dst_qidx;
+ uint16_t pkt_trans;
+ uint16_t dst_port_id = data->dst_port_id;
+ uint16_t dst_vlan_id = data->dst_vlan_id;
+
+ if (nb_pkts == 0) {
+ return 0;
+ }
+
+ for (i = 0; i < nb_pkts; i++) {
+ pkts[i]->ol_flags |= PKT_TX_VLAN_PKT;
+ pkts[i]->vlan_tci = dst_vlan_id;
+ /* Extra reference keeps the mbuf alive for normal RX processing
+ * after the mirror transmit consumes one ref. */
+ rte_mbuf_refcnt_update(pkts[i], 1);
+ }
+
+ /* Clamp to the mirror port's last TX queue if it has fewer queues
+ * than the source port. */
+ dst_qidx = (data->n_dst_queue > qidx)?qidx:(data->n_dst_queue -1);
+
+ /* data->lock serializes with other threads transmitting on the
+ * mirror port (see dpdk_eth_dev_tx_burst_ts()). */
+ rte_spinlock_lock(data->lock);
+ pkt_trans = rte_eth_tx_burst(dst_port_id, dst_qidx, pkts, nb_pkts);
+ rte_spinlock_unlock(data->lock);
+
+ /* Clear the mirror VLAN flag so it does not leak into the original
+ * packet's onward processing. */
+ for (i = 0; i < nb_pkts; i++) {
+ pkts[i]->ol_flags &= ~PKT_TX_VLAN_PKT;
+ }
+
+ /* Drop the extra reference on packets the mirror port rejected. */
+ while (unlikely (pkt_trans < nb_pkts)) {
+ rte_pktmbuf_free(pkts[pkt_trans]);
+ pkt_trans++;
+ }
+
+ return nb_pkts;
+}
+
+/* TX-queue callback implementing a port mirror: every transmitted
+ * packet is VLAN-tagged and cloned to the mirror output port.  Runs
+ * inside rte_eth_tx_burst() on the source port; always returns
+ * 'nb_pkts' so the whole burst is transmitted normally. */
+static inline uint16_t
+netdev_tx_port_mirror_offload_cb(uint16_t port_id OVS_UNUSED,
+ uint16_t qidx, struct rte_mbuf **pkts, uint16_t nb_pkts,
+ void *user_params)
+{
+ struct mirror_param *data = user_params;
+ uint16_t i, dst_qidx;
+ uint16_t pkt_trans;
+ uint16_t dst_port_id = data->dst_port_id;
+ uint16_t dst_vlan_id = data->dst_vlan_id;
+
+ if (nb_pkts == 0) {
+ return 0;
+ }
+
+ for (i = 0; i < nb_pkts; i++) {
+ pkts[i]->ol_flags |= PKT_TX_VLAN_PKT;
+ pkts[i]->vlan_tci = dst_vlan_id;
+ /* Extra reference keeps the mbuf alive for the original transmit
+ * after the mirror transmit consumes one ref. */
+ rte_mbuf_refcnt_update(pkts[i], 1);
+ }
+
+ /* Clamp to the mirror port's last TX queue if it has fewer queues
+ * than the source port. */
+ dst_qidx = (data->n_dst_queue > qidx)?qidx:(data->n_dst_queue -1);
+
+ /* data->lock serializes with other threads transmitting on the
+ * mirror port (see dpdk_eth_dev_tx_burst_ts()). */
+ rte_spinlock_lock(data->lock);
+ pkt_trans = rte_eth_tx_burst(dst_port_id, dst_qidx, pkts, nb_pkts);
+ rte_spinlock_unlock(data->lock);
+
+ /* Clear the mirror VLAN flag so it does not affect the original
+ * packet's transmission. */
+ for (i = 0; i < nb_pkts; i++) {
+ pkts[i]->ol_flags &= ~PKT_TX_VLAN_PKT;
+ }
+
+ /* Drop the extra reference on packets the mirror port rejected. */
+ while (unlikely (pkt_trans < nb_pkts)) {
+ rte_pktmbuf_free(pkts[pkt_trans]);
+ pkt_trans++;
+ }
+
+ return nb_pkts;
+}
+
+/* Installs mirror callbacks on every source queue of 'src_port'.
+ * '*param' is copied into the mirror db; 'tx_cb' nonzero installs TX
+ * (ingress-mirror) callbacks, zero installs RX (egress-mirror)
+ * callbacks.  A flow mirror (param->target_addr nonzero) also gets a
+ * per-queue staging buffer for matched packets.
+ *
+ * Returns 0 on success, -1 on failure. */
+int
+netdev_register_mirror(uint16_t src_port, struct mirror_param *param,
+                       int tx_cb)
+{
+    struct mirror_offload_port *port_info;
+    struct mirror_param *data;
+    int i;
+
+    port_info = netdev_mirror_data_add(src_port, tx_cb, param);
+    if (!port_info) {
+        return -1;
+    }
+
+    data = tx_cb ? &port_info->tx : &port_info->rx;
+
+    /* xmalloc() aborts on allocation failure, so no NULL checks. */
+    data->pkt_buf = NULL;
+    if (data->target_addr) {
+        data->pkt_buf = xmalloc(sizeof(struct rte_mbuf *) *
+                                data->max_burst_size * data->n_src_queue);
+    }
+
+    /* Typo fix: the DPDK type is 'struct rte_eth_rxtx_callback'. */
+    data->mirror_cb = xmalloc(sizeof(struct rte_eth_rxtx_callback *) *
+                              data->n_src_queue);
+
+    if (!tx_cb) {
+        rte_rx_callback_fn fn = data->target_addr
+                                ? netdev_rx_flow_mirror_offload_cb
+                                : netdev_rx_port_mirror_offload_cb;
+
+        for (i = 0; i < data->n_src_queue; i++) {
+            data->mirror_cb[i] = rte_eth_add_rx_callback(src_port, i, fn,
+                                                         data);
+        }
+    } else {
+        rte_tx_callback_fn fn = data->target_addr
+                                ? netdev_tx_flow_mirror_offload_cb
+                                : netdev_tx_port_mirror_offload_cb;
+
+        for (i = 0; i < data->n_src_queue; i++) {
+            data->mirror_cb[i] = rte_eth_add_tx_callback(src_port, i, fn,
+                                                         data);
+        }
+    }
+
+    return 0;
+}
+
+/* Removes the RX ('tx_cb' zero) or TX ('tx_cb' nonzero) mirror
+ * callbacks previously installed on 'src_port' and releases the
+ * callback array, the staging buffer, and the db entry.
+ *
+ * Returns 0 on success, -1 if the port has no mirror recorded. */
+int
+netdev_unregister_mirror(uint16_t src_port, int tx_cb)
+{
+    struct mirror_offload_port *port_info;
+    struct mirror_param *data;
+    int i;
+
+    port_info = netdev_mirror_data_find(src_port);
+    if (!port_info) {
+        VLOG_ERR("Source port %d is not on outstanding port mirror db\n",
+                 src_port);
+        return -1;
+    }
+    data = tx_cb ? &port_info->tx : &port_info->rx;
+
+    for (i = 0; i < data->n_src_queue; i++) {
+        if (data->mirror_cb[i]) {
+            if (tx_cb) {
+                rte_eth_remove_tx_callback(src_port, i, data->mirror_cb[i]);
+            } else {
+                rte_eth_remove_rx_callback(src_port, i, data->mirror_cb[i]);
+            }
+        }
+        data->mirror_cb[i] = NULL;
+    }
+    free(data->mirror_cb);
+    data->mirror_cb = NULL;    /* Defend against accidental reuse. */
+
+    /* free(NULL) is a no-op, so no guard is needed. */
+    free(data->pkt_buf);
+    data->pkt_buf = NULL;
+
+    netdev_mirror_data_remove(src_port, tx_cb);
+    return 0;
+}
diff --git a/lib/netdev-offload.h b/lib/netdev-offload.h
index 4c0ed2a..75691a6 100644
--- a/lib/netdev-offload.h
+++ b/lib/netdev-offload.h
@@ -23,6 +23,10 @@
#include "packets.h"
#include "flow.h"
+#ifdef DPDK_NETDEV
+#include
Output VLAN for selected source port packets, if nonempty.
Please note: this is different from the normal mirror output VLAN. This
VLAN is used to add an additional VLAN tag to the mirrored traffic,
whether or not the traffic already carries a VLAN. The receiving end can
choose to filter out this additional VLAN. This option is provided so
that the mirrored traffic can keep its original VLAN information, and so
that the added tag can be used to filter out unwanted traffic.
Output VLAN for selected destination port packets, if nonempty.
Please note: this is also different from the normal mirror output VLAN;
the same additional-VLAN-tag semantics described above apply here.
Maximum per-packet number of bytes to mirror.