@@ -2188,7 +2188,8 @@ out:
static struct dp_netdev_flow *
dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
struct match *match, const ovs_u128 *ufid,
-                   const struct nlattr *actions, size_t actions_len)
+                   const struct nlattr *actions, size_t actions_len,
+                   int rxqid, enum dpif_flow_put_flags flags)
OVS_REQUIRES(pmd->flow_mutex)
{
struct dp_netdev_flow *flow;
@@ -2227,10 +2228,23 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len));
netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask);
-
+    flow->cr.flow_tag = HW_NO_FREE_FLOW_TAG; /* No HW tag allocated yet. */
/* Select dpcls for in_port. Relies on in_port to be exact match. */
cls = dp_netdev_pmd_find_dpcls(pmd, in_port);
dpcls_insert(cls, &flow->cr, &mask);
+    bool probe = flags & DPIF_FP_PROBE;
+
+    if (pmd->dp->ppl_md.id == HW_OFFLOAD_PIPELINE && !probe) {
+        /* The classifier rule is sent as a message to the
+         * flow_table_offload thread, which inserts the rule to HW
+         * in the context of hw_pipeline_thread.
+         */
+        VLOG_DBG("hw_pipeline_dpcls_insert: flags=%d", (int) flags);
+        hw_pipeline_dpcls_insert(pmd->dp, flow, &flow->cr, in_port,
+                                 &match->wc.masks, rxqid);
+    } else {
+        VLOG_DBG("skip hw_pipeline_dpcls_insert: tag=%x", flow->cr.flow_tag);
+    }
cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node),
dp_netdev_flow_hash(&flow->ufid));
@@ -2302,7 +2316,7 @@ flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
if (put->flags & DPIF_FP_CREATE) {
if (cmap_count(&pmd->flow_table) < MAX_FLOWS) {
dp_netdev_flow_add(pmd, match, ufid, put->actions,
-                               put->actions_len);
+                               put->actions_len, 0, put->flags);
error = 0;
} else {
error = EFBIG;
@@ -4445,8 +4459,9 @@ static inline void
handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
struct dp_packet *packet,
const struct netdev_flow_key *key,
-                     struct ofpbuf *actions, struct ofpbuf *put_actions,
-                     int *lost_cnt, long long now)
+                     struct ofpbuf *actions,
+                     struct ofpbuf *put_actions,
+                     int *lost_cnt, long long now, int rxqid)
{
struct ofpbuf *add_actions;
struct dp_packet_batch b;
@@ -4502,7 +4517,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
if (OVS_LIKELY(!netdev_flow)) {
netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
add_actions->data,
-                                              add_actions->size);
+                                              add_actions->size, rxqid, 0);
}
ovs_mutex_unlock(&pmd->flow_mutex);
emc_probabilistic_insert(pmd, key, netdev_flow);
@@ -4572,7 +4587,8 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
miss_cnt++;
handle_packet_upcall(pmd, packets[i], &keys[i], &actions,
- &put_actions, &lost_cnt, now);
+ &put_actions, &lost_cnt, now,
+ packets_->rxqid);
}
ofpbuf_uninit(&actions);
@@ -36,6 +36,15 @@
VLOG_DEFINE_THIS_MODULE(hw_pipeline);
+static int hw_pipeline_send_insert_flow(struct dp_netdev *dp,
+                                        odp_port_t in_port,
+                                        struct dp_netdev_flow *flow,
+                                        struct flow *masks,
+                                        int rxqid);
+
+uint32_t hw_pipeline_ft_pool_get(flow_tag_pool *p,
+                                 struct dp_netdev_flow *flow);
+
bool hw_pipeline_ft_pool_free(flow_tag_pool *p,uint32_t flow_tag);
bool hw_pipeline_ft_pool_is_valid(flow_tag_pool *p);
@@ -129,7 +138,47 @@ uint32_t hw_pipeline_ft_pool_uninit(flow_tag_pool *p)
rte_spinlock_unlock(&p->lock);
return 0;
}
+/*
+ * hw_pipeline_ft_pool_get returns a free flow-tag index from the pool.
+ * The index is taken from the head of the free list, and 'flow' is
+ * recorded as the owner of that tag.
+ *
+ * The function deals with 3 cases:
+ * 1. No more indexes in the pool: returns HW_NO_FREE_FLOW_TAG.
+ * 2. There is a free index:
+ *    a. It is the last one: the tail becomes HW_NO_FREE_FLOW_TAG too.
+ *    b. Common case: the head advances to the next free entry.
+ *
+ * The pool spinlock guards the free list and is released on every
+ * return path.  Callers must check for HW_NO_FREE_FLOW_TAG before
+ * using the returned index.
+ */
+uint32_t hw_pipeline_ft_pool_get(flow_tag_pool *p, struct dp_netdev_flow *flow)
+{
+    uint32_t next;
+    uint32_t index;
+
+    rte_spinlock_lock(&p->lock);
+    if (p->head == HW_NO_FREE_FLOW_TAG) {
+        /* Case 1: the pool is exhausted. */
+        rte_spinlock_unlock(&p->lock);
+        VLOG_DBG("No more flow tags");
+        return HW_NO_FREE_FLOW_TAG;
+    }
+    /* Case 2b: hand out the current head and advance it to head->next. */
+    index = p->head;
+    next = p->ft_data[index].next;
+    p->head = next;
+    if (next == HW_NO_FREE_FLOW_TAG) {
+        /* Case 2a: that was the last free index. */
+        p->tail = HW_NO_FREE_FLOW_TAG;
+    }
+    p->ft_data[index].sw_flow = flow;
+    p->ft_data[index].valid = true;
+    rte_spinlock_unlock(&p->lock);
+    return index;
+}
/*
* hw_pipeline_ft_pool_free returns an index to the pool.
* The index is returned to the tail.
@@ -297,6 +346,26 @@ static bool hw_pipeline_msg_queue_enqueue(msg_queue *message_queue,
return true;
}
+
+static int hw_pipeline_send_insert_flow(struct dp_netdev *dp,
+    odp_port_t in_port, struct dp_netdev_flow *flow, struct flow *masks,
+    int rxqid)
+{
+    msg_queue_elem rule;
+    rule.data.sw_flow.in_port = in_port;
+    rule.data.sw_flow.rxqid = rxqid;
+    /* NOTE(review): shallow copies -- embedded pointers stay shared. */
+    memcpy(&rule.data.sw_flow.sw_flow, flow, sizeof(struct dp_netdev_flow));
+    memcpy(&rule.data.sw_flow.sw_flow_mask, masks, sizeof(struct flow));
+    rule.mode = HW_PIPELINE_INSERT_RULE;
+    if (OVS_UNLIKELY(
+        !hw_pipeline_msg_queue_enqueue(&dp->message_queue, &rule))) {
+        VLOG_ERR("queue overflow");
+        return -1;
+    }
+    return 0;
+}
+
void *hw_pipeline_thread(void *pdp)
{
struct dp_netdev *dp= (struct dp_netdev *)pdp;
@@ -368,6 +437,38 @@ static int hw_pipeline_send_remove_flow(struct dp_netdev *dp,uint32_t flow_tag,
}
return 0;
}
+
+/* Insert 'rule' into 'cls'.
+ * Gets a unique tag from the pool, stores it in rule->flow_tag, and
+ * sends a message so the rule is inserted to HW in the context of
+ * hw_pipeline_thread.  On enqueue failure the tag is released again
+ * so it is not leaked and the flow stays in the software path. */
+void
+hw_pipeline_dpcls_insert(struct dp_netdev *dp,
+                         struct dp_netdev_flow *netdev_flow,
+                         struct dpcls_rule *rule,
+                         odp_port_t in_port,
+                         struct flow *wc_masks,
+                         int rxqid)
+{
+    uint32_t flow_tag;
+
+    flow_tag = hw_pipeline_ft_pool_get(&dp->ft_pool, netdev_flow);
+    if (OVS_UNLIKELY(flow_tag == HW_NO_FREE_FLOW_TAG)) {
+        VLOG_DBG("No more free flow tags");
+        return;
+    }
+
+    rule->flow_tag = flow_tag;
+    if (OVS_UNLIKELY(hw_pipeline_send_insert_flow(dp, in_port, netdev_flow,
+                                                  wc_masks, rxqid) == -1)) {
+        VLOG_ERR("The message queue is full");
+        /* Roll back so the tag is not leaked. */
+        hw_pipeline_ft_pool_free(&dp->ft_pool, flow_tag);
+        rule->flow_tag = HW_NO_FREE_FLOW_TAG;
+    }
+}
+
/* Removes 'rule' from 'cls', also distracting the 'rule'.
* Free the unique tag back to pool.
* The function sends a message to the message queue
PMD sends a flow that would be inserted by the HW classifier.

Signed-off-by: Shachar Beiser <shacharbe@mellanox.com>

Conflicts:
	lib/dpif-netdev.c
---
 lib/dpif-netdev.c |  30 ++++++++++++----
 lib/hw-pipeline.c | 101 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 124 insertions(+), 7 deletions(-)