[ovs-dev,43/62] netdev-offload-dpdk: add conntrack offload init version

Message ID 20201228092520.11807-44-taoyunxiang@cmss.chinamobile.com
State Not Applicable
Series DPDK Offload API to test

Commit Message

taoyunxiang Dec. 28, 2020, 9:25 a.m. UTC
From: Rongyin <rongyin@cmss.chinamobile.com>

NOTE: dpif-netdev: transmit mod_flag and nat_info to the offload item
      netdev-offload: add nat_info to the offload item
      netdev-offload-dpdk:
          translate a jump action when recirc and set actions are combined
          translate the ct action when pkt->md.ct_state == est
          translate an IPv4 NAT action into IP/port set actions
          translate a recirc_id match into rte_flow_attr.group
          do not translate ct_clear and other ct actions; just continue

Code Source: Self-written code

Description:

     Add the initial version of conntrack offload. The details are as above;
     a short sketch of the recurring offload gate follows below.

Jira: #[Optional]
Market project number (name): [Optional]
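
As a review aid: the offload gate that recurs below (in parse_netdev_flow_put()
and parse_flow_match()) reduces to one predicate on the ct_state match. A
minimal standalone sketch, assuming only OVS's struct match and CS_ESTABLISHED;
the helper name is ours, not part of the patch:

    #include "match.h"      /* struct match */
    #include "packets.h"    /* CS_ESTABLISHED */

    /* True when the flow matches on ct_state and requires +est. */
    static inline bool
    ct_established_matched(const struct match *match)
    {
        return match->wc.masks.ct_state
               && (match->wc.masks.ct_state & match->flow.ct_state
                   & CS_ESTABLISHED);
    }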
---
 lib/dpif-netdev.c         | 193 ++++++++++++++++++++++++++++++++++++++++++++--
 lib/netdev-offload-dpdk.c | 139 ++++++++++++++++++++++-----------
 lib/netdev-offload.h      |   7 +-
 lib/odp-execute.c         |   2 +
 lib/odp-execute.h         |   5 ++
 lib/packets.h             |   3 +
 6 files changed, 298 insertions(+), 51 deletions(-)
diff mbox series

Patch

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index a2cf82b..4beea5b 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -417,6 +417,9 @@  struct dp_flow_offload_item {
     struct match match;
     struct nlattr *actions;
     size_t actions_len;
+    uint8_t mod_flag; /* from packet->md.mod_flag */
+    bool   should_jump;
+    struct nat_action_info_t nat_action;
 
     struct ovs_list node;
 };
@@ -2372,6 +2375,7 @@  dp_netdev_alloc_flow_offload(struct dp_netdev_pmd_thread *pmd,
     offload->pmd = pmd;
     offload->flow = flow;
     offload->op = op;
+    offload->should_jump = false;
 
     dp_netdev_flow_ref(flow);
     dp_netdev_pmd_try_ref(pmd);
@@ -2468,6 +2472,9 @@  dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
     }
     info.flow_mark = mark;
     info.dpif_type_str = dpif_type_str;
+    info.mod_flag = offload->mod_flag;
+    info.nat_action = offload->nat_action;
+    info.ct_enable = false;
 
     port = netdev_ports_get(in_port, dpif_type_str);
     if (!port) {
@@ -2577,17 +2584,180 @@  queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd,
     dp_netdev_append_flow_offload(offload);
 }
 
+/* The return value indicates whether this flow should be offloaded. */
+static bool
+parse_netdev_flow_put(struct match *match, const struct nlattr *actions,
+                      size_t actions_len,  struct nat_action_info_t *nat_action,
+                      uint8_t mod_flag)
+{
+    bool action_has_recirc = false;
+    bool action_has_set = false;
+    bool ret = false;
+    struct nlattr *nla;
+    size_t left;
+
+    /* Filter out non-IP packets. */
+    if ((match->flow.dl_type != htons(ETH_TYPE_IP)) &&
+        (match->flow.dl_type != htons(ETH_TYPE_IPV6))) {
+        goto out;
+    }
+    /* Parse the actions to decide the flags. */
+    NL_ATTR_FOR_EACH_UNSAFE (nla, left, actions, actions_len) {
+        if (nl_attr_type(nla) == OVS_ACTION_ATTR_RECIRC) {
+            action_has_recirc = true;
+        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET ||
+                   nl_attr_type(nla) == OVS_ACTION_ATTR_SET_MASKED) {
+            action_has_set = true;
+        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_CT) {
+            const struct nlattr *b;
+            unsigned int ct_left;
+
+            if (nat_action == NULL) {
+                continue;
+            }
+
+            NL_ATTR_FOR_EACH_UNSAFE (b, ct_left, nl_attr_get(nla),
+                                     nl_attr_get_size(nla)) {
+                if (nl_attr_type(b) == OVS_CT_ATTR_NAT) {
+                    const struct nlattr *b_nest;
+                    unsigned int left_nest;
+                    bool ip_min_specified = false;
+                    bool proto_num_min_specified = false;
+                    bool ip_max_specified = false;
+                    bool proto_num_max_specified = false;
+                    memset(nat_action, 0, sizeof *nat_action);
+
+                    NL_NESTED_FOR_EACH_UNSAFE (b_nest, left_nest, b) {
+                        enum ovs_nat_attr sub_type_nest = nl_attr_type(b_nest);
+
+                        switch (sub_type_nest) {
+                        case OVS_NAT_ATTR_SRC:
+                        case OVS_NAT_ATTR_DST:
+                            nat_action->nat_action |=
+                                ((sub_type_nest == OVS_NAT_ATTR_SRC)
+                                    ? NAT_ACTION_SRC : NAT_ACTION_DST);
+                            break;
+                        case OVS_NAT_ATTR_IP_MIN:
+                            memcpy(&(nat_action->min_addr),
+                                   nl_attr_get(b_nest),
+                                   nl_attr_get_size(b_nest));
+                            ip_min_specified = true;
+                            break;
+                        case OVS_NAT_ATTR_IP_MAX:
+                            memcpy(&(nat_action->max_addr),
+                                   nl_attr_get(b_nest),
+                                   nl_attr_get_size(b_nest));
+                            ip_max_specified = true;
+                            break;
+                        case OVS_NAT_ATTR_PROTO_MIN:
+                            nat_action->min_port =
+                                nl_attr_get_u16(b_nest);
+                            proto_num_min_specified = true;
+                            break;
+                        case OVS_NAT_ATTR_PROTO_MAX:
+                            nat_action->max_port =
+                                nl_attr_get_u16(b_nest);
+                            proto_num_max_specified = true;
+                            break;
+                        case OVS_NAT_ATTR_PERSISTENT:
+                        case OVS_NAT_ATTR_PROTO_HASH:
+                        case OVS_NAT_ATTR_PROTO_RANDOM:
+                            break;
+                        case OVS_NAT_ATTR_UNSPEC:
+                        case __OVS_NAT_ATTR_MAX:
+                            OVS_NOT_REACHED();
+                        }
+                    }
+
+                    if (ip_min_specified && !ip_max_specified) {
+                        nat_action->max_addr = nat_action->min_addr;
+                    }
+                    if (proto_num_min_specified && !proto_num_max_specified) {
+                        nat_action->max_port = nat_action->min_port;
+                    }
+                    if (proto_num_min_specified || proto_num_max_specified) {
+                        if (nat_action->nat_action & NAT_ACTION_SRC) {
+                            nat_action->nat_action |= NAT_ACTION_SRC_PORT;
+                        } else if (nat_action->nat_action & NAT_ACTION_DST) {
+                            nat_action->nat_action |= NAT_ACTION_DST_PORT;
+                        }
+                    }
+                    break;
+                } else { /* nl_attr_type(b) != OVS_CT_ATTR_NAT */
+                    continue;
+                }
+            } /* inner NL_ATTR_FOR_EACH_UNSAFE over the ct action */
+        } else { /* nl_attr_type(nla) != OVS_ACTION_ATTR_CT */
+            continue;
+        }
+    } /* NL_ATTR_FOR_EACH_UNSAFE (nla, left, actions, actions_len) */
+
+    /* mod_flag has been set, which means this is a loopbacked packet with a
+     * new match, so we should try to offload a jump action and a group id.
+     * There are three scenarios:
+     * 1. ct_state is matched and ct_state == est  (should offload)
+     * 2. ct_state is matched and ct_state != est  (do not offload)
+     * 3. ct_state is not matched                  (should try to offload)
+     * For 1, the rte_flow ct action will be set.
+     * For 3, the rte_flow ct action won't be set.
+     */
+    if (mod_flag) {
+        if (match->wc.masks.ct_state &&
+            !(match->wc.masks.ct_state & match->flow.ct_state & CS_ESTABLISHED)) {
+            ret = false;
+        } else {
+            ret = true;
+        }
+        goto out;
+    }
+
+    /* No mod_flag means either: 1. there is no set action at all, or
+     *                           2. there is a set action but this is the
+     *                              first translated packet.
+     * For 1, only established packets should try to offload.
+     * For 2, established packets should still be offloaded.
+     * So in this condition we cannot offload non-est packets.
+     */
+    if (match->wc.masks.ct_state &&
+        !(match->wc.masks.ct_state & match->flow.ct_state & CS_ESTABLISHED)) {
+        goto out;
+    }
+
+    /* No mod_flag and no recirc in the flow: we should try to offload. */
+    if (!action_has_recirc) {
+        ret = true;
+        goto out;
+    }
+    /* No mod_flag, with recirc and set and +est: this is the first loopback
+     * packet, so we should try to offload a jump action with "no" group id.
+     */
+    if (action_has_set) {
+        if (match->wc.masks.ct_state &&
+            (match->wc.masks.ct_state & match->flow.ct_state & CS_ESTABLISHED)) {
+            ret = true;
+            goto out;
+        }
+    }
+
+out:
+    return ret;
+}
+
 static void
 queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd,
                       struct dp_netdev_flow *flow, struct match *match,
-                      const struct nlattr *actions, size_t actions_len)
+                      const struct nlattr *actions, size_t actions_len,
+                      uint8_t mod_flag)
 {
     struct dp_flow_offload_item *offload;
     int op;
+    struct nat_action_info_t nat_action;
 
     if (!netdev_is_flow_api_enabled()) {
         return;
     }
+    if (!parse_netdev_flow_put(match, actions, actions_len, &nat_action,
+                               mod_flag)) {
+        return;
+    }
 
     if (ovsthread_once_start(&offload_thread_once)) {
         xpthread_cond_init(&dp_flow_offload.cond, NULL);
@@ -2606,6 +2776,8 @@  queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd,
     offload->actions = xmalloc(actions_len);
     memcpy(offload->actions, actions, actions_len);
     offload->actions_len = actions_len;
+    offload->mod_flag = mod_flag;
+    offload->nat_action = nat_action;
 
     dp_netdev_append_flow_offload(offload);
 }
@@ -3366,7 +3538,8 @@  dp_netdev_get_mega_ufid(const struct match *match, ovs_u128 *mega_ufid)
 static struct dp_netdev_flow *
 dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
                    struct match *match, const ovs_u128 *ufid,
-                   const struct nlattr *actions, size_t actions_len)
+                   const struct nlattr *actions, size_t actions_len,
+                   uint8_t mod_flag)
     OVS_REQUIRES(pmd->flow_mutex)
 {
     struct ds extra_info = DS_EMPTY_INITIALIZER;
@@ -3431,7 +3604,7 @@  dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
 
     /*Don't try to offload flows that has been offloaded failed */
     if (megaflow_to_offload_st_find(&flow->ufid) == false) {
-        queue_netdev_flow_put(pmd, flow, match, actions, actions_len);
+        queue_netdev_flow_put(pmd, flow, match, actions, actions_len,
+                              mod_flag);
     }
 
     if (OVS_UNLIKELY(!VLOG_DROP_DBG((&upcall_rl)))) {
@@ -3502,7 +3675,7 @@  flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
         if (put->flags & DPIF_FP_CREATE) {
             if (cmap_count(&pmd->flow_table) < MAX_FLOWS) {
                 dp_netdev_flow_add(pmd, match, ufid, put->actions,
-                                   put->actions_len);
+                                   put->actions_len, (uint8_t) 0);
                 error = 0;
             } else {
                 error = EFBIG;
@@ -3524,7 +3697,7 @@  flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
             /*Don't try to offload flows that has been offloaded failed */
             if (megaflow_to_offload_st_find(&netdev_flow->ufid) == false) {
                 queue_netdev_flow_put(pmd, netdev_flow, match,
-                                      put->actions, put->actions_len);
+                                      put->actions, put->actions_len,
+                                      (uint8_t) 0);
             }
 
             if (stats) {
@@ -6793,6 +6966,7 @@  handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
     ovs_u128 ufid;
     int error;
     uint64_t cycles = cycles_counter_update(&pmd->perf_stats);
+    uint8_t mod_flag;
 
     match.tun_md.valid = false;
     miniflow_expand(&key->mf, &match.flow);
@@ -6824,9 +6998,15 @@  handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
     /* We can't allow the packet batching in the next loop to execute
      * the actions.  Otherwise, if there are any slow path actions,
      * we'll send the packet up twice. */
+
+    /* Get mod_flag before executing the actions, so that group_id = 0 can
+     * be used for the first loopbacked packet with a set action.
+     */
+    mod_flag = packet->md.mod_flag;
     dp_packet_batch_init_packet(&b, packet);
     dp_netdev_execute_actions(pmd, &b, true, &match.flow,
                               actions->data, actions->size);
+    /* Get the nat action after executing the actions, so that the nat info
+     * can be set in time. */
 
     add_actions = put_actions->size ? put_actions : actions;
     if (OVS_LIKELY(error != ENOSPC)) {
@@ -6842,7 +7022,8 @@  handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
         if (OVS_LIKELY(!netdev_flow)) {
             netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid,
                                              add_actions->data,
-                                             add_actions->size);
+                                             add_actions->size,
+                                             mod_flag);
         }
         ovs_mutex_unlock(&pmd->flow_mutex);
         uint32_t hash = dp_netdev_flow_hash(&netdev_flow->ufid);
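
For review, the decision tree that parse_netdev_flow_put() walks above can be
restated as a single pure function. This is a hypothetical condensed form, not
code from the patch; the flag names summarize the booleans computed there:

    #include <stdbool.h>

    /* ct_masked:   the flow matches on ct_state at all.
     * est_matched: the ct_state match requires +est. */
    static bool
    should_offload(bool mod_flag, bool ct_masked, bool est_matched,
                   bool has_recirc, bool has_set)
    {
        if (mod_flag) {
            /* Loopbacked packet: offload unless matched as non-established. */
            return !(ct_masked && !est_matched);
        }
        if (ct_masked && !est_matched) {
            return false;               /* Never offload non-est packets. */
        }
        if (!has_recirc) {
            return true;                /* Plain flow, no recirculation. */
        }
        /* First loopback packet: offload the jump with no group id. */
        return has_set && est_matched;
    }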
diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index ad6302b..d4a80bb 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -573,6 +573,11 @@  dump_flow_action(struct ds *s, const struct rte_flow_action *actions)
         } else {
             ds_put_format(s, "  Set-%s-tcp/udp-port = null\n", dirstr);
         }
+    } else if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP) {
+        const struct rte_flow_action_jump *jump = actions->conf;
+        ds_put_format(s, "  Set-Jump to : %"PRIu32"\n", jump->group);
+    } else if (actions->type == RTE_FLOW_ACTION_TYPE_CT) {
+        ds_put_cstr(s, "  Set-CT-enable bit\n");
     } else {
         ds_put_format(s, "unknown rte flow action (%d)\n", actions->type);
     }
@@ -807,26 +812,21 @@  parse_flow_match(struct flow_patterns *patterns,
 {
     uint8_t *next_proto_mask = NULL;
     uint8_t proto = 0;
-    struct flow *consumed_masks;
 
 
-    consumed_masks = &match->wc.masks;
-    /* Not attemp to offload in_port/recirc_id ?  */
-    memset(&consumed_masks->in_port, 0, sizeof consumed_masks->in_port);
-    consumed_masks->recirc_id = 0;
-    consumed_masks->packet_type = 0;
-
     if (!strcmp(netdev_get_type(netdev), "vxlan") &&
         !parse_vxlan_match(patterns, match)) {
-        memset(&consumed_masks->tunnel, 0, sizeof consumed_masks->tunnel);
     } else if (!strcmp(netdev_get_type(netdev), "geneve") &&
         !parse_geneve_match(patterns, match)) {
-        memset(&consumed_masks->tunnel, 0, sizeof consumed_masks->tunnel);
     } else if (netdev_vport_is_vport_class(netdev->netdev_class)) {
         VLOG_DBG("in port (%s) not supported", netdev->name);
         return -1;
     }
 
+    /* Group id */
+    if (info->mod_flag) {
+        info->group_id = match->flow.recirc_id;
+    }
     /* Eth */
     if (match->flow.packet_type == htonl(PT_ETH)) {
         struct rte_flow_item_eth *spec, *mask;
@@ -843,8 +843,6 @@  parse_flow_match(struct flow_patterns *patterns,
         mask->type = match->wc.masks.dl_type;
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_ETH, spec, mask);
-        memset(&consumed_masks->dl_dst, 0, sizeof consumed_masks->dl_dst);
-        memset(&consumed_masks->dl_src, 0, sizeof consumed_masks->dl_src);
 
     } else {
         /*
@@ -856,7 +854,6 @@  parse_flow_match(struct flow_patterns *patterns,
          */
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_ETH, NULL, NULL);
     }
-    consumed_masks->dl_type = 0;
 
     /* VLAN */
     if (match->wc.masks.vlans[0].tci && match->flow.vlans[0].tci) {
@@ -873,7 +870,6 @@  parse_flow_match(struct flow_patterns *patterns,
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_VLAN, spec, mask);
     }
-    memset(&consumed_masks->vlans[0], 0, sizeof consumed_masks->vlans[0]);
 
     /* do not attempt to offload frags. */
     if (match->flow.nw_frag != OVS_FRAG_TYPE_NONE && match->wc.masks.nw_frag) {
@@ -881,7 +877,6 @@  parse_flow_match(struct flow_patterns *patterns,
                     match->wc.masks.nw_frag);
         return -1;
     }
-    consumed_masks->nw_frag = 0;
 
     /* IP v4 */
     if (match->flow.dl_type == htons(ETH_TYPE_IP)) {
@@ -913,11 +908,6 @@  parse_flow_match(struct flow_patterns *patterns,
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV4, spec, mask);
 
-        consumed_masks->nw_tos = 0;
-        consumed_masks->nw_ttl = 0;
-        consumed_masks->nw_proto = 0;
-        consumed_masks->nw_src = 0;
-        consumed_masks->nw_dst = 0;
 
         /* Save proto for L4 protocol setup.Not consider mask
         proto = spec->hdr.next_proto_id &
@@ -959,11 +949,6 @@  parse_flow_match(struct flow_patterns *patterns,
         memcpy(mask->hdr.dst_addr, &match->wc.masks.ipv6_dst,
                sizeof mask->hdr.dst_addr);
 
-        consumed_masks->nw_proto = 0;
-        consumed_masks->nw_ttl = 0;
-        consumed_masks->nw_tos = 0;
-        memset(&consumed_masks->ipv6_src, 0, sizeof consumed_masks->ipv6_src);
-        memset(&consumed_masks->ipv6_dst, 0, sizeof consumed_masks->ipv6_dst);
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV6, spec, mask);
 
@@ -1001,9 +986,6 @@  parse_flow_match(struct flow_patterns *patterns,
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_TCP, spec, mask);
 
-        consumed_masks->tp_src = 0;
-        consumed_masks->tp_dst = 0;
-        consumed_masks->tcp_flags = 0;
 
         /* proto == TCP and ITEM_TYPE_TCP, thus no need for proto match. */
         if (next_proto_mask) {
@@ -1023,8 +1005,6 @@  parse_flow_match(struct flow_patterns *patterns,
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_UDP, spec, mask);
 
-        consumed_masks->tp_src = 0;
-        consumed_masks->tp_dst = 0;
 
         /* proto == UDP and ITEM_TYPE_UDP, thus no need for proto match. */
         if (next_proto_mask) {
@@ -1044,8 +1024,6 @@  parse_flow_match(struct flow_patterns *patterns,
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_SCTP, spec, mask);
 
-        consumed_masks->tp_src = 0;
-        consumed_masks->tp_dst = 0;
 
         /* proto == SCTP and ITEM_TYPE_SCTP, thus no need for proto match. */
         if (next_proto_mask) {
@@ -1065,8 +1043,6 @@  parse_flow_match(struct flow_patterns *patterns,
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_ICMP, spec, mask);
 
-        consumed_masks->tp_src = 0;
-        consumed_masks->tp_dst = 0;
 
         /* proto == ICMP and ITEM_TYPE_ICMP, thus no need for proto match. */
         if (next_proto_mask) {
@@ -1101,15 +1077,16 @@  parse_flow_match(struct flow_patterns *patterns,
             *next_proto_mask = 0;
         }
     }
+    VLOG_DBG("TIMO DBG. ct_state mask =0x%04x,ct_state =0x%04x",
+                match->wc.masks.ct_state, match->flow.ct_state);
+    /* ct state: request the rte_flow ct action only for +est flows. */
+    if (match->wc.masks.ct_state &&
+        (match->wc.masks.ct_state & match->flow.ct_state & CS_ESTABLISHED)) {
+        info->ct_enable = true;
+    }
 
     add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_END, NULL, NULL);
 
-    if (!is_all_zeros(consumed_masks, sizeof *consumed_masks)) {
-        VLOG_DBG("Cannot match all matches. dl_type=0x%04x",
-                    ntohs(match->flow.dl_type));
-        return -1;
-    }
-
     return 0;
 }
 
@@ -1340,6 +1317,61 @@  parse_set_actions(struct flow_actions *actions,
     return 0;
 }
 
+static int
+add_ipv4_nat_action(struct flow_actions *actions,
+                    struct nat_action_info_t *nat_action)
+{
+    if (nat_action->nat_action & NAT_ACTION_SRC) {
+        __be32 ipv4_src;
+
+        /* Only a fixed sip/dip is supported; ranges are not. */
+        if (ntohl(nat_action->min_addr.ipv4) ==
+            ntohl(nat_action->max_addr.ipv4)) {
+            ipv4_src = nat_action->min_addr.ipv4;
+            add_set_flow_action__(actions, &ipv4_src, NULL, sizeof(ipv4_src),
+                                  RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC);
+        } else {
+            return -1;
+        }
+    }
+    if (nat_action->nat_action & NAT_ACTION_DST) {
+        __be32 ipv4_dst;
+        /* Only a fixed sip/dip is supported; ranges are not. */
+        if (ntohl(nat_action->min_addr.ipv4) ==
+            ntohl(nat_action->max_addr.ipv4)) {
+            ipv4_dst = nat_action->min_addr.ipv4;
+            add_set_flow_action__(actions, &ipv4_dst, NULL, sizeof(ipv4_dst),
+                                  RTE_FLOW_ACTION_TYPE_SET_IPV4_DST);
+        } else {
+            return -1;
+        }
+    }
+    if (nat_action->nat_action & NAT_ACTION_SRC_PORT) {
+        __be16 tcp_src;
+        /* Only a fixed sport/dport is supported; ranges are not. */
+        if (nat_action->min_port == nat_action->max_port) {
+            tcp_src = htons(nat_action->min_port);
+            add_set_flow_action__(actions, &tcp_src, NULL, sizeof(tcp_src),
+                                  RTE_FLOW_ACTION_TYPE_SET_TP_SRC);
+        } else {
+            return -1;
+        }
+    }
+    if (nat_action->nat_action & NAT_ACTION_DST_PORT) {
+        __be16 tcp_dst;
+        /* Only a fixed sport/dport is supported; ranges are not. */
+        if (nat_action->min_port == nat_action->max_port) {
+            tcp_dst = htons(nat_action->min_port);
+            add_set_flow_action__(actions, &tcp_dst, NULL, sizeof(tcp_dst),
+                                  RTE_FLOW_ACTION_TYPE_SET_TP_DST);
+        } else {
+            return -1;
+        }
+    }
+    return 0;
+}
+
 /* Maximum number of items in struct rte_flow_action_vxlan_encap.
  * ETH / IPv4(6) / UDP / VXLAN / END
  */
@@ -1662,6 +1694,16 @@  parse_flow_actions(struct netdev *netdev,
 
         } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_POP_VLAN) {
             add_flow_action(actions, RTE_FLOW_ACTION_TYPE_OF_POP_VLAN, NULL);
+
+        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_RECIRC) {
+            struct rte_flow_action_jump *jump = xzalloc(sizeof *jump);
+            const uint32_t group = nl_attr_get_u32(nla);
+            jump->group = group;
+            add_flow_action(actions, RTE_FLOW_ACTION_TYPE_JUMP, jump);
+
+        /* The ct and ct_clear actions need no translation. */
+        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_CT_CLEAR ||
+                   nl_attr_type(nla) == OVS_ACTION_ATTR_CT) {
         } else {
             VLOG_DBG_RL(&rl, "Unsupported action type %d", nl_attr_type(nla));
             return -1;
@@ -1673,6 +1715,16 @@  parse_flow_actions(struct netdev *netdev,
         return -1;
     }
 
+    /* Add the conntrack action. */
+    if (info->ct_enable) {
+        add_flow_action(actions, RTE_FLOW_ACTION_TYPE_CT, NULL);
+    }
+    /* Translate the nat action into rte_flow set actions. */
+    if (info->nat_action.nat_action) {
+        if (add_ipv4_nat_action(actions, &info->nat_action)) {
+            return -1;
+        }
+    }
     add_flow_action(actions, RTE_FLOW_ACTION_TYPE_END, NULL);
     return 0;
 }
@@ -1684,7 +1736,8 @@  netdev_offload_dpdk_actions(struct netdev *netdev,
                             size_t actions_len,
                             struct offload_info *info)
 {
-    const struct rte_flow_attr flow_attr = { .ingress = 1, .transfer = 1 };
+    const struct rte_flow_attr flow_attr = { .ingress = 1, .transfer = 1,
+                                             .group = info->group_id };
     struct flow_actions actions = { .actions = NULL, .cnt = 0, 
                                     .valid_ucast = false };
     struct rte_flow *flow = NULL;
@@ -1806,9 +1859,7 @@  netdev_offload_dpdk_validate_flow(const struct match *match)
     /* only offload ipv4/ipv6 */
     if ((match->flow.dl_type != htons(ETH_TYPE_IP)) &&
         (match->flow.dl_type != htons(ETH_TYPE_IPV6))) {
-        VLOG_DBG("RY DBG: dl_type=0x%04x no need to be offloaded",
-                    ntohs(match->flow.dl_type));
-        return -1;
+        goto err;
     }
 
     /* support conntrack now so mark this checker
@@ -1841,7 +1892,7 @@  netdev_offload_dpdk_validate_flow(const struct match *match)
      */
 
     /* Unsupported L4. */
-    if (masks->igmp_group_ip4 || masks->ct_tp_src || masks->ct_tp_dst) {
+    if (masks->igmp_group_ip4) {
         goto err;
     }
 
diff --git a/lib/netdev-offload.h b/lib/netdev-offload.h
index f15f86f..7c39706 100644
--- a/lib/netdev-offload.h
+++ b/lib/netdev-offload.h
@@ -22,6 +22,7 @@ 
 #include "openvswitch/types.h"
 #include "packets.h"
 #include "flow.h"
+#include "conntrack.h"
 
 #ifdef  __cplusplus
 extern "C" {
@@ -39,7 +40,6 @@  struct smap;
 struct sset;
 struct ovs_action_push_tnl;
 
-
 /* Offload-capable (HW) netdev information */
 struct netdev_hw_info {
     bool oor;		/* Out of Offload Resources ? */
@@ -77,6 +77,11 @@  struct offload_info {
     bool tc_modify_flow_deleted; /* Indicate the tc modify flow put success
                                   * to delete the original flow. */
     const char *dpif_type_str;   /* dpif type string. */
+
+    uint8_t mod_flag; /* from packet->md.mod_flag */
+    bool    ct_enable; /* If true, translate into an rte_flow ct action. */
+    uint32_t group_id;
+    struct nat_action_info_t nat_action;
 };
 
 int netdev_flow_flush(struct netdev *);
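
The dpif-netdev side fills these fields before handing the flow to the offload
provider; a condensed view of the handoff, stitched together from the hunks in
this patch (not a new API):

    /* In dp_netdev_flow_offload_put(), dpif-netdev.c hunk above: */
    info.mod_flag = offload->mod_flag;
    info.nat_action = offload->nat_action;
    info.ct_enable = false;    /* parse_flow_match() sets it on a +est match. */

    /* In parse_flow_match(), netdev-offload-dpdk.c hunk above: */
    if (info->mod_flag) {
        info->group_id = match->flow.recirc_id;
    }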
diff --git a/lib/odp-execute.c b/lib/odp-execute.c
index 42d3335..b3cb71b 100644
--- a/lib/odp-execute.c
+++ b/lib/odp-execute.c
@@ -955,12 +955,14 @@  odp_execute_actions(void *dp, struct dp_packet_batch *batch, bool steal,
         case OVS_ACTION_ATTR_SET:
             DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
                 odp_execute_set_action(packet, nl_attr_get(a));
+                packet->md.mod_flag |= 1 << SET_ACTION_SET;
             }
             break;
 
         case OVS_ACTION_ATTR_SET_MASKED:
             DP_PACKET_BATCH_FOR_EACH(i, packet, batch) {
                 odp_execute_masked_set_action(packet, nl_attr_get(a));
+                packet->md.mod_flag |= 1 << SET_ACTION_SET;
             }
             break;
 
diff --git a/lib/odp-execute.h b/lib/odp-execute.h
index a3578a5..74e308c 100644
--- a/lib/odp-execute.h
+++ b/lib/odp-execute.h
@@ -28,6 +28,11 @@  struct dp_packet;
 struct pkt_metadata;
 struct dp_packet_batch;
 
+enum {
+    RECIRC_ACTION_SET,
+    SET_ACTION_SET,
+};
+
 typedef void (*odp_execute_cb)(void *dp, struct dp_packet_batch *batch,
                                const struct nlattr *action, bool should_steal);
 
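This enum names the bit positions carried in packet->md.mod_flag. Only
SET_ACTION_SET is raised by this patch (in the odp-execute.c hunk above);
RECIRC_ACTION_SET appears reserved for later use. The producer/consumer
pairing, mirroring this patch's hunks:

    /* Producer: odp_execute_actions(), after a set/set_masked action runs. */
    packet->md.mod_flag |= 1 << SET_ACTION_SET;

    /* Consumer: handle_packet_upcall(), captured before the actions execute
     * and passed down to the offload item. */
    uint8_t mod_flag = packet->md.mod_flag;
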
diff --git a/lib/packets.h b/lib/packets.h
index 9d5d241..4eafaca 100644
--- a/lib/packets.h
+++ b/lib/packets.h
@@ -112,6 +112,8 @@  PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline0,
     struct conn *conn;          /* Cached conntrack connection. */
     bool reply;                 /* True if reply direction. */
     bool icmp_related;          /* True if ICMP related. */
+    uint8_t  mod_flag;          /* Modify flag */
+
 );
 
 PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline1,
@@ -168,6 +170,7 @@  pkt_metadata_init(struct pkt_metadata *md, odp_port_t port)
     md->tunnel.ipv6_dst = in6addr_any;
     md->in_port.odp_port = port;
     md->conn = NULL;
+    md->mod_flag = 0;
 }
 
 /* This function prefetches the cachelines touched by pkt_metadata_init()