get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing the full set of writable fields.
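
As a rough illustration, the sketch below shows how each method maps onto an HTTP call using Python's requests library. The API token, the example state values and the chosen set of writable fields are assumptions for illustration only; modifying a patch requires an account with maintainer rights on the project, and the exact writable fields should be checked against the instance's schema. The transcript that follows shows a real GET response for one patch.

    import requests

    BASE = "http://patchwork.ozlabs.org/api"
    TOKEN = "0123456789abcdef"                      # hypothetical API token
    HEADERS = {"Authorization": "Token " + TOKEN}   # assumed token auth scheme

    # get: show a patch (reads need no authentication)
    patch = requests.get(BASE + "/patches/810041/").json()
    print(patch["name"], patch["state"])

    # patch: update only the fields supplied (here, the review state)
    resp = requests.patch(BASE + "/patches/810041/",
                          headers=HEADERS,
                          json={"state": "accepted"})
    resp.raise_for_status()

    # put: update the patch, supplying the full set of writable fields
    # (field names here mirror the read-only response below; assumption)
    resp = requests.put(BASE + "/patches/810041/",
                        headers=HEADERS,
                        json={"state": "superseded",
                              "archived": False,
                              "delegate": None,
                              "commit_ref": None})
    resp.raise_for_status()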

GET /api/patches/810041/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 810041,
    "url": "http://patchwork.ozlabs.org/api/patches/810041/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/openvswitch/patch/1504603381-30071-5-git-send-email-yliu@fridaylinux.org/",
    "project": {
        "id": 47,
        "url": "http://patchwork.ozlabs.org/api/projects/47/?format=api",
        "name": "Open vSwitch",
        "link_name": "openvswitch",
        "list_id": "ovs-dev.openvswitch.org",
        "list_email": "ovs-dev@openvswitch.org",
        "web_url": "http://openvswitch.org/",
        "scm_url": "git@github.com:openvswitch/ovs.git",
        "webscm_url": "https://github.com/openvswitch/ovs",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<1504603381-30071-5-git-send-email-yliu@fridaylinux.org>",
    "list_archive_url": null,
    "date": "2017-09-05T09:22:57",
    "name": "[ovs-dev,v2,4/8] netdev-dpdk: implement flow put with rte flow",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "54b3fc3ca0bde80313d4459a20efb832a6b9606a",
    "submitter": {
        "id": 72215,
        "url": "http://patchwork.ozlabs.org/api/people/72215/?format=api",
        "name": "Yuanhan Liu",
        "email": "yliu@fridaylinux.org"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/openvswitch/patch/1504603381-30071-5-git-send-email-yliu@fridaylinux.org/mbox/",
    "series": [
        {
            "id": 1528,
            "url": "http://patchwork.ozlabs.org/api/series/1528/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/openvswitch/list/?series=1528",
            "date": "2017-09-05T09:22:53",
            "name": "OVS-DPDK flow offload with rte_flow",
            "version": 2,
            "mbox": "http://patchwork.ozlabs.org/series/1528/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/810041/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/810041/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<ovs-dev-bounces@openvswitch.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "dev@openvswitch.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "ovs-dev@mail.linuxfoundation.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=openvswitch.org\n\t(client-ip=140.211.169.12; helo=mail.linuxfoundation.org;\n\tenvelope-from=ovs-dev-bounces@openvswitch.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n\tunprotected) header.d=fridaylinux-org.20150623.gappssmtp.com\n\theader.i=@fridaylinux-org.20150623.gappssmtp.com\n\theader.b=\"lvgeUpxb\"; dkim-atps=neutral"
        ],
        "Received": [
            "from mail.linuxfoundation.org (mail.linuxfoundation.org\n\t[140.211.169.12])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xmhDK0K7fz9s0g\n\tfor <incoming@patchwork.ozlabs.org>;\n\tTue,  5 Sep 2017 19:26:25 +1000 (AEST)",
            "from mail.linux-foundation.org (localhost [127.0.0.1])\n\tby mail.linuxfoundation.org (Postfix) with ESMTP id 0067FAE1;\n\tTue,  5 Sep 2017 09:23:51 +0000 (UTC)",
            "from smtp1.linuxfoundation.org (smtp1.linux-foundation.org\n\t[172.17.192.35])\n\tby mail.linuxfoundation.org (Postfix) with ESMTPS id 33F03AF4\n\tfor <dev@openvswitch.org>; Tue,  5 Sep 2017 09:23:51 +0000 (UTC)",
            "from mail-pg0-f53.google.com (mail-pg0-f53.google.com\n\t[74.125.83.53])\n\tby smtp1.linuxfoundation.org (Postfix) with ESMTPS id 5AAA11E8\n\tfor <dev@openvswitch.org>; Tue,  5 Sep 2017 09:23:49 +0000 (UTC)",
            "by mail-pg0-f53.google.com with SMTP id j6so8140510pgc.1\n\tfor <dev@openvswitch.org>; Tue, 05 Sep 2017 02:23:49 -0700 (PDT)",
            "from localhost.localdomain ([101.228.204.163])\n\tby smtp.gmail.com with ESMTPSA id\n\ti187sm295921pfe.71.2017.09.05.02.23.45\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);\n\tTue, 05 Sep 2017 02:23:47 -0700 (PDT)"
        ],
        "X-Greylist": "whitelisted by SQLgrey-1.7.6",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=fridaylinux-org.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=hnEdNO8g6nyGg0p+dNw8L9ukK77X4pLPIOb6W6N+kyQ=;\n\tb=lvgeUpxbxUOmsK7bxk61NUSJ+CdZN7afA3aIN+D1/ai1drf/s2neLNDdW6rJBTNVNr\n\t2BblO9/XNHE139Yjak1TegVHva16AKsRavJPMXPTJesIOX5/kQRgBv0KwUx5Uwl/4wxG\n\tt422Jkw6/Httha112KAA2yd67GLCcZcsCgISCSVD4zq1ZwgIg5KC96idjLx7op4jfYea\n\tvV6wv8h0dwAEk6HZpfFH00F7o0J2yIWm3To8qQVIP4l0/H9a4JflhPGGwrfGVEau/p2p\n\t6wWKhn6ijYQQA6PaAkq1K8f93Nl5mLKp22Zm2MMqAvBXn9hoi3Op2REG4KWEeVGTFN/l\n\tbVog==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=hnEdNO8g6nyGg0p+dNw8L9ukK77X4pLPIOb6W6N+kyQ=;\n\tb=f5t/6jNtSAMxlHmX46jJl0o9eCETKYxpIp5KJEQ18r6gdg+lSTrPxA0qvPD73m4C9i\n\tL78hLGz6i/PI8nPTgt++mQWK2tW1S3JTvHUWoL0gTR6zsr63ETwmoCADGq0GDI1zI/1Y\n\tRAewdd+CDfQ6eerMpZgeMEBCDDj3gxPmHLbiAfRx9QYr+tNKxMHvDt+HTdPPhtyXhXwz\n\tf1msktafJfHNZz+kTqUbxi/iIlRdGP5GyByy/Vn7dWAkDcDeIWQR23hlkUmQ9Z/u4VJL\n\tROdh3hpAJSyNWzHRZwqQfbzqYVyXFyMnITxwV0K6ODHWf87c+F5H8c+6Qi/dqyMJHuTA\n\tZ6oA==",
        "X-Gm-Message-State": "AHPjjUgkVZxvt1L/91e24w03i55sIC6IsHE/YeEhF4eVs3exal7AuaBT\n\tW5LlQq6ZZQFgzNzwvAJEng==",
        "X-Google-Smtp-Source": "ADKCNb4VfEj54Q9TTX4nb1XHH0KE6t1KkLgHlTQHYlf3Bn8aD/jT/EXGQf0PW8fsqHDSUNLrMnE81g==",
        "X-Received": "by 10.84.210.108 with SMTP id z99mr3589336plh.341.1504603428432; \n\tTue, 05 Sep 2017 02:23:48 -0700 (PDT)",
        "From": "Yuanhan Liu <yliu@fridaylinux.org>",
        "To": "dev@openvswitch.org",
        "Date": "Tue,  5 Sep 2017 17:22:57 +0800",
        "Message-Id": "<1504603381-30071-5-git-send-email-yliu@fridaylinux.org>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1504603381-30071-1-git-send-email-yliu@fridaylinux.org>",
        "References": "<1504603381-30071-1-git-send-email-yliu@fridaylinux.org>",
        "X-Spam-Status": "No, score=0.0 required=5.0 tests=DKIM_SIGNED,DKIM_VALID,\n\tRCVD_IN_DNSWL_NONE autolearn=disabled version=3.3.1",
        "X-Spam-Checker-Version": "SpamAssassin 3.3.1 (2010-03-16) on\n\tsmtp1.linux-foundation.org",
        "Subject": "[ovs-dev] [PATCH v2 4/8] netdev-dpdk: implement flow put with rte\n\tflow",
        "X-BeenThere": "ovs-dev@openvswitch.org",
        "X-Mailman-Version": "2.1.12",
        "Precedence": "list",
        "List-Id": "<ovs-dev.openvswitch.org>",
        "List-Unsubscribe": "<https://mail.openvswitch.org/mailman/options/ovs-dev>,\n\t<mailto:ovs-dev-request@openvswitch.org?subject=unsubscribe>",
        "List-Archive": "<http://mail.openvswitch.org/pipermail/ovs-dev/>",
        "List-Post": "<mailto:ovs-dev@openvswitch.org>",
        "List-Help": "<mailto:ovs-dev-request@openvswitch.org?subject=help>",
        "List-Subscribe": "<https://mail.openvswitch.org/mailman/listinfo/ovs-dev>,\n\t<mailto:ovs-dev-request@openvswitch.org?subject=subscribe>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Sender": "ovs-dev-bounces@openvswitch.org",
        "Errors-To": "ovs-dev-bounces@openvswitch.org"
    },
    "content": "From: Finn Christensen <fc@napatech.com>\n\nThe basic yet the major part of this patch is to translate the \"match\"\nto rte flow patterns. And then, we create a rte flow with a MARK action.\nAfterwards, all pkts matches the flow will have the mark id in the mbuf.\n\nFor any unsupported flows, such as MPLS, -1 is returned, meaning the\nflow offload is failed and then skipped.\n\nCo-authored-by: Yuanhan Liu <yliu@fridaylinux.org>\nSigned-off-by: Finn Christensen <fc@napatech.com>\nSigned-off-by: Yuanhan Liu <yliu@fridaylinux.org>\n---\n\nv2: - convert some macros to functions\n    - do not hardcode the max number of flow/action\n    - fix L2 patterns for Intel nic\n    - add comments for not implemented offload methods\n---\n lib/netdev-dpdk.c | 421 +++++++++++++++++++++++++++++++++++++++++++++++++++++-\n 1 file changed, 420 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c\nindex 46f9885..37b0f99 100644\n--- a/lib/netdev-dpdk.c\n+++ b/lib/netdev-dpdk.c\n@@ -58,6 +58,7 @@\n #include \"smap.h\"\n #include \"sset.h\"\n #include \"unaligned.h\"\n+#include \"uuid.h\"\n #include \"timeval.h\"\n #include \"unixctl.h\"\n \n@@ -3400,6 +3401,424 @@ get_rte_flow_by_ufid(const ovs_u128 *ufid)\n }\n \n \n+struct flow_patterns {\n+    struct rte_flow_item *items;\n+    int cnt;\n+    int max;\n+};\n+\n+struct flow_actions {\n+    struct rte_flow_action *actions;\n+    int cnt;\n+    int max;\n+};\n+\n+static void\n+add_flow_pattern(struct flow_patterns *patterns, enum rte_flow_item_type type,\n+                 const void *spec, const void *mask)\n+{\n+    int cnt = patterns->cnt;\n+\n+    if (cnt == 0) {\n+        patterns->max = 8;\n+        patterns->items = xcalloc(patterns->max, sizeof(struct rte_flow_item));\n+    } else if (cnt == patterns->max) {\n+        patterns->max *= 2;\n+        patterns->items = xrealloc(patterns->items, patterns->max *\n+                                   sizeof(struct rte_flow_item));\n+    }\n+\n+    patterns->items[cnt].type = type;\n+    patterns->items[cnt].spec = spec;\n+    patterns->items[cnt].mask = mask;\n+    patterns->items[cnt].last = NULL;\n+    patterns->cnt++;\n+}\n+\n+static void\n+add_flow_action(struct flow_actions *actions, enum rte_flow_action_type type,\n+                const void *conf)\n+{\n+    int cnt = actions->cnt;\n+\n+    if (cnt == 0) {\n+        actions->max = 8;\n+        actions->actions = xcalloc(actions->max,\n+                                   sizeof(struct rte_flow_action));\n+    } else if (cnt == actions->max) {\n+        actions->max *= 2;\n+        actions->actions = xrealloc(actions->actions, actions->max *\n+                                    sizeof(struct rte_flow_action));\n+    }\n+\n+    actions->actions[cnt].type = type;\n+    actions->actions[cnt].conf = conf;\n+    actions->cnt++;\n+}\n+\n+static int\n+netdev_dpdk_add_rte_flow_offload(struct netdev *netdev,\n+                                 const struct match *match,\n+                                 struct nlattr *nl_actions OVS_UNUSED,\n+                                 size_t actions_len,\n+                                 const ovs_u128 *ufid,\n+                                 struct offload_info *info)\n+{\n+    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);\n+    const struct rte_flow_attr flow_attr = {\n+        .group = 0,\n+        .priority = 0,\n+        .ingress = 1,\n+        .egress = 0\n+    };\n+    struct flow_patterns patterns = { .items = NULL, .cnt = 0 };\n+    struct flow_actions actions = { .actions = NULL, .cnt = 0 };\n+    struct rte_flow *flow;\n+    struct rte_flow_error error;\n+    uint8_t *ipv4_next_proto_mask = NULL;\n+    int ret = 0;\n+\n+    /* Eth */\n+    struct rte_flow_item_eth eth_spec;\n+    struct rte_flow_item_eth eth_mask;\n+    memset(&eth_mask, 0, sizeof(eth_mask));\n+    if (match->wc.masks.dl_src.be16[0] ||\n+        match->wc.masks.dl_src.be16[1] ||\n+        match->wc.masks.dl_src.be16[2] ||\n+        match->wc.masks.dl_dst.be16[0] ||\n+        match->wc.masks.dl_dst.be16[1] ||\n+        match->wc.masks.dl_dst.be16[2]) {\n+        rte_memcpy(&eth_spec.dst, &match->flow.dl_dst, sizeof(eth_spec.dst));\n+        rte_memcpy(&eth_spec.src, &match->flow.dl_src, sizeof(eth_spec.src));\n+        eth_spec.type = match->flow.dl_type;\n+\n+        rte_memcpy(&eth_mask.dst, &match->wc.masks.dl_dst,\n+                   
sizeof(eth_mask.dst));\n+        rte_memcpy(&eth_mask.src, &match->wc.masks.dl_src,\n+                   sizeof(eth_mask.src));\n+        eth_mask.type = match->wc.masks.dl_type;\n+\n+        add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_ETH,\n+                         &eth_spec, &eth_mask);\n+    } else {\n+        /*\n+         * If user specifies a flow (like UDP flow) without L2 patterns,\n+         * OVS will at least set the dl_type. Normally, it's enough to\n+         * create an eth pattern just with it. Unluckily, some Intel's\n+         * NIC (such as XL710) doesn't support that. Below is a workaround,\n+         * which simply matches any L2 pkts.\n+         */\n+        add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_ETH, NULL, NULL);\n+    }\n+\n+    /* VLAN */\n+    struct rte_flow_item_vlan vlan_spec;\n+    struct rte_flow_item_vlan vlan_mask;\n+    memset(&vlan_mask, 0, sizeof(vlan_mask));\n+    if (match->wc.masks.vlans[0].tci && match->flow.vlans[0].tci) {\n+        vlan_spec.tci  = match->flow.vlans[0].tci;\n+        vlan_mask.tci  = match->wc.masks.vlans[0].tci;\n+\n+        /* match any protocols */\n+        vlan_mask.tpid = 0;\n+\n+        add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_VLAN,\n+                         &vlan_spec, &vlan_mask);\n+    }\n+\n+    /* IP v4 */\n+    uint8_t proto = 0;\n+    struct rte_flow_item_ipv4 ipv4_spec;\n+    struct rte_flow_item_ipv4 ipv4_mask;\n+    memset(&ipv4_mask, 0, sizeof(ipv4_mask));\n+    if ((match->flow.dl_type == ntohs(ETH_TYPE_IP)) &&\n+        (match->wc.masks.nw_src || match->wc.masks.nw_dst ||\n+         match->wc.masks.nw_tos || match->wc.masks.nw_ttl ||\n+         match->wc.masks.nw_proto)) {\n+        ipv4_spec.hdr.type_of_service = match->flow.nw_tos;\n+        ipv4_spec.hdr.time_to_live    = match->flow.nw_tos;\n+        ipv4_spec.hdr.next_proto_id   = match->flow.nw_proto;\n+        ipv4_spec.hdr.src_addr        = match->flow.nw_src;\n+        ipv4_spec.hdr.dst_addr        = match->flow.nw_dst;\n+\n+        ipv4_mask.hdr.type_of_service = match->wc.masks.nw_tos;\n+        ipv4_mask.hdr.time_to_live    = match->wc.masks.nw_tos;\n+        ipv4_mask.hdr.next_proto_id   = match->wc.masks.nw_proto;\n+        ipv4_mask.hdr.src_addr        = match->wc.masks.nw_src;\n+        ipv4_mask.hdr.dst_addr        = match->wc.masks.nw_dst;\n+\n+        add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_IPV4,\n+                         &ipv4_spec, &ipv4_mask);\n+\n+        /* Save proto for L4 protocol setup */\n+        proto = ipv4_spec.hdr.next_proto_id & ipv4_mask.hdr.next_proto_id;\n+\n+        /* Remember proto mask address for later modification */\n+        ipv4_next_proto_mask = &ipv4_mask.hdr.next_proto_id;\n+    }\n+\n+    if (proto != IPPROTO_ICMP && proto != IPPROTO_UDP  &&\n+        proto != IPPROTO_SCTP && proto != IPPROTO_TCP  &&\n+        (match->wc.masks.tp_src ||\n+         match->wc.masks.tp_dst ||\n+         match->wc.masks.tcp_flags)) {\n+        VLOG_INFO(\"L4 Protocol (%u) not supported\", proto);\n+        ret = -1;\n+        goto out;\n+    }\n+\n+    struct rte_flow_item_udp udp_spec;\n+    struct rte_flow_item_udp udp_mask;\n+    memset(&udp_mask, 0, sizeof(udp_mask));\n+    if ((proto == IPPROTO_UDP) &&\n+        (match->wc.masks.tp_src || match->wc.masks.tp_dst)) {\n+        udp_spec.hdr.src_port = match->flow.tp_src;\n+        udp_spec.hdr.dst_port = match->flow.tp_dst;\n+\n+        udp_mask.hdr.src_port = match->wc.masks.tp_src;\n+        udp_mask.hdr.dst_port = match->wc.masks.tp_dst;\n+\n+        
add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_UDP,\n+                         &udp_spec, &udp_mask);\n+\n+        /* proto == UDP and ITEM_TYPE_UDP, thus no need for proto match */\n+        if (ipv4_next_proto_mask) {\n+            *ipv4_next_proto_mask = 0;\n+        }\n+    }\n+\n+    struct rte_flow_item_sctp sctp_spec;\n+    struct rte_flow_item_sctp sctp_mask;\n+    memset(&sctp_mask, 0, sizeof(sctp_mask));\n+    if ((proto == IPPROTO_SCTP) &&\n+        (match->wc.masks.tp_src || match->wc.masks.tp_dst)) {\n+        sctp_spec.hdr.src_port = match->flow.tp_src;\n+        sctp_spec.hdr.dst_port = match->flow.tp_dst;\n+\n+        sctp_mask.hdr.src_port = match->wc.masks.tp_src;\n+        sctp_mask.hdr.dst_port = match->wc.masks.tp_dst;\n+\n+        add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_SCTP,\n+                         &sctp_spec, &sctp_mask);\n+\n+        /* proto == SCTP and ITEM_TYPE_SCTP, thus no need for proto match */\n+        if (ipv4_next_proto_mask) {\n+            *ipv4_next_proto_mask = 0;\n+        }\n+    }\n+\n+    struct rte_flow_item_icmp icmp_spec;\n+    struct rte_flow_item_icmp icmp_mask;\n+    memset(&icmp_mask, 0, sizeof(icmp_mask));\n+    if ((proto == IPPROTO_ICMP) &&\n+        (match->wc.masks.tp_src || match->wc.masks.tp_dst)) {\n+        icmp_spec.hdr.icmp_type = (uint8_t)ntohs(match->flow.tp_src);\n+        icmp_spec.hdr.icmp_code = (uint8_t)ntohs(match->flow.tp_dst);\n+\n+        icmp_mask.hdr.icmp_type = (uint8_t)ntohs(match->wc.masks.tp_src);\n+        icmp_mask.hdr.icmp_code = (uint8_t)ntohs(match->wc.masks.tp_dst);\n+\n+        add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_ICMP,\n+                         &icmp_spec, &icmp_mask);\n+\n+        /* proto == ICMP and ITEM_TYPE_ICMP, thus no need for proto match */\n+        if (ipv4_next_proto_mask) {\n+            *ipv4_next_proto_mask = 0;\n+        }\n+    }\n+\n+    struct rte_flow_item_tcp tcp_spec;\n+    struct rte_flow_item_tcp tcp_mask;\n+    memset(&tcp_mask, 0, sizeof(tcp_mask));\n+    if ((proto == IPPROTO_TCP) &&\n+        (match->wc.masks.tp_src ||\n+         match->wc.masks.tp_dst ||\n+         match->wc.masks.tcp_flags)) {\n+        tcp_spec.hdr.src_port  = match->flow.tp_src;\n+        tcp_spec.hdr.dst_port  = match->flow.tp_dst;\n+        tcp_spec.hdr.data_off  = ntohs(match->flow.tcp_flags) >> 8;\n+        tcp_spec.hdr.tcp_flags = ntohs(match->flow.tcp_flags) & 0xff;\n+\n+        tcp_mask.hdr.src_port  = match->wc.masks.tp_src;\n+        tcp_mask.hdr.dst_port  = match->wc.masks.tp_dst;\n+        tcp_mask.hdr.data_off  = ntohs(match->wc.masks.tcp_flags) >> 8;\n+        tcp_mask.hdr.tcp_flags = ntohs(match->wc.masks.tcp_flags) & 0xff;\n+\n+        add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_TCP,\n+                         &tcp_spec, &tcp_mask);\n+\n+        /* proto == TCP and ITEM_TYPE_TCP, thus no need for proto match */\n+        if (ipv4_next_proto_mask) {\n+            *ipv4_next_proto_mask = 0;\n+        }\n+    }\n+    add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_END, NULL, NULL);\n+\n+    struct rte_flow_action_mark mark;\n+    if (actions_len) {\n+        mark.id = info->flow_mark;\n+        add_flow_action(&actions, RTE_FLOW_ACTION_TYPE_MARK, &mark);\n+    } else {\n+        add_flow_action(&actions, RTE_FLOW_ACTION_TYPE_DROP, NULL);\n+        VLOG_INFO(\"no action given; drop pkts in hardware\\n\");\n+    }\n+    add_flow_action(&actions, RTE_FLOW_ACTION_TYPE_END, NULL);\n+\n+    flow = rte_flow_create(dev->port_id, &flow_attr, patterns.items,\n+                      
     actions.actions, &error);\n+    if (!flow) {\n+        VLOG_ERR(\"rte flow creat error: %u : message : %s\\n\",\n+                 error.type, error.message);\n+        ret = -1;\n+        goto out;\n+    }\n+    add_ufid_dpdk_flow_mapping(ufid, flow);\n+    VLOG_INFO(\"installed flow %p by ufid \"UUID_FMT\"\\n\",\n+              flow, UUID_ARGS((struct uuid *)ufid));\n+\n+out:\n+    free(patterns.items);\n+    free(actions.actions);\n+    return ret;\n+}\n+\n+/*\n+ * Validate for later rte flow offload creation. If any unsupported\n+ * flow are specified, return -1.\n+ */\n+static int\n+netdev_dpdk_validate_flow(const struct match *match)\n+{\n+    struct match match_zero_wc;\n+\n+    /* Create a wc-zeroed version of flow */\n+    match_init(&match_zero_wc, &match->flow, &match->wc);\n+\n+#define CHECK_NONZERO_BYTES(addr, size) do {    \\\n+    uint8_t *padr = (uint8_t *)(addr);          \\\n+    int i;                                      \\\n+    for (i = 0; i < (size); i++) {              \\\n+        if (padr[i] != 0) {                     \\\n+            goto err;                           \\\n+        }                                       \\\n+    }                                           \\\n+} while (0)\n+\n+#define CHECK_NONZERO(var)              do {    \\\n+    if ((var) != 0) {                           \\\n+        goto err;                               \\\n+    }                                           \\\n+} while (0)\n+\n+    CHECK_NONZERO_BYTES(&match_zero_wc.flow.tunnel,\n+                        sizeof(match_zero_wc.flow.tunnel));\n+    CHECK_NONZERO(match->wc.masks.metadata);\n+    CHECK_NONZERO(match->wc.masks.skb_priority);\n+    CHECK_NONZERO(match->wc.masks.pkt_mark);\n+    CHECK_NONZERO(match->wc.masks.dp_hash);\n+\n+    /* recirc id must be zero */\n+    CHECK_NONZERO(match_zero_wc.flow.recirc_id);\n+\n+    CHECK_NONZERO(match->wc.masks.ct_state);\n+    CHECK_NONZERO(match->wc.masks.ct_zone);\n+    CHECK_NONZERO(match->wc.masks.ct_mark);\n+    CHECK_NONZERO(match->wc.masks.ct_label.u64.hi);\n+    CHECK_NONZERO(match->wc.masks.ct_label.u64.lo);\n+    CHECK_NONZERO(match->wc.masks.ct_nw_proto);\n+    CHECK_NONZERO(match->wc.masks.ct_tp_src);\n+    CHECK_NONZERO(match->wc.masks.ct_tp_dst);\n+    CHECK_NONZERO(match->wc.masks.conj_id);\n+    CHECK_NONZERO(match->wc.masks.actset_output);\n+\n+    /* unsupported L2 */\n+    CHECK_NONZERO_BYTES(&match->wc.masks.mpls_lse,\n+                        sizeof(match_zero_wc.flow.mpls_lse) /\n+                        sizeof(ovs_be32));\n+\n+    /* unsupported L3 */\n+    CHECK_NONZERO_BYTES(&match->wc.masks.ipv6_src, sizeof(struct in6_addr));\n+    CHECK_NONZERO_BYTES(&match->wc.masks.ipv6_dst, sizeof(struct in6_addr));\n+    CHECK_NONZERO(match->wc.masks.ipv6_label);\n+    CHECK_NONZERO_BYTES(&match->wc.masks.nd_target, sizeof(struct in6_addr));\n+    CHECK_NONZERO_BYTES(&match->wc.masks.arp_sha, sizeof(struct eth_addr));\n+    CHECK_NONZERO_BYTES(&match->wc.masks.arp_tha, sizeof(struct eth_addr));\n+\n+    /* If fragmented, then don't HW accelerate - for now */\n+    CHECK_NONZERO(match_zero_wc.flow.nw_frag);\n+\n+    /* unsupported L4 */\n+    CHECK_NONZERO(match->wc.masks.igmp_group_ip4);\n+\n+    return 0;\n+\n+err:\n+    VLOG_INFO(\"Cannot HW accelerate this flow\");\n+    return -1;\n+}\n+\n+static int\n+netdev_dpdk_destroy_rte_flow(struct netdev_dpdk *dev,\n+                             const ovs_u128 *ufid,\n+                             struct rte_flow *rte_flow)\n+{\n+    struct rte_flow_error error;\n+  
  int ret;\n+\n+    ret = rte_flow_destroy(dev->port_id, rte_flow, &error);\n+    if (ret == 0) {\n+        del_ufid_dpdk_flow_mapping(ufid);\n+        VLOG_INFO(\"removed rte flow %p associated with ufid \" UUID_FMT \"\\n\",\n+                  rte_flow, UUID_ARGS((struct uuid *)ufid));\n+    } else {\n+        VLOG_ERR(\"rte flow destroy error: %u : message : %s\\n\",\n+                 error.type, error.message);\n+    }\n+\n+    return ret;\n+}\n+\n+static int\n+netdev_dpdk_flow_put(struct netdev *netdev, struct match *match,\n+                     struct nlattr *actions, size_t actions_len,\n+                     const ovs_u128 *ufid, struct offload_info *info,\n+                     struct dpif_flow_stats *stats OVS_UNUSED)\n+{\n+    struct rte_flow *rte_flow;\n+    int ret;\n+\n+    /*\n+     * If an old rte_flow exists, it means it's a flow modification.\n+     * Here destroy the old rte flow first before adding a new one.\n+     */\n+    rte_flow = get_rte_flow_by_ufid(ufid);\n+    if (rte_flow) {\n+        ret = netdev_dpdk_destroy_rte_flow(netdev_dpdk_cast(netdev),\n+                                           ufid, rte_flow);\n+        if (ret < 0)\n+            return ret;\n+    }\n+\n+    ret = netdev_dpdk_validate_flow(match);\n+    if (ret < 0) {\n+        return ret;\n+    }\n+\n+    return netdev_dpdk_add_rte_flow_offload(netdev, match, actions,\n+                                            actions_len, ufid, info);\n+}\n+\n+#define DPDK_FLOW_OFFLOAD_API                                 \\\n+    NULL,                   /* flow_flush */                  \\\n+    NULL,                   /* flow_dump_create */            \\\n+    NULL,                   /* flow_dump_destroy */           \\\n+    NULL,                   /* flow_dump_next */              \\\n+    netdev_dpdk_flow_put,                                     \\\n+    NULL,                   /* flow_get */                    \\\n+    NULL,                   /* flow_del */                    \\\n+    NULL                    /* init_flow_api */\n+\n+\n #define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT,    \\\n                           SET_CONFIG, SET_TX_MULTIQ, SEND,    \\\n                           GET_CARRIER, GET_STATS,             \\\n@@ -3472,7 +3891,7 @@ get_rte_flow_by_ufid(const ovs_u128 *ufid)\n     RXQ_RECV,                                                 \\\n     NULL,                       /* rx_wait */                 \\\n     NULL,                       /* rxq_drain */               \\\n-    NO_OFFLOAD_API                                            \\\n+    DPDK_FLOW_OFFLOAD_API                                     \\\n }\n \n static const struct netdev_class dpdk_class =\n",
    "prefixes": [
        "ovs-dev",
        "v2",
        "4/8"
    ]
}
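
As a rough sketch of how a client might consume a response like the one above: read the metadata fields directly, then follow the hyperlinked sub-resources ("series", "checks", "mbox") embedded in the body. The session setup, the printed output and the assumed shape of the checks list are illustrative assumptions; only URLs that appear in the response above are used.

    import requests

    session = requests.Session()

    patch = session.get(
        "http://patchwork.ozlabs.org/api/patches/810041/").json()

    # Metadata fields taken directly from the response shown above.
    print(patch["name"])                     # patch subject
    print(patch["state"], patch["check"])    # review state and aggregate check

    # Follow hyperlinked sub-resources included in the response.
    for series in patch["series"]:
        print("series:", series["name"], "version", series["version"])

    # Assumed: the "checks" link returns a JSON list of check objects.
    for check in session.get(patch["checks"]).json():
        print("check:", check.get("context"), check.get("state"))

    # The "mbox" link returns the raw patch, suitable for `git am`.
    with open("810041.mbox", "w") as f:
        f.write(session.get(patch["mbox"]).text)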