
[ovs-dev,V2,2/2] dpif-netdev: Introduce netdev array cache

Message ID 20210714130055.7030-2-elibr@nvidia.com
State Superseded
Series [ovs-dev,V2,1/2] dpif-netdev: Do not execute packet recovery without experimental support

Checks

Context                 Check     Description
ovsrobot/apply-robot    success   apply and check: success
ovsrobot/github-robot   success   github build: passed

Commit Message

Eli Britstein July 14, 2021, 1 p.m. UTC
Port numbers are usually small. Maintain an array of netdev handles indexed
by port number to accelerate the netdev lookup done for
netdev_hw_miss_packet_recover().

Reported-by: Cian Ferriter <cian.ferriter@intel.com>
Signed-off-by: Eli Britstein <elibr@nvidia.com>
Reviewed-by: Gaetan Rivet <gaetanr@nvidia.com>
---
 lib/dpif-netdev-private-thread.h |  4 +++
 lib/dpif-netdev.c                | 43 +++++++++++++++++++++++++++++---
 2 files changed, 43 insertions(+), 4 deletions(-)
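
As a note outside the patch itself: below is a minimal standalone sketch of
the array-cache lookup pattern the patch applies. Names such as
NETDEV_CACHE_SIZE, netdev_cache[], lookup_slow() and the local struct netdev
are illustrative placeholders, not OVS symbols.

/*
 * Sketch only: a port number below the array bound resolves (possibly to
 * NULL) with a single indexed load; only larger port numbers take the
 * slower fallback lookup, mirroring pmd_netdev_cache_lookup() in the
 * patch below.
 */
#include <stdint.h>
#include <stdio.h>

#define NETDEV_CACHE_SIZE 1024              /* mirrors DP_PMD_NETDEV_CACHE_SIZE */

struct netdev {
    const char *name;
};

static struct netdev *netdev_cache[NETDEV_CACHE_SIZE];

/* Placeholder for the slow path (pmd_send_port_cache_lookup() in the patch). */
static struct netdev *
lookup_slow(uint32_t port_no)
{
    (void) port_no;
    return NULL;
}

static struct netdev *
netdev_lookup(uint32_t port_no)
{
    if (port_no < NETDEV_CACHE_SIZE) {
        return netdev_cache[port_no];       /* fast path: one array load. */
    }
    return lookup_slow(port_no);            /* out-of-range ports only. */
}

int
main(void)
{
    static struct netdev eth0 = { "eth0" };

    netdev_cache[3] = &eth0;                /* populated as pmd_load_cached_ports() does. */

    struct netdev *dev = netdev_lookup(3);
    printf("port 3 -> %s\n", dev ? dev->name : "(none)");
    return 0;
}

Note the design choice: in-range ports return whatever the array holds,
including NULL, without falling back to the hash lookup; only port numbers
beyond the array bound pay for the slower path.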

Patch

diff --git a/lib/dpif-netdev-private-thread.h b/lib/dpif-netdev-private-thread.h
index ba79c4a0a..52755fbae 100644
--- a/lib/dpif-netdev-private-thread.h
+++ b/lib/dpif-netdev-private-thread.h
@@ -50,6 +50,9 @@  struct dp_netdev_pmd_thread_ctx {
     bool smc_enable_db;
 };
 
+/* Size of netdev's cache. */
+#define DP_PMD_NETDEV_CACHE_SIZE 1024
+
 /* PMD: Poll modes drivers.  PMD accesses devices via polling to eliminate
  * the performance overhead of interrupt processing.  Therefore netdev can
  * not implement rx-wait for these devices.  dpif-netdev needs to poll
@@ -192,6 +195,7 @@  struct dp_netdev_pmd_thread {
      * other instance will only be accessed by its own pmd thread. */
     struct hmap tnl_port_cache;
     struct hmap send_port_cache;
+    struct netdev *send_netdev_cache[DP_PMD_NETDEV_CACHE_SIZE];
 
     /* Keep track of detailed PMD performance statistics. */
     struct pmd_perf_stats perf_stats;
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 1823bf565..50ea85d48 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -5540,6 +5540,12 @@  pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd)
         free(tx_port_cached);
     }
     HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) {
+        uint32_t port_no_ind;
+
+        port_no_ind = odp_to_u32(tx_port_cached->port->port_no);
+        if (port_no_ind < ARRAY_SIZE(pmd->send_netdev_cache)) {
+            pmd->send_netdev_cache[port_no_ind] = NULL;
+        }
         free(tx_port_cached);
     }
 }
@@ -5566,9 +5572,16 @@  pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
         }
 
         if (netdev_n_txq(tx_port->port->netdev)) {
+            uint32_t port_no_ind;
+
             tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached);
             hmap_insert(&pmd->send_port_cache, &tx_port_cached->node,
                         hash_port_no(tx_port_cached->port->port_no));
+            port_no_ind = odp_to_u32(tx_port_cached->port->port_no);
+            if (port_no_ind < ARRAY_SIZE(pmd->send_netdev_cache)) {
+                pmd->send_netdev_cache[port_no_ind] =
+                    tx_port_cached->port->netdev;
+            }
         }
     }
 }
@@ -6217,6 +6230,7 @@  dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     hmap_init(&pmd->tx_ports);
     hmap_init(&pmd->tnl_port_cache);
     hmap_init(&pmd->send_port_cache);
+    memset(pmd->send_netdev_cache, 0, sizeof pmd->send_netdev_cache);
     cmap_init(&pmd->tx_bonds);
 
     /* Initialize DPIF function pointer to the default configured version. */
@@ -6241,6 +6255,7 @@  dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
     struct dpcls *cls;
 
     dp_netdev_pmd_flow_flush(pmd);
+    memset(pmd->send_netdev_cache, 0, sizeof pmd->send_netdev_cache);
     hmap_destroy(&pmd->send_port_cache);
     hmap_destroy(&pmd->tnl_port_cache);
     hmap_destroy(&pmd->tx_ports);
@@ -6754,20 +6769,40 @@  smc_lookup_single(struct dp_netdev_pmd_thread *pmd,
 static struct tx_port * pmd_send_port_cache_lookup(
     const struct dp_netdev_pmd_thread *pmd, odp_port_t port_no);
 
+OVS_UNUSED
+static inline struct netdev *
+pmd_netdev_cache_lookup(const struct dp_netdev_pmd_thread *pmd,
+                        odp_port_t port_no)
+{
+    uint32_t port_no_ind;
+    struct tx_port *p;
+
+    port_no_ind = odp_to_u32(port_no);
+    if (port_no_ind < ARRAY_SIZE(pmd->send_netdev_cache)) {
+        return pmd->send_netdev_cache[port_no_ind];
+    }
+
+    p = pmd_send_port_cache_lookup(pmd, port_no);
+    if (p) {
+        return p->port->netdev;
+    }
+    return NULL;
+}
+
 inline int
 dp_netdev_hw_flow(const struct dp_netdev_pmd_thread *pmd,
                   odp_port_t port_no OVS_UNUSED,
                   struct dp_packet *packet,
                   struct dp_netdev_flow **flow)
 {
-    struct tx_port *p OVS_UNUSED;
+    struct netdev *netdev OVS_UNUSED;
     uint32_t mark;
 
 #ifdef ALLOW_EXPERIMENTAL_API /* Packet restoration API required. */
     /* Restore the packet if HW processing was terminated before completion. */
-    p = pmd_send_port_cache_lookup(pmd, port_no);
-    if (OVS_LIKELY(p)) {
-        int err = netdev_hw_miss_packet_recover(p->port->netdev, packet);
+    netdev = pmd_netdev_cache_lookup(pmd, port_no);
+    if (OVS_LIKELY(netdev)) {
+        int err = netdev_hw_miss_packet_recover(netdev, packet);
 
         if (err && err != EOPNOTSUPP) {
             COVERAGE_INC(datapath_drop_hw_miss_recover);