[ovs-dev,ovs-dev,v2] dpif-netdev: Allow to set max capacity of flow on netdev
diff mbox series

Message ID 1578475968-36897-1-git-send-email-xiangxia.m.yue@gmail.com
State New
Headers show
Series
  • [ovs-dev,ovs-dev,v2] dpif-netdev: Allow to set max capacity of flow on netdev
Related show

Commit Message

Tonghao Zhang Jan. 8, 2020, 9:32 a.m. UTC
From: Tonghao Zhang <xiangxia.m.yue@gmail.com>

To allow installing more than MAX_FLOWS (65536) flows into the netdev
datapath, add the ovs-appctl subcommand "dpif-netdev/pmd-set-max-flow",
which can change the maximum number of flows that the netdev datapath supports.

Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
---
v2:
* change int type to atomic_uint32_t
* check whether the max flow number is valid (0 < max-flow < UINT_MAX).
---
 lib/dpif-netdev.c | 50 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 48 insertions(+), 2 deletions(-)

Comments

0-day Robot Jan. 8, 2020, 9:59 a.m. UTC | #1
Bleep bloop.  Greetings Tonghao Zhang, I am a robot and I have tried out your patch.
Thanks for your contribution.

I encountered some error that I wasn't expecting.  See the details below.


checkpatch:
WARNING: Line has non-spaces leading whitespace
WARNING: Line has trailing whitespace
#54 FILE: lib/dpif-netdev.c:1141:
    

Lines checked: 113, Warnings: 2, Errors: 0


Please check this out.  If you feel there has been an error, please email aconole@redhat.com

Thanks,
0-day Robot
Ben Pfaff Jan. 8, 2020, 6:47 p.m. UTC | #2
On Wed, Jan 08, 2020 at 05:32:48PM +0800, xiangxia.m.yue@gmail.com wrote:
> From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
> 
> For installing more than MAX_FLOWS (65536) flows to netdev datapath.
> Add the ovs-appctl subcommand "dpif-netdev/pmd-set-max-flow" which
> can change the flow number which netdev datapath support.
> 
> Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
> ---
> v2:
> * change int type to atomic_uint32_t
> * check max flow number is whether valid (0 < max-flow < UINT_MAX).

Thanks.  I suggest folding in the following mainly stylistic changes.

Also, please document the new commands in lib/dpif-netdev-unixctl.man
and mention them in NEWS.

Thanks,

Ben.

-8<--------------------------cut here-------------------------->8--

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 38f0c65ab2e4..71aee1f7203f 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -1133,9 +1133,9 @@ dpif_netdev_set_max_flow(struct unixctl_conn *conn,
 {
     long long max_flow = atoll(argv[1]);
 
-    if (max_flow <= 0 || max_flow >= UINT_MAX) {
+    if (max_flow <= 0 || max_flow >= UINT32_MAX) {
         unixctl_command_reply_error(conn,
-                                    "max-flow should: > 0 and < UINT_MAX\n");
+                                    "max-flow should: > 0 and < UINT_MAX");
         return;
     }
 
@@ -1149,14 +1149,12 @@ dpif_netdev_show_max_flow(struct unixctl_conn *conn,
                           const char *argv[] OVS_UNUSED,
                           void *aux OVS_UNUSED)
 {
-    struct ds reply = DS_EMPTY_INITIALIZER;
     uint32_t max_flow;
-
     atomic_read_relaxed(&netdev_max_flow, &max_flow);
 
-    ds_put_format(&reply,"%u\n", max_flow);
-    unixctl_command_reply(conn, ds_cstr(&reply));
-    ds_destroy(&reply);
+    char *reply = xasprintf("%u", max_flow);
+    unixctl_command_reply(conn, reply);
+    free(reply);
 }
 
 static int
@@ -3390,7 +3388,6 @@ flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
                 struct dpif_flow_stats *stats)
 {
     struct dp_netdev_flow *netdev_flow;
-    uint32_t max_flow;
     int error = 0;
 
     if (stats) {
@@ -3401,6 +3398,7 @@ flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
     netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
     if (!netdev_flow) {
         if (put->flags & DPIF_FP_CREATE) {
+            uint32_t max_flow;
             atomic_read_relaxed(&netdev_max_flow, &max_flow);
             if (cmap_count(&pmd->flow_table) < max_flow) {
                 dp_netdev_flow_add(pmd, match, ufid, put->actions,

Patch
diff mbox series

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 8485b54db0d8..b5ce70f7d1d1 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -97,7 +97,6 @@  DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)
 #define DEFAULT_TX_FLUSH_INTERVAL 0
 
 /* Configuration parameters. */
-enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */
 enum { MAX_METERS = 65536 };    /* Maximum number of meters. */
 enum { MAX_BANDS = 8 };         /* Maximum number of bands / meter. */
 enum { N_METER_LOCKS = 64 };    /* Maximum number of meters. */
@@ -105,6 +104,9 @@  enum { N_METER_LOCKS = 64 };    /* Maximum number of meters. */
 /* Protects against changes to 'dp_netdevs'. */
 static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
 
+/* Maximum number of flows in flow table. */
+static atomic_uint32_t netdev_max_flow = ATOMIC_VAR_INIT(65536);
+
 /* Contains all 'struct dp_netdev's. */
 static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
     = SHASH_INITIALIZER(&dp_netdevs);
@@ -1112,6 +1114,40 @@  pmd_info_show_perf(struct ds *reply,
     }
 }
 
+static void
+dpif_netdev_set_max_flow(struct unixctl_conn *conn,
+                         int argc OVS_UNUSED,
+                         const char *argv[],
+                         void *aux OVS_UNUSED)
+{
+    long long max_flow = atoll(argv[1]);
+
+    if (max_flow <= 0 || max_flow >= UINT_MAX) {
+        unixctl_command_reply_error(conn,
+                                    "max-flow should: > 0 and < UINT_MAX\n");
+        return;
+    }
+    
+    atomic_store_relaxed(&netdev_max_flow, max_flow);
+    unixctl_command_reply(conn, NULL);
+}
+
+static void
+dpif_netdev_show_max_flow(struct unixctl_conn *conn,
+                          int argc OVS_UNUSED,
+                          const char *argv[] OVS_UNUSED,
+                          void *aux OVS_UNUSED)
+{
+    struct ds reply = DS_EMPTY_INITIALIZER;
+    uint32_t max_flow;
+
+    atomic_read_relaxed(&netdev_max_flow, &max_flow);
+
+    ds_put_format(&reply,"%u\n", max_flow);
+    unixctl_command_reply(conn, ds_cstr(&reply));
+    ds_destroy(&reply);
+}
+
 static int
 compare_poll_list(const void *a_, const void *b_)
 {
@@ -1416,6 +1452,14 @@  dpif_netdev_init(void)
                              "[-us usec] [-q qlen]",
                              0, 10, pmd_perf_log_set_cmd,
                              NULL);
+    unixctl_command_register("dpif-netdev/pmd-set-max-flow",
+                             "number",
+                             1, 1, dpif_netdev_set_max_flow,
+                             NULL);
+    unixctl_command_register("dpif-netdev/pmd-show-max-flow",
+                             "",
+                             0, 0, dpif_netdev_show_max_flow,
+                             NULL);
     return 0;
 }
 
@@ -3335,6 +3379,7 @@  flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
                 struct dpif_flow_stats *stats)
 {
     struct dp_netdev_flow *netdev_flow;
+    uint32_t max_flow;
     int error = 0;
 
     if (stats) {
@@ -3345,7 +3390,8 @@  flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
     netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL);
     if (!netdev_flow) {
         if (put->flags & DPIF_FP_CREATE) {
-            if (cmap_count(&pmd->flow_table) < MAX_FLOWS) {
+            atomic_read_relaxed(&netdev_max_flow, &max_flow);
+            if (cmap_count(&pmd->flow_table) < max_flow) {
                 dp_netdev_flow_add(pmd, match, ufid, put->actions,
                                    put->actions_len);
                 error = 0;