diff mbox

[ovs-dev,ovs,V8,12/26] netdev-tc-offloads: Add flower mask to priority map

Message ID 1493824097-47495-13-git-send-email-roid@mellanox.com
State Changes Requested
Headers show

Commit Message

Roi Dayan May 3, 2017, 3:08 p.m. UTC
From: Paul Blakey <paulb@mellanox.com>

Flower classifier requires a different priority per mask,
so we hash the mask and generate a new priority for
each new mask used.

Signed-off-by: Paul Blakey <paulb@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
---
 lib/netdev-tc-offloads.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

Comments

Flavio Leitner May 9, 2017, 6:23 p.m. UTC | #1
On Wed, May 03, 2017 at 06:08:03PM +0300, Roi Dayan wrote:
> From: Paul Blakey <paulb@mellanox.com>
> 
> Flower classifer requires a different priority per mask,
> so we hash the mask and generate a new priority for
> each new mask used.
> 
> Signed-off-by: Paul Blakey <paulb@mellanox.com>
> Reviewed-by: Roi Dayan <roid@mellanox.com>
> Reviewed-by: Simon Horman <simon.horman@netronome.com>
> ---
>  lib/netdev-tc-offloads.c | 38 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 38 insertions(+)
> 
> diff --git a/lib/netdev-tc-offloads.c b/lib/netdev-tc-offloads.c
> index 9faea97..7e33fff 100644
> --- a/lib/netdev-tc-offloads.c
> +++ b/lib/netdev-tc-offloads.c
> @@ -209,6 +209,44 @@ find_ufid(int prio, int handle, struct netdev *netdev, ovs_u128 *ufid)
>      return (data != NULL);
>  }
>  
> +struct prio_map_data {
> +    struct hmap_node node;
> +    struct tc_flower_key mask;
> +    ovs_be16 protocol;
> +    uint16_t prio;
> +};
> +
> +static uint16_t
> +get_prio_for_tc_flower(struct tc_flower *flower)
> +{
> +    static struct hmap prios = HMAP_INITIALIZER(&prios);
> +    static struct ovs_mutex prios_lock = OVS_MUTEX_INITIALIZER;
> +    static int last_prio = 0;
> +    size_t key_len = sizeof(struct tc_flower_key);
> +    size_t hash = hash_bytes(&flower->mask, key_len,
> +                             (OVS_FORCE uint32_t) flower->key.eth_type);
> +    struct prio_map_data *data;
> +    struct prio_map_data *new_data;
> +
> +    ovs_mutex_lock(&prios_lock);
> +    HMAP_FOR_EACH_WITH_HASH(data, node, hash, &prios) {
> +        if (!memcmp(&flower->mask, &data->mask, key_len)
> +            && data->protocol == flower->key.eth_type) {
> +            ovs_mutex_unlock(&prios_lock);
> +            return data->prio;
> +        }
> +    }
> +
> +    new_data = xzalloc(sizeof *new_data);
> +    memcpy(&new_data->mask, &flower->mask, key_len);
> +    new_data->prio = ++last_prio;

Well, not sure if it is realistic to have more than 65k different
masks which is the tc limit, but here it silently overflows.  I
suspect that tc-flower will fail to insert the flow, but I haven't
looked further.  It may be harmless, but I want to point it out anyway.

Flavio



> +    new_data->protocol = flower->key.eth_type;
> +    hmap_insert(&prios, &new_data->node, hash);
> +    ovs_mutex_unlock(&prios_lock);
> +
> +    return new_data->prio;
> +}
> +
>  int
>  netdev_tc_flow_flush(struct netdev *netdev)
>  {
> -- 
> 2.7.4
> 
> _______________________________________________
> dev mailing list
> dev@openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
Flavio Leitner May 9, 2017, 7:10 p.m. UTC | #2
On Tue, May 09, 2017 at 03:23:12PM -0300, Flavio Leitner wrote:
> On Wed, May 03, 2017 at 06:08:03PM +0300, Roi Dayan wrote:
> > From: Paul Blakey <paulb@mellanox.com>
> > 
> > Flower classifer requires a different priority per mask,
> > so we hash the mask and generate a new priority for
> > each new mask used.
> > 
> > Signed-off-by: Paul Blakey <paulb@mellanox.com>
> > Reviewed-by: Roi Dayan <roid@mellanox.com>
> > Reviewed-by: Simon Horman <simon.horman@netronome.com>
> > ---
> >  lib/netdev-tc-offloads.c | 38 ++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 38 insertions(+)
> > 
> > diff --git a/lib/netdev-tc-offloads.c b/lib/netdev-tc-offloads.c
> > index 9faea97..7e33fff 100644
> > --- a/lib/netdev-tc-offloads.c
> > +++ b/lib/netdev-tc-offloads.c
> > @@ -209,6 +209,44 @@ find_ufid(int prio, int handle, struct netdev *netdev, ovs_u128 *ufid)
> >      return (data != NULL);
> >  }
> >  
> > +struct prio_map_data {
> > +    struct hmap_node node;
> > +    struct tc_flower_key mask;
> > +    ovs_be16 protocol;
> > +    uint16_t prio;
> > +};
> > +
> > +static uint16_t
> > +get_prio_for_tc_flower(struct tc_flower *flower)
> > +{
> > +    static struct hmap prios = HMAP_INITIALIZER(&prios);
> > +    static struct ovs_mutex prios_lock = OVS_MUTEX_INITIALIZER;
> > +    static int last_prio = 0;
> > +    size_t key_len = sizeof(struct tc_flower_key);
> > +    size_t hash = hash_bytes(&flower->mask, key_len,
> > +                             (OVS_FORCE uint32_t) flower->key.eth_type);
> > +    struct prio_map_data *data;
> > +    struct prio_map_data *new_data;
> > +
> > +    ovs_mutex_lock(&prios_lock);
> > +    HMAP_FOR_EACH_WITH_HASH(data, node, hash, &prios) {
> > +        if (!memcmp(&flower->mask, &data->mask, key_len)
> > +            && data->protocol == flower->key.eth_type) {
> > +            ovs_mutex_unlock(&prios_lock);
> > +            return data->prio;
> > +        }
> > +    }
> > +
> > +    new_data = xzalloc(sizeof *new_data);
> > +    memcpy(&new_data->mask, &flower->mask, key_len);
> > +    new_data->prio = ++last_prio;
> 
> Well, not sure if it is realistic to have more than 65k different
> masks which is the tc limit, but here it silently overflows.  I
> suspect that tc-flower will fail to insert the flow, but I haven't
> looked further.  It may be harmless, but wanna to point out anyways.

Turns out that Marcelo had already pointed out this issue on V3.
https://mail.openvswitch.org/pipermail/ovs-dev/2017-February/328957.html

I guess at least it deserves a comment explaining briefly why
it isn't a problem.
Roi Dayan May 16, 2017, 9:44 a.m. UTC | #3
On 09/05/2017 22:10, Flavio Leitner wrote:
> On Tue, May 09, 2017 at 03:23:12PM -0300, Flavio Leitner wrote:
>> On Wed, May 03, 2017 at 06:08:03PM +0300, Roi Dayan wrote:
>>> From: Paul Blakey <paulb@mellanox.com>
>>>
>>> Flower classifer requires a different priority per mask,
>>> so we hash the mask and generate a new priority for
>>> each new mask used.
>>>
>>> Signed-off-by: Paul Blakey <paulb@mellanox.com>
>>> Reviewed-by: Roi Dayan <roid@mellanox.com>
>>> Reviewed-by: Simon Horman <simon.horman@netronome.com>
>>> ---
>>>  lib/netdev-tc-offloads.c | 38 ++++++++++++++++++++++++++++++++++++++
>>>  1 file changed, 38 insertions(+)
>>>
>>> diff --git a/lib/netdev-tc-offloads.c b/lib/netdev-tc-offloads.c
>>> index 9faea97..7e33fff 100644
>>> --- a/lib/netdev-tc-offloads.c
>>> +++ b/lib/netdev-tc-offloads.c
>>> @@ -209,6 +209,44 @@ find_ufid(int prio, int handle, struct netdev *netdev, ovs_u128 *ufid)
>>>      return (data != NULL);
>>>  }
>>>
>>> +struct prio_map_data {
>>> +    struct hmap_node node;
>>> +    struct tc_flower_key mask;
>>> +    ovs_be16 protocol;
>>> +    uint16_t prio;
>>> +};
>>> +
>>> +static uint16_t
>>> +get_prio_for_tc_flower(struct tc_flower *flower)
>>> +{
>>> +    static struct hmap prios = HMAP_INITIALIZER(&prios);
>>> +    static struct ovs_mutex prios_lock = OVS_MUTEX_INITIALIZER;
>>> +    static int last_prio = 0;
>>> +    size_t key_len = sizeof(struct tc_flower_key);
>>> +    size_t hash = hash_bytes(&flower->mask, key_len,
>>> +                             (OVS_FORCE uint32_t) flower->key.eth_type);
>>> +    struct prio_map_data *data;
>>> +    struct prio_map_data *new_data;
>>> +
>>> +    ovs_mutex_lock(&prios_lock);
>>> +    HMAP_FOR_EACH_WITH_HASH(data, node, hash, &prios) {
>>> +        if (!memcmp(&flower->mask, &data->mask, key_len)
>>> +            && data->protocol == flower->key.eth_type) {
>>> +            ovs_mutex_unlock(&prios_lock);
>>> +            return data->prio;
>>> +        }
>>> +    }
>>> +
>>> +    new_data = xzalloc(sizeof *new_data);
>>> +    memcpy(&new_data->mask, &flower->mask, key_len);
>>> +    new_data->prio = ++last_prio;
>>
>> Well, not sure if it is realistic to have more than 65k different
>> masks which is the tc limit, but here it silently overflows.  I
>> suspect that tc-flower will fail to insert the flow, but I haven't
>> looked further.  It may be harmless, but wanna to point out anyways.
>
> Turns out that Marcelo had already pointed out this issue on V3.
> https://mail.openvswitch.org/pipermail/ovs-dev/2017-February/328957.html
>
> I guess at least it deserves a comment explaining briefly why
> it isn't a problem.
>

I missed it. I saw Paul replied to it as well.
So basically we re-use the same prio for the same mask/eth combination.
I updated last_prio to be the same type.
I added inline comments.
I'll probably move this function to be tc_flower_get_prio() as it's 
flower related and can be with other tc flower api.

We can potentially still get the same prio for a different mask/eth combo.
In this case we can either continue as normal and tc will fail, or fail
right away without trying to call tc.

I'll update to do the latter and fail right away.

thanks
diff mbox

Patch

diff --git a/lib/netdev-tc-offloads.c b/lib/netdev-tc-offloads.c
index 9faea97..7e33fff 100644
--- a/lib/netdev-tc-offloads.c
+++ b/lib/netdev-tc-offloads.c
@@ -209,6 +209,44 @@  find_ufid(int prio, int handle, struct netdev *netdev, ovs_u128 *ufid)
     return (data != NULL);
 }
 
+struct prio_map_data {
+    struct hmap_node node;
+    struct tc_flower_key mask;
+    ovs_be16 protocol;
+    uint16_t prio;
+};
+
+static uint16_t
+get_prio_for_tc_flower(struct tc_flower *flower)
+{
+    static struct hmap prios = HMAP_INITIALIZER(&prios);
+    static struct ovs_mutex prios_lock = OVS_MUTEX_INITIALIZER;
+    static int last_prio = 0;
+    size_t key_len = sizeof(struct tc_flower_key);
+    size_t hash = hash_bytes(&flower->mask, key_len,
+                             (OVS_FORCE uint32_t) flower->key.eth_type);
+    struct prio_map_data *data;
+    struct prio_map_data *new_data;
+
+    ovs_mutex_lock(&prios_lock);
+    HMAP_FOR_EACH_WITH_HASH(data, node, hash, &prios) {
+        if (!memcmp(&flower->mask, &data->mask, key_len)
+            && data->protocol == flower->key.eth_type) {
+            ovs_mutex_unlock(&prios_lock);
+            return data->prio;
+        }
+    }
+
+    new_data = xzalloc(sizeof *new_data);
+    memcpy(&new_data->mask, &flower->mask, key_len);
+    new_data->prio = ++last_prio;
+    new_data->protocol = flower->key.eth_type;
+    hmap_insert(&prios, &new_data->node, hash);
+    ovs_mutex_unlock(&prios_lock);
+
+    return new_data->prio;
+}
+
 int
 netdev_tc_flow_flush(struct netdev *netdev)
 {