[net-next] fq_codel: Avoid regenerating skb flow hash unless necessary

Message ID 20170118210428.24006-1-acollins@cradlepoint.com
State Accepted, archived
Delegated to: David Miller

Commit Message

Andrew Collins Jan. 18, 2017, 9:04 p.m. UTC
The fq_codel qdisc currently always regenerates the skb flow hash.
This wastes some cycles and prevents flow separation in cases where
the traffic has been encrypted and can no longer be understood by the
flow dissector.

Change it to use the pre-existing flow hash if one exists, and only
regenerate if necessary.

Signed-off-by: Andrew Collins <acollins@cradlepoint.com>
---
 net/sched/sch_fq_codel.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)
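
For reference, the reuse works because skb_get_hash() only invokes the flow
dissector when the skb does not already carry a valid hash (for example one
computed by the NIC via RSS, or one set earlier by the local stack). A
simplified sketch of that helper, not the verbatim kernel source:

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	/* Reuse an L4 or software hash already stored on the skb; only
	 * fall back to the flow dissector when none is present.
	 */
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}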

Comments

Eric Dumazet Jan. 18, 2017, 9:13 p.m. UTC | #1
On Wed, 2017-01-18 at 14:04 -0700, Andrew Collins wrote:
> The fq_codel qdisc currently always regenerates the skb flow hash.
> This wastes some cycles and prevents flow separation in cases where
> the traffic has been encrypted and can no longer be understood by the
> flow dissector.
> 
> Change it to use the pre-existing flow hash if one exists, and only
> regenerate if necessary.
> 
> Signed-off-by: Andrew Collins <acollins@cradlepoint.com>
> ---

Acked-by: Eric Dumazet <edumazet@google.com>

Thanks !
David Miller Jan. 20, 2017, 5:15 p.m. UTC | #2
From: Andrew Collins <acollins@cradlepoint.com>
Date: Wed, 18 Jan 2017 14:04:28 -0700

> The fq_codel qdisc currently always regenerates the skb flow hash.
> This wastes some cycles and prevents flow separation in cases where
> the traffic has been encrypted and can no longer be understood by the
> flow dissector.
> 
> Change it to use the pre-existing flow hash if one exists, and only
> regenerate if necessary.
> 
> Signed-off-by: Andrew Collins <acollins@cradlepoint.com>

Applied, thanks.
Tom Herbert Jan. 20, 2017, 5:23 p.m. UTC | #3
On Wed, Jan 18, 2017 at 1:04 PM, Andrew Collins
<acollins@cradlepoint.com> wrote:
> The fq_codel qdisc currently always regenerates the skb flow hash.
> This wastes some cycles and prevents flow separation in cases where
> the traffic has been encrypted and can no longer be understood by the
> flow dissector.
>
> Change it to use the pre-existing flow hash if one exists, and only
> regenerate if necessary.
>
> Signed-off-by: Andrew Collins <acollins@cradlepoint.com>
> ---
>  net/sched/sch_fq_codel.c | 6 +-----
>  1 file changed, 1 insertion(+), 5 deletions(-)
>
> diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
> index a5ea0e9..2f50e4c 100644
> --- a/net/sched/sch_fq_codel.c
> +++ b/net/sched/sch_fq_codel.c
> @@ -57,7 +57,6 @@ struct fq_codel_sched_data {
>         struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
>         u32             *backlogs;      /* backlog table [flows_cnt] */
>         u32             flows_cnt;      /* number of flows */
> -       u32             perturbation;   /* hash perturbation */
>         u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
>         u32             drop_batch_size;
>         u32             memory_limit;
> @@ -75,9 +74,7 @@ struct fq_codel_sched_data {
>  static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
>                                   struct sk_buff *skb)
>  {
> -       u32 hash = skb_get_hash_perturb(skb, q->perturbation);
> -
> -       return reciprocal_scale(hash, q->flows_cnt);
> +       return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);

So the perturbing of the hash is no longer needed for fq_codel?

>  }
>
>  static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
> @@ -482,7 +479,6 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
>         q->memory_limit = 32 << 20; /* 32 MBytes */
>         q->drop_batch_size = 64;
>         q->quantum = psched_mtu(qdisc_dev(sch));
> -       q->perturbation = prandom_u32();
>         INIT_LIST_HEAD(&q->new_flows);
>         INIT_LIST_HEAD(&q->old_flows);
>         codel_params_init(&q->cparams);
> --
> 2.9.3
>
Eric Dumazet Jan. 20, 2017, 5:31 p.m. UTC | #4
On Fri, 2017-01-20 at 09:23 -0800, Tom Herbert wrote:

> So the perturbing of the hash is no longer needed for fq_codel?

Right, we never implemented a timer to change the perturbation anyway.

Note that local TCP flows now implement their own hash change when
needed.
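
The per-flow hash change Eric mentions happens on the socket itself; a rough
sketch of that mechanism (helper names assumed from the kernel's sk_txhash
code, not quoted verbatim):

static inline void sk_set_txhash(struct sock *sk)
{
	/* pick a fresh, non-zero random transmit hash for this socket */
	sk->sk_txhash = net_tx_rndhash();
}

static inline void sk_rethink_txhash(struct sock *sk)
{
	/* Re-roll the hash only for sockets that were using one (e.g. after
	 * repeated retransmission timeouts), so the flow moves to a different
	 * bucket without any qdisc-level perturbation.
	 */
	if (sk->sk_txhash)
		sk_set_txhash(sk);
}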

Patch

diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index a5ea0e9..2f50e4c 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -57,7 +57,6 @@  struct fq_codel_sched_data {
 	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
 	u32		*backlogs;	/* backlog table [flows_cnt] */
 	u32		flows_cnt;	/* number of flows */
-	u32		perturbation;	/* hash perturbation */
 	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
 	u32		drop_batch_size;
 	u32		memory_limit;
@@ -75,9 +74,7 @@  struct fq_codel_sched_data {
 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
 				  struct sk_buff *skb)
 {
-	u32 hash = skb_get_hash_perturb(skb, q->perturbation);
-
-	return reciprocal_scale(hash, q->flows_cnt);
+	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
 }
 
 static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -482,7 +479,6 @@  static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
 	q->memory_limit = 32 << 20; /* 32 MBytes */
 	q->drop_batch_size = 64;
 	q->quantum = psched_mtu(qdisc_dev(sch));
-	q->perturbation = prandom_u32();
 	INIT_LIST_HEAD(&q->new_flows);
 	INIT_LIST_HEAD(&q->old_flows);
 	codel_params_init(&q->cparams);
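
As a usage note, the bucket index is still derived the same way after this
change: reciprocal_scale() maps the 32-bit hash onto [0, flows_cnt) with a
multiply and shift instead of a modulo. A minimal sketch mirroring the
kernel's reciprocal_scale() helper, shown only to illustrate the math:

static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
	/* e.g. val = 0x80000000, ep_ro = 1024: (2^31 * 2^10) >> 32 = 512 */
	return (u32)(((u64) val * ep_ro) >> 32);
}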