diff mbox series

[v3,net-next,2/3] netem: add uapi to express delay and jitter in nanoseconds

Message ID 1510182748-10991-3-git-send-email-dave.taht@gmail.com
State Accepted, archived
Delegated to: David Miller
Headers show
Series netem: add nsec scheduling and slot feature | expand

Commit Message

Dave Taht Nov. 8, 2017, 11:12 p.m. UTC
netem userspace has long relied on a horrible /proc/net/psched hack
to translate the current notion of "ticks" to nanoseconds.

Expressing latency and jitter instead, in well defined nanoseconds,
increases the dynamic range of emulated delays and jitter in netem.

It will also ease a transition where reducing a tick to nsec
equivalence would constrain the max delay in prior versions of
netem to only 4.3 seconds.

Signed-off-by: Dave Taht <dave.taht@gmail.com>
Suggested-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
---
 include/uapi/linux/pkt_sched.h |  2 ++
 net/sched/sch_netem.c          | 14 ++++++++++++++
 2 files changed, 16 insertions(+)

Comments

Stephen Hemminger Nov. 8, 2017, 11:24 p.m. UTC | #1
On Wed,  8 Nov 2017 15:12:27 -0800
Dave Taht <dave.taht@gmail.com> wrote:

> --- a/net/sched/sch_netem.c
> +++ b/net/sched/sch_netem.c
> @@ -819,6 +819,8 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
>  	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
>  	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
>  	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
> +	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
> +	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
>  };
>  
>  static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
> @@ -916,6 +918,12 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
>  		q->rate = max_t(u64, q->rate,
>  				nla_get_u64(tb[TCA_NETEM_RATE64]));
>  
> +	if (tb[TCA_NETEM_LATENCY64])
> +		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
> +
> +	if (tb[TCA_NETEM_JITTER64])
> +		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
> +
>  	if (tb[TCA_NETEM_ECN])
>  		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
>  

Although some of the maths use signed 64 bit.
I think the API should be unsigned 64 bit.  Or do you want to allow
negative latency?
Dave Taht Nov. 8, 2017, 11:36 p.m. UTC | #2
On Wed, Nov 8, 2017 at 3:24 PM, Stephen Hemminger
<stephen@networkplumber.org> wrote:
> On Wed,  8 Nov 2017 15:12:27 -0800
> Dave Taht <dave.taht@gmail.com> wrote:
>
>> --- a/net/sched/sch_netem.c
>> +++ b/net/sched/sch_netem.c
>> @@ -819,6 +819,8 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
>>       [TCA_NETEM_LOSS]        = { .type = NLA_NESTED },
>>       [TCA_NETEM_ECN]         = { .type = NLA_U32 },
>>       [TCA_NETEM_RATE64]      = { .type = NLA_U64 },
>> +     [TCA_NETEM_LATENCY64]   = { .type = NLA_S64 },
>> +     [TCA_NETEM_JITTER64]    = { .type = NLA_S64 },
>>  };
>>
>>  static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
>> @@ -916,6 +918,12 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
>>               q->rate = max_t(u64, q->rate,
>>                               nla_get_u64(tb[TCA_NETEM_RATE64]));
>>
>> +     if (tb[TCA_NETEM_LATENCY64])
>> +             q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
>> +
>> +     if (tb[TCA_NETEM_JITTER64])
>> +             q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
>> +
>>       if (tb[TCA_NETEM_ECN])
>>               q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
>>
>
> Although some of the maths use signed 64 bit.
> I think the API should be unsigned 64 bit.  Or do you want to allow
> negative latency?

Personally I find things simpler to reason about when signed, and the
userspace side of the code (currently) offers the ability to generically
have signed time values for "other stuff".

The constrained range of 63 vs 64 bits we can debate in 272 years or so.

I'll let eric cast the tie vote.
Eric Dumazet Nov. 8, 2017, 11:43 p.m. UTC | #3
On Wed, Nov 8, 2017 at 3:36 PM, Dave Taht <dave.taht@gmail.com> wrote:
> On Wed, Nov 8, 2017 at 3:24 PM, Stephen Hemminger
> <stephen@networkplumber.org> wrote:
>> On Wed,  8 Nov 2017 15:12:27 -0800
>> Dave Taht <dave.taht@gmail.com> wrote:
>>
>>> --- a/net/sched/sch_netem.c
>>> +++ b/net/sched/sch_netem.c
>>> @@ -819,6 +819,8 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
>>>       [TCA_NETEM_LOSS]        = { .type = NLA_NESTED },
>>>       [TCA_NETEM_ECN]         = { .type = NLA_U32 },
>>>       [TCA_NETEM_RATE64]      = { .type = NLA_U64 },
>>> +     [TCA_NETEM_LATENCY64]   = { .type = NLA_S64 },
>>> +     [TCA_NETEM_JITTER64]    = { .type = NLA_S64 },
>>>  };
>>>
>>>  static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
>>> @@ -916,6 +918,12 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
>>>               q->rate = max_t(u64, q->rate,
>>>                               nla_get_u64(tb[TCA_NETEM_RATE64]));
>>>
>>> +     if (tb[TCA_NETEM_LATENCY64])
>>> +             q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
>>> +
>>> +     if (tb[TCA_NETEM_JITTER64])
>>> +             q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
>>> +
>>>       if (tb[TCA_NETEM_ECN])
>>>               q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
>>>
>>
>> Although some of the maths use signed 64 bit.
>> I think the API should be unsigned 64 bit.  Or do you want to allow
>> negative latency?
>
> Personally I find things simpler to reason about when signed, and the
> userspace side of the code (currently) offers the ability to generically
> have signed time values for "other stuff".
>
> The constrained range of 63 vs 64 bits we can debate in 272 years or so.

ktime_get_ns() returns number of nanosec in u64, since machine _boot_

So we won't have overflows, unless a Linux host can stay up for more
than 500 years.

This seems unlikely.
diff mbox series

Patch

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 6a2c5ea..8fe6d18 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -537,6 +537,8 @@  enum {
 	TCA_NETEM_ECN,
 	TCA_NETEM_RATE64,
 	TCA_NETEM_PAD,
+	TCA_NETEM_LATENCY64,
+	TCA_NETEM_JITTER64,
 	__TCA_NETEM_MAX,
 };
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e64e0e0..47d6dec 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -819,6 +819,8 @@  static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
+	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
+	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
 };
 
 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -916,6 +918,12 @@  static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 		q->rate = max_t(u64, q->rate,
 				nla_get_u64(tb[TCA_NETEM_RATE64]));
 
+	if (tb[TCA_NETEM_LATENCY64])
+		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
+
+	if (tb[TCA_NETEM_JITTER64])
+		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
+
 	if (tb[TCA_NETEM_ECN])
 		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
 
@@ -1020,6 +1028,12 @@  static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
 		goto nla_put_failure;
 
+	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
+		goto nla_put_failure;
+
+	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
+		goto nla_put_failure;
+
 	cor.delay_corr = q->delay_cor.rho;
 	cor.loss_corr = q->loss_cor.rho;
 	cor.dup_corr = q->dup_cor.rho;