[bpf,1/2] bpf/test_run: fix unkillable BPF_PROG_TEST_RUN

Message ID: 20190212234239.174386-1-sdf@google.com
State: Accepted
Delegated to: BPF Maintainers
Series: [bpf,1/2] bpf/test_run: fix unkillable BPF_PROG_TEST_RUN

Commit Message

Stanislav Fomichev Feb. 12, 2019, 11:42 p.m. UTC
Syzbot found out that running BPF_PROG_TEST_RUN with repeat=0xffffffff
makes the process unkillable. The problem is that when CONFIG_PREEMPT is
enabled, we never see need_resched() return true. This is due to the
fact that preempt_enable() (which we do in bpf_test_run_one on each
iteration) now handles resched if it's needed.

Let's disable preemption for the whole run, not per test. In this case
we can properly see whether resched is needed.
Let's also properly return -EINTR to userspace in case of a signal
interrupt.

See recent discussion:
http://lore.kernel.org/netdev/CAH3MdRWHr4N8jei8jxDppXjmw-Nw=puNDLbu1dQOFQHxfU2onA@mail.gmail.com

I'll follow up with the same fix for bpf_prog_test_run_flow_dissector in
bpf-next.

Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
 net/bpf/test_run.c | 45 ++++++++++++++++++++++++---------------------
 1 file changed, 24 insertions(+), 21 deletions(-)
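
For context, the user-visible effect of the fix: a BPF_PROG_TEST_RUN with a
huge repeat count can now be interrupted from userspace and fails with
-EINTR instead of leaving the task unkillable. A minimal sketch of such a
caller follows (raw bpf(2) syscall, field names as in union bpf_attr;
prog_fd is assumed to come from an earlier BPF_PROG_LOAD and error handling
is trimmed; the snippet is illustrative, not part of this series):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Illustrative only: drive prog_fd through BPF_PROG_TEST_RUN with a large
 * repeat count; with this patch a pending signal ends the run early and
 * the syscall fails with EINTR instead of looping forever in the kernel.
 */
static int test_run_huge_repeat(int prog_fd)
{
        unsigned char pkt[64] = {0};    /* dummy input packet */
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd      = prog_fd;
        attr.test.repeat       = 0xffffffff;   /* repeat count from the syzbot report */
        attr.test.data_in      = (__u64)(unsigned long)pkt;
        attr.test.data_size_in = sizeof(pkt);

        if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) < 0) {
                if (errno == EINTR)    /* e.g. the user hit ^C */
                        fprintf(stderr, "test run interrupted by a signal\n");
                return -errno;
        }

        printf("retval=%u duration=%uns\n", attr.test.retval, attr.test.duration);
        return 0;
}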

Comments

Daniel Borkmann Feb. 16, 2019, 1:17 a.m. UTC | #1
On 02/13/2019 12:42 AM, Stanislav Fomichev wrote:
> Syzbot found out that running BPF_PROG_TEST_RUN with repeat=0xffffffff
> makes process unkillable. The problem is that when CONFIG_PREEMPT is
> enabled, we never see need_resched() return true. This is due to the
> fact that preempt_enable() (which we do in bpf_test_run_one on each
> iteration) now handles resched if it's needed.
> 
> Let's disable preemption for the whole run, not per test. In this case
> we can properly see whether resched is needed.
> Let's also properly return -EINTR to the userspace in case of a signal
> interrupt.
> 
> See recent discussion:
> http://lore.kernel.org/netdev/CAH3MdRWHr4N8jei8jxDppXjmw-Nw=puNDLbu1dQOFQHxfU2onA@mail.gmail.com
> 
> I'll follow up with the same fix bpf_prog_test_run_flow_dissector in
> bpf-next.
> 
> Reported-by: syzbot <syzkaller@googlegroups.com>
> Signed-off-by: Stanislav Fomichev <sdf@google.com>
> ---
>  net/bpf/test_run.c | 45 ++++++++++++++++++++++++---------------------
>  1 file changed, 24 insertions(+), 21 deletions(-)
> 
> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> index fa2644d276ef..e31e1b20f7f4 100644
> --- a/net/bpf/test_run.c
> +++ b/net/bpf/test_run.c
> @@ -13,27 +13,13 @@
>  #include <net/sock.h>
>  #include <net/tcp.h>
>  
> -static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
> -		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> -{
> -	u32 ret;
> -
> -	preempt_disable();
> -	rcu_read_lock();
> -	bpf_cgroup_storage_set(storage);
> -	ret = BPF_PROG_RUN(prog, ctx);
> -	rcu_read_unlock();
> -	preempt_enable();
> -
> -	return ret;
> -}
> -
> -static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
> -			u32 *time)
> +static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
> +			u32 *retval, u32 *time)
>  {
>  	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
>  	enum bpf_cgroup_storage_type stype;
>  	u64 time_start, time_spent = 0;
> +	int ret = 0;
>  	u32 i;
>  
>  	for_each_cgroup_storage_type(stype) {
> @@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
>  
>  	if (!repeat)
>  		repeat = 1;
> +
> +	rcu_read_lock();
> +	preempt_disable();
>  	time_start = ktime_get_ns();
>  	for (i = 0; i < repeat; i++) {
> -		*ret = bpf_test_run_one(prog, ctx, storage);
> +		bpf_cgroup_storage_set(storage);
> +		*retval = BPF_PROG_RUN(prog, ctx);
> +
> +		if (signal_pending(current)) {
> +			ret = -EINTR;
> +			break;
> +		}

Wouldn't it be enough to just move the signal_pending() test up as you
did, to actually fix the unkillable issue? For CONFIG_PREEMPT the
need_resched() check below is never triggered, as you mention, since
preempt_enable() handles rescheduling internally in this situation, so
moving only that out should suffice.

The rationale for disabling preemption for the whole run is imho
a bit different, namely that you would not screw up the ktime
measurements due to rescheduling happening in between otherwise.

But then, once preemption is disabled for the whole run, is there
a need to move out the extra signal_pending() test (presumably as
need_resched() does not handle TIF_SIGPENDING but only TIF_NEED_RESCHED,
but we still wouldn't get into an unkillable situation here, no)?
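
(For reference, and simplified from include/linux/preempt.h: with
CONFIG_PREEMPT=y, preempt_enable() itself reschedules as soon as the
preempt count drops to zero, which is why the in-loop need_resched()
check never had a chance to fire before this change.)

/* Simplified sketch of the CONFIG_PREEMPT=y variant; the real macro lives
 * in include/linux/preempt.h.  Dropping the last preemption reference
 * reschedules right here, so by the time the loop would test
 * need_resched() the flag has already been consumed.
 */
#define preempt_enable() \
do { \
        barrier(); \
        if (unlikely(preempt_count_dec_and_test())) \
                __preempt_schedule(); \
} while (0)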

>  		if (need_resched()) {
> -			if (signal_pending(current))
> -				break;
>  			time_spent += ktime_get_ns() - time_start;
> +			preempt_enable();
> +			rcu_read_unlock();
> +
>  			cond_resched();
> +
> +			rcu_read_lock();
> +			preempt_disable();
>  			time_start = ktime_get_ns();
>  		}
>  	}
>  	time_spent += ktime_get_ns() - time_start;
> +	preempt_enable();
> +	rcu_read_unlock();
> +
>  	do_div(time_spent, repeat);
>  	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
>  
>  	for_each_cgroup_storage_type(stype)
>  		bpf_cgroup_storage_free(storage[stype]);
>  
> -	return 0;
> +	return ret;
>  }
>  
>  static int bpf_test_finish(const union bpf_attr *kattr,
>
Stanislav Fomichev Feb. 18, 2019, 5:29 p.m. UTC | #2
On 02/16, Daniel Borkmann wrote:
> On 02/13/2019 12:42 AM, Stanislav Fomichev wrote:
> > Syzbot found out that running BPF_PROG_TEST_RUN with repeat=0xffffffff
> > makes process unkillable. The problem is that when CONFIG_PREEMPT is
> > enabled, we never see need_resched() return true. This is due to the
> > fact that preempt_enable() (which we do in bpf_test_run_one on each
> > iteration) now handles resched if it's needed.
> > 
> > Let's disable preemption for the whole run, not per test. In this case
> > we can properly see whether resched is needed.
> > Let's also properly return -EINTR to the userspace in case of a signal
> > interrupt.
> > 
> > See recent discussion:
> > http://lore.kernel.org/netdev/CAH3MdRWHr4N8jei8jxDppXjmw-Nw=puNDLbu1dQOFQHxfU2onA@mail.gmail.com
> > 
> > I'll follow up with the same fix bpf_prog_test_run_flow_dissector in
> > bpf-next.
> > 
> > Reported-by: syzbot <syzkaller@googlegroups.com>
> > Signed-off-by: Stanislav Fomichev <sdf@google.com>
> > ---
> >  net/bpf/test_run.c | 45 ++++++++++++++++++++++++---------------------
> >  1 file changed, 24 insertions(+), 21 deletions(-)
> > 
> > diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> > index fa2644d276ef..e31e1b20f7f4 100644
> > --- a/net/bpf/test_run.c
> > +++ b/net/bpf/test_run.c
> > @@ -13,27 +13,13 @@
> >  #include <net/sock.h>
> >  #include <net/tcp.h>
> >  
> > -static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
> > -		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> > -{
> > -	u32 ret;
> > -
> > -	preempt_disable();
> > -	rcu_read_lock();
> > -	bpf_cgroup_storage_set(storage);
> > -	ret = BPF_PROG_RUN(prog, ctx);
> > -	rcu_read_unlock();
> > -	preempt_enable();
> > -
> > -	return ret;
> > -}
> > -
> > -static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
> > -			u32 *time)
> > +static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
> > +			u32 *retval, u32 *time)
> >  {
> >  	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
> >  	enum bpf_cgroup_storage_type stype;
> >  	u64 time_start, time_spent = 0;
> > +	int ret = 0;
> >  	u32 i;
> >  
> >  	for_each_cgroup_storage_type(stype) {
> > @@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
> >  
> >  	if (!repeat)
> >  		repeat = 1;
> > +
> > +	rcu_read_lock();
> > +	preempt_disable();
> >  	time_start = ktime_get_ns();
> >  	for (i = 0; i < repeat; i++) {
> > -		*ret = bpf_test_run_one(prog, ctx, storage);
> > +		bpf_cgroup_storage_set(storage);
> > +		*retval = BPF_PROG_RUN(prog, ctx);
> > +
> > +		if (signal_pending(current)) {
> > +			ret = -EINTR;
> > +			break;
> > +		}
> 
> Wouldn't it be enough to just move the signal_pending() test to
> the above as you did to actually fix the unkillable issue? For
> CONFIG_PREEMPT the below need_resched() is never triggered as you
> mention as preempt_enable() handles rescheduling internally in
> this situation, so moving it only out should suffice.
> 
> The rationale for disabling preemption for the whole run is imho
> a bit different, namely that you would not screw up the ktime
> measurements due to rescheduling happening in between otherwise.
That's exactly the reason why we need to preempt_disable() the whole
run; we can't afford to be rescheduled at preempt_enable(), it would
screw up our ktime estimation.

> But then, once preemption is disabled for the whole run, is there
> a need to move out the extra signal_pending() test (presumably as
> need_resched() does not handle TIF_SIGPENDING but only TIF_NEED_RESCHED
> but we still wouldn't get into a unkillable situation here, no)?
I'm not sure; they look like two separate flags, so it feels safer to handle
them separately (and we have a precedent in do_check in verifier.c). While
we do set them both when sending a signal, it looks like need_resched is
for the cases where we wake up a task with a higher priority. So, in
theory, we can have signal_pending without need_resched. (Also, with a
CONFIG_PREEMPT=y kernel, there is another complication with
preempt_count().)
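
Roughly, the two helpers test different thread flags (simplified from
include/linux/sched/signal.h and include/linux/sched.h), so one can be
set without the other:

/* Simplified: signal_pending() looks at TIF_SIGPENDING ... */
static inline int signal_pending(struct task_struct *p)
{
        return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

/* ... while need_resched() looks at TIF_NEED_RESCHED. */
static __always_inline bool need_resched(void)
{
        return unlikely(tif_need_resched());
}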

> 
> >  		if (need_resched()) {
> > -			if (signal_pending(current))
> > -				break;
> >  			time_spent += ktime_get_ns() - time_start;
> > +			preempt_enable();
> > +			rcu_read_unlock();
> > +
> >  			cond_resched();
> > +
> > +			rcu_read_lock();
> > +			preempt_disable();
> >  			time_start = ktime_get_ns();
> >  		}
> >  	}
> >  	time_spent += ktime_get_ns() - time_start;
> > +	preempt_enable();
> > +	rcu_read_unlock();
> > +
> >  	do_div(time_spent, repeat);
> >  	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
> >  
> >  	for_each_cgroup_storage_type(stype)
> >  		bpf_cgroup_storage_free(storage[stype]);
> >  
> > -	return 0;
> > +	return ret;
> >  }
> >  
> >  static int bpf_test_finish(const union bpf_attr *kattr,
> > 
>
Daniel Borkmann Feb. 18, 2019, 11:23 p.m. UTC | #3
On 02/18/2019 06:29 PM, Stanislav Fomichev wrote:
> On 02/16, Daniel Borkmann wrote:
>> On 02/13/2019 12:42 AM, Stanislav Fomichev wrote:
>>> Syzbot found out that running BPF_PROG_TEST_RUN with repeat=0xffffffff
>>> makes process unkillable. The problem is that when CONFIG_PREEMPT is
>>> enabled, we never see need_resched() return true. This is due to the
>>> fact that preempt_enable() (which we do in bpf_test_run_one on each
>>> iteration) now handles resched if it's needed.
>>>
>>> Let's disable preemption for the whole run, not per test. In this case
>>> we can properly see whether resched is needed.
>>> Let's also properly return -EINTR to the userspace in case of a signal
>>> interrupt.
>>>
>>> See recent discussion:
>>> http://lore.kernel.org/netdev/CAH3MdRWHr4N8jei8jxDppXjmw-Nw=puNDLbu1dQOFQHxfU2onA@mail.gmail.com
>>>
>>> I'll follow up with the same fix bpf_prog_test_run_flow_dissector in
>>> bpf-next.
>>>
>>> Reported-by: syzbot <syzkaller@googlegroups.com>
>>> Signed-off-by: Stanislav Fomichev <sdf@google.com>
>>> ---
>>>  net/bpf/test_run.c | 45 ++++++++++++++++++++++++---------------------
>>>  1 file changed, 24 insertions(+), 21 deletions(-)
>>>
>>> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
>>> index fa2644d276ef..e31e1b20f7f4 100644
>>> --- a/net/bpf/test_run.c
>>> +++ b/net/bpf/test_run.c
>>> @@ -13,27 +13,13 @@
>>>  #include <net/sock.h>
>>>  #include <net/tcp.h>
>>>  
>>> -static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
>>> -		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
>>> -{
>>> -	u32 ret;
>>> -
>>> -	preempt_disable();
>>> -	rcu_read_lock();
>>> -	bpf_cgroup_storage_set(storage);
>>> -	ret = BPF_PROG_RUN(prog, ctx);
>>> -	rcu_read_unlock();
>>> -	preempt_enable();
>>> -
>>> -	return ret;
>>> -}
>>> -
>>> -static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
>>> -			u32 *time)
>>> +static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
>>> +			u32 *retval, u32 *time)
>>>  {
>>>  	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
>>>  	enum bpf_cgroup_storage_type stype;
>>>  	u64 time_start, time_spent = 0;
>>> +	int ret = 0;
>>>  	u32 i;
>>>  
>>>  	for_each_cgroup_storage_type(stype) {
>>> @@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
>>>  
>>>  	if (!repeat)
>>>  		repeat = 1;
>>> +
>>> +	rcu_read_lock();
>>> +	preempt_disable();
>>>  	time_start = ktime_get_ns();
>>>  	for (i = 0; i < repeat; i++) {
>>> -		*ret = bpf_test_run_one(prog, ctx, storage);
>>> +		bpf_cgroup_storage_set(storage);
>>> +		*retval = BPF_PROG_RUN(prog, ctx);
>>> +
>>> +		if (signal_pending(current)) {
>>> +			ret = -EINTR;
>>> +			break;
>>> +		}
>>
>> Wouldn't it be enough to just move the signal_pending() test to
>> the above as you did to actually fix the unkillable issue? For
>> CONFIG_PREEMPT the below need_resched() is never triggered as you
>> mention as preempt_enable() handles rescheduling internally in
>> this situation, so moving it only out should suffice.
>>
>> The rationale for disabling preemption for the whole run is imho
>> a bit different, namely that you would not screw up the ktime
>> measurements due to rescheduling happening in between otherwise.
> That's exactly the reason why we need to preempt_disable() the whole
> run; we can't preempt on preempt_enable(), it would screw up our
> ktime estimation.
> 
>> But then, once preemption is disabled for the whole run, is there
>> a need to move out the extra signal_pending() test (presumably as
>> need_resched() does not handle TIF_SIGPENDING but only TIF_NEED_RESCHED
>> but we still wouldn't get into a unkillable situation here, no)?
> I'm not sure, they look like two separate flags, it feels safer to handle
> them separately (and we have a precedent in do_check in verifier.c). While
> we do set them both when sending signal, it looks like need_resched is
> for the cases where we wake up a task with a higher priority. So, in
> theory, we can have a signal_pending without need_resched. (Also, with
> CONFIG_PREEMT=y kernel, there is another complication with
> preempt_count()).

Yeah, given there is no separation, it's better to move it out, agree.
Applied both, thanks!

>>>  		if (need_resched()) {
>>> -			if (signal_pending(current))
>>> -				break;
>>>  			time_spent += ktime_get_ns() - time_start;
>>> +			preempt_enable();
>>> +			rcu_read_unlock();
>>> +
>>>  			cond_resched();
>>> +
>>> +			rcu_read_lock();
>>> +			preempt_disable();
>>>  			time_start = ktime_get_ns();
>>>  		}
>>>  	}
>>>  	time_spent += ktime_get_ns() - time_start;
>>> +	preempt_enable();
>>> +	rcu_read_unlock();
>>> +
>>>  	do_div(time_spent, repeat);
>>>  	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
>>>  
>>>  	for_each_cgroup_storage_type(stype)
>>>  		bpf_cgroup_storage_free(storage[stype]);
>>>  
>>> -	return 0;
>>> +	return ret;
>>>  }
>>>  
>>>  static int bpf_test_finish(const union bpf_attr *kattr,
>>>
>>

Patch

diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..e31e1b20f7f4 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -13,27 +13,13 @@ 
 #include <net/sock.h>
 #include <net/tcp.h>
 
-static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
-		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
-	u32 ret;
-
-	preempt_disable();
-	rcu_read_lock();
-	bpf_cgroup_storage_set(storage);
-	ret = BPF_PROG_RUN(prog, ctx);
-	rcu_read_unlock();
-	preempt_enable();
-
-	return ret;
-}
-
-static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
-			u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+			u32 *retval, u32 *time)
 {
 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
 	enum bpf_cgroup_storage_type stype;
 	u64 time_start, time_spent = 0;
+	int ret = 0;
 	u32 i;
 
 	for_each_cgroup_storage_type(stype) {
@@ -48,25 +34,42 @@  static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
 
 	if (!repeat)
 		repeat = 1;
+
+	rcu_read_lock();
+	preempt_disable();
 	time_start = ktime_get_ns();
 	for (i = 0; i < repeat; i++) {
-		*ret = bpf_test_run_one(prog, ctx, storage);
+		bpf_cgroup_storage_set(storage);
+		*retval = BPF_PROG_RUN(prog, ctx);
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
 		if (need_resched()) {
-			if (signal_pending(current))
-				break;
 			time_spent += ktime_get_ns() - time_start;
+			preempt_enable();
+			rcu_read_unlock();
+
 			cond_resched();
+
+			rcu_read_lock();
+			preempt_disable();
 			time_start = ktime_get_ns();
 		}
 	}
 	time_spent += ktime_get_ns() - time_start;
+	preempt_enable();
+	rcu_read_unlock();
+
 	do_div(time_spent, repeat);
 	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
 
 	for_each_cgroup_storage_type(stype)
 		bpf_cgroup_storage_free(storage[stype]);
 
-	return 0;
+	return ret;
 }
 
 static int bpf_test_finish(const union bpf_attr *kattr,