diff mbox series

[nf,3/3] xt_hashlimit: limit the max size of hashtable

Message ID 20200131205216.22213-4-xiyou.wangcong@gmail.com
State Awaiting Upstream
Delegated to: David Miller
Headers show
Series netfilter: xt_hashlimit: a few improvements | expand

Commit Message

Cong Wang Jan. 31, 2020, 8:52 p.m. UTC
The user-specified hashtable size is unbound, this could
easily lead to an OOM or a hung task as we hold the global
mutex while allocating and initializing the new hashtable.

The max value is derived from the max value when chosen by
the kernel.

Reported-and-tested-by: syzbot+adf6c6c2be1c3a718121@syzkaller.appspotmail.com
Cc: Pablo Neira Ayuso <pablo@netfilter.org>
Cc: Jozsef Kadlecsik <kadlec@netfilter.org>
Cc: Florian Westphal <fw@strlen.de>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
---
 net/netfilter/xt_hashlimit.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

Comments

Florian Westphal Jan. 31, 2020, 10:08 p.m. UTC | #1
Cong Wang <xiyou.wangcong@gmail.com> wrote:
> The user-specified hashtable size is unbound, this could
> easily lead to an OOM or a hung task as we hold the global
> mutex while allocating and initializing the new hashtable.
> 
> The max value is derived from the max value when chosen by
> the kernel.
> 
> Reported-and-tested-by: syzbot+adf6c6c2be1c3a718121@syzkaller.appspotmail.com
> Cc: Pablo Neira Ayuso <pablo@netfilter.org>
> Cc: Jozsef Kadlecsik <kadlec@netfilter.org>
> Cc: Florian Westphal <fw@strlen.de>
> Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
> ---
>  net/netfilter/xt_hashlimit.c | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
> 
> diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
> index 57a2639bcc22..6327134c5886 100644
> --- a/net/netfilter/xt_hashlimit.c
> +++ b/net/netfilter/xt_hashlimit.c
> @@ -272,6 +272,8 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
>  }
>  static void htable_gc(struct work_struct *work);
>  
> +#define HASHLIMIT_MAX_SIZE 8192
> +
>  static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
>  			 const char *name, u_int8_t family,
>  			 struct xt_hashlimit_htable **out_hinfo,
> @@ -290,7 +292,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
>  		size = (nr_pages << PAGE_SHIFT) / 16384 /
>  		       sizeof(struct hlist_head);
>  		if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
> -			size = 8192;
> +			size = HASHLIMIT_MAX_SIZE;
>  		if (size < 16)
>  			size = 16;
>  	}
> @@ -848,6 +850,8 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
>  
>  	if (cfg->gc_interval == 0 || cfg->expire == 0)
>  		return -EINVAL;
> +	if (cfg->size > HASHLIMIT_MAX_SIZE)
> +		return -ENOMEM;

Hmm, won't that break restore of rulesets that have something like

--hashlimit-size 10000?

AFAIU this limits the module to vmalloc requests of only 64kbyte.
I'm not opposed to a limit (or a cap), but 64k seems a bit low to me.
Cong Wang Jan. 31, 2020, 11:16 p.m. UTC | #2
On Fri, Jan 31, 2020 at 2:08 PM Florian Westphal <fw@strlen.de> wrote:
>
> Cong Wang <xiyou.wangcong@gmail.com> wrote:
> > The user-specified hashtable size is unbound, this could
> > easily lead to an OOM or a hung task as we hold the global
> > mutex while allocating and initializing the new hashtable.
> >
> > The max value is derived from the max value when chosen by
> > the kernel.
> >
> > Reported-and-tested-by: syzbot+adf6c6c2be1c3a718121@syzkaller.appspotmail.com
> > Cc: Pablo Neira Ayuso <pablo@netfilter.org>
> > Cc: Jozsef Kadlecsik <kadlec@netfilter.org>
> > Cc: Florian Westphal <fw@strlen.de>
> > Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
> > ---
> >  net/netfilter/xt_hashlimit.c | 6 +++++-
> >  1 file changed, 5 insertions(+), 1 deletion(-)
> >
> > diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
> > index 57a2639bcc22..6327134c5886 100644
> > --- a/net/netfilter/xt_hashlimit.c
> > +++ b/net/netfilter/xt_hashlimit.c
> > @@ -272,6 +272,8 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
> >  }
> >  static void htable_gc(struct work_struct *work);
> >
> > +#define HASHLIMIT_MAX_SIZE 8192
> > +
> >  static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
> >                        const char *name, u_int8_t family,
> >                        struct xt_hashlimit_htable **out_hinfo,
> > @@ -290,7 +292,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
> >               size = (nr_pages << PAGE_SHIFT) / 16384 /
> >                      sizeof(struct hlist_head);
> >               if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
> > -                     size = 8192;
> > +                     size = HASHLIMIT_MAX_SIZE;
> >               if (size < 16)
> >                       size = 16;
> >       }
> > @@ -848,6 +850,8 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
> >
> >       if (cfg->gc_interval == 0 || cfg->expire == 0)
> >               return -EINVAL;
> > +     if (cfg->size > HASHLIMIT_MAX_SIZE)
> > +             return -ENOMEM;
>
> Hmm, won't that break restore of rulesets that have something like
>
> --hashlimit-size 10000?
>
> AFAIU this limits the module to vmalloc requests of only 64kbyte.
> I'm not opposed to a limit (or a cap), but 64k seems a bit low to me.

8192 is from the current code which handles kernel-chosen size
(that is cfg->size==0), I personally have no idea what the max
should be. :)

Please suggest a number.

Thanks.
Florian Westphal Jan. 31, 2020, 11:36 p.m. UTC | #3
Cong Wang <xiyou.wangcong@gmail.com> wrote:
> On Fri, Jan 31, 2020 at 2:08 PM Florian Westphal <fw@strlen.de> wrote:
> >
> > Cong Wang <xiyou.wangcong@gmail.com> wrote:
> > > The user-specified hashtable size is unbound, this could
> > > easily lead to an OOM or a hung task as we hold the global
> > > mutex while allocating and initializing the new hashtable.
> > >
> > > The max value is derived from the max value when chosen by
> > > the kernel.
> > >
> > > Reported-and-tested-by: syzbot+adf6c6c2be1c3a718121@syzkaller.appspotmail.com
> > > Cc: Pablo Neira Ayuso <pablo@netfilter.org>
> > > Cc: Jozsef Kadlecsik <kadlec@netfilter.org>
> > > Cc: Florian Westphal <fw@strlen.de>
> > > Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
> > > ---
> > >  net/netfilter/xt_hashlimit.c | 6 +++++-
> > >  1 file changed, 5 insertions(+), 1 deletion(-)
> > >
> > > diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
> > > index 57a2639bcc22..6327134c5886 100644
> > > --- a/net/netfilter/xt_hashlimit.c
> > > +++ b/net/netfilter/xt_hashlimit.c
> > > @@ -272,6 +272,8 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
> > >  }
> > >  static void htable_gc(struct work_struct *work);
> > >
> > > +#define HASHLIMIT_MAX_SIZE 8192
> > > +
> > >  static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
> > >                        const char *name, u_int8_t family,
> > >                        struct xt_hashlimit_htable **out_hinfo,
> > > @@ -290,7 +292,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
> > >               size = (nr_pages << PAGE_SHIFT) / 16384 /
> > >                      sizeof(struct hlist_head);
> > >               if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
> > > -                     size = 8192;
> > > +                     size = HASHLIMIT_MAX_SIZE;
> > >               if (size < 16)
> > >                       size = 16;
> > >       }
> > > @@ -848,6 +850,8 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
> > >
> > >       if (cfg->gc_interval == 0 || cfg->expire == 0)
> > >               return -EINVAL;
> > > +     if (cfg->size > HASHLIMIT_MAX_SIZE)
> > > +             return -ENOMEM;
> >
> > Hmm, won't that break restore of rulesets that have something like
> >
> > --hashlimit-size 10000?
> >
> > AFAIU this limits the module to vmalloc requests of only 64kbyte.
> > I'm not opposed to a limit (or a cap), but 64k seems a bit low to me.
> 
> 8192 is from the current code which handles kernel-chosen size
> (that is cfg->size==0), I personally have no idea what the max
> should be. :)

Me neither :-/

> Please suggest a number.

I would propose a max alloc size (hard limit) of ~8 MByte of vmalloc
space, or maybe 16 at most.

1048576 max upperlimit -> ~8mbyte vmalloc request -> allows to store
up to 2**23 entries.

In order to prevent breaking userspace, perhaps make it so that the
kernel caps cfg.max at twice that value?  Would allow storing up to
16777216 addresses with an average chain depth of 16 (which is quite
large).  We could increase the max limit in case someone presents a use
case.

What do you think?
Cong Wang Feb. 1, 2020, 2:53 a.m. UTC | #4
On Fri, Jan 31, 2020 at 3:37 PM Florian Westphal <fw@strlen.de> wrote:
> O would propose a max alloc size (hard limit) of ~8 MByte of vmalloc
> space, or maybe 16 at most.
>
> 1048576 max upperlimit -> ~8mbyte vmalloc request -> allows to store
> up to 2**23 entries.

Changing HASHLIMIT_MAX_SIZE to 1048576 seems fine.

>
> In order to prevent breaking userspace, perhaps make it so that the
> kernel caps cfg.max at twice that value?  Would allow storing up to
> 16777216 addresses with an average chain depth of 16 (which is quite
> large).  We could increase the max limit in case someone presents a use
> case.
>

Not sure if I understand this, I don't see how cap'ing cfg->max could
help prevent breaking user-space? Are you suggesting to cap it with
HASHLIMIT_MAX_SIZE too? Something like below?

+       if (cfg->max > 2 * HASHLIMIT_MAX_SIZE)
+               cfg->max = 2 * HASHLIMIT_MAX_SIZE;

Thanks.
Florian Westphal Feb. 2, 2020, 6:16 a.m. UTC | #5
Cong Wang <xiyou.wangcong@gmail.com> wrote:
> > In order to prevent breaking userspace, perhaps make it so that the
> > kernel caps cfg.max at twice that value?  Would allow storing up to
> > 16777216 addresses with an average chain depth of 16 (which is quite
> > large).  We could increase the max limit in case someone presents a use
> > case.
> >
> 
> Not sure if I understand this, I don't see how cap'ing cfg->max could
> help prevent breaking user-space? Are you suggesting to cap it with
> HASHLIMIT_MAX_SIZE too? Something like below?
> 
> +       if (cfg->max > 2 * HASHLIMIT_MAX_SIZE)
> +               cfg->max = 2 * HASHLIMIT_MAX_SIZE;
> 

Yes, that's what I meant, cap the user-provided value to something that's
going to be less of a problem.

But now that I read it, the "2 *" part looks really silly, so I suggest
to go with " > FOO_MAX", else it's not a maximum value after all.
Cong Wang Feb. 2, 2020, 6:27 p.m. UTC | #6
On Sat, Feb 1, 2020 at 10:16 PM Florian Westphal <fw@strlen.de> wrote:
>
> Cong Wang <xiyou.wangcong@gmail.com> wrote:
> > > In order to prevent breaking userspace, perhaps make it so that the
> > > kernel caps cfg.max at twice that value?  Would allow storing up to
> > > 16777216 addresses with an average chain depth of 16 (which is quite
> > > large).  We could increase the max limit in case someone presents a use
> > > case.
> > >
> >
> > Not sure if I understand this, I don't see how cap'ing cfg->max could
> > help prevent breaking user-space? Are you suggesting to cap it with
> > HASHLIMIT_MAX_SIZE too? Something like below?
> >
> > +       if (cfg->max > 2 * HASHLIMIT_MAX_SIZE)
> > +               cfg->max = 2 * HASHLIMIT_MAX_SIZE;
> >
>
> Yes, that's what I meant, cap the user-provided value to something that's
> going to be less of a problem.
>
> But now that I read it, the "2 *" part looks really silly, so I suggst
> to go with " > FOO_MAX", else its not a maximum value after all.

Ok, so here is what I have now:


+#define HASHLIMIT_MAX_SIZE 1048576
+
 static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
                                     struct xt_hashlimit_htable **hinfo,
                                     struct hashlimit_cfg3 *cfg,
@@ -847,6 +849,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,

        if (cfg->gc_interval == 0 || cfg->expire == 0)
                return -EINVAL;
+       if (cfg->size > HASHLIMIT_MAX_SIZE) {
+               cfg->size = HASHLIMIT_MAX_SIZE;
+               pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
+       }
+       if (cfg->max > HASHLIMIT_MAX_SIZE) {
+               cfg->max = HASHLIMIT_MAX_SIZE;
+               pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
+       }

Please let me know if it is still different with your suggestion.

Thanks!
Florian Westphal Feb. 2, 2020, 10:37 p.m. UTC | #7
Cong Wang <xiyou.wangcong@gmail.com> wrote:
> Ok, so here is what I have now:
> 
> 
> +#define HASHLIMIT_MAX_SIZE 1048576
> +
>  static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
>                                      struct xt_hashlimit_htable **hinfo,
>                                      struct hashlimit_cfg3 *cfg,
> @@ -847,6 +849,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
> 
>         if (cfg->gc_interval == 0 || cfg->expire == 0)
>                 return -EINVAL;
> +       if (cfg->size > HASHLIMIT_MAX_SIZE) {
> +               cfg->size = HASHLIMIT_MAX_SIZE;
> +               pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
> +       }
> +       if (cfg->max > HASHLIMIT_MAX_SIZE) {
> +               cfg->max = HASHLIMIT_MAX_SIZE;
> +               pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
> +       }
> 
> Please let me know if it is still different with your suggestion.

I am fine with this.
diff mbox series

Patch

diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 57a2639bcc22..6327134c5886 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -272,6 +272,8 @@  dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
 }
 static void htable_gc(struct work_struct *work);
 
+#define HASHLIMIT_MAX_SIZE 8192
+
 static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 			 const char *name, u_int8_t family,
 			 struct xt_hashlimit_htable **out_hinfo,
@@ -290,7 +292,7 @@  static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 		size = (nr_pages << PAGE_SHIFT) / 16384 /
 		       sizeof(struct hlist_head);
 		if (nr_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
-			size = 8192;
+			size = HASHLIMIT_MAX_SIZE;
 		if (size < 16)
 			size = 16;
 	}
@@ -848,6 +850,8 @@  static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
 
 	if (cfg->gc_interval == 0 || cfg->expire == 0)
 		return -EINVAL;
+	if (cfg->size > HASHLIMIT_MAX_SIZE)
+		return -ENOMEM;
 	if (par->family == NFPROTO_IPV4) {
 		if (cfg->srcmask > 32 || cfg->dstmask > 32)
 			return -EINVAL;