[v4,1/4] lib: introduce copy_struct_from_user() helper

Message ID 20191001011055.19283-2-cyphar@cyphar.com
State New

Commit Message

Aleksa Sarai Oct. 1, 2019, 1:10 a.m. UTC
A common pattern for syscall extensions is increasing the size of a
struct passed from userspace, such that the zero-value of the new fields
results in the old kernel behaviour (allowing for a mix of userspace and
kernel vintages to operate on one another in most cases).

While this interface exists for communication in both directions, only
one interface is straightforward to have reasonable semantics for
(userspace passing a struct to the kernel). For kernel returns to
userspace, what the correct semantics are (whether there should be an
error if userspace is unaware of a new extension) is very
syscall-dependent and thus probably cannot be unified between syscalls
(a good example of this problem is [1]).

Previously there was no common lib/ function that implemented
the necessary extension-checking semantics (and different syscalls
implemented them slightly differently or incompletely[2]). Future
patches replace common uses of this pattern to make use of
copy_struct_from_user().

Some in-kernel selftests are added to ensure that the handling of
alignment and various byte patterns is identical to memchr_inv() usage.

[1]: commit 1251201c0d34 ("sched/core: Fix uclamp ABI bug, clean up and
     robustify sched_read_attr() ABI logic and code")

[2]: For instance {sched_setattr,perf_event_open,clone3}(2) all do
     similar checks to copy_struct_from_user() while rt_sigprocmask(2)
     always rejects differently-sized struct arguments.
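
For illustration, the userspace side of this pattern looks something like
the following sketch (struct foo, FOO_SIZE_VER0, __NR_foobar and
do_foobar() are all hypothetical, mirroring the kernel-side example in
the copy_struct_from_user() kerneldoc below):

  #define _GNU_SOURCE
  #include <stdint.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  /* Hypothetical extensible struct: v1 appended 'new_field' to v0. */
  struct foo {
          uint64_t flags;      /* v0 ended here: FOO_SIZE_VER0 == 8 */
          uint64_t new_field;  /* v1 addition; zero keeps v0 behaviour */
  };
  #define FOO_SIZE_VER0  8
  #define __NR_foobar    1000  /* made-up syscall number */

  static int do_foobar(uint64_t flags)
  {
          struct foo arg;

          /* Zero-fill so that fields this binary doesn't set still
           * request the old behaviour. */
          memset(&arg, 0, sizeof(arg));
          arg.flags = flags;

          /* An older kernel accepts usize > its own ksize as long as
           * the trailing bytes (new_field here) are all zero. */
          return syscall(__NR_foobar, &arg, sizeof(arg));
  }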

Suggested-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
 include/linux/bitops.h  |   7 +++
 include/linux/uaccess.h |  70 +++++++++++++++++++++
 lib/strnlen_user.c      |   8 +--
 lib/test_user_copy.c    | 136 ++++++++++++++++++++++++++++++++++++++--
 lib/usercopy.c          |  55 ++++++++++++++++
 5 files changed, 263 insertions(+), 13 deletions(-)

Comments

Kees Cook Oct. 1, 2019, 1:58 a.m. UTC | #1
On Tue, Oct 01, 2019 at 11:10:52AM +1000, Aleksa Sarai wrote:
> A common pattern for syscall extensions is increasing the size of a
> struct passed from userspace, such that the zero-value of the new fields
> results in the old kernel behaviour (allowing for a mix of userspace and
> kernel vintages to operate on one another in most cases).
> 
> While this interface exists for communication in both directions, only
> one interface is straightforward to have reasonable semantics for
> (userspace passing a struct to the kernel). For kernel returns to
> userspace, what the correct semantics are (whether there should be an
> error if userspace is unaware of a new extension) is very
> syscall-dependent and thus probably cannot be unified between syscalls
> (a good example of this problem is [1]).
> 
> Previously there was no common lib/ function that implemented
> the necessary extension-checking semantics (and different syscalls
> implemented them slightly differently or incompletely[2]). Future
> patches replace common uses of this pattern to make use of
> copy_struct_from_user().
> 
> Some in-kernel selftests are added to ensure that the handling of
> alignment and various byte patterns is identical to memchr_inv() usage.
> 
> [1]: commit 1251201c0d34 ("sched/core: Fix uclamp ABI bug, clean up and
>      robustify sched_read_attr() ABI logic and code")
> 
> [2]: For instance {sched_setattr,perf_event_open,clone3}(2) all do
>      similar checks to copy_struct_from_user() while rt_sigprocmask(2)
>      always rejects differently-sized struct arguments.
> 
> Suggested-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
> Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
> ---
>  include/linux/bitops.h  |   7 +++
>  include/linux/uaccess.h |  70 +++++++++++++++++++++
>  lib/strnlen_user.c      |   8 +--
>  lib/test_user_copy.c    | 136 ++++++++++++++++++++++++++++++++++++++--
>  lib/usercopy.c          |  55 ++++++++++++++++
>  5 files changed, 263 insertions(+), 13 deletions(-)
> 
> diff --git a/include/linux/bitops.h b/include/linux/bitops.h
> index cf074bce3eb3..c94a9ff9f082 100644
> --- a/include/linux/bitops.h
> +++ b/include/linux/bitops.h
> @@ -4,6 +4,13 @@
>  #include <asm/types.h>
>  #include <linux/bits.h>
>  
> +/* Set bits in the first 'n' bytes when loaded from memory */
> +#ifdef __LITTLE_ENDIAN
> +#  define aligned_byte_mask(n) ((1UL << 8*(n))-1)
> +#else
> +#  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
> +#endif
> +
>  #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
>  #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
>  
> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
> index 70bbdc38dc37..8abbc713f7fb 100644
> --- a/include/linux/uaccess.h
> +++ b/include/linux/uaccess.h
> @@ -231,6 +231,76 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
>  
>  #endif		/* ARCH_HAS_NOCACHE_UACCESS */
>  
> +extern int check_zeroed_user(const void __user *from, size_t size);
> +
> +/**
> + * copy_struct_from_user: copy a struct from userspace
> + * @dst:   Destination address, in kernel space. This buffer must be @ksize
> + *         bytes long.
> + * @ksize: Size of @dst struct.
> + * @src:   Source address, in userspace.
> + * @usize: (Alleged) size of @src struct.
> + *
> + * Copies a struct from userspace to kernel space, in a way that guarantees
> + * backwards-compatibility for struct syscall arguments (as long as future
> + * struct extensions are made such that all new fields are *appended* to the
> + * old struct, and zeroed-out new fields have the same meaning as the old
> + * struct).
> + *
> + * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
> + * The recommended usage is something like the following:
> + *
> + *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
> + *   {
> + *      int err;
> + *      struct foo karg = {};
> + *
> + *      if (usize > PAGE_SIZE)
> + *        return -E2BIG;
> + *      if (usize < FOO_SIZE_VER0)
> + *        return -EINVAL;
> + *
> + *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
> + *      if (err)
> + *        return err;
> + *
> + *      // ...
> + *   }
> + *
> + * There are three cases to consider:
> + *  * If @usize == @ksize, then it's copied verbatim.
> + *  * If @usize < @ksize, then the userspace has passed an old struct to a
> + *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
> + *    are to be zero-filled.
> + *  * If @usize > @ksize, then the userspace has passed a new struct to an
> + *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
> + *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
> + *
> + * Returns (in all cases, some data may have been copied):
> + *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
> + *  * -EFAULT: access to userspace failed.
> + */
> +static __always_inline
> +int copy_struct_from_user(void *dst, size_t ksize,
> +			  const void __user *src, size_t usize)

And of course I forgot to realize both this and check_zeroed_user()
should also have the __must_check attribute. Sorry for forgetting that
earlier!
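
That is, something like the following (a sketch of the fixup, not
necessarily the literal hunk that gets applied):

  extern __must_check int check_zeroed_user(const void __user *from,
                                            size_t size);

  static __always_inline __must_check
  int copy_struct_from_user(void *dst, size_t ksize,
                            const void __user *src, size_t usize)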

With that, please consider it:

Reviewed-by: Kees Cook <keescook@chromium.org>

Thanks for working on this!
Christian Brauner Oct. 1, 2019, 2:31 a.m. UTC | #2
On Mon, Sep 30, 2019 at 06:58:39PM -0700, Kees Cook wrote:
> On Tue, Oct 01, 2019 at 11:10:52AM +1000, Aleksa Sarai wrote:
> > A common pattern for syscall extensions is increasing the size of a
> > struct passed from userspace, such that the zero-value of the new fields
> > results in the old kernel behaviour (allowing for a mix of userspace and
> > kernel vintages to operate on one another in most cases).
> > 
> > While this interface exists for communication in both directions, only
> > one interface is straightforward to have reasonable semantics for
> > (userspace passing a struct to the kernel). For kernel returns to
> > userspace, what the correct semantics are (whether there should be an
> > error if userspace is unaware of a new extension) is very
> > syscall-dependent and thus probably cannot be unified between syscalls
> > (a good example of this problem is [1]).
> > 
> > Previously there was no common lib/ function that implemented
> > the necessary extension-checking semantics (and different syscalls
> > implemented them slightly differently or incompletely[2]). Future
> > patches replace common uses of this pattern to make use of
> > copy_struct_from_user().
> > 
> > Some in-kernel selftests are added to ensure that the handling of
> > alignment and various byte patterns is identical to memchr_inv() usage.
> > 
> > [1]: commit 1251201c0d34 ("sched/core: Fix uclamp ABI bug, clean up and
> >      robustify sched_read_attr() ABI logic and code")
> > 
> > [2]: For instance {sched_setattr,perf_event_open,clone3}(2) all do
> >      similar checks to copy_struct_from_user() while rt_sigprocmask(2)
> >      always rejects differently-sized struct arguments.
> > 
> > Suggested-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
> > Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
> > ---
> >  include/linux/bitops.h  |   7 +++
> >  include/linux/uaccess.h |  70 +++++++++++++++++++++
> >  lib/strnlen_user.c      |   8 +--
> >  lib/test_user_copy.c    | 136 ++++++++++++++++++++++++++++++++++++++--
> >  lib/usercopy.c          |  55 ++++++++++++++++
> >  5 files changed, 263 insertions(+), 13 deletions(-)
> > 
> > diff --git a/include/linux/bitops.h b/include/linux/bitops.h
> > index cf074bce3eb3..c94a9ff9f082 100644
> > --- a/include/linux/bitops.h
> > +++ b/include/linux/bitops.h
> > @@ -4,6 +4,13 @@
> >  #include <asm/types.h>
> >  #include <linux/bits.h>
> >  
> > +/* Set bits in the first 'n' bytes when loaded from memory */
> > +#ifdef __LITTLE_ENDIAN
> > +#  define aligned_byte_mask(n) ((1UL << 8*(n))-1)
> > +#else
> > +#  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
> > +#endif
> > +
> >  #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
> >  #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
> >  
> > diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
> > index 70bbdc38dc37..8abbc713f7fb 100644
> > --- a/include/linux/uaccess.h
> > +++ b/include/linux/uaccess.h
> > @@ -231,6 +231,76 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
> >  
> >  #endif		/* ARCH_HAS_NOCACHE_UACCESS */
> >  
> > +extern int check_zeroed_user(const void __user *from, size_t size);
> > +
> > +/**
> > + * copy_struct_from_user: copy a struct from userspace
> > + * @dst:   Destination address, in kernel space. This buffer must be @ksize
> > + *         bytes long.
> > + * @ksize: Size of @dst struct.
> > + * @src:   Source address, in userspace.
> > + * @usize: (Alleged) size of @src struct.
> > + *
> > + * Copies a struct from userspace to kernel space, in a way that guarantees
> > + * backwards-compatibility for struct syscall arguments (as long as future
> > + * struct extensions are made such that all new fields are *appended* to the
> > + * old struct, and zeroed-out new fields have the same meaning as the old
> > + * struct).
> > + *
> > + * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
> > + * The recommended usage is something like the following:
> > + *
> > + *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
> > + *   {
> > + *      int err;
> > + *      struct foo karg = {};
> > + *
> > + *      if (usize > PAGE_SIZE)
> > + *        return -E2BIG;
> > + *      if (usize < FOO_SIZE_VER0)
> > + *        return -EINVAL;
> > + *
> > + *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
> > + *      if (err)
> > + *        return err;
> > + *
> > + *      // ...
> > + *   }
> > + *
> > + * There are three cases to consider:
> > + *  * If @usize == @ksize, then it's copied verbatim.
> > + *  * If @usize < @ksize, then the userspace has passed an old struct to a
> > + *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
> > + *    are to be zero-filled.
> > + *  * If @usize > @ksize, then the userspace has passed a new struct to an
> > + *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
> > + *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
> > + *
> > + * Returns (in all cases, some data may have been copied):
> > + *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
> > + *  * -EFAULT: access to userspace failed.
> > + */
> > +static __always_inline
> > +int copy_struct_from_user(void *dst, size_t ksize,
> > +			  const void __user *src, size_t usize)
> 
> And of course I forgot to realize both this and check_zeroed_user()
> should also have the __must_check attribute. Sorry for forgetting that
> earlier!

Just said to Aleksa that I'll just fix this up when I apply so he
doesn't have to resend. You ok with this, Kees?

> 
> With that, please consider it:
> 
> Reviewed-by: Kees Cook <keescook@chromium.org>

Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
Kees Cook Oct. 1, 2019, 4:28 p.m. UTC | #3
On Tue, Oct 01, 2019 at 04:31:27AM +0200, Christian Brauner wrote:
> On Mon, Sep 30, 2019 at 06:58:39PM -0700, Kees Cook wrote:
> > On Tue, Oct 01, 2019 at 11:10:52AM +1000, Aleksa Sarai wrote:
> > > +static __always_inline
> > > +int copy_struct_from_user(void *dst, size_t ksize,
> > > +			  const void __user *src, size_t usize)
> > 
> > And of course I forgot to realize both this and check_zeroed_user()
> > should also have the __must_check attribute. Sorry for forgetting that
> > earlier!
> 
> Just said to Aleksa that I'll just fix this up when I apply so he
> doesn't have to resend. You ok with this, Kees?

Yup; that's totally fine. Thanks!
Michael Ellerman Oct. 10, 2019, 11:19 a.m. UTC | #4
Hi Aleksa,

Aleksa Sarai <cyphar@cyphar.com> writes:
> A common pattern for syscall extensions is increasing the size of a
> struct passed from userspace, such that the zero-value of the new fields
> results in the old kernel behaviour (allowing for a mix of userspace and
> kernel vintages to operate on one another in most cases).
>
> While this interface exists for communication in both directions, only
> one interface is straightforward to have reasonable semantics for
> (userspace passing a struct to the kernel). For kernel returns to
> userspace, what the correct semantics are (whether there should be an
> error if userspace is unaware of a new extension) is very
> syscall-dependent and thus probably cannot be unified between syscalls
> (a good example of this problem is [1]).
>
> Previously there was no common lib/ function that implemented
> the necessary extension-checking semantics (and different syscalls
> implemented them slightly differently or incompletely[2]). Future
> patches replace common uses of this pattern to make use of
> copy_struct_from_user().
>
> Some in-kernel selftests are added to ensure that the handling of
> alignment and various byte patterns is identical to memchr_inv() usage.
...
> diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
> index 67bcd5dfd847..950ee88cd6ac 100644
> --- a/lib/test_user_copy.c
> +++ b/lib/test_user_copy.c
> @@ -31,14 +31,133 @@
...
> +static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
> +{
> +	int ret = 0;
> +	size_t start, end, i;
> +	size_t zero_start = size / 4;
> +	size_t zero_end = size - zero_start;
> +
> +	/*
> +	 * We conduct a series of check_nonzero_user() tests on a block of memory
> +	 * with the following byte-pattern (trying every possible [start,end]
> +	 * pair):
> +	 *
> +	 *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
> +	 *
> +	 * And we verify that check_nonzero_user() acts identically to memchr_inv().
> +	 */
> +
> +	memset(kmem, 0x0, size);
> +	for (i = 1; i < zero_start; i += 2)
> +		kmem[i] = 0xff;
> +	for (i = zero_end; i < size; i += 2)
> +		kmem[i] = 0xff;
> +
> +	ret |= test(copy_to_user(umem, kmem, size),
> +		    "legitimate copy_to_user failed");
> +
> +	for (start = 0; start <= size; start++) {
> +		for (end = start; end <= size; end++) {
> +			size_t len = end - start;
> +			int retval = check_zeroed_user(umem + start, len);
> +			int expected = is_zeroed(kmem + start, len);
> +
> +			ret |= test(retval != expected,
> +				    "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
> +				    retval, expected, start, end);
> +		}
> +	}

This is causing soft lockups for me on powerpc, eg:

  [  188.208315] watchdog: BUG: soft lockup - CPU#4 stuck for 22s! [modprobe:611]
  [  188.208782] Modules linked in: test_user_copy(+) vmx_crypto gf128mul crc32c_vpmsum virtio_balloon ip_tables x_tables autofs4
  [  188.209594] CPU: 4 PID: 611 Comm: modprobe Tainted: G             L    5.4.0-rc1-gcc-8.2.0-00001-gf5a1a536fa14-dirty #1151
  [  188.210392] NIP:  c000000000173650 LR: c000000000379cb0 CTR: c0000000007b20d0
  [  188.210612] REGS: c0000000ec213560 TRAP: 0901   Tainted: G             L     (5.4.0-rc1-gcc-8.2.0-00001-gf5a1a536fa14-dirty)
  [  188.210876] MSR:  8000000000009033 <SF,EE,ME,IR,DR,RI,LE>  CR: 28222422  XER: 20000000
  [  188.211060] CFAR: c000000000379cac IRQMASK: 0 
  [  188.211060] GPR00: c000000000379cb0 c0000000ec2137f0 c0000000013bbb00 c000000000f527f0 
  [  188.211060] GPR04: 000000000000004b 0000000000000000 00000000000085f5 c00000000fffb780 
  [  188.211060] GPR08: 0000000000000000 0000000000000000 c0000000fb9a3080 c008000000411478 
  [  188.211060] GPR12: c0000000007b20d0 c00000000fffb780 
  [  188.211802] NIP [c000000000173650] __might_sleep+0x20/0xc0
  [  188.211924] LR [c000000000379cb0] __might_fault+0x40/0x60
  [  188.212037] Call Trace:
  [  188.212101] [c0000000ec2137f0] [c0000000001b99b4] vprintk_func+0xc4/0x230 (unreliable)
  [  188.212274] [c0000000ec213810] [c0000000007b21fc] check_zeroed_user+0x12c/0x200
  [  188.212478] [c0000000ec213860] [c0080000004106cc] test_user_copy_init+0x67c/0x1210 [test_user_copy]
  [  188.212681] [c0000000ec2139a0] [c000000000010440] do_one_initcall+0x60/0x340
  [  188.212859] [c0000000ec213a70] [c000000000213d4c] do_init_module+0x7c/0x2f0
  [  188.213004] [c0000000ec213b00] [c000000000216f24] load_module+0x2d94/0x30e0
  [  188.213150] [c0000000ec213d00] [c000000000217578] __do_sys_finit_module+0xc8/0x150
  [  188.213350] [c0000000ec213e20] [c00000000000b5d8] system_call+0x5c/0x68
  [  188.213494] Instruction dump:
  [  188.213587] 409efec0 4e800020 60000000 60000000 3c4c0125 384284d0 7c0802a6 60000000 
  [  188.213767] fba1ffe8 fbc1fff0 fbe1fff8 7c9e2378 <f821ff81> 7c7f1b78 7cbd2b78 e94d0958 


I think it's partly because I have DEBUG_ATOMIC_SLEEP enabled, which
means each unsafe_get_user() calls __might_fault() etc.

But even turning that off, it still takes forever.

> @@ -106,6 +225,11 @@ static int __init test_user_copy_init(void)
>  #endif
>  #undef test_legit
>  
> +	/* Test usage of check_nonzero_user(). */
> +	ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);

I suspect it's just that PAGE_SIZE for me is 64K, and so the nested loop
above gets too big too fast.

If my math is right it's doing about 500 million iterations, vs ~2
million on a 4K kernel.

If I do the change below the entire test_user_copy module loads and runs
all the tests in about 10 seconds.

diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 950ee88cd6ac..03b617a36144 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -226,7 +226,7 @@ static int __init test_user_copy_init(void)
 #undef test_legit
 
        /* Test usage of check_nonzero_user(). */
-       ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
+       ret |= test_check_nonzero_user(kmem, usermem, 2 * 4096);
        /* Test usage of copy_struct_from_user(). */
        ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
 

How long does it take on your systems? Is 10s in the ball park, or is
there something else pathological happening on my machine, and shrinking
it to 4096 is just papering over it?

cheers
Aleksa Sarai Oct. 10, 2019, 11:40 a.m. UTC | #5
On 2019-10-10, Michael Ellerman <mpe@ellerman.id.au> wrote:
> Aleksa Sarai <cyphar@cyphar.com> writes:
> > A common pattern for syscall extensions is increasing the size of a
> > struct passed from userspace, such that the zero-value of the new fields
> > results in the old kernel behaviour (allowing for a mix of userspace and
> > kernel vintages to operate on one another in most cases).
> >
> > While this interface exists for communication in both directions, only
> > one interface is straightforward to have reasonable semantics for
> > (userspace passing a struct to the kernel). For kernel returns to
> > userspace, what the correct semantics are (whether there should be an
> > error if userspace is unaware of a new extension) is very
> > syscall-dependent and thus probably cannot be unified between syscalls
> > (a good example of this problem is [1]).
> >
> > Previously there was no common lib/ function that implemented
> > the necessary extension-checking semantics (and different syscalls
> > implemented them slightly differently or incompletely[2]). Future
> > patches replace common uses of this pattern to make use of
> > copy_struct_from_user().
> >
> > Some in-kernel selftests are added to ensure that the handling of
> > alignment and various byte patterns is identical to memchr_inv() usage.
> ...
> > diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
> > index 67bcd5dfd847..950ee88cd6ac 100644
> > --- a/lib/test_user_copy.c
> > +++ b/lib/test_user_copy.c
> > @@ -31,14 +31,133 @@
> ...
> > +static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
> > +{
> > +	int ret = 0;
> > +	size_t start, end, i;
> > +	size_t zero_start = size / 4;
> > +	size_t zero_end = size - zero_start;
> > +
> > +	/*
> > +	 * We conduct a series of check_nonzero_user() tests on a block of memory
> > +	 * with the following byte-pattern (trying every possible [start,end]
> > +	 * pair):
> > +	 *
> > +	 *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
> > +	 *
> > +	 * And we verify that check_nonzero_user() acts identically to memchr_inv().
> > +	 */
> > +
> > +	memset(kmem, 0x0, size);
> > +	for (i = 1; i < zero_start; i += 2)
> > +		kmem[i] = 0xff;
> > +	for (i = zero_end; i < size; i += 2)
> > +		kmem[i] = 0xff;
> > +
> > +	ret |= test(copy_to_user(umem, kmem, size),
> > +		    "legitimate copy_to_user failed");
> > +
> > +	for (start = 0; start <= size; start++) {
> > +		for (end = start; end <= size; end++) {
> > +			size_t len = end - start;
> > +			int retval = check_zeroed_user(umem + start, len);
> > +			int expected = is_zeroed(kmem + start, len);
> > +
> > +			ret |= test(retval != expected,
> > +				    "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
> > +				    retval, expected, start, end);
> > +		}
> > +	}
> 
> This is causing soft lockups for me on powerpc, eg:
> 
>   [  188.208315] watchdog: BUG: soft lockup - CPU#4 stuck for 22s! [modprobe:611]
>   [  188.208782] Modules linked in: test_user_copy(+) vmx_crypto gf128mul crc32c_vpmsum virtio_balloon ip_tables x_tables autofs4
>   [  188.209594] CPU: 4 PID: 611 Comm: modprobe Tainted: G             L    5.4.0-rc1-gcc-8.2.0-00001-gf5a1a536fa14-dirty #1151
>   [  188.210392] NIP:  c000000000173650 LR: c000000000379cb0 CTR: c0000000007b20d0
>   [  188.210612] REGS: c0000000ec213560 TRAP: 0901   Tainted: G             L     (5.4.0-rc1-gcc-8.2.0-00001-gf5a1a536fa14-dirty)
>   [  188.210876] MSR:  8000000000009033 <SF,EE,ME,IR,DR,RI,LE>  CR: 28222422  XER: 20000000
>   [  188.211060] CFAR: c000000000379cac IRQMASK: 0 
>   [  188.211060] GPR00: c000000000379cb0 c0000000ec2137f0 c0000000013bbb00 c000000000f527f0 
>   [  188.211060] GPR04: 000000000000004b 0000000000000000 00000000000085f5 c00000000fffb780 
>   [  188.211060] GPR08: 0000000000000000 0000000000000000 c0000000fb9a3080 c008000000411478 
>   [  188.211060] GPR12: c0000000007b20d0 c00000000fffb780 
>   [  188.211802] NIP [c000000000173650] __might_sleep+0x20/0xc0
>   [  188.211924] LR [c000000000379cb0] __might_fault+0x40/0x60
>   [  188.212037] Call Trace:
>   [  188.212101] [c0000000ec2137f0] [c0000000001b99b4] vprintk_func+0xc4/0x230 (unreliable)
>   [  188.212274] [c0000000ec213810] [c0000000007b21fc] check_zeroed_user+0x12c/0x200
>   [  188.212478] [c0000000ec213860] [c0080000004106cc] test_user_copy_init+0x67c/0x1210 [test_user_copy]
>   [  188.212681] [c0000000ec2139a0] [c000000000010440] do_one_initcall+0x60/0x340
>   [  188.212859] [c0000000ec213a70] [c000000000213d4c] do_init_module+0x7c/0x2f0
>   [  188.213004] [c0000000ec213b00] [c000000000216f24] load_module+0x2d94/0x30e0
>   [  188.213150] [c0000000ec213d00] [c000000000217578] __do_sys_finit_module+0xc8/0x150
>   [  188.213350] [c0000000ec213e20] [c00000000000b5d8] system_call+0x5c/0x68
>   [  188.213494] Instruction dump:
>   [  188.213587] 409efec0 4e800020 60000000 60000000 3c4c0125 384284d0 7c0802a6 60000000 
>   [  188.213767] fba1ffe8 fbc1fff0 fbe1fff8 7c9e2378 <f821ff81> 7c7f1b78 7cbd2b78 e94d0958 
> 
> 
> I think it's partly because I have DEBUG_ATOMIC_SLEEP enabled, which
> means each unsafe_get_user() calls __might_fault() etc.
> 
> But even turning that off, it still takes forever.
> 
> > @@ -106,6 +225,11 @@ static int __init test_user_copy_init(void)
> >  #endif
> >  #undef test_legit
> >  
> > +	/* Test usage of check_nonzero_user(). */
> > +	ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
> 
> I suspect it's just that PAGE_SIZE for me is 64K, and so the nested loop
> above gets too big too fast.
> 
> If my math is right it's doing about 500 million iterations, vs ~2
> million on a 4K kernel.
> 
> If I do the change below the entire test_user_copy module loads and runs
> all the tests in about 10 seconds.
> 
> diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
> index 950ee88cd6ac..03b617a36144 100644
> --- a/lib/test_user_copy.c
> +++ b/lib/test_user_copy.c
> @@ -226,7 +226,7 @@ static int __init test_user_copy_init(void)
>  #undef test_legit
>  
>         /* Test usage of check_nonzero_user(). */
> -       ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
> +       ret |= test_check_nonzero_user(kmem, usermem, 2 * 4096);
>         /* Test usage of copy_struct_from_user(). */
>         ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
>  
> 
> How long does it take on your systems? Is 10s in the ball park, or is
> there something else pathological happening on my machine, and shrinking
> it to 4096 is just papering over it?

Yeah, it takes about 5-10s on my laptop. We could switch to just testing
everything within a 4K block, but the main reason for testing with
2*PAGE_SIZE is to make sure that check_nonzero_user() works across page
boundaries. Though we could do check_nonzero_user() only in the region
of the page boundary (maybe i ∈ (PAGE_SIZE-512, PAGE_SIZE+512]?)

Making a single test run for ~40min doesn't seem like that good of an
idea in retrospect. :P
Kees Cook Oct. 10, 2019, 4:43 p.m. UTC | #6
On Thu, Oct 10, 2019 at 10:40:07PM +1100, Aleksa Sarai wrote:
> Yeah, it takes about 5-10s on my laptop. We could switch to just testing
> everything within a 4K block, but the main reason for testing with
> 2*PAGE_SIZE is to make sure that check_nonzero_user() works across page
> boundaries. Though we could do check_nonzero_user() only in the region
> of the page boundary (maybe i ∈ (PAGE_SIZE-512, PAGE_SIZE+512]?)

Yeah, I like this idea: just poke at the specific edge-case.
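
Something along these lines, perhaps (a sketch of the narrowed loop in
test_check_nonzero_user(); the +/-512 byte window is Aleksa's suggestion
above, not settled code):

  size_t start, end;
  const size_t lo = PAGE_SIZE - 512;
  const size_t hi = PAGE_SIZE + 512;

  /* Only exercise [start, end] pairs near the page boundary instead of
   * every pair in the whole 2 * PAGE_SIZE buffer; this caps the nested
   * loop at roughly 1024^2 / 2 iterations regardless of page size. */
  for (start = lo; start <= hi; start++) {
          for (end = start; end <= hi; end++) {
                  size_t len = end - start;
                  int retval = check_zeroed_user(umem + start, len);
                  int expected = is_zeroed(kmem + start, len);

                  ret |= test(retval != expected,
                              "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
                              retval, expected, start, end);
          }
  }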

Patch

diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cf074bce3eb3..c94a9ff9f082 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,6 +4,13 @@ 
 #include <asm/types.h>
 #include <linux/bits.h>
 
+/* Set bits in the first 'n' bytes when loaded from memory */
+#ifdef __LITTLE_ENDIAN
+#  define aligned_byte_mask(n) ((1UL << 8*(n))-1)
+#else
+#  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
+#endif
+
 #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
 
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 70bbdc38dc37..8abbc713f7fb 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -231,6 +231,76 @@  __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 
 #endif		/* ARCH_HAS_NOCACHE_UACCESS */
 
+extern int check_zeroed_user(const void __user *from, size_t size);
+
+/**
+ * copy_struct_from_user: copy a struct from userspace
+ * @dst:   Destination address, in kernel space. This buffer must be @ksize
+ *         bytes long.
+ * @ksize: Size of @dst struct.
+ * @src:   Source address, in userspace.
+ * @usize: (Alleged) size of @src struct.
+ *
+ * Copies a struct from userspace to kernel space, in a way that guarantees
+ * backwards-compatibility for struct syscall arguments (as long as future
+ * struct extensions are made such that all new fields are *appended* to the
+ * old struct, and zeroed-out new fields have the same meaning as the old
+ * struct).
+ *
+ * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
+ * The recommended usage is something like the following:
+ *
+ *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
+ *   {
+ *      int err;
+ *      struct foo karg = {};
+ *
+ *      if (usize > PAGE_SIZE)
+ *        return -E2BIG;
+ *      if (usize < FOO_SIZE_VER0)
+ *        return -EINVAL;
+ *
+ *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
+ *      if (err)
+ *        return err;
+ *
+ *      // ...
+ *   }
+ *
+ * There are three cases to consider:
+ *  * If @usize == @ksize, then it's copied verbatim.
+ *  * If @usize < @ksize, then the userspace has passed an old struct to a
+ *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
+ *    are to be zero-filled.
+ *  * If @usize > @ksize, then the userspace has passed a new struct to an
+ *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
+ *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
+ *
+ * Returns (in all cases, some data may have been copied):
+ *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
+ *  * -EFAULT: access to userspace failed.
+ */
+static __always_inline
+int copy_struct_from_user(void *dst, size_t ksize,
+			  const void __user *src, size_t usize)
+{
+	size_t size = min(ksize, usize);
+	size_t rest = max(ksize, usize) - size;
+
+	/* Deal with trailing bytes. */
+	if (usize < ksize) {
+		memset(dst + size, 0, rest);
+	} else if (usize > ksize) {
+		int ret = check_zeroed_user(src + size, rest);
+		if (ret <= 0)
+			return ret ?: -E2BIG;
+	}
+	/* Copy the interoperable parts of the struct. */
+	if (copy_from_user(dst, src, size))
+		return -EFAULT;
+	return 0;
+}
+
 /*
  * probe_kernel_read(): safely attempt to read from a location
  * @dst: pointer to the buffer that shall take the data
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 28ff554a1be8..6c0005d5dd5c 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -3,16 +3,10 @@ 
 #include <linux/export.h>
 #include <linux/uaccess.h>
 #include <linux/mm.h>
+#include <linux/bitops.h>
 
 #include <asm/word-at-a-time.h>
 
-/* Set bits in the first 'n' bytes when loaded from memory */
-#ifdef __LITTLE_ENDIAN
-#  define aligned_byte_mask(n) ((1ul << 8*(n))-1)
-#else
-#  define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
-#endif
-
 /*
  * Do a strnlen, return length of string *with* final '\0'.
  * 'count' is the user-supplied count, while 'max' is the
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 67bcd5dfd847..950ee88cd6ac 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,14 +31,133 @@ 
 # define TEST_U64
 #endif
 
-#define test(condition, msg)		\
-({					\
-	int cond = (condition);		\
-	if (cond)			\
-		pr_warn("%s\n", msg);	\
-	cond;				\
+#define test(condition, msg, ...)					\
+({									\
+	int cond = (condition);						\
+	if (cond)							\
+		pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__);	\
+	cond;								\
 })
 
+static bool is_zeroed(void *from, size_t size)
+{
+	return memchr_inv(from, 0x0, size) == NULL;
+}
+
+static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
+{
+	int ret = 0;
+	size_t start, end, i;
+	size_t zero_start = size / 4;
+	size_t zero_end = size - zero_start;
+
+	/*
+	 * We conduct a series of check_nonzero_user() tests on a block of memory
+	 * with the following byte-pattern (trying every possible [start,end]
+	 * pair):
+	 *
+	 *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
+	 *
+	 * And we verify that check_nonzero_user() acts identically to memchr_inv().
+	 */
+
+	memset(kmem, 0x0, size);
+	for (i = 1; i < zero_start; i += 2)
+		kmem[i] = 0xff;
+	for (i = zero_end; i < size; i += 2)
+		kmem[i] = 0xff;
+
+	ret |= test(copy_to_user(umem, kmem, size),
+		    "legitimate copy_to_user failed");
+
+	for (start = 0; start <= size; start++) {
+		for (end = start; end <= size; end++) {
+			size_t len = end - start;
+			int retval = check_zeroed_user(umem + start, len);
+			int expected = is_zeroed(kmem + start, len);
+
+			ret |= test(retval != expected,
+				    "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
+				    retval, expected, start, end);
+		}
+	}
+
+	return ret;
+}
+
+static int test_copy_struct_from_user(char *kmem, char __user *umem,
+				      size_t size)
+{
+	int ret = 0;
+	char *umem_src = NULL, *expected = NULL;
+	size_t ksize, usize;
+
+	umem_src = kmalloc(size, GFP_KERNEL);
+	if (ret |= test(umem_src == NULL, "kmalloc failed"))
+		goto out_free;
+
+	expected = kmalloc(size, GFP_KERNEL);
+	if (ret |= test(expected == NULL, "kmalloc failed"))
+		goto out_free;
+
+	/* Fill umem with a fixed byte pattern. */
+	memset(umem_src, 0x3e, size);
+	ret |= test(copy_to_user(umem, umem_src, size),
+		    "legitimate copy_to_user failed");
+
+	/* Check basic case -- (usize == ksize). */
+	ksize = size;
+	usize = size;
+
+	memcpy(expected, umem_src, ksize);
+
+	memset(kmem, 0x0, size);
+	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+		    "copy_struct_from_user(usize == ksize) failed");
+	ret |= test(memcmp(kmem, expected, ksize),
+		    "copy_struct_from_user(usize == ksize) gives unexpected copy");
+
+	/* Old userspace case -- (usize < ksize). */
+	ksize = size;
+	usize = size / 2;
+
+	memcpy(expected, umem_src, usize);
+	memset(expected + usize, 0x0, ksize - usize);
+
+	memset(kmem, 0x0, size);
+	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+		    "copy_struct_from_user(usize < ksize) failed");
+	ret |= test(memcmp(kmem, expected, ksize),
+		    "copy_struct_from_user(usize < ksize) gives unexpected copy");
+
+	/* New userspace (-E2BIG) case -- (usize > ksize). */
+	ksize = size / 2;
+	usize = size;
+
+	memset(kmem, 0x0, size);
+	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
+		    "copy_struct_from_user(usize > ksize) didn't give E2BIG");
+
+	/* New userspace (success) case -- (usize > ksize). */
+	ksize = size / 2;
+	usize = size;
+
+	memcpy(expected, umem_src, ksize);
+	ret |= test(clear_user(umem + ksize, usize - ksize),
+		    "legitimate clear_user failed");
+
+	memset(kmem, 0x0, size);
+	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+		    "copy_struct_from_user(usize > ksize) failed");
+	ret |= test(memcmp(kmem, expected, ksize),
+		    "copy_struct_from_user(usize > ksize) gives unexpected copy");
+
+out_free:
+	kfree(expected);
+	kfree(umem_src);
+	return ret;
+}
+
 static int __init test_user_copy_init(void)
 {
 	int ret = 0;
@@ -106,6 +225,11 @@  static int __init test_user_copy_init(void)
 #endif
 #undef test_legit
 
+	/* Test usage of check_nonzero_user(). */
+	ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
+	/* Test usage of copy_struct_from_user(). */
+	ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
+
 	/*
 	 * Invalid usage: none of these copies should succeed.
 	 */
diff --git a/lib/usercopy.c b/lib/usercopy.c
index c2bfbcaeb3dc..cbb4d9ec00f2 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,5 +1,6 @@ 
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/uaccess.h>
+#include <linux/bitops.h>
 
 /* out-of-line parts */
 
@@ -31,3 +32,57 @@  unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
 }
 EXPORT_SYMBOL(_copy_to_user);
 #endif
+
+/**
+ * check_zeroed_user: check if a userspace buffer only contains zero bytes
+ * @from: Source address, in userspace.
+ * @size: Size of buffer.
+ *
+ * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
+ * userspace addresses (and is more efficient because we don't care where the
+ * first non-zero byte is).
+ *
+ * Returns:
+ *  * 0: There were non-zero bytes present in the buffer.
+ *  * 1: The buffer was full of zero bytes.
+ *  * -EFAULT: access to userspace failed.
+ */
+int check_zeroed_user(const void __user *from, size_t size)
+{
+	unsigned long val;
+	uintptr_t align = (uintptr_t) from % sizeof(unsigned long);
+
+	if (unlikely(size == 0))
+		return 1;
+
+	from -= align;
+	size += align;
+
+	if (!user_access_begin(from, size))
+		return -EFAULT;
+
+	unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+	if (align)
+		val &= ~aligned_byte_mask(align);
+
+	while (size > sizeof(unsigned long)) {
+		if (unlikely(val))
+			goto done;
+
+		from += sizeof(unsigned long);
+		size -= sizeof(unsigned long);
+
+		unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+	}
+
+	if (size < sizeof(unsigned long))
+		val &= aligned_byte_mask(size);
+
+done:
+	user_access_end();
+	return (val == 0);
+err_fault:
+	user_access_end();
+	return -EFAULT;
+}
+EXPORT_SYMBOL(check_zeroed_user);
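
As a worked example of the mask handling in check_zeroed_user() above,
consider the little-endian case. The userspace re-statement below is
illustration only; it assumes sizeof(unsigned long) == 8 and, like the
kernel code, only ever uses 0 < n < 8:

  #include <assert.h>
  #include <stdio.h>

  /* Little-endian variant: set bits in the first (lowest) n bytes. */
  #define aligned_byte_mask(n) ((1UL << 8 * (n)) - 1)

  int main(void)
  {
          unsigned long val;

          assert(aligned_byte_mask(1) == 0xffUL);
          assert(aligned_byte_mask(3) == 0xffffffUL);

          /* Head fixup: if from % 8 == 3, the first aligned load pulls
           * in 3 bytes that precede the buffer, so check_zeroed_user()
           * clears them with ~aligned_byte_mask(align). */
          val = 0x01020304050607ffUL;
          val &= ~aligned_byte_mask(3);
          printf("%#lx\n", val);  /* 0x102030405000000 */

          /* Tail fixup: if only 3 bytes remain, bytes past the end of
           * the buffer are masked off before the zero check. */
          val = 0xff00000000000000UL;
          val &= aligned_byte_mask(3);
          printf("%#lx\n", val);  /* 0 -- trailing junk is ignored */
          return 0;
  }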