[v5,01/18] kunit: test: add KUnit test runner core

Message ID 20190617082613.109131-2-brendanhiggins@google.com
State Not Applicable
Series kunit: introduce KUnit, the Linux kernel unit testing framework

Commit Message

Brendan Higgins June 17, 2019, 8:25 a.m. UTC
Add core facilities for defining unit tests. This provides a common way
to define test cases: functions that exercise the code under test and
determine whether it behaves as expected. It also provides a way to
group related test cases together in test suites (here we call them
test_modules).

For now, just define test cases and how to execute them; support for
setting expectations on the code under test will be added later.

Signed-off-by: Brendan Higgins <brendanhiggins@google.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
---
Changes Since Last Revision:
 - Mostly minor fixes suggested by Stephen Boyd
 - Biggest change inlines a bunch of functions in the test case running
   logic to make it more readable as suggested by Stephen Boyd
---
 include/kunit/test.h | 161 +++++++++++++++++++++++++++++++++
 kunit/Kconfig        |  17 ++++
 kunit/Makefile       |   1 +
 kunit/test.c         | 210 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 389 insertions(+)
 create mode 100644 include/kunit/test.h
 create mode 100644 kunit/Kconfig
 create mode 100644 kunit/Makefile
 create mode 100644 kunit/test.c

Comments

Stephen Boyd June 20, 2019, 12:15 a.m. UTC | #1
Quoting Brendan Higgins (2019-06-17 01:25:56)
> diff --git a/kunit/test.c b/kunit/test.c
> new file mode 100644
> index 0000000000000..d05d254f1521f
> --- /dev/null
> +++ b/kunit/test.c
> @@ -0,0 +1,210 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Base unit test (KUnit) API.
> + *
> + * Copyright (C) 2019, Google LLC.
> + * Author: Brendan Higgins <brendanhiggins@google.com>
> + */
> +
> +#include <linux/sched/debug.h>
> +#include <kunit/test.h>
> +
> +static bool kunit_get_success(struct kunit *test)
> +{
> +       unsigned long flags;
> +       bool success;
> +
> +       spin_lock_irqsave(&test->lock, flags);
> +       success = test->success;
> +       spin_unlock_irqrestore(&test->lock, flags);

I still don't understand the locking scheme in this code. Is the
intention to make getter and setter APIs that are "safe" by adding in a
spinlock that is held around getting and setting various members in the
kunit structure?

In what situation is there more than one thread reading or writing the
kunit struct? Isn't it only a single process that is going to be
operating on this structure? And why do we need to disable irqs? Are we
expecting to be modifying the unit tests from irq contexts?

> +
> +       return success;
> +}
> +
> +static void kunit_set_success(struct kunit *test, bool success)
> +{
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&test->lock, flags);
> +       test->success = success;
> +       spin_unlock_irqrestore(&test->lock, flags);
> +}
> +
> +static int kunit_vprintk_emit(int level, const char *fmt, va_list args)
> +{
> +       return vprintk_emit(0, level, NULL, 0, fmt, args);
> +}
> +
> +static int kunit_printk_emit(int level, const char *fmt, ...)
> +{
> +       va_list args;
> +       int ret;
> +
> +       va_start(args, fmt);
> +       ret = kunit_vprintk_emit(level, fmt, args);
> +       va_end(args);
> +
> +       return ret;
> +}
> +
> +static void kunit_vprintk(const struct kunit *test,
> +                         const char *level,
> +                         struct va_format *vaf)
> +{
> +       kunit_printk_emit(level[1] - '0', "\t# %s: %pV", test->name, vaf);
> +}
> +
> +static bool kunit_has_printed_tap_version;

Can you please move this into function local scope in the function
below?

> +
> +static void kunit_print_tap_version(void)
> +{
> +       if (!kunit_has_printed_tap_version) {
> +               kunit_printk_emit(LOGLEVEL_INFO, "TAP version 14\n");
> +               kunit_has_printed_tap_version = true;
> +       }
> +}
> +
[...]
> +
> +static bool kunit_module_has_succeeded(struct kunit_module *module)
> +{
> +       const struct kunit_case *test_case;
> +       bool success = true;
> +
> +       for (test_case = module->test_cases; test_case->run_case; test_case++)
> +               if (!test_case->success) {
> +                       success = false;
> +                       break;

Why not 'return false'?

> +               }
> +
> +       return success;

And 'return true'?

> +}
> +
> +static size_t kunit_module_counter = 1;
> +
> +static void kunit_print_subtest_end(struct kunit_module *module)
> +{
> +       kunit_print_ok_not_ok(false,
> +                             kunit_module_has_succeeded(module),
> +                             kunit_module_counter++,
> +                             module->name);
> +}
> +
> +static void kunit_print_test_case_ok_not_ok(struct kunit_case *test_case,
> +                                           size_t test_number)
> +{
> +       kunit_print_ok_not_ok(true,
> +                             test_case->success,
> +                             test_number,
> +                             test_case->name);
> +}
> +
> +void kunit_init_test(struct kunit *test, const char *name)
> +{
> +       spin_lock_init(&test->lock);
> +       test->name = name;
> +       test->success = true;
> +}
> +
> +/*
> + * Performs all logic to run a test case.
> + */
> +static void kunit_run_case(struct kunit_module *module,
> +                          struct kunit_case *test_case)
> +{
> +       struct kunit test;
> +       int ret = 0;
> +
> +       kunit_init_test(&test, test_case->name);
> +
> +       if (module->init) {
> +               ret = module->init(&test);
> +               if (ret) {
> +                       kunit_err(&test, "failed to initialize: %d\n", ret);
> +                       kunit_set_success(&test, false);
> +                       return;
> +               }
> +       }
> +
> +       if (!ret)
> +               test_case->run_case(&test);

Do we need this if condition? ret can only be set to non-zero above but
then we'll exit the function early so it seems unnecessary. Given that,
ret should probably be moved into the module->init path.

> +
> +       if (module->exit)
> +               module->exit(&test);
> +
> +       test_case->success = kunit_get_success(&test);
> +}
> +
Brendan Higgins June 25, 2019, 8:28 p.m. UTC | #2
On Wed, Jun 19, 2019 at 5:15 PM Stephen Boyd <sboyd@kernel.org> wrote:
>
> Quoting Brendan Higgins (2019-06-17 01:25:56)
> > diff --git a/kunit/test.c b/kunit/test.c
> > new file mode 100644
> > index 0000000000000..d05d254f1521f
> > --- /dev/null
> > +++ b/kunit/test.c
> > @@ -0,0 +1,210 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/*
> > + * Base unit test (KUnit) API.
> > + *
> > + * Copyright (C) 2019, Google LLC.
> > + * Author: Brendan Higgins <brendanhiggins@google.com>
> > + */
> > +
> > +#include <linux/sched/debug.h>
> > +#include <kunit/test.h>
> > +
> > +static bool kunit_get_success(struct kunit *test)
> > +{
> > +       unsigned long flags;
> > +       bool success;
> > +
> > +       spin_lock_irqsave(&test->lock, flags);
> > +       success = test->success;
> > +       spin_unlock_irqrestore(&test->lock, flags);
>
> I still don't understand the locking scheme in this code. Is the
> intention to make getter and setter APIs that are "safe" by adding in a
> spinlock that is held around getting and setting various members in the
> kunit structure?

Yes, your understanding is correct. It is possible for a user to write
a test such that certain elements may be updated in different threads;
this would most likely happen in the case where someone wants to make
an assertion or an expectation in a thread created by a piece of code
under test. Although this should generally be avoided, it is possible,
and there are occasionally good reasons to do so, so it is
functionality that we should support.

Do you think I should add a comment to this effect?
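
Something like this next to the lock, maybe:

	/*
	 * Protects success. The code under test may spawn threads (or
	 * mock an irq handler) that set expectations or assertions,
	 * and those update success concurrently with the thread that
	 * is running the test case.
	 */
	spinlock_t lock;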

> In what situation is there more than one thread reading or writing the
> kunit struct? Isn't it only a single process that is going to be

As I said above, it is possible that the code under test may spawn a
new thread that may make an expectation or an assertion. It is not a
super common use case, but it is possible.

> operating on this structure? And why do we need to disable irqs? Are we
> expecting to be modifying the unit tests from irq contexts?

There are instances where someone may want to test a driver which has
an interrupt handler in it. I actually have (not the greatest) example
here. Now in these cases, I expect someone to use a mock irqchip or
some other fake mechanism to trigger the interrupt handler and not
actual hardware; technically speaking in this case, it is not going to
be accessed from a "real" irq context; however, the code under test
should think that it is in an irq context; given that, I figured it is
best to just treat it as a real irq context. Does that make sense?

> > +
> > +       return success;
> > +}
> > +
> > +static void kunit_set_success(struct kunit *test, bool success)
> > +{
> > +       unsigned long flags;
> > +
> > +       spin_lock_irqsave(&test->lock, flags);
> > +       test->success = success;
> > +       spin_unlock_irqrestore(&test->lock, flags);
> > +}
> > +
> > +static int kunit_vprintk_emit(int level, const char *fmt, va_list args)
> > +{
> > +       return vprintk_emit(0, level, NULL, 0, fmt, args);
> > +}
> > +
> > +static int kunit_printk_emit(int level, const char *fmt, ...)
> > +{
> > +       va_list args;
> > +       int ret;
> > +
> > +       va_start(args, fmt);
> > +       ret = kunit_vprintk_emit(level, fmt, args);
> > +       va_end(args);
> > +
> > +       return ret;
> > +}
> > +
> > +static void kunit_vprintk(const struct kunit *test,
> > +                         const char *level,
> > +                         struct va_format *vaf)
> > +{
> > +       kunit_printk_emit(level[1] - '0', "\t# %s: %pV", test->name, vaf);
> > +}
> > +
> > +static bool kunit_has_printed_tap_version;
>
> Can you please move this into function local scope in the function
> below?

Sure, that makes sense.
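
Something like this, I take it, with the flag local to the only
function that uses it:

static void kunit_print_tap_version(void)
{
	static bool kunit_has_printed_tap_version;

	if (!kunit_has_printed_tap_version) {
		kunit_printk_emit(LOGLEVEL_INFO, "TAP version 14\n");
		kunit_has_printed_tap_version = true;
	}
}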

> > +
> > +static void kunit_print_tap_version(void)
> > +{
> > +       if (!kunit_has_printed_tap_version) {
> > +               kunit_printk_emit(LOGLEVEL_INFO, "TAP version 14\n");
> > +               kunit_has_printed_tap_version = true;
> > +       }
> > +}
> > +
> [...]
> > +
> > +static bool kunit_module_has_succeeded(struct kunit_module *module)
> > +{
> > +       const struct kunit_case *test_case;
> > +       bool success = true;
> > +
> > +       for (test_case = module->test_cases; test_case->run_case; test_case++)
> > +               if (!test_case->success) {
> > +                       success = false;
> > +                       break;
>
> Why not 'return false'?

Also a good point. Will fix.

> > +               }
> > +
> > +       return success;
>
> And 'return true'?

Will fix.
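
i.e. it becomes roughly:

static bool kunit_module_has_succeeded(struct kunit_module *module)
{
	const struct kunit_case *test_case;

	for (test_case = module->test_cases; test_case->run_case; test_case++)
		if (!test_case->success)
			return false;

	return true;
}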

> > +}
> > +
> > +static size_t kunit_module_counter = 1;
> > +
> > +static void kunit_print_subtest_end(struct kunit_module *module)
> > +{
> > +       kunit_print_ok_not_ok(false,
> > +                             kunit_module_has_succeeded(module),
> > +                             kunit_module_counter++,
> > +                             module->name);
> > +}
> > +
> > +static void kunit_print_test_case_ok_not_ok(struct kunit_case *test_case,
> > +                                           size_t test_number)
> > +{
> > +       kunit_print_ok_not_ok(true,
> > +                             test_case->success,
> > +                             test_number,
> > +                             test_case->name);
> > +}
> > +
> > +void kunit_init_test(struct kunit *test, const char *name)
> > +{
> > +       spin_lock_init(&test->lock);
> > +       test->name = name;
> > +       test->success = true;
> > +}
> > +
> > +/*
> > + * Performs all logic to run a test case.
> > + */
> > +static void kunit_run_case(struct kunit_module *module,
> > +                          struct kunit_case *test_case)
> > +{
> > +       struct kunit test;
> > +       int ret = 0;
> > +
> > +       kunit_init_test(&test, test_case->name);
> > +
> > +       if (module->init) {
> > +               ret = module->init(&test);
> > +               if (ret) {
> > +                       kunit_err(&test, "failed to initialize: %d\n", ret);
> > +                       kunit_set_success(&test, false);
> > +                       return;
> > +               }
> > +       }
> > +
> > +       if (!ret)
> > +               test_case->run_case(&test);
>
> Do we need this if condition? ret can only be set to non-zero above but
> then we'll exit the function early so it seems unnecessary. Given that,
> ret should probably be moved into the module->init path.

Whoops. Sorry, another instance of how it evolved over time and I
forgot why I did the check. Will fix.
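
Roughly, with ret folded into the init path as you suggest:

static void kunit_run_case(struct kunit_module *module,
			   struct kunit_case *test_case)
{
	struct kunit test;

	kunit_init_test(&test, test_case->name);

	if (module->init) {
		int ret = module->init(&test);

		if (ret) {
			kunit_err(&test, "failed to initialize: %d\n", ret);
			kunit_set_success(&test, false);
			return;
		}
	}

	test_case->run_case(&test);

	if (module->exit)
		module->exit(&test);

	test_case->success = kunit_get_success(&test);
}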

> > +
> > +       if (module->exit)
> > +               module->exit(&test);
> > +
> > +       test_case->success = kunit_get_success(&test);
> > +}
> > +

Thanks!
Luis Chamberlain June 25, 2019, 9:44 p.m. UTC | #3
On Tue, Jun 25, 2019 at 01:28:25PM -0700, Brendan Higgins wrote:
> On Wed, Jun 19, 2019 at 5:15 PM Stephen Boyd <sboyd@kernel.org> wrote:
> >
> > Quoting Brendan Higgins (2019-06-17 01:25:56)
> > > diff --git a/kunit/test.c b/kunit/test.c
> > > new file mode 100644
> > > index 0000000000000..d05d254f1521f
> > > --- /dev/null
> > > +++ b/kunit/test.c
> > > @@ -0,0 +1,210 @@
> > > +// SPDX-License-Identifier: GPL-2.0
> > > +/*
> > > + * Base unit test (KUnit) API.
> > > + *
> > > + * Copyright (C) 2019, Google LLC.
> > > + * Author: Brendan Higgins <brendanhiggins@google.com>
> > > + */
> > > +
> > > +#include <linux/sched/debug.h>
> > > +#include <kunit/test.h>
> > > +
> > > +static bool kunit_get_success(struct kunit *test)
> > > +{
> > > +       unsigned long flags;
> > > +       bool success;
> > > +
> > > +       spin_lock_irqsave(&test->lock, flags);
> > > +       success = test->success;
> > > +       spin_unlock_irqrestore(&test->lock, flags);
> >
> > I still don't understand the locking scheme in this code. Is the
> > intention to make getter and setter APIs that are "safe" by adding in a
> > spinlock that is held around getting and setting various members in the
> > kunit structure?
> 
> Yes, your understanding is correct. It is possible for a user to write
> a test such that certain elements may be updated in different threads;
> this would most likely happen in the case where someone wants to make
> an assertion or an expectation in a thread created by a piece of code
> under test. Although this should generally be avoided, it is possible,
> and there are occasionally good reasons to do so, so it is
> functionality that we should support.
> 
> Do you think I should add a comment to this effect?
> 
> > In what situation is there more than one thread reading or writing the
> > kunit struct? Isn't it only a single process that is going to be
> 
> As I said above, it is possible that the code under test may spawn a
> new thread that may make an expectation or an assertion. It is not a
> super common use case, but it is possible.

I wonder if it is worth having different types of tests based on
locking requirements: one with no locking, since it seems you imply
most tests would fall under this category, then one with locking, and
another with IRQ context.

If no locking is done at all for the tests which do not require locking,
are there any gains at run time? I'm sure they would be minimal, but I'm
curious.

> > operating on this structure? And why do we need to disable irqs? Are we
> > expecting to be modifying the unit tests from irq contexts?
> 
> There are instances where someone may want to test a driver which has
> an interrupt handler in it. I actually have (not the greatest) example
> here. Now in these cases, I expect someone to use a mock irqchip or
> some other fake mechanism to trigger the interrupt handler and not
> actual hardware; technically speaking in this case, it is not going to
> be accessed from a "real" irq context; however, the code under test
> should think that it is in an irq context; given that, I figured it is
> best to just treat it as a real irq context. Does that make sense?

Since it's a new architecture and since you seem to imply most tests
don't require locking or even IRQs disabled, I think it's worth
considering the impact of adding such extreme locking requirements for
an initial ramp up.

  Luis
Brendan Higgins June 25, 2019, 10:14 p.m. UTC | #4
On Tue, Jun 25, 2019 at 2:44 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
>
> On Tue, Jun 25, 2019 at 01:28:25PM -0700, Brendan Higgins wrote:
> > On Wed, Jun 19, 2019 at 5:15 PM Stephen Boyd <sboyd@kernel.org> wrote:
> > >
> > > Quoting Brendan Higgins (2019-06-17 01:25:56)
> > > > diff --git a/kunit/test.c b/kunit/test.c
> > > > new file mode 100644
> > > > index 0000000000000..d05d254f1521f
> > > > --- /dev/null
> > > > +++ b/kunit/test.c
> > > > @@ -0,0 +1,210 @@
> > > > +// SPDX-License-Identifier: GPL-2.0
> > > > +/*
> > > > + * Base unit test (KUnit) API.
> > > > + *
> > > > + * Copyright (C) 2019, Google LLC.
> > > > + * Author: Brendan Higgins <brendanhiggins@google.com>
> > > > + */
> > > > +
> > > > +#include <linux/sched/debug.h>
> > > > +#include <kunit/test.h>
> > > > +
> > > > +static bool kunit_get_success(struct kunit *test)
> > > > +{
> > > > +       unsigned long flags;
> > > > +       bool success;
> > > > +
> > > > +       spin_lock_irqsave(&test->lock, flags);
> > > > +       success = test->success;
> > > > +       spin_unlock_irqrestore(&test->lock, flags);
> > >
> > > I still don't understand the locking scheme in this code. Is the
> > > intention to make getter and setter APIs that are "safe" by adding in a
> > > spinlock that is held around getting and setting various members in the
> > > kunit structure?
> >
> > Yes, your understanding is correct. It is possible for a user to write
> > a test such that certain elements may be updated in different threads;
> > this would most likely happen in the case where someone wants to make
> > an assertion or an expectation in a thread created by a piece of code
> > under test. Although this should generally be avoided, it is possible,
> > and there are occasionally good reasons to do so, so it is
> > functionality that we should support.
> >
> > Do you think I should add a comment to this effect?
> >
> > > In what situation is there more than one thread reading or writing the
> > > kunit struct? Isn't it only a single process that is going to be
> >
> > As I said above, it is possible that the code under test may spawn a
> > new thread that may make an expectation or an assertion. It is not a
> > super common use case, but it is possible.
>
> I wonder if it is worth having different types of tests based on
> locking requirements: one with no locking, since it seems you imply
> most tests would fall under this category, then one with locking, and
> another with IRQ context.
>
> If no locking is done at all for the tests which do not require locking,
> are there any gains at run time? I'm sure they would be minimal, but I'm
> curious.

Yeah, I don't think it is worth it.

I don't think we need to be squeezing every ounce of performance out
of unit tests, since they are inherently a cost and are not intended
to be run in a production deployed kernel as part of normal production
usage.

> > > operating on this structure? And why do we need to disable irqs? Are we
> > > expecting to be modifying the unit tests from irq contexts?
> >
> > There are instances where someone may want to test a driver which has
> > an interrupt handler in it. I actually have (not the greatest) example
> > here. Now in these cases, I expect someone to use a mock irqchip or
> > some other fake mechanism to trigger the interrupt handler and not
> > actual hardware; technically speaking in this case, it is not going to
> > be accessed from a "real" irq context; however, the code under test
> > should think that it is in an irq context; given that, I figured it is
> > best to just treat it as a real irq context. Does that make sense?
>
> Since it's a new architecture and since you seem to imply most tests
> don't require locking or even IRQs disabled, I think it's worth
> considering the impact of adding such extreme locking requirements for
> an initial ramp up.

Fair enough, I can see the point of not wanting to use irq disabled
until we get someone complaining about it, but I think making it
thread safe is reasonable. It means there is one less thing to confuse
a KUnit user and the only penalty paid is some very minor performance.
Luis Chamberlain June 25, 2019, 10:33 p.m. UTC | #5
On Mon, Jun 17, 2019 at 01:25:56AM -0700, Brendan Higgins wrote:
> +/**
> + * module_test() - used to register a &struct kunit_module with KUnit.
> + * @module: a statically allocated &struct kunit_module.
> + *
> + * Registers @module with the test framework. See &struct kunit_module for more
> + * information.
> + */
> +#define module_test(module) \
> +		static int module_kunit_init##module(void) \
> +		{ \
> +			return kunit_run_tests(&module); \
> +		} \
> +		late_initcall(module_kunit_init##module)

Because late_initcall() is used, if these modules are built-in, this
would preclude the ability to test things prior to this part of the
kernel under UML or whatever architecture runs the tests. So, this
limits the scope of testing. Small detail but the scope should be
documented.

> +static void kunit_print_tap_version(void)
> +{
> +	if (!kunit_has_printed_tap_version) {
> +		kunit_printk_emit(LOGLEVEL_INFO, "TAP version 14\n");

What is this TAP thing? Why should we care what version it is on?
Why are we printing this?

> +		kunit_has_printed_tap_version = true;
> +	}
> +}
> +
> +static size_t kunit_test_cases_len(struct kunit_case *test_cases)
> +{
> +	struct kunit_case *test_case;
> +	size_t len = 0;
> +
> +	for (test_case = test_cases; test_case->run_case; test_case++)

If we make the last test case NULL, we'd just check for test_case here,
and save ourselves an extra few bytes per test module. Any reason why
the last test case cannot be NULL?

> +void kunit_init_test(struct kunit *test, const char *name)
> +{
> +	spin_lock_init(&test->lock);
> +	test->name = name;
> +	test->success = true;
> +}
> +
> +/*
> + * Performs all logic to run a test case.
> + */
> +static void kunit_run_case(struct kunit_module *module,
> +			   struct kunit_case *test_case)
> +{
> +	struct kunit test;
> +	int ret = 0;
> +
> +	kunit_init_test(&test, test_case->name);
> +
> +	if (module->init) {
> +		ret = module->init(&test);

I believe if we used struct kunit_module *kmodule it would be much
clearer whose init this is.

  Luis
Luis Chamberlain June 25, 2019, 11:02 p.m. UTC | #6
On Tue, Jun 25, 2019 at 03:14:45PM -0700, Brendan Higgins wrote:
> On Tue, Jun 25, 2019 at 2:44 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
> > Since it's a new architecture and since you seem to imply most tests
> > don't require locking or even IRQs disabled, I think it's worth
> > considering the impact of adding such extreme locking requirements for
> > an initial ramp up.
> 
> Fair enough, I can see the point of not wanting to use irq disabled
> until we get someone complaining about it, but I think making it
> thread safe is reasonable. It means there is one less thing to confuse
> a KUnit user and the only penalty paid is some very minor performance.

One reason I'm really excited about kunit is speed... so by all means I
think we're at a good point to analyze performance optimizations if
they do make sense.

While on the topic of parallelization, what about support for running
different test cases in parallel? Or at the very least different kunit
modules in parallel.  Few questions come up based on this prospect:

  * Why not support parallelism from the start?
  * Are you opposed to eventually having this added? For instance, there is
    enough code on lib/test_kmod.c for batching tons of kthreads each
    one running its own thing for testing purposes which could be used
    as template.
  * If we eventually *did* support it:
    - Would logs be skewed?
    - Could we have a way to query: give me log for only kunit module
      named "foo"?

  Luis
Brendan Higgins June 26, 2019, 12:07 a.m. UTC | #7
On Tue, Jun 25, 2019 at 3:33 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
>
> On Mon, Jun 17, 2019 at 01:25:56AM -0700, Brendan Higgins wrote:
> > +/**
> > + * module_test() - used to register a &struct kunit_module with KUnit.
> > + * @module: a statically allocated &struct kunit_module.
> > + *
> > + * Registers @module with the test framework. See &struct kunit_module for more
> > + * information.
> > + */
> > +#define module_test(module) \
> > +             static int module_kunit_init##module(void) \
> > +             { \
> > +                     return kunit_run_tests(&module); \
> > +             } \
> > +             late_initcall(module_kunit_init##module)
>
> Because late_initcall() is used, if these modules are built-in, this
> would preclude the ability to test things prior to this part of the
> kernel under UML or whatever architecture runs the tests. So, this
> limits the scope of testing. Small detail but the scope should be
> documented.

You aren't the first person to complain about this (and I am not sure
it is the first time you have complained about it). Anyway, I have
some follow on patches that will improve the late_initcall thing, and
people seemed okay with discussing the follow on patches as part of a
subsequent patchset after this gets merged.

I will nevertheless document the restriction until then.

> > +static void kunit_print_tap_version(void)
> > +{
> > +     if (!kunit_has_printed_tap_version) {
> > +             kunit_printk_emit(LOGLEVEL_INFO, "TAP version 14\n");
>
> What is this TAP thing? Why should we care what version it is on?
> Why are we printing this?

It's part of the TAP specification[1]. Greg and Frank asked me to make
the intermediate format conform to TAP. Seems like something else I
should probably document...

> > +             kunit_has_printed_tap_version = true;
> > +     }
> > +}
> > +
> > +static size_t kunit_test_cases_len(struct kunit_case *test_cases)
> > +{
> > +     struct kunit_case *test_case;
> > +     size_t len = 0;
> > +
> > +     for (test_case = test_cases; test_case->run_case; test_case++)
>
> If we make the last test case NULL, we'd just check for test_case here,
> and save ourselves an extra few bytes per test module. Any reason why
> the last test case cannot be NULL?

Is there any way to make that work with a statically defined array?

Basically, I want to be able to do something like:

static struct kunit_case example_test_cases[] = {
        KUNIT_CASE(example_simple_test),
        KUNIT_CASE(example_mock_test),
        {}
};

FYI,
#define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }

In order to do what you are proposing, I think I need an array of
pointers to test cases, which is not ideal.
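
For comparison, terminating on a NULL pointer would force something
like the following, with a separately named variable per test case,
which is a lot noisier:

static struct kunit_case example_simple_test_case =
	KUNIT_CASE(example_simple_test);
static struct kunit_case example_mock_test_case =
	KUNIT_CASE(example_mock_test);

static struct kunit_case *example_test_cases[] = {
	&example_simple_test_case,
	&example_mock_test_case,
	NULL
};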

> > +void kunit_init_test(struct kunit *test, const char *name)
> > +{
> > +     spin_lock_init(&test->lock);
> > +     test->name = name;
> > +     test->success = true;
> > +}
> > +
> > +/*
> > + * Performs all logic to run a test case.
> > + */
> > +static void kunit_run_case(struct kunit_module *module,
> > +                        struct kunit_case *test_case)
> > +{
> > +     struct kunit test;
> > +     int ret = 0;
> > +
> > +     kunit_init_test(&test, test_case->name);
> > +
> > +     if (module->init) {
> > +             ret = module->init(&test);
>
> I believe if we used struct kunit_module *kmodule it would be much
> clearer whose init this is.

That's reasonable. I will fix in next revision.

Cheers!

[1] https://github.com/TestAnything/Specification/blob/tap-14-specification/specification.md
Luis Chamberlain June 26, 2019, 3:36 a.m. UTC | #8
On Tue, Jun 25, 2019 at 05:07:32PM -0700, Brendan Higgins wrote:
> On Tue, Jun 25, 2019 at 3:33 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
> >
> > On Mon, Jun 17, 2019 at 01:25:56AM -0700, Brendan Higgins wrote:
> > > +/**
> > > + * module_test() - used to register a &struct kunit_module with KUnit.
> > > + * @module: a statically allocated &struct kunit_module.
> > > + *
> > > + * Registers @module with the test framework. See &struct kunit_module for more
> > > + * information.
> > > + */
> > > +#define module_test(module) \
> > > +             static int module_kunit_init##module(void) \
> > > +             { \
> > > +                     return kunit_run_tests(&module); \
> > > +             } \
> > > +             late_initcall(module_kunit_init##module)
> >
> > Because late_initcall() is used, if these modules are built-in, this
> > would preclude the ability to test things prior to this part of the
> > kernel under UML or whatever architecture runs the tests. So, this
> > limits the scope of testing. Small detail but the scope should be
> > documented.
> 
> You aren't the first person to complain about this (and I am not sure
> it is the first time you have complained about it). Anyway, I have
> some follow on patches that will improve the late_initcall thing, and
> people seemed okay with discussing the follow on patches as part of a
> subsequent patchset after this gets merged.
> 
> I will nevertheless document the restriction until then.

To be clear, I am not complaining about it. I just find it simply
critical to document its limitations, so folks don't try to invest
time and energy on kunit right away for an early init test, if it
cannot support it.

If support for that requires some work, it may be worth mentioning
as well.

> > > +static void kunit_print_tap_version(void)
> > > +{
> > > +     if (!kunit_has_printed_tap_version) {
> > > +             kunit_printk_emit(LOGLEVEL_INFO, "TAP version 14\n");
> >
> > What is this TAP thing? Why should we care what version it is on?
> > Why are we printing this?
> 
> It's part of the TAP specification[1]. Greg and Frank asked me to make
> the intermediate format conform to TAP. Seems like something else I
> should probably document...

Yes thanks!

> > > +             kunit_has_printed_tap_version = true;
> > > +     }
> > > +}
> > > +
> > > +static size_t kunit_test_cases_len(struct kunit_case *test_cases)
> > > +{
> > > +     struct kunit_case *test_case;
> > > +     size_t len = 0;
> > > +
> > > +     for (test_case = test_cases; test_case->run_case; test_case++)
> >
> > If we make the last test case NULL, we'd just check for test_case here,
> > and save ourselves an extra few bytes per test module. Any reason why
> > the last test case cannot be NULL?
> 
> Is there any way to make that work with a statically defined array?

No you're right.

> Basically, I want to be able to do something like:
> 
> static struct kunit_case example_test_cases[] = {
>         KUNIT_CASE(example_simple_test),
>         KUNIT_CASE(example_mock_test),
>         {}
> };
> 
> FYI,
> #define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }

> 
> In order to do what you are proposing, I think I need an array of
> pointers to test cases, which is not ideal.

Yeah, you're right. The only other alternative is to have a:

struct kunit_module {
       const char name[256];
       int (*init)(struct kunit *test);
       void (*exit)(struct kunit *test);
       struct kunit_case *test_cases;
+       unsigned int num_cases;
};

And then something like:

#define KUNIT_MODULE(name, init, exit, cases) { \
	.name = name, \
	.init = init, \
	.exit = exit, \
	.test_cases = cases, \
	.num_cases = ARRAY_SIZE(cases), \
}

Let's evaluate which is better: one extra empty test case at the end of
every array of test cases, or an extra unsigned int for each kunit module.
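
With num_cases carried in the module, the walking helper would reduce
to something like:

static size_t kunit_test_cases_len(struct kunit_module *kmodule)
{
	return kmodule->num_cases;
}

at the cost of the extra field in every module.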

  Luis
Stephen Boyd June 26, 2019, 3:40 a.m. UTC | #9
Quoting Brendan Higgins (2019-06-25 13:28:25)
> On Wed, Jun 19, 2019 at 5:15 PM Stephen Boyd <sboyd@kernel.org> wrote:
> >
> > Quoting Brendan Higgins (2019-06-17 01:25:56)
> > > diff --git a/kunit/test.c b/kunit/test.c
> > > new file mode 100644
> > > index 0000000000000..d05d254f1521f
> > > --- /dev/null
> > > +++ b/kunit/test.c
> > > @@ -0,0 +1,210 @@
> > > +// SPDX-License-Identifier: GPL-2.0
> > > +/*
> > > + * Base unit test (KUnit) API.
> > > + *
> > > + * Copyright (C) 2019, Google LLC.
> > > + * Author: Brendan Higgins <brendanhiggins@google.com>
> > > + */
> > > +
> > > +#include <linux/sched/debug.h>
> > > +#include <kunit/test.h>
> > > +
> > > +static bool kunit_get_success(struct kunit *test)
> > > +{
> > > +       unsigned long flags;
> > > +       bool success;
> > > +
> > > +       spin_lock_irqsave(&test->lock, flags);
> > > +       success = test->success;
> > > +       spin_unlock_irqrestore(&test->lock, flags);
> >
> > I still don't understand the locking scheme in this code. Is the
> > intention to make getter and setter APIs that are "safe" by adding in a
> > spinlock that is held around getting and setting various members in the
> > kunit structure?
> 
> Yes, your understanding is correct. It is possible for a user to write
> a test such that certain elements may be updated in different threads;
> this would most likely happen in the case where someone wants to make
> an assertion or an expectation in a thread created by a piece of code
> under test. Although this should generally be avoided, it is possible,
> and there are occasionally good reasons to do so, so it is
> functionality that we should support.
> 
> Do you think I should add a comment to this effect?

No, I think the locking should be removed.

> 
> > In what situation is there more than one thread reading or writing the
> > kunit struct? Isn't it only a single process that is going to be
> 
> As I said above, it is possible that the code under test may spawn a
> new thread that may make an expectation or an assertion. It is not a
> super common use case, but it is possible.

Sure, sounds super possible and OK.

> 
> > operating on this structure? And why do we need to disable irqs? Are we
> > expecting to be modifying the unit tests from irq contexts?
> 
> There are instances where someone may want to test a driver which has
> an interrupt handler in it. I actually have (not the greatest) example
> here. Now in these cases, I expect someone to use a mock irqchip or
> some other fake mechanism to trigger the interrupt handler and not
> actual hardware; technically speaking in this case, it is not going to
> be accessed from a "real" irq context; however, the code under test
> should think that it is in an irq context; given that, I figured it is
> best to just treat it as a real irq context. Does that make sense?

Can you please describe the scenario in which grabbing the lock here,
updating a single variable, and then releasing the lock right after
does anything useful vs. not having the lock? I'm looking for a two CPU
scenario like below, but where it is a problem. There could be three
CPUs, or even one CPU and three threads if you want to describe the
extra thread scenario.

Here's my scenario where it isn't needed:

    CPU0                                      CPU1
    ----                                      ----
    kunit_run_test(&test)
                                              test_case_func()
					        ....
                                              [mock hardirq]
					        kunit_set_success(&test)
					      [hardirq ends]
                                                ...
                                                complete(&test_done)
      wait_for_completion(&test_done)
      kunit_get_success(&test)

We don't need to care about having locking here because success or
failure only happens in one place and it's synchronized with the
completion.

> 
> > > +
> > > +       return success;
> > > +}
> > > +
> > > +static void kunit_set_success(struct kunit *test, bool success)
> > > +{
> > > +       unsigned long flags;
> > > +
> > > +       spin_lock_irqsave(&test->lock, flags);
> > > +       test->success = success;
> > > +       spin_unlock_irqrestore(&test->lock, flags);
> > > +}
Brendan Higgins June 26, 2019, 6:41 a.m. UTC | #10
On Tue, Jun 25, 2019 at 4:02 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
>
> On Tue, Jun 25, 2019 at 03:14:45PM -0700, Brendan Higgins wrote:
> > On Tue, Jun 25, 2019 at 2:44 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
> > > Since it's a new architecture and since you seem to imply most tests
> > > don't require locking or even IRQs disabled, I think it's worth
> > > considering the impact of adding such extreme locking requirements for
> > > an initial ramp up.
> >
> > Fair enough, I can see the point of not wanting to use irq disabled
> > until we get someone complaining about it, but I think making it
> > thread safe is reasonable. It means there is one less thing to confuse
> > a KUnit user and the only penalty paid is some very minor performance.
>
> One reason I'm really excited about kunit is speed... so by all means I
> think we're at a good point to analyze performance optimizations if
> they do make sense.

Yeah, but I think there are much lower hanging fruit than this (as you
point out below). I am all for making/keeping KUnit super fast, but I
also don't want to waste time with premature optimizations and I think
having thread safe expectations and non-thread safe expectations hurts
usability.

Still, I am on board with making this a mutex instead of a spinlock for now.

> While on the topic of parallization, what about support for running
> different test cases in parallel? Or at the very least different kunit
> modules in parallel.  Few questions come up based on this prospect:
>
>   * Why not support parallelism from the start?

Just because it is more work and there isn't much to gain from it right now.

Some numbers:
I currently have a collection of 86 test cases in the branch that this
patchset is from. I turned on PRINTK_TIME and looked at the first
KUnit output and the last. On UML, start time was 0.090000, and end
time was 0.090000. Looks like sched_clock is not very good on UML.

Still it seems quite likely that all of these tests run around 0.01
seconds or less on UML: I ran KUnit with only 2 test cases enabled
three times and got an average runtime of 1.55867 seconds with a
standard deviation of 0.0346747. I then ran it another three times
with all test cases enabled and got an average runtime of 1.535
seconds with a standard deviation of 0.0150997. The second average is
less, but that doesn't really mean anything because it is well within
one standard deviation with a very small sample size. Nevertheless, we
can conclude that the actual runtime of those 84 test cases is most
likely within one standard deviation, so on the order of 0.01 seconds.

On x86 running on QEMU, first message from KUnit was printed at
0.194251 and the last KUnit message was printed at 0.340915, meaning
that all 86 test cases ran in about 0.146664 seconds.

In any case, running KUnit tests in parallel is definitely something I
plan on adding it eventually, but it just doesn't really seem worth it
right now. I find the incremental build time of the kernel to
typically be between 3 and 30 seconds, and a clean build to be between
30 seconds to several minutes, depending on the number of available
cores, so I don't think most users would even notice the amount of
runtime contributed by the actual unit tests until we start getting
into the 1000s of test cases. I don't suspect it will become an issue
until we get into the 10,000s of test cases. I think we are a pretty
long way off from that.

>   * Are you opposed to eventually having this added? For instance, there is
>     enough code on lib/test_kmod.c for batching tons of kthreads each
>     one running its own thing for testing purposes which could be used
>     as template.

I am not opposed to adding it eventually at all. I actually plan on
doing so, just not in this patchset. There are a lot of additional
features, improvements, and sugar that I really want to add, so much
so that most of it doesn't belong in this patchset; I just think this
is one of those things that belongs in a follow up. I tried to boil
down this patchset to as small as I could while still being useful;
this is basically an MVP. Maybe after this patchset gets merged I
should post a list of things I have ready for review, or would like to
work on, and people can comment on what things they want to see next.

>   * If we eventually *did* support it:
>     - Would logs be skewed?

Probably, before I went with the TAP approach, I was tagging each
message with the test case it came from and I could have parsed it and
assembled a coherent view of the logs using that; now that I am using
TAP conforming output, that won't work. I haven't really thought too
hard about how to address it, but there are ways. For the UML users, I
am planning on adding a feature to guarantee hermeticity between tests
running in different modules by adding a feature that allows a single
kernel to be built with all tests included, and then determine which
tests get run by passing in command line arguments or something. This
way you can get the isolation from running tests in separate
environments without increasing the build cost. We could also use this
method to achieve parallelism by dispatching multiple kernels at once.
That only works for UML, but I imagine you could do something similar
for users running tests under qemu.

>     - Could we have a way to query: give me log for only kunit module
>       named "foo"?

Yeah, I think that would make sense as part of the hermeticity thing I
mentioned above.

Hope that seems reasonable!
Luis Chamberlain June 26, 2019, 10:02 p.m. UTC | #11
On Tue, Jun 25, 2019 at 11:41:47PM -0700, Brendan Higgins wrote:
> On Tue, Jun 25, 2019 at 4:02 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
> >
> > On Tue, Jun 25, 2019 at 03:14:45PM -0700, Brendan Higgins wrote:
> > > On Tue, Jun 25, 2019 at 2:44 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
> > > > Since it's a new architecture and since you seem to imply most tests
> > > > don't require locking or even IRQs disabled, I think it's worth
> > > > considering the impact of adding such extreme locking requirements for
> > > > an initial ramp up.
> > >
> > > Fair enough, I can see the point of not wanting to use irq disabled
> > > until we get someone complaining about it, but I think making it
> > > thread safe is reasonable. It means there is one less thing to confuse
> > > a KUnit user and the only penalty paid is some very minor performance.
> >
> > One reason I'm really excited about kunit is speed... so by all means I
> > think we're at a good point to analyze performance optimizations if
> > they do make sense.
> 
> Yeah, but I think there are much lower hanging fruit than this (as you
> point out below). I am all for making/keeping KUnit super fast, but I
> also don't want to waste time with premature optimizations and I think
> having thread safe expectations and non-thread safe expectations hurts
> usability.
> 
> Still, I am on board with making this a mutex instead of a spinlock for now.
> 
> > While on the topic of parallelization, what about support for running
> > different test cases in parallel? Or at the very least different kunit
> > modules in parallel.  Few questions come up based on this prospect:
> >
> >   * Why not support parallelism from the start?
> 
> Just because it is more work and there isn't much to gain from it right now.
> 
> Some numbers:
> I currently have a collection of 86 test cases in the branch that this
> patchset is from.

Impressive, imagine 86 tests and kunit is not even *merged yet*.

> I turned on PRINTK_TIME and looked at the first
> KUnit output and the last. On UML, start time was 0.090000, and end
> time was 0.090000. Looks like sched_clock is not very good on UML.

Since you have a python thing that kicks off the tests, what if you just
run time(1) on it?

> Still it seems quite likely that all of these tests run around 0.01
> seconds or less on UML: I ran KUnit with only 2 test cases enabled
> three times and got an average runtime of 1.55867 seconds with a
> standard deviation of 0.0346747. I then ran it another three times
> with all test cases enabled and got an average runtime of 1.535
> seconds with a standard deviation of 0.0150997. The second average is
> less, but that doesn't really mean anything because it is well within
> one standard deviation with a very small sample size. Nevertheless, we
> can conclude that the actual runtime of those 84 test cases is most
> likely within one standard deviation, so on the order of 0.01 seconds.
> 
> On x86 running on QEMU, first message from KUnit was printed at
> 0.194251 and the last KUnit message was printed at 0.340915, meaning
> that all 86 test cases ran in about 0.146664 seconds.

Pretty impressive numbers. But can you blame me for expressing the
desire to possibly be able to do better? I am not saying -- let's
definitely have parallelism in place *now*. Just wanted to hear out
tangibles of why we *don't* want it now.

And also, since we are reviewing now, try to target things so that the
code can later get a face lift to support parallelism without requiring
many changes.

> In any case, running KUnit tests in parallel is definitely something I
> plan on adding it eventually, but it just doesn't really seem worth it
> right now.

Makes sense!

> I find the incremental build time of the kernel to
> typically be between 3 and 30 seconds, and a clean build to be between
> 30 seconds to several minutes, depending on the number of available
> cores, so I don't think most users would even notice the amount of
> runtime contributed by the actual unit tests until we start getting
> into the 1000s of test cases. I don't suspect it will become an issue
> until we get into the 10,000s of test cases. I think we are a pretty
> long way off from that.

All makes sense, and agreed based on the numbers you are providing.
Thanks for the details!

> >   * Are you opposed to eventually having this added? For instance, there is
> >     enough code on lib/test_kmod.c for batching tons of kthreads each
> >     one running its own thing for testing purposes which could be used
> >     as template.
> 
> I am not opposed to adding it eventually at all. I actually plan on
> doing so, just not in this patchset. There are a lot of additional
> features, improvements, and sugar that I really want to add, so much
> so that most of it doesn't belong in this patchset; I just think this
> is one of those things that belongs in a follow up. I tried to boil
> down this patchset to as small as I could while still being useful;
> this is basically an MVP. Maybe after this patchset gets merged I
> should post a list of things I have ready for review, or would like to
> work on, and people can comment on what things they want to see next.

Groovy.

> >   * If we eventually *did* support it:
> >     - Would logs be skewed?
> 
> Probably, before I went with the TAP approach, I was tagging each
> message with the test case it came from and I could have parsed it and
> assembled a coherent view of the logs using that; now that I am using
> TAP conforming output, that won't work. I haven't really thought too
> hard about how to address it, but there are ways. For the UML users, I
> am planning on adding a feature to guarantee hermeticity between tests
> running in different modules by adding a feature that allows a single
> kernel to be built with all tests included, and then determine which
> tests get run by passing in command line arguments or something. This
> way you can get the isolation from running tests in separate
> environments without increasing the build cost. We could also use this
> method to achieve parallelism by dispatching multiple kernels at once.

Indeed. Or... wait for it... containers... There are tools for these sorts
of things already. And I'm evaluating such prospects now for some other
tests I care for.

> That only works for UML, but I imagine you could do something similar
> for users running tests under qemu.

Or containers.

> >     - Could we have a way to query: give me log for only kunit module
> >       named "foo"?
> 
> Yeah, I think that would make sense as part of the hermeticity thing I
> mentioned above.
> 
> Hope that seems reasonable!

All groovy. Thanks for the details!

  Luis
Brendan Higgins June 26, 2019, 10:16 p.m. UTC | #12
On Tue, Jun 25, 2019 at 8:36 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
>
> On Tue, Jun 25, 2019 at 05:07:32PM -0700, Brendan Higgins wrote:
> > On Tue, Jun 25, 2019 at 3:33 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
> > >
> > > On Mon, Jun 17, 2019 at 01:25:56AM -0700, Brendan Higgins wrote:
> > > > +/**
> > > > + * module_test() - used to register a &struct kunit_module with KUnit.
> > > > + * @module: a statically allocated &struct kunit_module.
> > > > + *
> > > > + * Registers @module with the test framework. See &struct kunit_module for more
> > > > + * information.
> > > > + */
> > > > +#define module_test(module) \
> > > > +             static int module_kunit_init##module(void) \
> > > > +             { \
> > > > +                     return kunit_run_tests(&module); \
> > > > +             } \
> > > > +             late_initcall(module_kunit_init##module)
> > >
> > > Because late_initcall() is used, if these modules are built-in, this
> > > would preclude the ability to test things prior to this part of the
> > > kernel under UML or whatever architecture runs the tests. So, this
> > > limits the scope of testing. Small detail but the scope should be
> > > documented.
> >
> > You aren't the first person to complain about this (and I am not sure
> > it is the first time you have complained about it). Anyway, I have
> > some follow on patches that will improve the late_initcall thing, and
> > people seemed okay with discussing the follow on patches as part of a
> > subsequent patchset after this gets merged.
> >
> > I will nevertheless document the restriction until then.
>
> To be clear, I am not complaining about it. I just find it simply
> critical to document its limitations, so folks don't try to invest
> time and energy on kunit right away for an early init test, if it
> cannot support it.
>
> If support for that requires some work, it may be worth mentioning
> as well.

Makes sense. And in any case, it is something I do want to do, just not
right now. I will put a TODO here in the next revision.

> > > > +static void kunit_print_tap_version(void)
> > > > +{
> > > > +     if (!kunit_has_printed_tap_version) {
> > > > +             kunit_printk_emit(LOGLEVEL_INFO, "TAP version 14\n");
> > >
> > > What is this TAP thing? Why should we care what version it is on?
> > > Why are we printing this?
> >
> > It's part of the TAP specification[1]. Greg and Frank asked me to make
> > the intermediate format conform to TAP. Seems like something else I
> > should probably document...
>
> Yes thanks!
>
> > > > +             kunit_has_printed_tap_version = true;
> > > > +     }
> > > > +}
> > > > +
> > > > +static size_t kunit_test_cases_len(struct kunit_case *test_cases)
> > > > +{
> > > > +     struct kunit_case *test_case;
> > > > +     size_t len = 0;
> > > > +
> > > > +     for (test_case = test_cases; test_case->run_case; test_case++)
> > >
> > > If we make the last test case NULL, we'd just check for test_case here,
> > > and save ourselves an extra few bytes per test module. Any reason why
> > > the last test case cannot be NULL?
> >
> > Is there any way to make that work with a statically defined array?
>
> No you're right.
>
> > Basically, I want to be able to do something like:
> >
> > static struct kunit_case example_test_cases[] = {
> >         KUNIT_CASE(example_simple_test),
> >         KUNIT_CASE(example_mock_test),
> >         {}
> > };
> >
> > FYI,
> > #define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }
>
> >
> > In order to do what you are proposing, I think I need an array of
> > pointers to test cases, which is not ideal.
>
> Yeah, you're right. The only other alternative is to have a:
>
> struct kunit_module {
>        const char name[256];
>        int (*init)(struct kunit *test);
>        void (*exit)(struct kunit *test);
>        struct kunit_case *test_cases;
> +       unsigned int num_cases;
> };
>
> And then something like:
>
> #define KUNIT_MODULE(name, init, exit, cases) { \
>         .name = name, \
>         .init = init, \
>         .exit = exit, \
>         .test_cases = cases, \
>         .num_cases = ARRAY_SIZE(cases), \
> }
>
> Let's evaluate which is better: one extra empty test case at the end of
> every array of test cases, or an extra unsigned int for each kunit module.

I am in favor of the current method since init and exit are optional
arguments. I could see myself (actually I am planning on) adding more
optional things to the kunit_module, so having optional arguments will
make my life a lot easier since I won't have to go through big
refactorings around the kernel to support new features that tie in
here.
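
For example, with the current method a minimal suite with no init or
exit is just something like:

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_simple_test),
	{}
};

static struct kunit_module example_test_module = {
	.name = "example",
	.test_cases = example_test_cases,
};
module_test(example_test_module);

and the optional fields only show up in the modules that need them.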
Brendan Higgins June 26, 2019, 11 p.m. UTC | #13
On Tue, Jun 25, 2019 at 8:41 PM Stephen Boyd <sboyd@kernel.org> wrote:
>
> Quoting Brendan Higgins (2019-06-25 13:28:25)
> > On Wed, Jun 19, 2019 at 5:15 PM Stephen Boyd <sboyd@kernel.org> wrote:
> > >
> > > Quoting Brendan Higgins (2019-06-17 01:25:56)
> > > > diff --git a/kunit/test.c b/kunit/test.c
> > > > new file mode 100644
> > > > index 0000000000000..d05d254f1521f
> > > > --- /dev/null
> > > > +++ b/kunit/test.c
> > > > @@ -0,0 +1,210 @@
> > > > +// SPDX-License-Identifier: GPL-2.0
> > > > +/*
> > > > + * Base unit test (KUnit) API.
> > > > + *
> > > > + * Copyright (C) 2019, Google LLC.
> > > > + * Author: Brendan Higgins <brendanhiggins@google.com>
> > > > + */
> > > > +
> > > > +#include <linux/sched/debug.h>
> > > > +#include <kunit/test.h>
> > > > +
> > > > +static bool kunit_get_success(struct kunit *test)
> > > > +{
> > > > +       unsigned long flags;
> > > > +       bool success;
> > > > +
> > > > +       spin_lock_irqsave(&test->lock, flags);
> > > > +       success = test->success;
> > > > +       spin_unlock_irqrestore(&test->lock, flags);
> > >
> > > I still don't understand the locking scheme in this code. Is the
> > > intention to make getter and setter APIs that are "safe" by adding in a
> > > spinlock that is held around getting and setting various members in the
> > > kunit structure?
> >
> > Yes, your understanding is correct. It is possible for a user to write
> > a test such that certain elements may be updated in different threads;
> > this would most likely happen in the case where someone wants to make
> > an assertion or an expectation in a thread created by a piece of code
> > under test. Although this should generally be avoided, it is possible,
> > and there are occasionally good reasons to do so, so it is
> > functionality that we should support.
> >
> > Do you think I should add a comment to this effect?
>
> No, I think the locking should be removed.
>
> >
> > > In what situation is there more than one thread reading or writing the
> > > kunit struct? Isn't it only a single process that is going to be
> >
> > As I said above, it is possible that the code under test may spawn a
> > new thread that may make an expectation or an assertion. It is not a
> > super common use case, but it is possible.
>
> Sure, sounds super possible and OK.
>
> >
> > > operating on this structure? And why do we need to disable irqs? Are we
> > > expecting to be modifying the unit tests from irq contexts?
> >
> > There are instances where someone may want to test a driver which has
> > an interrupt handler in it. I actually have (not the greatest) example
> > here. Now in these cases, I expect someone to use a mock irqchip or
> > some other fake mechanism to trigger the interrupt handler and not
> > actual hardware; technically speaking in this case, it is not going to
> > be accessed from a "real" irq context; however, the code under test
> > should think that it is in an irq context; given that, I figured it is
> > best to just treat it as a real irq context. Does that make sense?
>
> Can you please describe the scenario in which grabbing the lock here,
> updating a single variable, and then releasing the lock right after
> does anything useful vs. not having the lock? I'm looking for a two CPU

Sure.

> scenario like below, but where it is a problem. There could be three
> CPUs, or even one CPU and three threads if you want to describe the
> extra thread scenario.
>
> Here's my scenario where it isn't needed:
>
>     CPU0                                      CPU1
>     ----                                      ----
>     kunit_run_test(&test)
>                                               test_case_func()
>                                                 ....
>                                               [mock hardirq]
>                                                 kunit_set_success(&test)
>                                               [hardirq ends]
>                                                 ...
>                                                 complete(&test_done)
>       wait_for_completion(&test_done)
>       kunit_get_success(&test)
>
> We don't need to care about having locking here because success or
> failure only happens in one place and it's synchronized with the
> completion.

Here is the scenario I am concerned about:

CPU0                      CPU1                       CPU2
----                      ----                       ----
kunit_run_test(&test)
                          test_case_func()
                            ....
                            schedule_work(foo_func)
                          [mock hardirq]             foo_func()
                            ...                        ...
                            kunit_set_success(false)   kunit_set_success(false)
                          [hardirq ends]               ...
                            ...
                            complete(&test_done)
  wait_for_completion(...)
  kunit_get_success(&test)

In my scenario, both CPU1 and CPU2 update the success status of the
test simultaneously, even though they are setting it to the same
value. If my understanding is correct, this could result in a
write-tear on some architectures in some circumstances. I suppose we
could just make it an atomic boolean, but I figured locking is also
fine, and generally preferred.

Also, to be clear, I am on board with dropping the IRQ stuff for now.
I am fine moving to a mutex for the time being.
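
For concreteness, here is a minimal sketch of the mutex-based variant I
have in mind; it assumes struct kunit's lock member becomes a struct
mutex, and it is not part of the posted patch:

#include <linux/mutex.h>

/*
 * Sketch only: struct kunit would declare `struct mutex lock;` and
 * kunit_init_test() would call mutex_init(&test->lock) instead of
 * spin_lock_init().
 */
static void kunit_set_success(struct kunit *test, bool success)
{
	mutex_lock(&test->lock);
	test->success = success;
	mutex_unlock(&test->lock);
}

static bool kunit_get_success(struct kunit *test)
{
	bool success;

	mutex_lock(&test->lock);
	success = test->success;
	mutex_unlock(&test->lock);

	return success;
}

Note that this only works if the success flag is never touched from a
context that cannot sleep, which is consistent with dropping the IRQ
handling for now.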

>
> >
> > > > +
> > > > +       return success;
> > > > +}
> > > > +
> > > > +static void kunit_set_success(struct kunit *test, bool success)
> > > > +{
> > > > +       unsigned long flags;
> > > > +
> > > > +       spin_lock_irqsave(&test->lock, flags);
> > > > +       test->success = success;
> > > > +       spin_unlock_irqrestore(&test->lock, flags);
> > > > +}
Brendan Higgins June 27, 2019, 12:05 a.m. UTC | #14
On Wed, Jun 26, 2019 at 3:02 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
>
> On Tue, Jun 25, 2019 at 11:41:47PM -0700, Brendan Higgins wrote:
> > On Tue, Jun 25, 2019 at 4:02 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
> > >
> > > On Tue, Jun 25, 2019 at 03:14:45PM -0700, Brendan Higgins wrote:
> > > > On Tue, Jun 25, 2019 at 2:44 PM Luis Chamberlain <mcgrof@kernel.org> wrote:
> > > > > Since it's a new architecture and since you seem to imply most tests
> > > > > don't require locking or even IRQs disabled, I think it's worth
> > > > > considering the impact of adding such extreme locking requirements
> > > > > for an initial ramp up.
> > > >
> > > > Fair enough, I can see the point of not wanting to disable IRQs until
> > > > we get someone complaining about it, but I think making it thread safe
> > > > is reasonable. It means there is one less thing to confuse a KUnit
> > > > user, and the only penalty is a very minor performance cost.
> > >
> > > One reason I'm really excited about kunit is speed... so by all means I
> > > think we're at a good point to analyze performance optimizationsm if
> > > they do make sense.
> >
> > Yeah, but I think there are much lower hanging fruit than this (as you
> > point out below). I am all for making/keeping KUnit super fast, but I
> > also don't want to waste time with premature optimizations and I think
> > having thread safe expectations and non-thread safe expectations hurts
> > usability.
> >
> > Still, I am on board with making this a mutex instead of a spinlock for now.
> >
> > > While on the topic of parallelization, what about support for running
> > > different test cases in parallel? Or, at the very least, different kunit
> > > modules in parallel. A few questions come up based on this prospect:
> > >
> > >   * Why not support parallelism from the start?
> >
> > Just because it is more work and there isn't much to gain from it right now.
> >
> > Some numbers:
> > I currently have a collection of 86 test cases in the branch that this
> > patchset is from.
>
> Impressive, imagine 86 tests and kunit is not even *merged yet*.

Full disclaimer: about half of them are KUnit tests for KUnit itself
(to make sure KUnit works), so I don't know if you consider that cheating.

> > I turned on PRINTK_TIME and looked at the first
> > KUnit output and the last. On UML, start time was 0.090000, and end
> > time was 0.090000. Looks like sched_clock is not very good on UML.
>
> Since you have a python thing that kicks tests, what if you just run
> time on it?

That's what I did in the following paragraph (I just couldn't time the
tests by themselves in this case):

> > Still, it seems quite likely that all of these tests run in around 0.01
> > seconds or less on UML: I ran KUnit with only 2 test cases enabled
> > three times and got an average runtime of 1.55867 seconds with a
> > standard deviation of 0.0346747. I then ran it another three times
> > with all test cases enabled and got an average runtime of 1.535
> > seconds with a standard deviation of 0.0150997. The second average is
> > less, but that doesn't really mean anything because it is well within
> > one standard deviation with a very small sample size. Nevertheless, we
> > can conclude that the actual runtime of those 84 test cases is most
> > likely within one standard deviation, so on the order of 0.01 seconds.
> >
> > On x86 running on QEMU, the first message from KUnit was printed at
> > 0.194251 and the last KUnit message was printed at 0.340915, meaning
> > that all 86 test cases ran in about 0.146664 seconds.
>
> Pretty impressive numbers. But can you blame me for expressing the
> desire to possibly be able to do better? I am not saying let's
> definitely have parallelism in place *now*. I just wanted to hear the
> tangible reasons why we *don't* want it now.

I agree, faster is almost always better in these types of things, and
certainly is in this case.

In fairness to you, I also undersold the speed of KUnit in the cover
letter. I was too lazy to do such a complete analysis back when I wrote
it (and even if I had done a complete timing like this, I would have had
to add a bunch of asterisks, since it wouldn't include the time to
"boot" the kernel or to build it, both of which vastly outstrip the
runtime of the individual test cases). And given only the original
numbers, speeding things up would probably seem even more urgent. So no,
I really cannot blame you.

Sorry if it came across as though I was frustrated or impatient; I am
actually glad you asked, because I now have this public email with a
full analysis of how fast KUnit really is, which I can refer to in the
future.

> And.. also, since we are reviewing now, try to target so that the code
> can later likely get a face lift to support parallelism without
> requiring much changes.

Also fair.

> > In any case, running KUnit tests in parallel is definitely something I
> > plan on adding eventually, but it just doesn't really seem worth it
> > right now.
>
> Makes sense!
>
> > I find the incremental build time of the kernel to
> > typically be between 3 and 30 seconds, and a clean build to be between
> > 30 seconds to several minutes, depending on the number of available
> > cores, so I don't think most users would even notice the amount of
> > runtime contributed by the actual unit tests until we start getting
> > into the 1000s of test cases. I don't suspect it will become an issue
> > until we get into the 10,000s of test cases. I think we are a pretty
> > long way off from that.
>
> All makes sense, and agreed based on the numbers you are providing.
> Thanks for the details!
>
> > >   * Are you opposed to eventually having this added? For instance, there is
> > >     enough code on lib/test_kmod.c for batching tons of kthreads each
> > >     one running its own thing for testing purposes which could be used
> > >     as template.
> >
> > I am not opposed to adding it eventually at all. I actually plan on
> > doing so, just not in this patchset. There are a lot of additional
> > features, improvements, and sugar that I really want to add, so much
> > so that most of it doesn't belong in this patchset; I just think this
> > is one of those things that belongs in a follow up. I tried to boil
> > down this patchset to as small as I could while still being useful;
> > this is basically an MVP. Maybe after this patchset gets merged I
> > should post a list of things I have ready for review, or would like to
> > work on, and people can comment on what things they want to see next.
>
> Groovy.

Cool, I will do that then!

> > >   * If we eventually *did* support it:
> > >     - Would logs be skewed?
> >
> > Probably, before I went with the TAP approach, I was tagging each
> > message with the test case it came from and I could have parsed it and
> > assembled a coherent view of the logs using that; now that I am using
> > TAP conforming output, that won't work. I haven't really thought too
> > hard about how to address it, but there are ways. For the UML users, I
> > am planning to guarantee hermeticity between tests running in different
> > modules by adding a feature that allows a single kernel to be built
> > with all tests included, and then determining which tests get run by
> > passing in command line arguments or something. This way you can get
> > the isolation of running tests in separate environments without
> > increasing the build cost. We could also use this method to achieve
> > parallelism by dispatching multiple kernels at once.
>
> Indeed. Or... wait for it... containers... There tools for these sorts
> of things already. And I'm evaluating such prospects now for some other
> tests I care for.

Containers could definitely be useful in the long run. I have a long
term goal to build and run just parts of the kernel as I have
mentioned to you, and doing so in a totally hermetic environment could
provide a lot of value; in this case I would probably only want a
chroot, but if I want to deploy tests to run on different machines,
containers could be very useful.

Actually, on the topic of containers for running tests, the presubmit
system I have set up for KUnit uses containers for deploying KUnit on
servers for testing[1]. I have some experimental patches to make it
work with LKML instead of Gerrit, but I am not sure whether it makes
more sense to go that route, to use one of the many patchwork clones
that support presubmit, or something else.

> > That only works for UML, but I imagine you could do something similar
> > for users running tests under qemu.
>
> Or containers.
>
> > >     - Could we have a way to query: give me log for only kunit module
> > >       named "foo"?
> >
> > Yeah, I think that would make sense as part of the hermeticity thing I
> > mentioned above.
> >
> > Hope that seems reasonable!
>
> All groovy. Thanks for the details!

[1] https://kunit-review.googlesource.com/c/linux/+/1809/2#message-c243e1c9086d9432d2dcabc67a42a977b8a020ff
Stephen Boyd June 27, 2019, 6:16 p.m. UTC | #15
Quoting Brendan Higgins (2019-06-26 16:00:40)
> On Tue, Jun 25, 2019 at 8:41 PM Stephen Boyd <sboyd@kernel.org> wrote:
> 
> > scenario like below, but where it is a problem. There could be three
> > CPUs, or even one CPU and three threads if you want to describe the
> > extra thread scenario.
> >
> > Here's my scenario where it isn't needed:
> >
> >     CPU0                                      CPU1
> >     ----                                      ----
> >     kunit_run_test(&test)
> >                                               test_case_func()
> >                                                 ....
> >                                               [mock hardirq]
> >                                                 kunit_set_success(&test)
> >                                               [hardirq ends]
> >                                                 ...
> >                                                 complete(&test_done)
> >       wait_for_completion(&test_done)
> >       kunit_get_success(&test)
> >
> > We don't need to care about having locking here because success or
> > failure only happens in one place and it's synchronized with the
> > completion.
> 
> Here is the scenario I am concerned about:
> 
> CPU0                      CPU1                       CPU2
> ----                      ----                       ----
> kunit_run_test(&test)
>                           test_case_func()
>                             ....
>                             schedule_work(foo_func)
>                           [mock hardirq]             foo_func()
>                             ...                        ...
>                             kunit_set_success(false)   kunit_set_success(false)
>                           [hardirq ends]               ...
>                             ...
>                             complete(&test_done)
>   wait_for_completion(...)
>   kunit_get_success(&test)
> 
> In my scenario, both CPU1 and CPU2 update the success status of the
> test simultaneously, even though they are setting it to the same
> value. If my understanding is correct, this could result in a
> write-tear on some architectures in some circumstances. I suppose we
> could just make it an atomic boolean, but I figured locking is also
> fine, and generally preferred.

This is what we have WRITE_ONCE() and READ_ONCE() for. Maybe you could
just use that in the getter and setters and remove the lock if it isn't
used for anything else.

It may also be a good idea to have a kunit_fail_test() API that fails
the test passed in with a WRITE_ONCE(false). Otherwise, the test is
assumed successful and it isn't even possible for a test to change the
state from failure to success due to a logical error because the API
isn't available. Then we don't really need to have a generic
kunit_set_success() function at all. We could have a kunit_test_failed()
function too that replaces the kunit_get_success() function. That would
read better in an if condition.
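
To make that concrete, here is a rough sketch of what I am picturing;
the function names follow the suggestion above and are illustrative
rather than a final API:

static void kunit_fail_test(struct kunit *test)
{
	/* Failure is sticky: a test starts out successful and can only fail. */
	WRITE_ONCE(test->success, false);
}

static bool kunit_test_failed(struct kunit *test)
{
	return !READ_ONCE(test->success);
}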

> 
> Also, to be clear, I am on board with dropping the IRQ stuff for now.
> I am fine moving to a mutex for the time being.
> 

Ok.
Brendan Higgins June 28, 2019, 8:09 a.m. UTC | #16
On Thu, Jun 27, 2019 at 11:16 AM Stephen Boyd <sboyd@kernel.org> wrote:
>
> Quoting Brendan Higgins (2019-06-26 16:00:40)
> > On Tue, Jun 25, 2019 at 8:41 PM Stephen Boyd <sboyd@kernel.org> wrote:
> >
> > > scenario like below, but where it is a problem. There could be three
> > > CPUs, or even one CPU and three threads if you want to describe the
> > > extra thread scenario.
> > >
> > > Here's my scenario where it isn't needed:
> > >
> > >     CPU0                                      CPU1
> > >     ----                                      ----
> > >     kunit_run_test(&test)
> > >                                               test_case_func()
> > >                                                 ....
> > >                                               [mock hardirq]
> > >                                                 kunit_set_success(&test)
> > >                                               [hardirq ends]
> > >                                                 ...
> > >                                                 complete(&test_done)
> > >       wait_for_completion(&test_done)
> > >       kunit_get_success(&test)
> > >
> > > We don't need to care about having locking here because success or
> > > failure only happens in one place and it's synchronized with the
> > > completion.
> >
> > Here is the scenario I am concerned about:
> >
> > CPU0                      CPU1                       CPU2
> > ----                      ----                       ----
> > kunit_run_test(&test)
> >                           test_case_func()
> >                             ....
> >                             schedule_work(foo_func)
> >                           [mock hardirq]             foo_func()
> >                             ...                        ...
> >                             kunit_set_success(false)   kunit_set_success(false)
> >                           [hardirq ends]               ...
> >                             ...
> >                             complete(&test_done)
> >   wait_for_completion(...)
> >   kunit_get_success(&test)
> >
> > In my scenario, both CPU1 and CPU2 update the success status of the
> > test simultaneously, even though they are setting it to the same
> > value. If my understanding is correct, this could result in a
> > write-tear on some architectures in some circumstances. I suppose we
> > could just make it an atomic boolean, but I figured locking is also
> > fine, and generally preferred.
>
> This is what we have WRITE_ONCE() and READ_ONCE() for. Maybe you could
> just use that in the getter and setters and remove the lock if it isn't
> used for anything else.
>
> It may also be a good idea to have a kunit_fail_test() API that fails
> the test passed in with a WRITE_ONCE(false). Otherwise, the test is
> assumed successful and it isn't even possible for a test to change the
> state from failure to success due to a logical error because the API
> isn't available. Then we don't really need to have a generic
> kunit_set_success() function at all. We could have a kunit_test_failed()
> function too that replaces the kunit_get_success() function. That would
> read better in an if condition.

You know what, I think you are right.

Sorry, for not realizing this earlier, I think you mentioned something
along these lines a long time ago.

Thanks for your patience!

> >
> > Also, to be clear, I am on board with dropping the IRQ stuff for now.
> > I am fine moving to a mutex for the time being.
> >
>
> Ok.

Thanks!
diff mbox series

Patch

diff --git a/include/kunit/test.h b/include/kunit/test.h
new file mode 100644
index 0000000000000..8476b3d371cb9
--- /dev/null
+++ b/include/kunit/test.h
@@ -0,0 +1,161 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Base unit test (KUnit) API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_TEST_H
+#define _KUNIT_TEST_H
+
+#include <linux/types.h>
+
+struct kunit;
+
+/**
+ * struct kunit_case - represents an individual test case.
+ * @run_case: the function representing the actual test case.
+ * @name: the name of the test case.
+ *
+ * A test case is a function with the signature ``void (*)(struct kunit *)``
+ * that makes expectations (see KUNIT_EXPECT_TRUE()) about code under test. Each
+ * test case is associated with a &struct kunit_module and will be run after the
+ * module's init function and followed by the module's exit function.
+ *
+ * A test case should be static and should only be created with the KUNIT_CASE()
+ * macro; additionally, every array of test cases should be terminated with an
+ * empty test case.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ *	void add_test_basic(struct kunit *test)
+ *	{
+ *		KUNIT_EXPECT_EQ(test, 1, add(1, 0));
+ *		KUNIT_EXPECT_EQ(test, 2, add(1, 1));
+ *		KUNIT_EXPECT_EQ(test, 0, add(-1, 1));
+ *		KUNIT_EXPECT_EQ(test, INT_MAX, add(0, INT_MAX));
+ *		KUNIT_EXPECT_EQ(test, -1, add(INT_MAX, INT_MIN));
+ *	}
+ *
+ *	static struct kunit_case example_test_cases[] = {
+ *		KUNIT_CASE(add_test_basic),
+ *		{}
+ *	};
+ *
+ */
+struct kunit_case {
+	void (*run_case)(struct kunit *test);
+	const char *name;
+
+	/* private: internal use only. */
+	bool success;
+};
+
+/**
+ * KUNIT_CASE - A helper for creating a &struct kunit_case
+ * @test_name: a reference to a test case function.
+ *
+ * Takes a symbol for a function representing a test case and creates a
+ * &struct kunit_case object from it. See the documentation for
+ * &struct kunit_case for an example on how to use it.
+ */
+#define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }
+
+/**
+ * struct kunit_module - describes a related collection of &struct kunit_case s.
+ * @name: the name of the test. Purely informational.
+ * @init: called before every test case.
+ * @exit: called after every test case.
+ * @test_cases: a null terminated array of test cases.
+ *
+ * A kunit_module is a collection of related &struct kunit_case s, such that
+ * @init is called before every test case and @exit is called after every test
+ * case, similar to the notion of a *test fixture* or a *test class* in other
+ * unit testing frameworks like JUnit or Googletest.
+ *
+ * Every &struct kunit_case must be associated with a kunit_module for KUnit to
+ * run it.
+ */
+struct kunit_module {
+	const char name[256];
+	int (*init)(struct kunit *test);
+	void (*exit)(struct kunit *test);
+	struct kunit_case *test_cases;
+};
+
+/**
+ * struct kunit - represents a running instance of a test.
+ * @priv: for user to store arbitrary data. Commonly used to pass data created
+ * in the init function (see &struct kunit_module).
+ *
+ * Used to store information about the current context under which the test is
+ * running. Most of this data is private and should only be accessed indirectly
+ * via public functions; the one exception is @priv which can be used by the
+ * test writer to store arbitrary data.
+ */
+struct kunit {
+	void *priv;
+
+	/* private: internal use only. */
+	const char *name; /* Read only after initialization! */
+	spinlock_t lock; /* Guards all mutable test state. */
+	bool success; /* Protected by lock. */
+};
+
+void kunit_init_test(struct kunit *test, const char *name);
+
+int kunit_run_tests(struct kunit_module *module);
+
+/**
+ * module_test() - used to register a &struct kunit_module with KUnit.
+ * @module: a statically allocated &struct kunit_module.
+ *
+ * Registers @module with the test framework. See &struct kunit_module for more
+ * information.
+ */
+#define module_test(module) \
+		static int module_kunit_init##module(void) \
+		{ \
+			return kunit_run_tests(&module); \
+		} \
+		late_initcall(module_kunit_init##module)
+
+void __printf(3, 4) kunit_printk(const char *level,
+				 const struct kunit *test,
+				 const char *fmt, ...);
+
+/**
+ * kunit_info() - Prints an INFO level message associated with the current test.
+ * @test: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Prints an info level message associated with the test module being run. Takes
+ * a variable number of format parameters just like printk().
+ */
+#define kunit_info(test, fmt, ...) \
+		kunit_printk(KERN_INFO, test, fmt, ##__VA_ARGS__)
+
+/**
+ * kunit_warn() - Prints a WARN level message associated with the current test.
+ * @test: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Prints a warning level message.
+ */
+#define kunit_warn(test, fmt, ...) \
+		kunit_printk(KERN_WARNING, test, fmt, ##__VA_ARGS__)
+
+/**
+ * kunit_err() - Prints an ERROR level message associated with the current test.
+ * @test: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Prints an error level message.
+ */
+#define kunit_err(test, fmt, ...) \
+		kunit_printk(KERN_ERR, test, fmt, ##__VA_ARGS__)
+
+#endif /* _KUNIT_TEST_H */
diff --git a/kunit/Kconfig b/kunit/Kconfig
new file mode 100644
index 0000000000000..330ae83527c23
--- /dev/null
+++ b/kunit/Kconfig
@@ -0,0 +1,17 @@ 
+#
+# KUnit base configuration
+#
+
+menu "KUnit support"
+
+config KUNIT
+	bool "Enable support for unit tests (KUnit)"
+	help
+	  Enables support for kernel unit tests (KUnit), a lightweight unit
+	  testing and mocking framework for the Linux kernel. These tests can
+	  be run locally on a developer's workstation without a VM or special
+	  hardware when using UML, and can also be used on most other
+	  architectures. For more information, please see
+	  Documentation/dev-tools/kunit/.
+
+endmenu
diff --git a/kunit/Makefile b/kunit/Makefile
new file mode 100644
index 0000000000000..5efdc4dea2c08
--- /dev/null
+++ b/kunit/Makefile
@@ -0,0 +1 @@ 
+obj-$(CONFIG_KUNIT) +=			test.o
diff --git a/kunit/test.c b/kunit/test.c
new file mode 100644
index 0000000000000..d05d254f1521f
--- /dev/null
+++ b/kunit/test.c
@@ -0,0 +1,210 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Base unit test (KUnit) API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <linux/sched/debug.h>
+#include <kunit/test.h>
+
+static bool kunit_get_success(struct kunit *test)
+{
+	unsigned long flags;
+	bool success;
+
+	spin_lock_irqsave(&test->lock, flags);
+	success = test->success;
+	spin_unlock_irqrestore(&test->lock, flags);
+
+	return success;
+}
+
+static void kunit_set_success(struct kunit *test, bool success)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&test->lock, flags);
+	test->success = success;
+	spin_unlock_irqrestore(&test->lock, flags);
+}
+
+static int kunit_vprintk_emit(int level, const char *fmt, va_list args)
+{
+	return vprintk_emit(0, level, NULL, 0, fmt, args);
+}
+
+static int kunit_printk_emit(int level, const char *fmt, ...)
+{
+	va_list args;
+	int ret;
+
+	va_start(args, fmt);
+	ret = kunit_vprintk_emit(level, fmt, args);
+	va_end(args);
+
+	return ret;
+}
+
+static void kunit_vprintk(const struct kunit *test,
+			  const char *level,
+			  struct va_format *vaf)
+{
+	kunit_printk_emit(level[1] - '0', "\t# %s: %pV", test->name, vaf);
+}
+
+static bool kunit_has_printed_tap_version;
+
+static void kunit_print_tap_version(void)
+{
+	if (!kunit_has_printed_tap_version) {
+		kunit_printk_emit(LOGLEVEL_INFO, "TAP version 14\n");
+		kunit_has_printed_tap_version = true;
+	}
+}
+
+static size_t kunit_test_cases_len(struct kunit_case *test_cases)
+{
+	struct kunit_case *test_case;
+	size_t len = 0;
+
+	for (test_case = test_cases; test_case->run_case; test_case++)
+		len++;
+
+	return len;
+}
+
+static void kunit_print_subtest_start(struct kunit_module *module)
+{
+	kunit_print_tap_version();
+	kunit_printk_emit(LOGLEVEL_INFO, "\t# Subtest: %s\n", module->name);
+	kunit_printk_emit(LOGLEVEL_INFO,
+			  "\t1..%zd\n",
+			  kunit_test_cases_len(module->test_cases));
+}
+
+static void kunit_print_ok_not_ok(bool should_indent,
+				  bool is_ok,
+				  size_t test_number,
+				  const char *description)
+{
+	const char *indent, *ok_not_ok;
+
+	if (should_indent)
+		indent = "\t";
+	else
+		indent = "";
+
+	if (is_ok)
+		ok_not_ok = "ok";
+	else
+		ok_not_ok = "not ok";
+
+	kunit_printk_emit(LOGLEVEL_INFO,
+			  "%s%s %zd - %s\n",
+			  indent, ok_not_ok, test_number, description);
+}
+
+static bool kunit_module_has_succeeded(struct kunit_module *module)
+{
+	const struct kunit_case *test_case;
+	bool success = true;
+
+	for (test_case = module->test_cases; test_case->run_case; test_case++)
+		if (!test_case->success) {
+			success = false;
+			break;
+		}
+
+	return success;
+}
+
+static size_t kunit_module_counter = 1;
+
+static void kunit_print_subtest_end(struct kunit_module *module)
+{
+	kunit_print_ok_not_ok(false,
+			      kunit_module_has_succeeded(module),
+			      kunit_module_counter++,
+			      module->name);
+}
+
+static void kunit_print_test_case_ok_not_ok(struct kunit_case *test_case,
+					    size_t test_number)
+{
+	kunit_print_ok_not_ok(true,
+			      test_case->success,
+			      test_number,
+			      test_case->name);
+}
+
+void kunit_init_test(struct kunit *test, const char *name)
+{
+	spin_lock_init(&test->lock);
+	test->name = name;
+	test->success = true;
+}
+
+/*
+ * Performs all logic to run a test case.
+ */
+static void kunit_run_case(struct kunit_module *module,
+			   struct kunit_case *test_case)
+{
+	struct kunit test;
+	int ret = 0;
+
+	kunit_init_test(&test, test_case->name);
+
+	if (module->init) {
+		ret = module->init(&test);
+		if (ret) {
+			kunit_err(&test, "failed to initialize: %d\n", ret);
+			kunit_set_success(&test, false);
+			return;
+		}
+	}
+
+	if (!ret)
+		test_case->run_case(&test);
+
+	if (module->exit)
+		module->exit(&test);
+
+	test_case->success = kunit_get_success(&test);
+}
+
+int kunit_run_tests(struct kunit_module *module)
+{
+	struct kunit_case *test_case;
+	size_t test_case_count = 1;
+
+	kunit_print_subtest_start(module);
+
+	for (test_case = module->test_cases; test_case->run_case; test_case++) {
+		kunit_run_case(module, test_case);
+		kunit_print_test_case_ok_not_ok(test_case, test_case_count++);
+	}
+
+	kunit_print_subtest_end(module);
+
+	return 0;
+}
+
+void kunit_printk(const char *level,
+		  const struct kunit *test,
+		  const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	kunit_vprintk(test, level, &vaf);
+
+	va_end(args);
+}
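
For readers skimming the patch, here is a minimal sketch of how this API
is intended to be used. Expectation macros such as KUNIT_EXPECT_TRUE()
only arrive in later patches of the series, so this example exercises
just the runner and logging, and all names in it are illustrative:

#include <kunit/test.h>

static void example_simple_test(struct kunit *test)
{
	/* Expectations (KUNIT_EXPECT_*) are added by later patches. */
	kunit_info(test, "running the example test case\n");
}

static int example_test_init(struct kunit *test)
{
	kunit_info(test, "initializing\n");
	return 0;
}

static void example_test_exit(struct kunit *test)
{
	kunit_info(test, "cleaning up\n");
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_simple_test),
	{}
};

static struct kunit_module example_test_module = {
	.name = "example",
	.init = example_test_init,
	.exit = example_test_exit,
	.test_cases = example_test_cases,
};
module_test(example_test_module);

Each test case gets a fresh struct kunit, the module's init and exit run
around every case, and the results are reported in the TAP-style output
produced by kunit_run_tests().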