diff mbox series

[SRU,B,1/1] UBUNTU: SAUCE: Revert "s390/archrandom: simplify back to earlier design and initialize earlier"

Message ID 20221027152921.448751-2-frank.heimes@canonical.com
State New
Headers show
Series Ubuntu 18.04 kernel 4.15.0-194 crashes on IPL (LP: 1994601) | expand

Commit Message

Frank Heimes Oct. 27, 2022, 3:29 p.m. UTC
BugLink: https://bugs.launchpad.net/bugs/1994601

From: Frank Heimes <frank.heimes@canonical.com>

This reverts commit 6edb63a7b6cd57825e47cf6a8600b694a19f0d90.

In LP#1994601 it's reported that 6edb63a7b6cd breaks IPL (boot) on IBM zSystems
generation z14 and newer (however, z13 is fine).
Hence reverting this patch to unbreak and re-enable IPL.
Due to slightly changed context over time, the revert needed minor adjustments.

Signed-off-by: Frank Heimes <frank.heimes@canonical.com>
---
 arch/s390/crypto/arch_random.c     | 111 ++++++++++++++++++++++++++++-
 arch/s390/include/asm/archrandom.h |  27 +++----
 arch/s390/kernel/setup.c           |   5 --
 3 files changed, 121 insertions(+), 22 deletions(-)

Comments

Luke Nowakowski-Krijger Oct. 27, 2022, 9:27 p.m. UTC | #1
I applied this patch but decided to split it up into two reverts that
revert both this patch and the "s390/archrandom: prevent CPACF trng
invocations in interrupt context" that was a part of the stable updates
this cycle. This extra patch was why you needed to do context adjustments.
I decided to just go ahead with it so we can get things moving.

The source diff is exactly the same; I just decided to split it up into two
reverts because one affects the last cycle and what we have in updates,
while the other one is in the current cycle. So when respinning I will
apply the reverts in the appropriate cycle/place for clarity.

Thanks,
- Luke

On Thu, Oct 27, 2022 at 8:30 AM <frank.heimes@canonical.com> wrote:

> BugLink: https://bugs.launchpad.net/bugs/1994601
>
> From: Frank Heimes <frank.heimes@canonical.com>
>
> This reverts commit 6edb63a7b6cd57825e47cf6a8600b694a19f0d90.
>
> In LP#1994601 it's reported that 6edb63a7b6cd breaks IPL (boot) on IBM
> zSystems
> generation z14 and newer (however, z13 is fine).
> Hence reverting this patch to unbreak and re-enable IPL.
> Due to slightly changed context over time, the revert needed minor
> adjustments.
>
> Signed-off-by: Frank Heimes <frank.heimes@canonical.com>
> ---
>  arch/s390/crypto/arch_random.c     | 111 ++++++++++++++++++++++++++++-
>  arch/s390/include/asm/archrandom.h |  27 +++----
>  arch/s390/kernel/setup.c           |   5 --
>  3 files changed, 121 insertions(+), 22 deletions(-)
>
> diff --git a/arch/s390/crypto/arch_random.c
> b/arch/s390/crypto/arch_random.c
> index 1f2d40993c4d..4cbb4b6d85a8 100644
> --- a/arch/s390/crypto/arch_random.c
> +++ b/arch/s390/crypto/arch_random.c
> @@ -2,17 +2,126 @@
>  /*
>   * s390 arch random implementation.
>   *
> - * Copyright IBM Corp. 2017, 2020
> + * Copyright IBM Corp. 2017, 2018
>   * Author(s): Harald Freudenberger
> + *
> + * The s390_arch_random_generate() function may be called from random.c
> + * in interrupt context. So this implementation does the best to be very
> + * fast. There is a buffer of random data which is asynchronously checked
> + * and filled by a workqueue thread.
> + * If there are enough bytes in the buffer the s390_arch_random_generate()
> + * just delivers these bytes. Otherwise false is returned until the
> + * worker thread refills the buffer.
> + * The worker fills the rng buffer by pulling fresh entropy from the
> + * high quality (but slow) true hardware random generator. This entropy
> + * is then spread over the buffer with an pseudo random generator PRNG.
> + * As the arch_get_random_seed_long() fetches 8 bytes and the calling
> + * function add_interrupt_randomness() counts this as 1 bit entropy the
> + * distribution needs to make sure there is in fact 1 bit entropy
> contained
> + * in 8 bytes of the buffer. The current values pull 32 byte entropy
> + * and scatter this into a 2048 byte buffer. So 8 byte in the buffer
> + * will contain 1 bit of entropy.
> + * The worker thread is rescheduled based on the charge level of the
> + * buffer but at least with 500 ms delay to avoid too much CPU
> consumption.
> + * So the max. amount of rng data delivered via arch_get_random_seed is
> + * limited to 4k bytes per second.
>   */
>
>  #include <linux/kernel.h>
>  #include <linux/atomic.h>
>  #include <linux/random.h>
> +#include <linux/slab.h>
>  #include <linux/static_key.h>
> +#include <linux/workqueue.h>
>  #include <asm/cpacf.h>
>
>  DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
>
>  atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
>  EXPORT_SYMBOL(s390_arch_random_counter);
> +
> +#define ARCH_REFILL_TICKS (HZ/2)
> +#define ARCH_PRNG_SEED_SIZE 32
> +#define ARCH_RNG_BUF_SIZE 2048
> +
> +static DEFINE_SPINLOCK(arch_rng_lock);
> +static u8 *arch_rng_buf;
> +static unsigned int arch_rng_buf_idx;
> +
> +static void arch_rng_refill_buffer(struct work_struct *);
> +static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
> +
> +bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
> +{
> +       /* max hunk is ARCH_RNG_BUF_SIZE */
> +       if (nbytes > ARCH_RNG_BUF_SIZE)
> +               return false;
> +
> +       /* lock rng buffer */
> +       if (!spin_trylock(&arch_rng_lock))
> +               return false;
> +
> +       /* try to resolve the requested amount of bytes from the buffer */
> +       arch_rng_buf_idx -= nbytes;
> +       if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
> +               memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
> +               atomic64_add(nbytes, &s390_arch_random_counter);
> +               spin_unlock(&arch_rng_lock);
> +               return true;
> +       }
> +
> +       /* not enough bytes in rng buffer, refill is done asynchronously */
> +       spin_unlock(&arch_rng_lock);
> +
> +       return false;
> +}
> +EXPORT_SYMBOL(s390_arch_random_generate);
> +
> +static void arch_rng_refill_buffer(struct work_struct *unused)
> +{
> +       unsigned int delay = ARCH_REFILL_TICKS;
> +
> +       spin_lock(&arch_rng_lock);
> +       if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
> +               /* buffer is exhausted and needs refill */
> +               u8 seed[ARCH_PRNG_SEED_SIZE];
> +               u8 prng_wa[240];
> +               /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
> +               cpacf_trng(NULL, 0, seed, sizeof(seed));
> +               /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
> +               memset(prng_wa, 0, sizeof(prng_wa));
> +               cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
> +                          &prng_wa, NULL, 0, seed, sizeof(seed));
> +               cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
> +                          &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE,
> NULL, 0);
> +               arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
> +       }
> +       delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) /
> ARCH_RNG_BUF_SIZE;
> +       spin_unlock(&arch_rng_lock);
> +
> +       /* kick next check */
> +       queue_delayed_work(system_long_wq, &arch_rng_work, delay);
> +}
> +
> +static int __init s390_arch_random_init(void)
> +{
> +       /* all the needed PRNO subfunctions available ? */
> +       if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
> +           cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
> +
> +               /* alloc arch random working buffer */
> +               arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
> +               if (!arch_rng_buf)
> +                       return -ENOMEM;
> +
> +               /* kick worker queue job to fill the random buffer */
> +               queue_delayed_work(system_long_wq,
> +                                  &arch_rng_work, ARCH_REFILL_TICKS);
> +
> +               /* enable arch random to the outside world */
> +               static_branch_enable(&s390_arch_random_available);
> +       }
> +
> +       return 0;
> +}
> +arch_initcall(s390_arch_random_init);
> diff --git a/arch/s390/include/asm/archrandom.h
> b/arch/s390/include/asm/archrandom.h
> index 4120c428dc37..6ef8857f648f 100644
> --- a/arch/s390/include/asm/archrandom.h
> +++ b/arch/s390/include/asm/archrandom.h
> @@ -2,7 +2,7 @@
>  /*
>   * Kernel interface for the s390 arch_random_* functions
>   *
> - * Copyright IBM Corp. 2017, 2022
> + * Copyright IBM Corp. 2017
>   *
>   * Author: Harald Freudenberger <freude@de.ibm.com>
>   *
> @@ -16,39 +16,34 @@
>  #include <linux/static_key.h>
>  #include <linux/preempt.h>
>  #include <linux/atomic.h>
> -#include <asm/cpacf.h>
>
>  DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
>  extern atomic64_t s390_arch_random_counter;
>
> -static inline bool __must_check arch_get_random_long(unsigned long *v)
> +bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
> +
> +static inline bool arch_get_random_long(unsigned long *v)
>  {
>         return false;
>  }
>
> -static inline bool __must_check arch_get_random_int(unsigned int *v)
> +static inline bool arch_get_random_int(unsigned int *v)
>  {
>         return false;
>  }
>
> -static inline bool __must_check arch_get_random_seed_long(unsigned long
> *v)
> +static inline bool arch_get_random_seed_long(unsigned long *v)
>  {
> -       if (static_branch_likely(&s390_arch_random_available) &&
> -           in_task()) {
> -               cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
> -               atomic64_add(sizeof(*v), &s390_arch_random_counter);
> -               return true;
> +       if (static_branch_likely(&s390_arch_random_available)) {
> +               return s390_arch_random_generate((u8 *)v, sizeof(*v));
>         }
>         return false;
>  }
>
> -static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
> +static inline bool arch_get_random_seed_int(unsigned int *v)
>  {
> -       if (static_branch_likely(&s390_arch_random_available) &&
> -           in_task()) {
> -               cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
> -               atomic64_add(sizeof(*v), &s390_arch_random_counter);
> -               return true;
> +       if (static_branch_likely(&s390_arch_random_available)) {
> +               return s390_arch_random_generate((u8 *)v, sizeof(*v));
>         }
>         return false;
>  }
> diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
> index 933300e2ad38..a59a730c3f11 100644
> --- a/arch/s390/kernel/setup.c
> +++ b/arch/s390/kernel/setup.c
> @@ -861,11 +861,6 @@ static void __init setup_randomness(void)
>         if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
>                 add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) *
> vmms->count);
>         memblock_free((unsigned long) vmms, PAGE_SIZE);
> -
> -#ifdef CONFIG_ARCH_RANDOM
> -       if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
> -               static_branch_enable(&s390_arch_random_available);
> -#endif
>  }
>
>  /*
> --
> 2.25.1
>
>
> --
> kernel-team mailing list
> kernel-team@lists.ubuntu.com
> https://lists.ubuntu.com/mailman/listinfo/kernel-team
>
Luke Nowakowski-Krijger Oct. 27, 2022, 10:37 p.m. UTC | #2
Follow up to this just so we are all aware,

Not going to do the second revert because obviously the first one getting
reverted means the second apply+revert doesn't make sense. Just going to
drop "s390/archrandom: prevent CPACF trng invocations in interrupt context"
from the current cycle stable updates and going to leave breadcrumbs on the
stable updates LP bug that this was dropped.

- Luke

On Thu, Oct 27, 2022 at 2:27 PM Luke Nowakowski-Krijger <
luke.nowakowskikrijger@canonical.com> wrote:

> I applied this patch but decided to split it up into two reverts that
> revert both this patch and the "s390/archrandom: prevent CPACF trng
> invocations in interrupt context" that was a part of the stable updates
> this cycle. This extra patch was why you needed to do context adjustments.
> I decided to just go ahead with it so we can get things moving.
>
> The source diff is exactly the same, I just decided to split it up into
> two reverts because one affects the last cycle and what we have in updates,
> while the other one is in the current cycle. So when respinning I will
> apply the reverts in the appropriate cycle/place for clarity.
>
> Thanks,
> - Luke
>
> On Thu, Oct 27, 2022 at 8:30 AM <frank.heimes@canonical.com> wrote:
>
>> BugLink: https://bugs.launchpad.net/bugs/1994601
>>
>> From: Frank Heimes <frank.heimes@canonical.com>
>>
>> This reverts commit 6edb63a7b6cd57825e47cf6a8600b694a19f0d90.
>>
>> In LP#1994601 it's reported that 6edb63a7b6cd breaks IPL (boot) on IBM
>> zSystems
>> generation z14 and newer (however, z13 is fine).
>> Hence reverting this patch to unbreak and re-enable IPL.
>> Due to slightly changed context over time, the revert needed minor
>> adjustments.
>>
>> Signed-off-by: Frank Heimes <frank.heimes@canonical.com>
>> ---
>>  arch/s390/crypto/arch_random.c     | 111 ++++++++++++++++++++++++++++-
>>  arch/s390/include/asm/archrandom.h |  27 +++----
>>  arch/s390/kernel/setup.c           |   5 --
>>  3 files changed, 121 insertions(+), 22 deletions(-)
>>
>> diff --git a/arch/s390/crypto/arch_random.c
>> b/arch/s390/crypto/arch_random.c
>> index 1f2d40993c4d..4cbb4b6d85a8 100644
>> --- a/arch/s390/crypto/arch_random.c
>> +++ b/arch/s390/crypto/arch_random.c
>> @@ -2,17 +2,126 @@
>>  /*
>>   * s390 arch random implementation.
>>   *
>> - * Copyright IBM Corp. 2017, 2020
>> + * Copyright IBM Corp. 2017, 2018
>>   * Author(s): Harald Freudenberger
>> + *
>> + * The s390_arch_random_generate() function may be called from random.c
>> + * in interrupt context. So this implementation does the best to be very
>> + * fast. There is a buffer of random data which is asynchronously checked
>> + * and filled by a workqueue thread.
>> + * If there are enough bytes in the buffer the
>> s390_arch_random_generate()
>> + * just delivers these bytes. Otherwise false is returned until the
>> + * worker thread refills the buffer.
>> + * The worker fills the rng buffer by pulling fresh entropy from the
>> + * high quality (but slow) true hardware random generator. This entropy
>> + * is then spread over the buffer with an pseudo random generator PRNG.
>> + * As the arch_get_random_seed_long() fetches 8 bytes and the calling
>> + * function add_interrupt_randomness() counts this as 1 bit entropy the
>> + * distribution needs to make sure there is in fact 1 bit entropy
>> contained
>> + * in 8 bytes of the buffer. The current values pull 32 byte entropy
>> + * and scatter this into a 2048 byte buffer. So 8 byte in the buffer
>> + * will contain 1 bit of entropy.
>> + * The worker thread is rescheduled based on the charge level of the
>> + * buffer but at least with 500 ms delay to avoid too much CPU
>> consumption.
>> + * So the max. amount of rng data delivered via arch_get_random_seed is
>> + * limited to 4k bytes per second.
>>   */
>>
>>  #include <linux/kernel.h>
>>  #include <linux/atomic.h>
>>  #include <linux/random.h>
>> +#include <linux/slab.h>
>>  #include <linux/static_key.h>
>> +#include <linux/workqueue.h>
>>  #include <asm/cpacf.h>
>>
>>  DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
>>
>>  atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
>>  EXPORT_SYMBOL(s390_arch_random_counter);
>> +
>> +#define ARCH_REFILL_TICKS (HZ/2)
>> +#define ARCH_PRNG_SEED_SIZE 32
>> +#define ARCH_RNG_BUF_SIZE 2048
>> +
>> +static DEFINE_SPINLOCK(arch_rng_lock);
>> +static u8 *arch_rng_buf;
>> +static unsigned int arch_rng_buf_idx;
>> +
>> +static void arch_rng_refill_buffer(struct work_struct *);
>> +static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
>> +
>> +bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
>> +{
>> +       /* max hunk is ARCH_RNG_BUF_SIZE */
>> +       if (nbytes > ARCH_RNG_BUF_SIZE)
>> +               return false;
>> +
>> +       /* lock rng buffer */
>> +       if (!spin_trylock(&arch_rng_lock))
>> +               return false;
>> +
>> +       /* try to resolve the requested amount of bytes from the buffer */
>> +       arch_rng_buf_idx -= nbytes;
>> +       if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
>> +               memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
>> +               atomic64_add(nbytes, &s390_arch_random_counter);
>> +               spin_unlock(&arch_rng_lock);
>> +               return true;
>> +       }
>> +
>> +       /* not enough bytes in rng buffer, refill is done asynchronously
>> */
>> +       spin_unlock(&arch_rng_lock);
>> +
>> +       return false;
>> +}
>> +EXPORT_SYMBOL(s390_arch_random_generate);
>> +
>> +static void arch_rng_refill_buffer(struct work_struct *unused)
>> +{
>> +       unsigned int delay = ARCH_REFILL_TICKS;
>> +
>> +       spin_lock(&arch_rng_lock);
>> +       if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
>> +               /* buffer is exhausted and needs refill */
>> +               u8 seed[ARCH_PRNG_SEED_SIZE];
>> +               u8 prng_wa[240];
>> +               /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
>> +               cpacf_trng(NULL, 0, seed, sizeof(seed));
>> +               /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
>> +               memset(prng_wa, 0, sizeof(prng_wa));
>> +               cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
>> +                          &prng_wa, NULL, 0, seed, sizeof(seed));
>> +               cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
>> +                          &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE,
>> NULL, 0);
>> +               arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
>> +       }
>> +       delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) /
>> ARCH_RNG_BUF_SIZE;
>> +       spin_unlock(&arch_rng_lock);
>> +
>> +       /* kick next check */
>> +       queue_delayed_work(system_long_wq, &arch_rng_work, delay);
>> +}
>> +
>> +static int __init s390_arch_random_init(void)
>> +{
>> +       /* all the needed PRNO subfunctions available ? */
>> +       if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
>> +           cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
>> +
>> +               /* alloc arch random working buffer */
>> +               arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
>> +               if (!arch_rng_buf)
>> +                       return -ENOMEM;
>> +
>> +               /* kick worker queue job to fill the random buffer */
>> +               queue_delayed_work(system_long_wq,
>> +                                  &arch_rng_work, ARCH_REFILL_TICKS);
>> +
>> +               /* enable arch random to the outside world */
>> +               static_branch_enable(&s390_arch_random_available);
>> +       }
>> +
>> +       return 0;
>> +}
>> +arch_initcall(s390_arch_random_init);
>> diff --git a/arch/s390/include/asm/archrandom.h
>> b/arch/s390/include/asm/archrandom.h
>> index 4120c428dc37..6ef8857f648f 100644
>> --- a/arch/s390/include/asm/archrandom.h
>> +++ b/arch/s390/include/asm/archrandom.h
>> @@ -2,7 +2,7 @@
>>  /*
>>   * Kernel interface for the s390 arch_random_* functions
>>   *
>> - * Copyright IBM Corp. 2017, 2022
>> + * Copyright IBM Corp. 2017
>>   *
>>   * Author: Harald Freudenberger <freude@de.ibm.com>
>>   *
>> @@ -16,39 +16,34 @@
>>  #include <linux/static_key.h>
>>  #include <linux/preempt.h>
>>  #include <linux/atomic.h>
>> -#include <asm/cpacf.h>
>>
>>  DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
>>  extern atomic64_t s390_arch_random_counter;
>>
>> -static inline bool __must_check arch_get_random_long(unsigned long *v)
>> +bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
>> +
>> +static inline bool arch_get_random_long(unsigned long *v)
>>  {
>>         return false;
>>  }
>>
>> -static inline bool __must_check arch_get_random_int(unsigned int *v)
>> +static inline bool arch_get_random_int(unsigned int *v)
>>  {
>>         return false;
>>  }
>>
>> -static inline bool __must_check arch_get_random_seed_long(unsigned long
>> *v)
>> +static inline bool arch_get_random_seed_long(unsigned long *v)
>>  {
>> -       if (static_branch_likely(&s390_arch_random_available) &&
>> -           in_task()) {
>> -               cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
>> -               atomic64_add(sizeof(*v), &s390_arch_random_counter);
>> -               return true;
>> +       if (static_branch_likely(&s390_arch_random_available)) {
>> +               return s390_arch_random_generate((u8 *)v, sizeof(*v));
>>         }
>>         return false;
>>  }
>>
>> -static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
>> +static inline bool arch_get_random_seed_int(unsigned int *v)
>>  {
>> -       if (static_branch_likely(&s390_arch_random_available) &&
>> -           in_task()) {
>> -               cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
>> -               atomic64_add(sizeof(*v), &s390_arch_random_counter);
>> -               return true;
>> +       if (static_branch_likely(&s390_arch_random_available)) {
>> +               return s390_arch_random_generate((u8 *)v, sizeof(*v));
>>         }
>>         return false;
>>  }
>> diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
>> index 933300e2ad38..a59a730c3f11 100644
>> --- a/arch/s390/kernel/setup.c
>> +++ b/arch/s390/kernel/setup.c
>> @@ -861,11 +861,6 @@ static void __init setup_randomness(void)
>>         if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
>>                 add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) *
>> vmms->count);
>>         memblock_free((unsigned long) vmms, PAGE_SIZE);
>> -
>> -#ifdef CONFIG_ARCH_RANDOM
>> -       if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
>> -               static_branch_enable(&s390_arch_random_available);
>> -#endif
>>  }
>>
>>  /*
>> --
>> 2.25.1
>>
>>
>> --
>> kernel-team mailing list
>> kernel-team@lists.ubuntu.com
>> https://lists.ubuntu.com/mailman/listinfo/kernel-team
>>
>
Frank Heimes Oct. 28, 2022, 6:32 a.m. UTC | #3
Okay, understood - thank you Luke!


On Fri, Oct 28, 2022 at 12:38 AM Luke Nowakowski-Krijger <
luke.nowakowskikrijger@canonical.com> wrote:

> Follow up to this just so we are all aware,
>
> Not going to do the second revert because obviously the first one getting
> reverted means the second apply+revert doesn't make sense. Just going to
> drop "s390/archrandom: prevent CPACF trng invocations in interrupt context"
> from the current cycle stable updates and going to leave breadcrumbs on
> stable updates LP bug that this was dropped.
>
> - Luke
>
> On Thu, Oct 27, 2022 at 2:27 PM Luke Nowakowski-Krijger <
> luke.nowakowskikrijger@canonical.com> wrote:
>
>> I applied this patch but decided to split it up into two reverts that
>> revert both this patch and the "s390/archrandom: prevent CPACF trng
>> invocations in interrupt context" that was a part of the stable updates
>> this cycle. This extra patch was why you needed to do context adjustments.
>> I decided to just go ahead with it so we can get things moving.
>>
>> The source diff is exactly the same, I just decided to split it up into
>> two reverts because one affects the last cycle and what we have in updates,
>> while the other one is in the current cycle. So when respinning I will
>> apply the reverts in the appropriate cycle/place for clarity.
>>
>> Thanks,
>> - Luke
>>
>> On Thu, Oct 27, 2022 at 8:30 AM <frank.heimes@canonical.com> wrote:
>>
>>> BugLink: https://bugs.launchpad.net/bugs/1994601
>>>
>>> From: Frank Heimes <frank.heimes@canonical.com>
>>>
>>> This reverts commit 6edb63a7b6cd57825e47cf6a8600b694a19f0d90.
>>>
>>> In LP#1994601 it's reported that 6edb63a7b6cd breaks IPL (boot) on IBM
>>> zSystems
>>> generation z14 and newer (however, z13 is fine).
>>> Hence reverting this patch to unbreak and re-enable IPL.
>>> Due to slightly changed context over time, the revert needed minor
>>> adjustments.
>>>
>>> Signed-off-by: Frank Heimes <frank.heimes@canonical.com>
>>> ---
>>>  arch/s390/crypto/arch_random.c     | 111 ++++++++++++++++++++++++++++-
>>>  arch/s390/include/asm/archrandom.h |  27 +++----
>>>  arch/s390/kernel/setup.c           |   5 --
>>>  3 files changed, 121 insertions(+), 22 deletions(-)
>>>
>>> diff --git a/arch/s390/crypto/arch_random.c
>>> b/arch/s390/crypto/arch_random.c
>>> index 1f2d40993c4d..4cbb4b6d85a8 100644
>>> --- a/arch/s390/crypto/arch_random.c
>>> +++ b/arch/s390/crypto/arch_random.c
>>> @@ -2,17 +2,126 @@
>>>  /*
>>>   * s390 arch random implementation.
>>>   *
>>> - * Copyright IBM Corp. 2017, 2020
>>> + * Copyright IBM Corp. 2017, 2018
>>>   * Author(s): Harald Freudenberger
>>> + *
>>> + * The s390_arch_random_generate() function may be called from random.c
>>> + * in interrupt context. So this implementation does the best to be very
>>> + * fast. There is a buffer of random data which is asynchronously
>>> checked
>>> + * and filled by a workqueue thread.
>>> + * If there are enough bytes in the buffer the
>>> s390_arch_random_generate()
>>> + * just delivers these bytes. Otherwise false is returned until the
>>> + * worker thread refills the buffer.
>>> + * The worker fills the rng buffer by pulling fresh entropy from the
>>> + * high quality (but slow) true hardware random generator. This entropy
>>> + * is then spread over the buffer with an pseudo random generator PRNG.
>>> + * As the arch_get_random_seed_long() fetches 8 bytes and the calling
>>> + * function add_interrupt_randomness() counts this as 1 bit entropy the
>>> + * distribution needs to make sure there is in fact 1 bit entropy
>>> contained
>>> + * in 8 bytes of the buffer. The current values pull 32 byte entropy
>>> + * and scatter this into a 2048 byte buffer. So 8 byte in the buffer
>>> + * will contain 1 bit of entropy.
>>> + * The worker thread is rescheduled based on the charge level of the
>>> + * buffer but at least with 500 ms delay to avoid too much CPU
>>> consumption.
>>> + * So the max. amount of rng data delivered via arch_get_random_seed is
>>> + * limited to 4k bytes per second.
>>>   */
>>>
>>>  #include <linux/kernel.h>
>>>  #include <linux/atomic.h>
>>>  #include <linux/random.h>
>>> +#include <linux/slab.h>
>>>  #include <linux/static_key.h>
>>> +#include <linux/workqueue.h>
>>>  #include <asm/cpacf.h>
>>>
>>>  DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
>>>
>>>  atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
>>>  EXPORT_SYMBOL(s390_arch_random_counter);
>>> +
>>> +#define ARCH_REFILL_TICKS (HZ/2)
>>> +#define ARCH_PRNG_SEED_SIZE 32
>>> +#define ARCH_RNG_BUF_SIZE 2048
>>> +
>>> +static DEFINE_SPINLOCK(arch_rng_lock);
>>> +static u8 *arch_rng_buf;
>>> +static unsigned int arch_rng_buf_idx;
>>> +
>>> +static void arch_rng_refill_buffer(struct work_struct *);
>>> +static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
>>> +
>>> +bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
>>> +{
>>> +       /* max hunk is ARCH_RNG_BUF_SIZE */
>>> +       if (nbytes > ARCH_RNG_BUF_SIZE)
>>> +               return false;
>>> +
>>> +       /* lock rng buffer */
>>> +       if (!spin_trylock(&arch_rng_lock))
>>> +               return false;
>>> +
>>> +       /* try to resolve the requested amount of bytes from the buffer
>>> */
>>> +       arch_rng_buf_idx -= nbytes;
>>> +       if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
>>> +               memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
>>> +               atomic64_add(nbytes, &s390_arch_random_counter);
>>> +               spin_unlock(&arch_rng_lock);
>>> +               return true;
>>> +       }
>>> +
>>> +       /* not enough bytes in rng buffer, refill is done asynchronously
>>> */
>>> +       spin_unlock(&arch_rng_lock);
>>> +
>>> +       return false;
>>> +}
>>> +EXPORT_SYMBOL(s390_arch_random_generate);
>>> +
>>> +static void arch_rng_refill_buffer(struct work_struct *unused)
>>> +{
>>> +       unsigned int delay = ARCH_REFILL_TICKS;
>>> +
>>> +       spin_lock(&arch_rng_lock);
>>> +       if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
>>> +               /* buffer is exhausted and needs refill */
>>> +               u8 seed[ARCH_PRNG_SEED_SIZE];
>>> +               u8 prng_wa[240];
>>> +               /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
>>> +               cpacf_trng(NULL, 0, seed, sizeof(seed));
>>> +               /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
>>> +               memset(prng_wa, 0, sizeof(prng_wa));
>>> +               cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
>>> +                          &prng_wa, NULL, 0, seed, sizeof(seed));
>>> +               cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
>>> +                          &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE,
>>> NULL, 0);
>>> +               arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
>>> +       }
>>> +       delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) /
>>> ARCH_RNG_BUF_SIZE;
>>> +       spin_unlock(&arch_rng_lock);
>>> +
>>> +       /* kick next check */
>>> +       queue_delayed_work(system_long_wq, &arch_rng_work, delay);
>>> +}
>>> +
>>> +static int __init s390_arch_random_init(void)
>>> +{
>>> +       /* all the needed PRNO subfunctions available ? */
>>> +       if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
>>> +           cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
>>> +
>>> +               /* alloc arch random working buffer */
>>> +               arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
>>> +               if (!arch_rng_buf)
>>> +                       return -ENOMEM;
>>> +
>>> +               /* kick worker queue job to fill the random buffer */
>>> +               queue_delayed_work(system_long_wq,
>>> +                                  &arch_rng_work, ARCH_REFILL_TICKS);
>>> +
>>> +               /* enable arch random to the outside world */
>>> +               static_branch_enable(&s390_arch_random_available);
>>> +       }
>>> +
>>> +       return 0;
>>> +}
>>> +arch_initcall(s390_arch_random_init);
>>> diff --git a/arch/s390/include/asm/archrandom.h
>>> b/arch/s390/include/asm/archrandom.h
>>> index 4120c428dc37..6ef8857f648f 100644
>>> --- a/arch/s390/include/asm/archrandom.h
>>> +++ b/arch/s390/include/asm/archrandom.h
>>> @@ -2,7 +2,7 @@
>>>  /*
>>>   * Kernel interface for the s390 arch_random_* functions
>>>   *
>>> - * Copyright IBM Corp. 2017, 2022
>>> + * Copyright IBM Corp. 2017
>>>   *
>>>   * Author: Harald Freudenberger <freude@de.ibm.com>
>>>   *
>>> @@ -16,39 +16,34 @@
>>>  #include <linux/static_key.h>
>>>  #include <linux/preempt.h>
>>>  #include <linux/atomic.h>
>>> -#include <asm/cpacf.h>
>>>
>>>  DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
>>>  extern atomic64_t s390_arch_random_counter;
>>>
>>> -static inline bool __must_check arch_get_random_long(unsigned long *v)
>>> +bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
>>> +
>>> +static inline bool arch_get_random_long(unsigned long *v)
>>>  {
>>>         return false;
>>>  }
>>>
>>> -static inline bool __must_check arch_get_random_int(unsigned int *v)
>>> +static inline bool arch_get_random_int(unsigned int *v)
>>>  {
>>>         return false;
>>>  }
>>>
>>> -static inline bool __must_check arch_get_random_seed_long(unsigned long
>>> *v)
>>> +static inline bool arch_get_random_seed_long(unsigned long *v)
>>>  {
>>> -       if (static_branch_likely(&s390_arch_random_available) &&
>>> -           in_task()) {
>>> -               cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
>>> -               atomic64_add(sizeof(*v), &s390_arch_random_counter);
>>> -               return true;
>>> +       if (static_branch_likely(&s390_arch_random_available)) {
>>> +               return s390_arch_random_generate((u8 *)v, sizeof(*v));
>>>         }
>>>         return false;
>>>  }
>>>
>>> -static inline bool __must_check arch_get_random_seed_int(unsigned int
>>> *v)
>>> +static inline bool arch_get_random_seed_int(unsigned int *v)
>>>  {
>>> -       if (static_branch_likely(&s390_arch_random_available) &&
>>> -           in_task()) {
>>> -               cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
>>> -               atomic64_add(sizeof(*v), &s390_arch_random_counter);
>>> -               return true;
>>> +       if (static_branch_likely(&s390_arch_random_available)) {
>>> +               return s390_arch_random_generate((u8 *)v, sizeof(*v));
>>>         }
>>>         return false;
>>>  }
>>> diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
>>> index 933300e2ad38..a59a730c3f11 100644
>>> --- a/arch/s390/kernel/setup.c
>>> +++ b/arch/s390/kernel/setup.c
>>> @@ -861,11 +861,6 @@ static void __init setup_randomness(void)
>>>         if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
>>>                 add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) *
>>> vmms->count);
>>>         memblock_free((unsigned long) vmms, PAGE_SIZE);
>>> -
>>> -#ifdef CONFIG_ARCH_RANDOM
>>> -       if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
>>> -               static_branch_enable(&s390_arch_random_available);
>>> -#endif
>>>  }
>>>
>>>  /*
>>> --
>>> 2.25.1
>>>
>>>
>>> --
>>> kernel-team mailing list
>>> kernel-team@lists.ubuntu.com
>>> https://lists.ubuntu.com/mailman/listinfo/kernel-team
>>>
>>
diff mbox series

Patch

diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
index 1f2d40993c4d..4cbb4b6d85a8 100644
--- a/arch/s390/crypto/arch_random.c
+++ b/arch/s390/crypto/arch_random.c
@@ -2,17 +2,126 @@ 
 /*
  * s390 arch random implementation.
  *
- * Copyright IBM Corp. 2017, 2020
+ * Copyright IBM Corp. 2017, 2018
  * Author(s): Harald Freudenberger
+ *
+ * The s390_arch_random_generate() function may be called from random.c
+ * in interrupt context. So this implementation does the best to be very
+ * fast. There is a buffer of random data which is asynchronously checked
+ * and filled by a workqueue thread.
+ * If there are enough bytes in the buffer the s390_arch_random_generate()
+ * just delivers these bytes. Otherwise false is returned until the
+ * worker thread refills the buffer.
+ * The worker fills the rng buffer by pulling fresh entropy from the
+ * high quality (but slow) true hardware random generator. This entropy
+ * is then spread over the buffer with an pseudo random generator PRNG.
+ * As the arch_get_random_seed_long() fetches 8 bytes and the calling
+ * function add_interrupt_randomness() counts this as 1 bit entropy the
+ * distribution needs to make sure there is in fact 1 bit entropy contained
+ * in 8 bytes of the buffer. The current values pull 32 byte entropy
+ * and scatter this into a 2048 byte buffer. So 8 byte in the buffer
+ * will contain 1 bit of entropy.
+ * The worker thread is rescheduled based on the charge level of the
+ * buffer but at least with 500 ms delay to avoid too much CPU consumption.
+ * So the max. amount of rng data delivered via arch_get_random_seed is
+ * limited to 4k bytes per second.
  */
 
 #include <linux/kernel.h>
 #include <linux/atomic.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <linux/static_key.h>
+#include <linux/workqueue.h>
 #include <asm/cpacf.h>
 
 DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
 
 atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
 EXPORT_SYMBOL(s390_arch_random_counter);
+
+#define ARCH_REFILL_TICKS (HZ/2)
+#define ARCH_PRNG_SEED_SIZE 32
+#define ARCH_RNG_BUF_SIZE 2048
+
+static DEFINE_SPINLOCK(arch_rng_lock);
+static u8 *arch_rng_buf;
+static unsigned int arch_rng_buf_idx;
+
+static void arch_rng_refill_buffer(struct work_struct *);
+static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
+
+bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
+{
+	/* max hunk is ARCH_RNG_BUF_SIZE */
+	if (nbytes > ARCH_RNG_BUF_SIZE)
+		return false;
+
+	/* lock rng buffer */
+	if (!spin_trylock(&arch_rng_lock))
+		return false;
+
+	/* try to resolve the requested amount of bytes from the buffer */
+	arch_rng_buf_idx -= nbytes;
+	if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
+		memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
+		atomic64_add(nbytes, &s390_arch_random_counter);
+		spin_unlock(&arch_rng_lock);
+		return true;
+	}
+
+	/* not enough bytes in rng buffer, refill is done asynchronously */
+	spin_unlock(&arch_rng_lock);
+
+	return false;
+}
+EXPORT_SYMBOL(s390_arch_random_generate);
+
+static void arch_rng_refill_buffer(struct work_struct *unused)
+{
+	unsigned int delay = ARCH_REFILL_TICKS;
+
+	spin_lock(&arch_rng_lock);
+	if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
+		/* buffer is exhausted and needs refill */
+		u8 seed[ARCH_PRNG_SEED_SIZE];
+		u8 prng_wa[240];
+		/* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
+		cpacf_trng(NULL, 0, seed, sizeof(seed));
+		/* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
+		memset(prng_wa, 0, sizeof(prng_wa));
+		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
+			   &prng_wa, NULL, 0, seed, sizeof(seed));
+		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
+			   &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
+		arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
+	}
+	delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
+	spin_unlock(&arch_rng_lock);
+
+	/* kick next check */
+	queue_delayed_work(system_long_wq, &arch_rng_work, delay);
+}
+
+static int __init s390_arch_random_init(void)
+{
+	/* all the needed PRNO subfunctions available ? */
+	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
+	    cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
+
+		/* alloc arch random working buffer */
+		arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
+		if (!arch_rng_buf)
+			return -ENOMEM;
+
+		/* kick worker queue job to fill the random buffer */
+		queue_delayed_work(system_long_wq,
+				   &arch_rng_work, ARCH_REFILL_TICKS);
+
+		/* enable arch random to the outside world */
+		static_branch_enable(&s390_arch_random_available);
+	}
+
+	return 0;
+}
+arch_initcall(s390_arch_random_init);
diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
index 4120c428dc37..6ef8857f648f 100644
--- a/arch/s390/include/asm/archrandom.h
+++ b/arch/s390/include/asm/archrandom.h
@@ -2,7 +2,7 @@ 
 /*
  * Kernel interface for the s390 arch_random_* functions
  *
- * Copyright IBM Corp. 2017, 2022
+ * Copyright IBM Corp. 2017
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -16,39 +16,34 @@ 
 #include <linux/static_key.h>
 #include <linux/preempt.h>
 #include <linux/atomic.h>
-#include <asm/cpacf.h>
 
 DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
 extern atomic64_t s390_arch_random_counter;
 
-static inline bool __must_check arch_get_random_long(unsigned long *v)
+bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
+
+static inline bool arch_get_random_long(unsigned long *v)
 {
 	return false;
 }
 
-static inline bool __must_check arch_get_random_int(unsigned int *v)
+static inline bool arch_get_random_int(unsigned int *v)
 {
 	return false;
 }
 
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+static inline bool arch_get_random_seed_long(unsigned long *v)
 {
-	if (static_branch_likely(&s390_arch_random_available) &&
-	    in_task()) {
-		cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
-		atomic64_add(sizeof(*v), &s390_arch_random_counter);
-		return true;
+	if (static_branch_likely(&s390_arch_random_available)) {
+		return s390_arch_random_generate((u8 *)v, sizeof(*v));
 	}
 	return false;
 }
 
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+static inline bool arch_get_random_seed_int(unsigned int *v)
 {
-	if (static_branch_likely(&s390_arch_random_available) &&
-	    in_task()) {
-		cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
-		atomic64_add(sizeof(*v), &s390_arch_random_counter);
-		return true;
+	if (static_branch_likely(&s390_arch_random_available)) {
+		return s390_arch_random_generate((u8 *)v, sizeof(*v));
 	}
 	return false;
 }
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 933300e2ad38..a59a730c3f11 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -861,11 +861,6 @@  static void __init setup_randomness(void)
 	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
 		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
 	memblock_free((unsigned long) vmms, PAGE_SIZE);
-
-#ifdef CONFIG_ARCH_RANDOM
-	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
-		static_branch_enable(&s390_arch_random_available);
-#endif
 }
 
 /*