[SRU,Xenial,1/1] crypto: vmx - Fix sleep-in-atomic bugs

Message ID 346d3b7f39c986b7e010a938c72df9ddd31dcdda.1536170853.git.joseph.salisbury@canonical.com
State: New
Series: crypto: vmx - Fix sleep-in-atomic bugs

Commit Message

Joseph Salisbury Oct. 12, 2018, 6:23 p.m. UTC
From: Ondrej Mosnacek <omosnace@redhat.com>

BugLink: https://bugs.launchpad.net/bugs/1790832

This patch fixes sleep-in-atomic bugs in AES-CBC and AES-XTS VMX
implementations. The problem is that the blkcipher_* functions should
not be called in atomic context.

The bugs can be reproduced via the AF_ALG interface by trying to
encrypt/decrypt sufficiently large buffers (at least 64 KiB) using the
VMX implementations of 'cbc(aes)' or 'xts(aes)'. Such operations then
trigger BUG in crypto_yield():

[  891.863680] BUG: sleeping function called from invalid context at include/crypto/algapi.h:424
[  891.864622] in_atomic(): 1, irqs_disabled(): 0, pid: 12347, name: kcapi-enc
[  891.864739] 1 lock held by kcapi-enc/12347:
[  891.864811]  #0: 00000000f5d42c46 (sk_lock-AF_ALG){+.+.}, at: skcipher_recvmsg+0x50/0x530
[  891.865076] CPU: 5 PID: 12347 Comm: kcapi-enc Not tainted 4.19.0-0.rc0.git3.1.fc30.ppc64le #1
[  891.865251] Call Trace:
[  891.865340] [c0000003387578c0] [c000000000d67ea4] dump_stack+0xe8/0x164 (unreliable)
[  891.865511] [c000000338757910] [c000000000172a58] ___might_sleep+0x2f8/0x310
[  891.865679] [c000000338757990] [c0000000006bff74] blkcipher_walk_done+0x374/0x4a0
[  891.865825] [c0000003387579e0] [d000000007e73e70] p8_aes_cbc_encrypt+0x1c8/0x260 [vmx_crypto]
[  891.865993] [c000000338757ad0] [c0000000006c0ee0] skcipher_encrypt_blkcipher+0x60/0x80
[  891.866128] [c000000338757b10] [c0000000006ec504] skcipher_recvmsg+0x424/0x530
[  891.866283] [c000000338757bd0] [c000000000b00654] sock_recvmsg+0x74/0xa0
[  891.866403] [c000000338757c10] [c000000000b00f64] ___sys_recvmsg+0xf4/0x2f0
[  891.866515] [c000000338757d90] [c000000000b02bb8] __sys_recvmsg+0x68/0xe0
[  891.866631] [c000000338757e30] [c00000000000bbe4] system_call+0x5c/0x70
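
The fix (see the diff below) narrows the non-preemptible window so that only
the VSX assembly routine runs with preemption and pagefaults disabled, while
blkcipher_walk_done(), which may sleep via crypto_yield(), runs in preemptible
context. Condensed from the CBC hunk of this patch, the corrected loop has
roughly this shape:

	blkcipher_walk_init(&walk, dst, src, nbytes);
	ret = blkcipher_walk_virt(desc, &walk);
	while ((nbytes = walk.nbytes)) {
		/* atomic section now covers only the VSX routine */
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();
		aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
				   nbytes & AES_BLOCK_MASK,
				   &ctx->enc_key, walk.iv, 1);
		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();

		nbytes &= AES_BLOCK_SIZE - 1;
		/* may sleep, so it must run outside the atomic section */
		ret = blkcipher_walk_done(desc, &walk, nbytes);
	}

The trade-off is that the VSX context is now enabled and disabled once per
walk chunk instead of once per request, which adds some overhead but keeps
the sleeping walk helpers out of atomic context.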

Fixes: 8c755ace357c ("crypto: vmx - Adding CBC routines for VMX module")
Fixes: c07f5d3da643 ("crypto: vmx - Adding support for XTS")
Cc: stable@vger.kernel.org
Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
(cherry picked from commit 0522236d4f9c5ab2e79889cb020d1acbe5da416e)
Signed-off-by: Joseph Salisbury <joseph.salisbury@canonical.com>
---
 drivers/crypto/vmx/aes_cbc.c | 30 ++++++++++++++----------------
 drivers/crypto/vmx/aes_xts.c | 21 ++++++++++++++-------
 2 files changed, 28 insertions(+), 23 deletions(-)
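
To make the reproduction path concrete, below is a rough sketch of what a
reproducer along the lines of the kcapi-enc run in the trace does through
AF_ALG: bind an 'skcipher' socket to 'cbc(aes)', set a key, and encrypt a
single buffer of at least 64 KiB in one request. The key and IV are
zero-filled placeholders and all error handling is omitted; this is an
illustrative sketch, not a tested reproducer.

#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",		/* or "xts(aes)" */
	};
	unsigned char key[16] = { 0 };			/* placeholder key */
	static unsigned char buf[64 * 1024];		/* >= 64 KiB, as in the report */
	union {						/* aligned cmsg buffer */
		char raw[CMSG_SPACE(sizeof(int)) +
			 CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)];
		struct cmsghdr align;
	} cbuf = { 0 };
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_control = cbuf.raw, .msg_controllen = sizeof(cbuf.raw),
		.msg_iov = &iov, .msg_iovlen = 1,
	};
	struct cmsghdr *cmsg;
	struct af_alg_iv *ivp;
	int tfm, op;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	op = accept(tfm, NULL, 0);

	cmsg = CMSG_FIRSTHDR(&msg);			/* operation: encrypt */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	*(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);			/* IV (all zeroes here) */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + 16);
	ivp = (struct af_alg_iv *)CMSG_DATA(cmsg);
	ivp->ivlen = 16;
	memset(ivp->iv, 0, 16);

	sendmsg(op, &msg, 0);				/* submit plaintext + params */
	read(op, buf, sizeof(buf));			/* read back ciphertext */

	close(op);
	close(tfm);
	return 0;
}

On a Power8 system with the unpatched vmx_crypto driver, a run like this is
expected to hit the might_sleep() warning shown in the trace above; with the
patch applied the walk helpers run preemptibly and the operation completes.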

Comments

Kleber Sacilotto de Souza Oct. 15, 2018, 10:32 a.m. UTC | #1
On 10/12/18 20:23, Joseph Salisbury wrote:
> From: Ondrej Mosnacek <omosnace@redhat.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1790832
> 
> This patch fixes sleep-in-atomic bugs in AES-CBC and AES-XTS VMX
> implementations. The problem is that the blkcipher_* functions should
> not be called in atomic context.
> 
> The bugs can be reproduced via the AF_ALG interface by trying to
> encrypt/decrypt sufficiently large buffers (at least 64 KiB) using the
> VMX implementations of 'cbc(aes)' or 'xts(aes)'. Such operations then
> trigger BUG in crypto_yield():
> 
> [  891.863680] BUG: sleeping function called from invalid context at include/crypto/algapi.h:424
> [  891.864622] in_atomic(): 1, irqs_disabled(): 0, pid: 12347, name: kcapi-enc
> [  891.864739] 1 lock held by kcapi-enc/12347:
> [  891.864811]  #0: 00000000f5d42c46 (sk_lock-AF_ALG){+.+.}, at: skcipher_recvmsg+0x50/0x530
> [  891.865076] CPU: 5 PID: 12347 Comm: kcapi-enc Not tainted 4.19.0-0.rc0.git3.1.fc30.ppc64le #1
> [  891.865251] Call Trace:
> [  891.865340] [c0000003387578c0] [c000000000d67ea4] dump_stack+0xe8/0x164 (unreliable)
> [  891.865511] [c000000338757910] [c000000000172a58] ___might_sleep+0x2f8/0x310
> [  891.865679] [c000000338757990] [c0000000006bff74] blkcipher_walk_done+0x374/0x4a0
> [  891.865825] [c0000003387579e0] [d000000007e73e70] p8_aes_cbc_encrypt+0x1c8/0x260 [vmx_crypto]
> [  891.865993] [c000000338757ad0] [c0000000006c0ee0] skcipher_encrypt_blkcipher+0x60/0x80
> [  891.866128] [c000000338757b10] [c0000000006ec504] skcipher_recvmsg+0x424/0x530
> [  891.866283] [c000000338757bd0] [c000000000b00654] sock_recvmsg+0x74/0xa0
> [  891.866403] [c000000338757c10] [c000000000b00f64] ___sys_recvmsg+0xf4/0x2f0
> [  891.866515] [c000000338757d90] [c000000000b02bb8] __sys_recvmsg+0x68/0xe0
> [  891.866631] [c000000338757e30] [c00000000000bbe4] system_call+0x5c/0x70
> 
> Fixes: 8c755ace357c ("crypto: vmx - Adding CBC routines for VMX module")
> Fixes: c07f5d3da643 ("crypto: vmx - Adding support for XTS")
> Cc: stable@vger.kernel.org
> Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
> (cherry picked from commit 0522236d4f9c5ab2e79889cb020d1acbe5da416e)

As Khaled mentioned on the other thread, it looks like the Xenial version
of the patch needed some context adjustment, so in that case the above
should be:

(backported from commit ...)

If that's the case, it can be fixed when applying the patch.

Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>

Joseph Salisbury Oct. 15, 2018, 2:10 p.m. UTC | #2
On 10/15/2018 06:32 AM, Kleber Souza wrote:
> On 10/12/18 20:23, Joseph Salisbury wrote:
>> From: Ondrej Mosnacek <omosnace@redhat.com>
>>
>> BugLink: https://bugs.launchpad.net/bugs/1790832
>>
>> This patch fixes sleep-in-atomic bugs in AES-CBC and AES-XTS VMX
>> implementations. The problem is that the blkcipher_* functions should
>> not be called in atomic context.
>>
>> The bugs can be reproduced via the AF_ALG interface by trying to
>> encrypt/decrypt sufficiently large buffers (at least 64 KiB) using the
>> VMX implementations of 'cbc(aes)' or 'xts(aes)'. Such operations then
>> trigger BUG in crypto_yield():
>>
>> [  891.863680] BUG: sleeping function called from invalid context at include/crypto/algapi.h:424
>> [  891.864622] in_atomic(): 1, irqs_disabled(): 0, pid: 12347, name: kcapi-enc
>> [  891.864739] 1 lock held by kcapi-enc/12347:
>> [  891.864811]  #0: 00000000f5d42c46 (sk_lock-AF_ALG){+.+.}, at: skcipher_recvmsg+0x50/0x530
>> [  891.865076] CPU: 5 PID: 12347 Comm: kcapi-enc Not tainted 4.19.0-0.rc0.git3.1.fc30.ppc64le #1
>> [  891.865251] Call Trace:
>> [  891.865340] [c0000003387578c0] [c000000000d67ea4] dump_stack+0xe8/0x164 (unreliable)
>> [  891.865511] [c000000338757910] [c000000000172a58] ___might_sleep+0x2f8/0x310
>> [  891.865679] [c000000338757990] [c0000000006bff74] blkcipher_walk_done+0x374/0x4a0
>> [  891.865825] [c0000003387579e0] [d000000007e73e70] p8_aes_cbc_encrypt+0x1c8/0x260 [vmx_crypto]
>> [  891.865993] [c000000338757ad0] [c0000000006c0ee0] skcipher_encrypt_blkcipher+0x60/0x80
>> [  891.866128] [c000000338757b10] [c0000000006ec504] skcipher_recvmsg+0x424/0x530
>> [  891.866283] [c000000338757bd0] [c000000000b00654] sock_recvmsg+0x74/0xa0
>> [  891.866403] [c000000338757c10] [c000000000b00f64] ___sys_recvmsg+0xf4/0x2f0
>> [  891.866515] [c000000338757d90] [c000000000b02bb8] __sys_recvmsg+0x68/0xe0
>> [  891.866631] [c000000338757e30] [c00000000000bbe4] system_call+0x5c/0x70
>>
>> Fixes: 8c755ace357c ("crypto: vmx - Adding CBC routines for VMX module")
>> Fixes: c07f5d3da643 ("crypto: vmx - Adding support for XTS")
>> Cc: stable@vger.kernel.org
>> Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
>> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
>> (cherry picked from commit 0522236d4f9c5ab2e79889cb020d1acbe5da416e)
> As Khaled mentioned on the other thread, it looks like the Xenial version
> of the patch needed some context adjustment, so in that case the above
> should be:
>
> (backported from commit ...)
>
> If that's the case, it can be fixed when applying the patch.
>
> Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
I also replied to the other thread.  The reason it says "cherry picked
from" is that I didn't do any backporting myself: 'git cherry-pick'
applied the patch differently for Xenial and Bionic, but without any
changes by me.  This caused the format-patch output to be slightly
different for each kernel.

Stefan Bader Oct. 16, 2018, 6:03 a.m. UTC | #3
On 15.10.18 16:10, Joseph Salisbury wrote:
> On 10/15/2018 06:32 AM, Kleber Souza wrote:
>> On 10/12/18 20:23, Joseph Salisbury wrote:
>>> From: Ondrej Mosnacek <omosnace@redhat.com>
>>>
>>> BugLink: https://bugs.launchpad.net/bugs/1790832
>>>
>>> This patch fixes sleep-in-atomic bugs in AES-CBC and AES-XTS VMX
>>> implementations. The problem is that the blkcipher_* functions should
>>> not be called in atomic context.
>>>
>>> The bugs can be reproduced via the AF_ALG interface by trying to
>>> encrypt/decrypt sufficiently large buffers (at least 64 KiB) using the
>>> VMX implementations of 'cbc(aes)' or 'xts(aes)'. Such operations then
>>> trigger BUG in crypto_yield():
>>>
>>> [  891.863680] BUG: sleeping function called from invalid context at include/crypto/algapi.h:424
>>> [  891.864622] in_atomic(): 1, irqs_disabled(): 0, pid: 12347, name: kcapi-enc
>>> [  891.864739] 1 lock held by kcapi-enc/12347:
>>> [  891.864811]  #0: 00000000f5d42c46 (sk_lock-AF_ALG){+.+.}, at: skcipher_recvmsg+0x50/0x530
>>> [  891.865076] CPU: 5 PID: 12347 Comm: kcapi-enc Not tainted 4.19.0-0.rc0.git3.1.fc30.ppc64le #1
>>> [  891.865251] Call Trace:
>>> [  891.865340] [c0000003387578c0] [c000000000d67ea4] dump_stack+0xe8/0x164 (unreliable)
>>> [  891.865511] [c000000338757910] [c000000000172a58] ___might_sleep+0x2f8/0x310
>>> [  891.865679] [c000000338757990] [c0000000006bff74] blkcipher_walk_done+0x374/0x4a0
>>> [  891.865825] [c0000003387579e0] [d000000007e73e70] p8_aes_cbc_encrypt+0x1c8/0x260 [vmx_crypto]
>>> [  891.865993] [c000000338757ad0] [c0000000006c0ee0] skcipher_encrypt_blkcipher+0x60/0x80
>>> [  891.866128] [c000000338757b10] [c0000000006ec504] skcipher_recvmsg+0x424/0x530
>>> [  891.866283] [c000000338757bd0] [c000000000b00654] sock_recvmsg+0x74/0xa0
>>> [  891.866403] [c000000338757c10] [c000000000b00f64] ___sys_recvmsg+0xf4/0x2f0
>>> [  891.866515] [c000000338757d90] [c000000000b02bb8] __sys_recvmsg+0x68/0xe0
>>> [  891.866631] [c000000338757e30] [c00000000000bbe4] system_call+0x5c/0x70
>>>
>>> Fixes: 8c755ace357c ("crypto: vmx - Adding CBC routines for VMX module")
>>> Fixes: c07f5d3da643 ("crypto: vmx - Adding support for XTS")
>>> Cc: stable@vger.kernel.org
>>> Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
>>> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
>>> (cherry picked from commit 0522236d4f9c5ab2e79889cb020d1acbe5da416e)
>> As Khaled mentioned on the other thread, it looks like the Xenial version
>> of the patch needed some context adjustment, so in that case the above
>> should be:
>>
>> (backported from commit ...)
>>
>> If that's the case, it can be fixed when applying the patch.
>>
>> Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
> I also replied to the other thread.  The reason it says "cherry picked
> from" is that I didn't do any backporting myself: 'git cherry-pick'
> applied the patch differently for Xenial and Bionic, but without any
> changes by me.  This caused the format-patch output to be slightly
> different for each kernel.

This is probably one of the reasons I would rather not rely on the cherry-pick
command itself when it comes to bringing things back from upstream. Instead I
use format-patch and am, and work backwards from the latest to the oldest
release. As soon as that fails with something worse than "fuzz 1" (which also
needs checking, to make sure things went into the right place), I would change
the cherry pick to backported, and if there are still older releases, I would
format-patch the backport version.
This way fewer magic things happen. Alternatively I would do one submission
(for X and B in this case) and state in the introduction that this only works
by actually using cherry-pick.
-Stefan


Patch

diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 4613170..92e1163 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -111,24 +111,23 @@  static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
 					       nbytes);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->enc_key, walk.iv, 1);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
@@ -152,24 +151,23 @@  static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 		ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
 					       nbytes);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->dec_key, walk.iv, 0);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 24353ec3..52e7ae0 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -123,32 +123,39 @@  static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 		ret = enc ? crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) :
                             crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
 	} else {
+		blkcipher_walk_init(&walk, dst, src, nbytes);
+
+		ret = blkcipher_walk_virt(desc, &walk);
+
 		preempt_disable();
 		pagefault_disable();
 		enable_kernel_vsx();
 
-		blkcipher_walk_init(&walk, dst, src, nbytes);
-
-		ret = blkcipher_walk_virt(desc, &walk);
 		iv = walk.iv;
 		memset(tweak, 0, AES_BLOCK_SIZE);
 		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
+		disable_kernel_vsx();
+		pagefault_enable();
+		preempt_enable();
+
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			if (enc)
 				aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 						nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
 			else
 				aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 						nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
 
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 	return ret;
 }