
[RFC,Part1,v3,03/17] x86/mm: Secure Encrypted Virtualization (SEV) support

Message ID 20170724190757.11278-4-brijesh.singh@amd.com (mailing list archive)
State Not Applicable

Commit Message

Brijesh Singh July 24, 2017, 7:07 p.m. UTC
From: Tom Lendacky <thomas.lendacky@amd.com>

Provide support for Secure Encrypted Virtualization (SEV). This initial
support defines a flag that is used by the kernel to determine if it is
running with SEV active.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/include/asm/mem_encrypt.h | 2 ++
 arch/x86/mm/mem_encrypt.c          | 3 +++
 include/linux/mem_encrypt.h        | 8 +++++++-
 3 files changed, 12 insertions(+), 1 deletion(-)

Comments

Borislav Petkov July 26, 2017, 4:28 a.m. UTC | #1
On Mon, Jul 24, 2017 at 02:07:43PM -0500, Brijesh Singh wrote:
> From: Tom Lendacky <thomas.lendacky@amd.com>
> 
> Provide support for Secure Encrypted Virtualization (SEV). This initial

Your subject misses a verb and patch subjects should have an active verb
denoting what the patch does. The sentence above is a good example.

> support defines a flag that is used by the kernel to determine if it is
> running with SEV active.
> 
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> ---
>  arch/x86/include/asm/mem_encrypt.h | 2 ++
>  arch/x86/mm/mem_encrypt.c          | 3 +++
>  include/linux/mem_encrypt.h        | 8 +++++++-
>  3 files changed, 12 insertions(+), 1 deletion(-)

...

> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 0fbd092..1e4643e 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -40,6 +40,9 @@ static char sme_cmdline_off[] __initdata = "off";
>  unsigned long sme_me_mask __section(.data) = 0;
>  EXPORT_SYMBOL_GPL(sme_me_mask);
>  
> +unsigned int sev_enabled __section(.data) = 0;
> +EXPORT_SYMBOL_GPL(sev_enabled);

So sev_enabled is a pure bool used only in bool context, not like
sme_me_mask whose value is read too. Which means, you can make the
former static and query it only through accessor functions.
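Something along these lines, for example (a rough, untested sketch; the
out-of-line accessors are only one way to do it):

  /* arch/x86/mm/mem_encrypt.c */
  static bool sev_enabled __section(.data);

  bool sme_active(void)
  {
          return sme_me_mask && !sev_enabled;
  }
  EXPORT_SYMBOL_GPL(sme_active);

  bool sev_active(void)
  {
          return sme_me_mask && sev_enabled;
  }
  EXPORT_SYMBOL_GPL(sev_active);

  /* include/linux/mem_encrypt.h */
  bool sme_active(void);
  bool sev_active(void);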

>  /* Buffer used for early in-place encryption by BSP, no locking needed */
>  static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
>  
> diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
> index 1255f09..ea0831a 100644
> --- a/include/linux/mem_encrypt.h
> +++ b/include/linux/mem_encrypt.h
> @@ -22,12 +22,18 @@
>  #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
>  
>  #define sme_me_mask	0UL
> +#define sev_enabled	0
>  
>  #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
>  
>  static inline bool sme_active(void)
>  {
> -	return !!sme_me_mask;
> +	return (sme_me_mask && !sev_enabled);

You don't need the brackets. Below too.

> +}
> +
> +static inline bool sev_active(void)
> +{
> +	return (sme_me_mask && sev_enabled);
>  }

So this is confusing, TBH. SME and SEV are not mutually exclusive and
yet the logic here says so. Why?

I mean, in the hypervisor context, sme_active() is still true.

/me is confused.
Tom Lendacky July 26, 2017, 4:47 p.m. UTC | #2
On 7/25/2017 11:28 PM, Borislav Petkov wrote:
> On Mon, Jul 24, 2017 at 02:07:43PM -0500, Brijesh Singh wrote:
>> From: Tom Lendacky <thomas.lendacky@amd.com>
>>
>> Provide support for Secure Encrypted Virtualization (SEV). This initial
> 
> Your subject misses a verb and patch subjects should have an active verb
> denoting what the patch does. The sentence above is a good example.

Yup, will update.

> 
>> support defines a flag that is used by the kernel to determine if it is
>> running with SEV active.
>>
>> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
>> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
>> ---
>>   arch/x86/include/asm/mem_encrypt.h | 2 ++
>>   arch/x86/mm/mem_encrypt.c          | 3 +++
>>   include/linux/mem_encrypt.h        | 8 +++++++-
>>   3 files changed, 12 insertions(+), 1 deletion(-)
> 
> ...
> 
>> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
>> index 0fbd092..1e4643e 100644
>> --- a/arch/x86/mm/mem_encrypt.c
>> +++ b/arch/x86/mm/mem_encrypt.c
>> @@ -40,6 +40,9 @@ static char sme_cmdline_off[] __initdata = "off";
>>   unsigned long sme_me_mask __section(.data) = 0;
>>   EXPORT_SYMBOL_GPL(sme_me_mask);
>>   
>> +unsigned int sev_enabled __section(.data) = 0;
>> +EXPORT_SYMBOL_GPL(sev_enabled);
> 
> So sev_enabled is a pure bool used only in bool context, not like
> sme_me_mask whose value is read too. Which means, you can make the
> former static and query it only through accessor functions.

If it's made static then the sme_active()/sev_active() inline functions
would need to be turned into functions within the mem_encrypt.c file. So
there's a trade-off in doing that; which is the better option?

> 
>>   /* Buffer used for early in-place encryption by BSP, no locking needed */
>>   static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
>>   
>> diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
>> index 1255f09..ea0831a 100644
>> --- a/include/linux/mem_encrypt.h
>> +++ b/include/linux/mem_encrypt.h
>> @@ -22,12 +22,18 @@
>>   #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
>>   
>>   #define sme_me_mask	0UL
>> +#define sev_enabled	0
>>   
>>   #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
>>   
>>   static inline bool sme_active(void)
>>   {
>> -	return !!sme_me_mask;
>> +	return (sme_me_mask && !sev_enabled);
> 
> You don't need the brackets. Below too.

Ok.

> 
>> +}
>> +
>> +static inline bool sev_active(void)
>> +{
>> +	return (sme_me_mask && sev_enabled);
>>   }
> 
> So this is confusing, TBH. SME and SEV are not mutually exclusive and
> yet the logic here says so. Why?
> 
> I mean, in the hypervisor context, sme_active() is still true.
> 
> /me is confused.

The kernel needs to distinguish between running under SME and running
under SEV. SME and SEV are similar but not the same. The trampoline code
is a good example.  Before paging is activated, SME will access all
memory as decrypted, but SEV will access all memory as encrypted.  So
when APs are being brought up under SME the trampoline area cannot be
encrypted, whereas under SEV the trampoline area must be encrypted.
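(Purely as an illustration of that difference, using the realmode
trampoline setup - base/size below stand for the trampoline area
address and size, this isn't the actual patch:)

  if (sme_active())
          /*
           * APs fetch the trampoline with paging off, i.e. decrypted,
           * so under SME the area must not be mapped encrypted.
           */
          set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

  /* under SEV the area is left - and must stay - encrypted */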

Thanks,
Tom

>
Borislav Petkov July 27, 2017, 1:39 p.m. UTC | #3
On Wed, Jul 26, 2017 at 11:47:32AM -0500, Tom Lendacky wrote:
> If it's made static then the sme_active()/sev_active() inline functions
> would need to be turned into functions within the mem_encrypt.c file. So
> there's a trade-off in doing that; which is the better option?

Simple: why do we have functions if the variables are exported?

The reasoning for sme_me_mask is more or less obvious but for sev_enabled...

IOW, either make the bool static and uninline the function - this way
you're free to change how you determine whether SEV is enabled later as
callers will be using the function.

Or, if it doesn't really matter because you can always change callers
later, simply drop the sev_active() function and use a bool sev_active
everywhere.
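The latter would boil down to something like:

  /* include/linux/mem_encrypt.h */
  extern bool sev_active;

  /* and a caller then tests the bool directly, e.g.: */
  if (sev_active)
          pr_info("SEV is active\n");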

> The kernel needs to distinguish between running under SME and running
> under SEV. SME and SEV are similar but not the same. The trampoline code
> is a good example.  Before paging is activated, SME will access all
> memory as decrypted, but SEV will access all memory as encrypted.  So
> when APs are being brought up under SME the trampoline area cannot be
> encrypted, whereas under SEV the trampoline area must be encrypted.

I guess you're sensing by now that we need this clarification in a
comment above it...

:-)
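
Something like this maybe, reusing your trampoline explanation (the
wording is just a suggestion):

  /*
   * sme_active() : running bare metal with SME enabled - memory is
   *                encrypted, but accessed decrypted before paging
   *                is enabled.
   * sev_active() : running as an SEV guest - memory is encrypted and
   *                also accessed encrypted before paging is enabled.
   */
  static inline bool sme_active(void)
  {
          return sme_me_mask && !sev_enabled;
  }

  static inline bool sev_active(void)
  {
          return sme_me_mask && sev_enabled;
  }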

Patch

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 8e618fc..9274ec7 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -22,6 +22,7 @@ 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
 extern unsigned long sme_me_mask;
+extern unsigned int sev_enabled;
 
 void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
 			 unsigned long decrypted_kernel_vaddr,
@@ -50,6 +51,7 @@  void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
 
 #define sme_me_mask	0UL
+#define sev_enabled	0
 
 static inline void __init sme_early_encrypt(resource_size_t paddr,
 					    unsigned long size) { }
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 0fbd092..1e4643e 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -40,6 +40,9 @@  static char sme_cmdline_off[] __initdata = "off";
 unsigned long sme_me_mask __section(.data) = 0;
 EXPORT_SYMBOL_GPL(sme_me_mask);
 
+unsigned int sev_enabled __section(.data) = 0;
+EXPORT_SYMBOL_GPL(sev_enabled);
+
 /* Buffer used for early in-place encryption by BSP, no locking needed */
 static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
 
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 1255f09..ea0831a 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -22,12 +22,18 @@ 
 #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
 #define sme_me_mask	0UL
+#define sev_enabled	0
 
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
 static inline bool sme_active(void)
 {
-	return !!sme_me_mask;
+	return (sme_me_mask && !sev_enabled);
+}
+
+static inline bool sev_active(void)
+{
+	return (sme_me_mask && sev_enabled);
 }
 
 static inline unsigned long sme_get_me_mask(void)