[RFC,v4,28/28] x86: Add support to make use of Secure Memory Encryption

Message ID 20170216154825.19244.32545.stgit@tlendack-t1.amdoffice.net (mailing list archive)
State New, archived

Commit Message

Tom Lendacky Feb. 16, 2017, 3:48 p.m. UTC
This patch adds support for checking whether SME has been enabled and
whether memory encryption should be activated (by checking the command
line option against the configured default state).  If memory encryption
is to be activated, then the encryption mask is set and the kernel is
encrypted "in place."

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/kernel/head_64.S          |    1 +
 arch/x86/kernel/mem_encrypt_init.c |   71 +++++++++++++++++++++++++++++++++++-
 arch/x86/mm/mem_encrypt.c          |    2 +
 3 files changed, 73 insertions(+), 1 deletion(-)

Comments

Borislav Petkov March 1, 2017, 6:40 p.m. UTC | #1
On Thu, Feb 16, 2017 at 09:48:25AM -0600, Tom Lendacky wrote:
> This patch adds support for checking whether SME has been enabled and
> whether memory encryption should be activated (by checking the command
> line option against the configured default state).  If memory encryption
> is to be activated, then the encryption mask is set and the kernel is
> encrypted "in place."
> 
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> ---
>  arch/x86/kernel/head_64.S          |    1 +
>  arch/x86/kernel/mem_encrypt_init.c |   71 +++++++++++++++++++++++++++++++++++-
>  arch/x86/mm/mem_encrypt.c          |    2 +
>  3 files changed, 73 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index edd2f14..e6820e7 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -97,6 +97,7 @@ startup_64:
>  	 * Save the returned mask in %r12 for later use.
>  	 */
>  	push	%rsi
> +	movq	%rsi, %rdi
>  	call	sme_enable
>  	pop	%rsi
>  	movq	%rax, %r12
> diff --git a/arch/x86/kernel/mem_encrypt_init.c b/arch/x86/kernel/mem_encrypt_init.c
> index 07cbb90..35c5e3d 100644
> --- a/arch/x86/kernel/mem_encrypt_init.c
> +++ b/arch/x86/kernel/mem_encrypt_init.c
> @@ -19,6 +19,12 @@
>  #include <linux/mm.h>
>  
>  #include <asm/sections.h>
> +#include <asm/processor-flags.h>
> +#include <asm/msr.h>
> +#include <asm/cmdline.h>
> +
> +static char sme_cmdline_arg_on[] __initdata = "mem_encrypt=on";
> +static char sme_cmdline_arg_off[] __initdata = "mem_encrypt=off";
>  
>  extern void sme_encrypt_execute(unsigned long, unsigned long, unsigned long,
>  				void *, pgd_t *);
> @@ -217,8 +223,71 @@ unsigned long __init sme_get_me_mask(void)
>  	return sme_me_mask;
>  }
>  
> -unsigned long __init sme_enable(void)
> +unsigned long __init sme_enable(void *boot_data)

unsigned long __init sme_enable(struct boot_params *bp)

works too.

And then you need to correct the function signature in the
!CONFIG_AMD_MEM_ENCRYPT case, at the end of this file, too:

unsigned long __init sme_enable(struct boot_params *bp)		{ return 0; }

>  {
> +	struct boot_params *bp = boot_data;
> +	unsigned int eax, ebx, ecx, edx;
> +	unsigned long cmdline_ptr;
> +	bool enable_if_found;
> +	void *cmdline_arg;
> +	u64 msr;
> +
> +	/* Check for an AMD processor */
> +	eax = 0;
> +	ecx = 0;
> +	native_cpuid(&eax, &ebx, &ecx, &edx);
> +	if ((ebx != 0x68747541) || (edx != 0x69746e65) || (ecx != 0x444d4163))
> +		goto out;
> +
> +	/* Check for the SME support leaf */
> +	eax = 0x80000000;
> +	ecx = 0;
> +	native_cpuid(&eax, &ebx, &ecx, &edx);
> +	if (eax < 0x8000001f)
> +		goto out;
> +
> +	/*
> +	 * Check for the SME feature:
> +	 *   CPUID Fn8000_001F[EAX] - Bit 0
> +	 *     Secure Memory Encryption support
> +	 *   CPUID Fn8000_001F[EBX] - Bits 5:0
> +	 *     Pagetable bit position used to indicate encryption
> +	 */
> +	eax = 0x8000001f;
> +	ecx = 0;
> +	native_cpuid(&eax, &ebx, &ecx, &edx);
> +	if (!(eax & 1))
> +		goto out;
> +
> +	/* Check if SME is enabled */
> +	msr = native_read_msr(MSR_K8_SYSCFG);

This native_read_msr() wankery is adding this check:

	if (msr_tracepoint_active(__tracepoint_read_msr))

and here it is clearly too early for tracepoints. Please use __rdmsr()
which is purely doing the MSR operation. (... and exception handling for
when the RDMSR itself raises an exception but we're very early here too
so the MSR better be there, otherwise we'll blow up).
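
For illustration, the check would then simply be (a sketch, assuming
__rdmsr() from asm/msr.h does the bare MSR read):

	msr = __rdmsr(MSR_K8_SYSCFG);
	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
		goto out;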

> +	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
> +		goto out;
> +
> +	/*
> +	 * Fixups have not been to applied phys_base yet, so we must obtain

		...    not been applied to phys_base yet ...

> +	 * the address to the SME command line option in the following way.
> +	 */
> +	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT)) {
> +		asm ("lea sme_cmdline_arg_off(%%rip), %0"
> +		     : "=r" (cmdline_arg)
> +		     : "p" (sme_cmdline_arg_off));
> +		enable_if_found = false;
> +	} else {
> +		asm ("lea sme_cmdline_arg_on(%%rip), %0"
> +		     : "=r" (cmdline_arg)
> +		     : "p" (sme_cmdline_arg_on));
> +		enable_if_found = true;
> +	}
> +
> +	cmdline_ptr = bp->hdr.cmd_line_ptr | ((u64)bp->ext_cmd_line_ptr << 32);
> +
> +	if (cmdline_find_option_bool((char *)cmdline_ptr, cmdline_arg))
> +		sme_me_mask = enable_if_found ? 1UL << (ebx & 0x3f) : 0;
> +	else
> +		sme_me_mask = enable_if_found ? 0 : 1UL << (ebx & 0x3f);

I have a better idea: you can copy __cmdline_find_option() +
cmdline_find_option() to arch/x86/lib/cmdline.c in a pre-patch. Then,
pass in a buffer and check for "on" and "off". This way you don't
have to misuse the _bool() variant for something which is actually
"option=argument".

Thanks.
Tom Lendacky March 7, 2017, 4:05 p.m. UTC | #2
On 3/1/2017 12:40 PM, Borislav Petkov wrote:
> On Thu, Feb 16, 2017 at 09:48:25AM -0600, Tom Lendacky wrote:
>> This patch adds support for checking whether SME has been enabled and
>> whether memory encryption should be activated (by checking the command
>> line option against the configured default state).  If memory encryption
>> is to be activated, then the encryption mask is set and the kernel is
>> encrypted "in place."
>>
>> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
>> ---
>>  arch/x86/kernel/head_64.S          |    1 +
>>  arch/x86/kernel/mem_encrypt_init.c |   71 +++++++++++++++++++++++++++++++++++-
>>  arch/x86/mm/mem_encrypt.c          |    2 +
>>  3 files changed, 73 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
>> index edd2f14..e6820e7 100644
>> --- a/arch/x86/kernel/head_64.S
>> +++ b/arch/x86/kernel/head_64.S
>> @@ -97,6 +97,7 @@ startup_64:
>>  	 * Save the returned mask in %r12 for later use.
>>  	 */
>>  	push	%rsi
>> +	movq	%rsi, %rdi
>>  	call	sme_enable
>>  	pop	%rsi
>>  	movq	%rax, %r12
>> diff --git a/arch/x86/kernel/mem_encrypt_init.c b/arch/x86/kernel/mem_encrypt_init.c
>> index 07cbb90..35c5e3d 100644
>> --- a/arch/x86/kernel/mem_encrypt_init.c
>> +++ b/arch/x86/kernel/mem_encrypt_init.c
>> @@ -19,6 +19,12 @@
>>  #include <linux/mm.h>
>>
>>  #include <asm/sections.h>
>> +#include <asm/processor-flags.h>
>> +#include <asm/msr.h>
>> +#include <asm/cmdline.h>
>> +
>> +static char sme_cmdline_arg_on[] __initdata = "mem_encrypt=on";
>> +static char sme_cmdline_arg_off[] __initdata = "mem_encrypt=off";
>>
>>  extern void sme_encrypt_execute(unsigned long, unsigned long, unsigned long,
>>  				void *, pgd_t *);
>> @@ -217,8 +223,71 @@ unsigned long __init sme_get_me_mask(void)
>>  	return sme_me_mask;
>>  }
>>
>> -unsigned long __init sme_enable(void)
>> +unsigned long __init sme_enable(void *boot_data)
>
> unsigned long __init sme_enable(struct boot_params *bp)
>
> works too.

Ok, will do.

>
> And then you need to correct the function signature in the
> !CONFIG_AMD_MEM_ENCRYPT case, at the end of this file, too:
>
> unsigned long __init sme_enable(struct boot_params *bp)		{ return 0; }

Yup, missed that.  I'll make it match.

>
>>  {
>> +	struct boot_params *bp = boot_data;
>> +	unsigned int eax, ebx, ecx, edx;
>> +	unsigned long cmdline_ptr;
>> +	bool enable_if_found;
>> +	void *cmdline_arg;
>> +	u64 msr;
>> +
>> +	/* Check for an AMD processor */
>> +	eax = 0;
>> +	ecx = 0;
>> +	native_cpuid(&eax, &ebx, &ecx, &edx);
>> +	if ((ebx != 0x68747541) || (edx != 0x69746e65) || (ecx != 0x444d4163))
>> +		goto out;
>> +
>> +	/* Check for the SME support leaf */
>> +	eax = 0x80000000;
>> +	ecx = 0;
>> +	native_cpuid(&eax, &ebx, &ecx, &edx);
>> +	if (eax < 0x8000001f)
>> +		goto out;
>> +
>> +	/*
>> +	 * Check for the SME feature:
>> +	 *   CPUID Fn8000_001F[EAX] - Bit 0
>> +	 *     Secure Memory Encryption support
>> +	 *   CPUID Fn8000_001F[EBX] - Bits 5:0
>> +	 *     Pagetable bit position used to indicate encryption
>> +	 */
>> +	eax = 0x8000001f;
>> +	ecx = 0;
>> +	native_cpuid(&eax, &ebx, &ecx, &edx);
>> +	if (!(eax & 1))
>> +		goto out;
>> +
>> +	/* Check if SME is enabled */
>> +	msr = native_read_msr(MSR_K8_SYSCFG);
>
> This native_read_msr() wankery is adding this check:
>
> 	if (msr_tracepoint_active(__tracepoint_read_msr))
>
> and here it is clearly too early for tracepoints. Please use __rdmsr()
> which is purely doing the MSR operation. (... and exception handling for

Ah, good catch.  I'll switch to __rdmsr().

> when the RDMSR itself raises an exception but we're very early here too
> so the MSR better be there, otherwise we'll blow up).

Yes, it will be there if SME support is indicated in the CPUID result.

>
>> +	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
>> +		goto out;
>> +
>> +	/*
>> +	 * Fixups have not been to applied phys_base yet, so we must obtain
>
> 		...    not been applied to phys_base yet ...

Yup.

>
>> +	 * the address to the SME command line option in the following way.
>> +	 */
>> +	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT)) {
>> +		asm ("lea sme_cmdline_arg_off(%%rip), %0"
>> +		     : "=r" (cmdline_arg)
>> +		     : "p" (sme_cmdline_arg_off));
>> +		enable_if_found = false;
>> +	} else {
>> +		asm ("lea sme_cmdline_arg_on(%%rip), %0"
>> +		     : "=r" (cmdline_arg)
>> +		     : "p" (sme_cmdline_arg_on));
>> +		enable_if_found = true;
>> +	}
>> +
>> +	cmdline_ptr = bp->hdr.cmd_line_ptr | ((u64)bp->ext_cmd_line_ptr << 32);
>> +
>> +	if (cmdline_find_option_bool((char *)cmdline_ptr, cmdline_arg))
>> +		sme_me_mask = enable_if_found ? 1UL << (ebx & 0x3f) : 0;
>> +	else
>> +		sme_me_mask = enable_if_found ? 0 : 1UL << (ebx & 0x3f);
>
> I have a better idea: you can copy __cmdline_find_option() +
> cmdline_find_option() to arch/x86/lib/cmdline.c in a pre-patch. Then,
> pass in a buffer and check for "on" and "off". This way you don't
> have to misuse the _bool() variant for something which is actually
> "option=argument".

I can do that.  Because phys_base hasn't been updated yet, I'll have to
create "on" and "off" constants and get their address in a similar way
to the command line option so that I can do the strncmp properly.
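
For illustration, that could look roughly like this (a sketch modeled
on how the patch already locates the option strings; names
illustrative):

	static char sme_cmdline_on[]  __initdata = "on";
	static char sme_cmdline_off[] __initdata = "off";
	char *cmdline_on, *cmdline_off;

	/* Use RIP-relative addressing to get correct addresses before
	 * the phys_base fixups have been applied.
	 */
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));
	asm ("lea sme_cmdline_off(%%rip), %0"
	     : "=r" (cmdline_off)
	     : "p" (sme_cmdline_off));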

Thanks,
Tom

>
> Thanks.
>
Borislav Petkov March 7, 2017, 5:42 p.m. UTC | #3
On Tue, Mar 07, 2017 at 10:05:00AM -0600, Tom Lendacky wrote:
> I can do that.  Because phys_base hasn't been updated yet, I'll have to
> create "on" and "off" constants and get their address in a similar way
> to the command line option so that I can do the strncmp properly.

Actually, wouldn't it be simpler to inspect the passed-in buffer for
the chars 'o', 'n' - in that order - or 'o', 'f', 'f' - in that order
too? Because __cmdline_find_option() does copy the option characters
into the buffer.

Then you wouldn't need those "on" and "off" constants...
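
For example, purely illustrative (with 'ret' being the argument length
returned by cmdline_find_option() and 'buffer' holding the copied
characters):

	if (ret == 2 && buffer[0] == 'o' && buffer[1] == 'n')
		sme_me_mask = 1UL << (ebx & 0x3f);
	else if (ret == 3 && buffer[0] == 'o' &&
		 buffer[1] == 'f' && buffer[2] == 'f')
		sme_me_mask = 0;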
Borislav Petkov March 8, 2017, 3:05 p.m. UTC | #4
On Tue, Mar 07, 2017 at 10:05:00AM -0600, Tom Lendacky wrote:
> > And then you need to correct the function signature in the
> > !CONFIG_AMD_MEM_ENCRYPT case, at the end of this file, too:
> > 
> > unsigned long __init sme_enable(struct boot_params *bp)		{ return 0; }
> 
> Yup, missed that.  I'll make it match.

Or, you can do this:

unsigned long __init sme_enable(void *boot_data)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
        struct boot_params *bp = boot_data;
        unsigned int eax, ebx, ecx, edx;
        unsigned long cmdline_ptr;

	...

out:
#endif /* CONFIG_AMD_MEM_ENCRYPT */
        return sme_me_mask;
}

and never worry about the function signatures going out of whack.

Patch

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index edd2f14..e6820e7 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -97,6 +97,7 @@  startup_64:
 	 * Save the returned mask in %r12 for later use.
 	 */
 	push	%rsi
+	movq	%rsi, %rdi
 	call	sme_enable
 	pop	%rsi
 	movq	%rax, %r12
diff --git a/arch/x86/kernel/mem_encrypt_init.c b/arch/x86/kernel/mem_encrypt_init.c
index 07cbb90..35c5e3d 100644
--- a/arch/x86/kernel/mem_encrypt_init.c
+++ b/arch/x86/kernel/mem_encrypt_init.c
@@ -19,6 +19,12 @@ 
 #include <linux/mm.h>
 
 #include <asm/sections.h>
+#include <asm/processor-flags.h>
+#include <asm/msr.h>
+#include <asm/cmdline.h>
+
+static char sme_cmdline_arg_on[] __initdata = "mem_encrypt=on";
+static char sme_cmdline_arg_off[] __initdata = "mem_encrypt=off";
 
 extern void sme_encrypt_execute(unsigned long, unsigned long, unsigned long,
 				void *, pgd_t *);
@@ -217,8 +223,71 @@  unsigned long __init sme_get_me_mask(void)
 	return sme_me_mask;
 }
 
-unsigned long __init sme_enable(void)
+unsigned long __init sme_enable(void *boot_data)
 {
+	struct boot_params *bp = boot_data;
+	unsigned int eax, ebx, ecx, edx;
+	unsigned long cmdline_ptr;
+	bool enable_if_found;
+	void *cmdline_arg;
+	u64 msr;
+
+	/* Check for an AMD processor */
+	eax = 0;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+	if ((ebx != 0x68747541) || (edx != 0x69746e65) || (ecx != 0x444d4163))
+		goto out;
+
+	/* Check for the SME support leaf */
+	eax = 0x80000000;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+	if (eax < 0x8000001f)
+		goto out;
+
+	/*
+	 * Check for the SME feature:
+	 *   CPUID Fn8000_001F[EAX] - Bit 0
+	 *     Secure Memory Encryption support
+	 *   CPUID Fn8000_001F[EBX] - Bits 5:0
+	 *     Pagetable bit position used to indicate encryption
+	 */
+	eax = 0x8000001f;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+	if (!(eax & 1))
+		goto out;
+
+	/* Check if SME is enabled */
+	msr = native_read_msr(MSR_K8_SYSCFG);
+	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+		goto out;
+
+	/*
+	 * Fixups have not been to applied phys_base yet, so we must obtain
+	 * the address to the SME command line option in the following way.
+	 */
+	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT)) {
+		asm ("lea sme_cmdline_arg_off(%%rip), %0"
+		     : "=r" (cmdline_arg)
+		     : "p" (sme_cmdline_arg_off));
+		enable_if_found = false;
+	} else {
+		asm ("lea sme_cmdline_arg_on(%%rip), %0"
+		     : "=r" (cmdline_arg)
+		     : "p" (sme_cmdline_arg_on));
+		enable_if_found = true;
+	}
+
+	cmdline_ptr = bp->hdr.cmd_line_ptr | ((u64)bp->ext_cmd_line_ptr << 32);
+
+	if (cmdline_find_option_bool((char *)cmdline_ptr, cmdline_arg))
+		sme_me_mask = enable_if_found ? 1UL << (ebx & 0x3f) : 0;
+	else
+		sme_me_mask = enable_if_found ? 0 : 1UL << (ebx & 0x3f);
+
+out:
 	return sme_me_mask;
 }
 
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index a46bcf4..c5062e1 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -204,6 +204,8 @@  void __init mem_encrypt_init(void)
 
 	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
 	swiotlb_update_mem_attributes();
+
+	pr_info("AMD Secure Memory Encryption (SME) active\n");
 }
 
 void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)