
[RFC,v4,06/28] x86: Add support to enable SME during early boot processing

Message ID 20170216154319.19244.7863.stgit@tlendack-t1.amdoffice.net (mailing list archive)
State New, archived

Commit Message

Tom Lendacky Feb. 16, 2017, 3:43 p.m. UTC
This patch adds support to the early boot code to use Secure Memory
Encryption (SME).  Support is added to update the early pagetables with
the memory encryption mask and to encrypt the kernel in place.

The routines to set the encryption mask and perform the encryption are
stub routines for now with full function to be added in a later patch.

A new file, arch/x86/kernel/mem_encrypt_init.c, is introduced to avoid
adding #ifdefs within arch/x86/kernel/head_64.S and allow
arch/x86/mm/mem_encrypt.c to be removed from the build if SME is not
configured. The mem_encrypt_init.c file will contain the necessary #ifdefs
to allow head_64.S to successfully build and call the SME routines.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/kernel/Makefile           |    2 +
 arch/x86/kernel/head_64.S          |   46 ++++++++++++++++++++++++++++++++-
 arch/x86/kernel/mem_encrypt_init.c |   50 ++++++++++++++++++++++++++++++++++++
 3 files changed, 96 insertions(+), 2 deletions(-)
 create mode 100644 arch/x86/kernel/mem_encrypt_init.c

Comments

Borislav Petkov Feb. 20, 2017, 12:51 p.m. UTC | #1
On Thu, Feb 16, 2017 at 09:43:19AM -0600, Tom Lendacky wrote:
> This patch adds support to the early boot code to use Secure Memory
> Encryption (SME).  Support is added to update the early pagetables with
> the memory encryption mask and to encrypt the kernel in place.
> 
> The routines to set the encryption mask and perform the encryption are
> stub routines for now with full function to be added in a later patch.

s/full function/functionality/

> A new file, arch/x86/kernel/mem_encrypt_init.c, is introduced to avoid
> adding #ifdefs within arch/x86/kernel/head_64.S and allow
> arch/x86/mm/mem_encrypt.c to be removed from the build if SME is not
> configured. The mem_encrypt_init.c file will contain the necessary #ifdefs
> to allow head_64.S to successfully build and call the SME routines.

That paragraph is superfluous.

> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> ---
>  arch/x86/kernel/Makefile           |    2 +
>  arch/x86/kernel/head_64.S          |   46 ++++++++++++++++++++++++++++++++-
>  arch/x86/kernel/mem_encrypt_init.c |   50 ++++++++++++++++++++++++++++++++++++
>  3 files changed, 96 insertions(+), 2 deletions(-)
>  create mode 100644 arch/x86/kernel/mem_encrypt_init.c
> 
> diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
> index bdcdb3b..33af80a 100644
> --- a/arch/x86/kernel/Makefile
> +++ b/arch/x86/kernel/Makefile
> @@ -140,4 +140,6 @@ ifeq ($(CONFIG_X86_64),y)
>  
>  	obj-$(CONFIG_PCI_MMCONFIG)	+= mmconf-fam10h_64.o
>  	obj-y				+= vsmp_64.o
> +
> +	obj-y				+= mem_encrypt_init.o
>  endif
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index b467b14..4f8201b 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -91,6 +91,23 @@ startup_64:
>  	jnz	bad_address
>  
>  	/*
> +	 * Enable Secure Memory Encryption (SME), if supported and enabled.
> +	 * The real_mode_data address is in %rsi and that register can be
> +	 * clobbered by the called function so be sure to save it.
> +	 * Save the returned mask in %r12 for later use.
> +	 */
> +	push	%rsi
> +	call	sme_enable
> +	pop	%rsi
> +	movq	%rax, %r12
> +
> +	/*
> +	 * Add the memory encryption mask to %rbp to include it in the page
> +	 * table fixups.
> +	 */
> +	addq	%r12, %rbp
> +
> +	/*
>  	 * Fixup the physical addresses in the page table
>  	 */
>  	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
> @@ -113,6 +130,7 @@ startup_64:
>  	shrq	$PGDIR_SHIFT, %rax
>  
>  	leaq	(PAGE_SIZE + _KERNPG_TABLE)(%rbx), %rdx
> +	addq	%r12, %rdx
>  	movq	%rdx, 0(%rbx,%rax,8)
>  	movq	%rdx, 8(%rbx,%rax,8)
>  
> @@ -129,6 +147,7 @@ startup_64:
>  	movq	%rdi, %rax
>  	shrq	$PMD_SHIFT, %rdi
>  	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
> +	addq	%r12, %rax
>  	leaq	(_end - 1)(%rip), %rcx
>  	shrq	$PMD_SHIFT, %rcx
>  	subq	%rdi, %rcx
> @@ -162,11 +181,25 @@ startup_64:
>  	cmp	%r8, %rdi
>  	jne	1b
>  
> -	/* Fixup phys_base */
> +	/*
> +	 * Fixup phys_base - remove the memory encryption mask from %rbp
> +	 * to obtain the true physical address.
> +	 */
> +	subq	%r12, %rbp
>  	addq	%rbp, phys_base(%rip)
>  
> +	/*
> +	 * Encrypt the kernel if SME is active.
> +	 * The real_mode_data address is in %rsi and that register can be
> +	 * clobbered by the called function so be sure to save it.
> +	 */
> +	push	%rsi
> +	call	sme_encrypt_kernel
> +	pop	%rsi
> +
>  .Lskip_fixup:

So if we land on this label because we can skip the fixup due to %rbp
being 0, we will skip sme_encrypt_kernel() too.

I think you need to move the .Lskip_fixup label above the
sme_encrypt_kernel call.
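
IOW, the flow I'm worried about is roughly this (a sketch, not the exact
head_64.S code):

	test	%rbp, %rbp		/* relocation delta == 0? */
	jz	.Lskip_fixup		/* yes -> skip the fixups ... */

	...

	push	%rsi
	call	sme_encrypt_kernel	/* ... and skip this call too */
	pop	%rsi

.Lskip_fixup: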

>  	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
> +	addq	%r12, %rax
>  	jmp 1f
>  ENTRY(secondary_startup_64)
>  	/*
> @@ -186,7 +219,16 @@ ENTRY(secondary_startup_64)
>  	/* Sanitize CPU configuration */
>  	call verify_cpu
>  
> -	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
> +	/*
> +	 * Get the SME encryption mask.
> +	 * The real_mode_data address is in %rsi and that register can be
> +	 * clobbered by the called function so be sure to save it.

You can say here that sme_get_me_mask puts the mask in %rax; that's why
we do an ADD below and not a MOV. I know, it is very explicit but this is
boot asm and I'd prefer for it to be absolutely clear.
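
Something like this, for example (just a suggestion for the wording):

	/*
	 * Get the SME encryption mask.
	 * The real_mode_data address is in %rsi and that register can be
	 * clobbered by the called function so be sure to save it.
	 * sme_get_me_mask() returns the mask in %rax, which is why the
	 * ADD below folds it into the init_level4_pgt address instead of
	 * using a plain MOV.
	 */
	push	%rsi
	call	sme_get_me_mask
	pop	%rsi

	addq	$(init_level4_pgt - __START_KERNEL_map), %rax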

> +	 */
> +	push	%rsi
> +	call	sme_get_me_mask
> +	pop	%rsi
> +
> +	addq	$(init_level4_pgt - __START_KERNEL_map), %rax
>  1:

...

> +#else	/* !CONFIG_AMD_MEM_ENCRYPT */
> +
> +void __init sme_encrypt_kernel(void)
> +{
> +}
> +
> +unsigned long __init sme_get_me_mask(void)
> +{
> +	return 0;
> +}
> +
> +unsigned long __init sme_enable(void)
> +{
> +	return 0;
> +}

Do that:

void __init sme_encrypt_kernel(void)            { }
unsigned long __init sme_get_me_mask(void)      { return 0; }
unsigned long __init sme_enable(void)           { return 0; }

to save some lines.
Tom Lendacky Feb. 21, 2017, 2:55 p.m. UTC | #2
On 2/20/2017 6:51 AM, Borislav Petkov wrote:
> On Thu, Feb 16, 2017 at 09:43:19AM -0600, Tom Lendacky wrote:
>> This patch adds support to the early boot code to use Secure Memory
>> Encryption (SME).  Support is added to update the early pagetables with
>> the memory encryption mask and to encrypt the kernel in place.
>>
>> The routines to set the encryption mask and perform the encryption are
>> stub routines for now with full function to be added in a later patch.
>
> s/full function/functionality/

Ok.

>
>> A new file, arch/x86/kernel/mem_encrypt_init.c, is introduced to avoid
>> adding #ifdefs within arch/x86/kernel/head_64.S and allow
>> arch/x86/mm/mem_encrypt.c to be removed from the build if SME is not
>> configured. The mem_encrypt_init.c file will contain the necessary #ifdefs
>> to allow head_64.S to successfully build and call the SME routines.
>
> That paragraph is superfluous.

I'll remove this, especially since the files will be combined now.

>
>> ...
>>
>> +	/*
>> +	 * Encrypt the kernel if SME is active.
>> +	 * The real_mode_data address is in %rsi and that register can be
>> +	 * clobbered by the called function so be sure to save it.
>> +	 */
>> +	push	%rsi
>> +	call	sme_encrypt_kernel
>> +	pop	%rsi
>> +
>>  .Lskip_fixup:
>
> So if we land on this label because we can skip the fixup due to %rbp
> being 0, we will skip sme_encrypt_kernel() too.
>
> I think you need to move the .Lskip_fixup label above the
> sme_encrypt_kernel call.

Actually, %rbp will have the encryption bit set in it at the time of the
check, so if SME is active we won't take the jump to .Lskip_fixup.
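
In other words, at the point of the

	test	%rbp, %rbp
	jz	.Lskip_fixup

check (roughly that form), %rbp is already the relocation delta plus the
encryption mask because of the earlier addq %r12, %rbp. So with SME active
%rbp is non-zero even for a non-relocated kernel, the jump isn't taken and
sme_encrypt_kernel() is still reached. The mask is only subtracted back out
right before the phys_base fixup.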

>
>>  	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
>> +	addq	%r12, %rax
>>  	jmp 1f
>>  ENTRY(secondary_startup_64)
>>  	/*
>> @@ -186,7 +219,16 @@ ENTRY(secondary_startup_64)
>>  	/* Sanitize CPU configuration */
>>  	call verify_cpu
>>
>> -	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
>> +	/*
>> +	 * Get the SME encryption mask.
>> +	 * The real_mode_data address is in %rsi and that register can be
>> +	 * clobbered by the called function so be sure to save it.
>
> You can say here that sme_get_me_mask puts the mask in %rax; that's why
> we do an ADD below and not a MOV. I know, it is very explicit but this is
> boot asm and I'd prefer for it to be absolutely clear.

Ok, I can be explicit on this.

>
>> +	 */
>> +	push	%rsi
>> +	call	sme_get_me_mask
>> +	pop	%rsi
>> +
>> +	addq	$(init_level4_pgt - __START_KERNEL_map), %rax
>>  1:
>
> ...
>
>> +#else	/* !CONFIG_AMD_MEM_ENCRYPT */
>> +
>> +void __init sme_encrypt_kernel(void)
>> +{
>> +}
>> +
>> +unsigned long __init sme_get_me_mask(void)
>> +{
>> +	return 0;
>> +}
>> +
>> +unsigned long __init sme_enable(void)
>> +{
>> +	return 0;
>> +}
>
> Do that:
>
> void __init sme_encrypt_kernel(void)            { }
> unsigned long __init sme_get_me_mask(void)      { return 0; }
> unsigned long __init sme_enable(void)           { return 0; }
>
> to save some lines.

No problem.

Thanks,
Tom

Borislav Petkov Feb. 21, 2017, 3:10 p.m. UTC | #3
On Tue, Feb 21, 2017 at 08:55:30AM -0600, Tom Lendacky wrote:
> Actually, %rbp will have the encryption bit set in it at the time of the
> check, so if SME is active we won't take the jump to .Lskip_fixup.

Ha, I didn't think of that! Do you see now what I mean about being
explicit in the asm boot code? :-)

Please note that fact in the comment above the sme_encrypt_kernel() call.
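
Something along these lines, maybe:

	/*
	 * Encrypt the kernel if SME is active.
	 * The real_mode_data address is in %rsi and that register can be
	 * clobbered by the called function so be sure to save it.
	 * Note: %rbp still contains the encryption mask at the .Lskip_fixup
	 * check above, so a non-relocated kernel does not skip this call
	 * when SME is active.
	 */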

Thanks.

Patch

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index bdcdb3b..33af80a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -140,4 +140,6 @@  ifeq ($(CONFIG_X86_64),y)
 
 	obj-$(CONFIG_PCI_MMCONFIG)	+= mmconf-fam10h_64.o
 	obj-y				+= vsmp_64.o
+
+	obj-y				+= mem_encrypt_init.o
 endif
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b467b14..4f8201b 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -91,6 +91,23 @@  startup_64:
 	jnz	bad_address
 
 	/*
+	 * Enable Secure Memory Encryption (SME), if supported and enabled.
+	 * The real_mode_data address is in %rsi and that register can be
+	 * clobbered by the called function so be sure to save it.
+	 * Save the returned mask in %r12 for later use.
+	 */
+	push	%rsi
+	call	sme_enable
+	pop	%rsi
+	movq	%rax, %r12
+
+	/*
+	 * Add the memory encryption mask to %rbp to include it in the page
+	 * table fixups.
+	 */
+	addq	%r12, %rbp
+
+	/*
 	 * Fixup the physical addresses in the page table
 	 */
 	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
@@ -113,6 +130,7 @@  startup_64:
 	shrq	$PGDIR_SHIFT, %rax
 
 	leaq	(PAGE_SIZE + _KERNPG_TABLE)(%rbx), %rdx
+	addq	%r12, %rdx
 	movq	%rdx, 0(%rbx,%rax,8)
 	movq	%rdx, 8(%rbx,%rax,8)
 
@@ -129,6 +147,7 @@  startup_64:
 	movq	%rdi, %rax
 	shrq	$PMD_SHIFT, %rdi
 	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
+	addq	%r12, %rax
 	leaq	(_end - 1)(%rip), %rcx
 	shrq	$PMD_SHIFT, %rcx
 	subq	%rdi, %rcx
@@ -162,11 +181,25 @@  startup_64:
 	cmp	%r8, %rdi
 	jne	1b
 
-	/* Fixup phys_base */
+	/*
+	 * Fixup phys_base - remove the memory encryption mask from %rbp
+	 * to obtain the true physical address.
+	 */
+	subq	%r12, %rbp
 	addq	%rbp, phys_base(%rip)
 
+	/*
+	 * Encrypt the kernel if SME is active.
+	 * The real_mode_data address is in %rsi and that register can be
+	 * clobbered by the called function so be sure to save it.
+	 */
+	push	%rsi
+	call	sme_encrypt_kernel
+	pop	%rsi
+
 .Lskip_fixup:
 	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
+	addq	%r12, %rax
 	jmp 1f
 ENTRY(secondary_startup_64)
 	/*
@@ -186,7 +219,16 @@  ENTRY(secondary_startup_64)
 	/* Sanitize CPU configuration */
 	call verify_cpu
 
-	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
+	/*
+	 * Get the SME encryption mask.
+	 * The real_mode_data address is in %rsi and that register can be
+	 * clobbered by the called function so be sure to save it.
+	 */
+	push	%rsi
+	call	sme_get_me_mask
+	pop	%rsi
+
+	addq	$(init_level4_pgt - __START_KERNEL_map), %rax
 1:
 
 	/* Enable PAE mode and PGE */
diff --git a/arch/x86/kernel/mem_encrypt_init.c b/arch/x86/kernel/mem_encrypt_init.c
new file mode 100644
index 0000000..25af15d
--- /dev/null
+++ b/arch/x86/kernel/mem_encrypt_init.c
@@ -0,0 +1,50 @@ 
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+
+#include <linux/mem_encrypt.h>
+
+void __init sme_encrypt_kernel(void)
+{
+}
+
+unsigned long __init sme_get_me_mask(void)
+{
+	return sme_me_mask;
+}
+
+unsigned long __init sme_enable(void)
+{
+	return sme_me_mask;
+}
+
+#else	/* !CONFIG_AMD_MEM_ENCRYPT */
+
+void __init sme_encrypt_kernel(void)
+{
+}
+
+unsigned long __init sme_get_me_mask(void)
+{
+	return 0;
+}
+
+unsigned long __init sme_enable(void)
+{
+	return 0;
+}
+
+#endif	/* CONFIG_AMD_MEM_ENCRYPT */