| Message ID | 20211210154332.11526-3-brijesh.singh@amd.com (mailing list archive) |
| --- | --- |
| State | New, archived |
| Series | Add AMD Secure Nested Paging (SEV-SNP) Guest Support |
On 2021-12-10 09:42:54 -0600, Brijesh Singh wrote:
> From: Michael Roth <michael.roth@amd.com>
>
> sme_enable() handles feature detection for both SEV and SME. Future
> patches will also use it for SEV-SNP feature detection/setup, which
> will need to be done immediately after the first #VC handler is set up.
> Move it now in preparation.
>
> Signed-off-by: Michael Roth <michael.roth@amd.com>
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>

Reviewed-by: Venu Busireddy <venu.busireddy@oracle.com>

> ---
>  arch/x86/kernel/head64.c  |  3 ---
>  arch/x86/kernel/head_64.S | 13 +++++++++++++
>  2 files changed, 13 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
> index 3be9dd213dad..b01f64e8389b 100644
> --- a/arch/x86/kernel/head64.c
> +++ b/arch/x86/kernel/head64.c
> @@ -192,9 +192,6 @@ unsigned long __head __startup_64(unsigned long physaddr,
>  	if (load_delta & ~PMD_PAGE_MASK)
>  		for (;;);
>
> -	/* Activate Secure Memory Encryption (SME) if supported and enabled */
> -	sme_enable(bp);
> -
>  	/* Include the SME encryption mask in the fixup value */
>  	load_delta += sme_get_me_mask();
>
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index d8b3ebd2bb85..99de8fd461e8 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -69,6 +69,19 @@ SYM_CODE_START_NOALIGN(startup_64)
>  	call	startup_64_setup_env
>  	popq	%rsi
>
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +	/*
> +	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
> +	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
> +	 * which needs to be done before any CPUID instructions are executed in
> +	 * subsequent code.
> +	 */
> +	movq	%rsi, %rdi
> +	pushq	%rsi
> +	call	sme_enable
> +	popq	%rsi
> +#endif
> +
>  	/* Now switch to __KERNEL_CS so IRET works reliably */
>  	pushq	$__KERNEL_CS
>  	leaq	.Lon_kernel_cs(%rip), %rax
> --
> 2.25.1
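For readers following the move: at this point in startup_64 the boot_params pointer is held in %rsi, so the new stub copies it into %rdi (the first-argument register in the x86-64 SysV calling convention) and saves/restores %rsi around the call, since called C code is free to clobber it. The sketch below is a simplified, hypothetical C shape of the routine being called, not the kernel's implementation; the real sme_enable() (arch/x86/mm/mem_encrypt_identity.c) also checks SEV MSR state and the kernel command line. The CPUID Fn8000_001F field layout shown follows the documented AMD encoding, but the body is illustrative only.

```c
/*
 * Illustrative sketch only -- not the kernel's sme_enable().
 * It shows the calling contract the assembly stub relies on: a single
 * boot_params pointer argument (arriving in %rdi per the x86-64 SysV ABI)
 * and a probe of CPUID Fn8000_001F, which reports SME/SEV support in EAX
 * and the position of the memory-encryption (C) bit in EBX[5:0].
 */
#include <stdint.h>

struct boot_params;		/* real definition: arch/x86/include/uapi/asm/bootparam.h */

static uint64_t sme_me_mask;	/* later consumed via sme_get_me_mask() */

void sme_enable(struct boot_params *bp)
{
	uint32_t eax = 0x8000001f, ebx, ecx = 0, edx;

	(void)bp;		/* the real routine uses bp to locate the kernel command line */

	/* CPUID Fn8000_001F: memory encryption capabilities. */
	asm volatile("cpuid"
		     : "+a" (eax), "=b" (ebx), "+c" (ecx), "=d" (edx));

	if (!(eax & 0x1))	/* EAX bit 0: SME supported */
		return;

	/* EBX[5:0]: page-table bit position used as the encryption (C) bit. */
	sme_me_mask = 1ULL << (ebx & 0x3f);
}
```

The pushq/popq pair around the call in the patched head_64.S exists because %rsi is caller-saved under this ABI and still needs to hold the boot_params pointer for the code that follows the call.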