
[Part1,v5,31/38] x86/compressed/64: add identity mapping for Confidential Computing blob

Message ID 20210820151933.22401-32-brijesh.singh@amd.com
State New, archived
Series Add AMD Secure Nested Paging (SEV-SNP) Guest Support

Commit Message

Brijesh Singh Aug. 20, 2021, 3:19 p.m. UTC
From: Michael Roth <michael.roth@amd.com>

The run-time kernel will need to access the Confidential Computing
blob very early in boot in order to reach the CPUID table it points to.
At that stage of boot it will still be relying on the identity-mapped
page table set up by the boot/compressed kernel, so make sure both the
blob and the CPUID table are mapped in advance.

Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/boot/compressed/ident_map_64.c | 18 ++++++++++++++++++
 arch/x86/boot/compressed/sev.c          |  2 +-
 arch/x86/include/asm/sev.h              |  6 ++++++
 3 files changed, 25 insertions(+), 1 deletion(-)
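To illustrate the access pattern the commit message describes, the decompressed kernel reaches the CPUID table by first reading the blob out of boot_params. The helper below is a hypothetical sketch of that consumer side, not code from this series; only the cc_blob_address, cpuid_phys and cpuid_len fields are taken from the patch:

/*
 * Hypothetical sketch: locate the CC blob and sanity-check the CPUID
 * table pointer. This only works after the switchover because both the
 * blob and the page at cpuid_phys are identity-mapped by this patch.
 */
static struct cc_blob_sev_info *snp_find_cc_blob_sketch(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
	if (!cc_info)
		return NULL;

	if (!cc_info->cpuid_phys || !cc_info->cpuid_len)
		return NULL;

	return cc_info;
}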

Comments

Borislav Petkov Aug. 27, 2021, 2:43 p.m. UTC | #1
On Fri, Aug 20, 2021 at 10:19:26AM -0500, Brijesh Singh wrote:
> diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
> index 3cf7a7575f5c..54374e0f0257 100644
> --- a/arch/x86/boot/compressed/ident_map_64.c
> +++ b/arch/x86/boot/compressed/ident_map_64.c
> @@ -37,6 +37,9 @@
>  #include <asm/setup.h>	/* For COMMAND_LINE_SIZE */
>  #undef _SETUP
>  
> +#define __BOOT_COMPRESSED
> +#include <asm/sev.h> /* For sev_snp_enabled() + ConfidentialComputing blob */
> +

When you move all the cc_blob parsing to the compressed kernel, all that
ugly ifdeffery won't be needed.

>  extern unsigned long get_cmd_line_ptr(void);
>  
>  /* Used by PAGE_KERN* macros: */
> @@ -163,6 +166,21 @@ void initialize_identity_maps(void *rmode)
>  	cmdline = get_cmd_line_ptr();
>  	add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);

Carve that ...

> +	/*
> +	 * The ConfidentialComputing blob is used very early in the uncompressed
> +	 * kernel to find the CPUID memory for cpuid instructions. Make sure an
> +	 * identity mapping exists so both stay accessible after the switchover.
> +	 */
> +	if (sev_snp_enabled()) {
> +		struct cc_blob_sev_info *cc_info =
> +			(void *)(unsigned long)boot_params->cc_blob_address;
> +
> +		add_identity_map((unsigned long)cc_info,
> +				 (unsigned long)cc_info + sizeof(*cc_info));
> +		add_identity_map((unsigned long)cc_info->cpuid_phys,
> +				 (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len);
> +	}
> +
>  	/* Load the new page-table. */
>  	sev_verify_cbit(top_level_pgt);

... up to here into a separate function called sev_prep_identity_maps()
so that SEV-specific code flow is not in the generic code path.

>  	write_cr3(top_level_pgt);
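For reference, a minimal sketch of what that split might look like, assuming the helper simply relocates the hunk quoted above (the function name is the one suggested here; the code is illustrative, not part of the series):

void sev_prep_identity_maps(void)
{
	/*
	 * The ConfidentialComputing blob and the CPUID table it points to
	 * are used very early in the uncompressed kernel, so keep both
	 * accessible after the switchover to the new page table.
	 */
	if (sev_snp_enabled()) {
		unsigned long cc_info_pa = boot_params->cc_blob_address;
		struct cc_blob_sev_info *cc_info;

		cc_info = (struct cc_blob_sev_info *)cc_info_pa;

		add_identity_map(cc_info_pa, cc_info_pa + sizeof(*cc_info));
		add_identity_map((unsigned long)cc_info->cpuid_phys,
				 (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len);
	}
}

initialize_identity_maps() would then just call sev_prep_identity_maps() right after the command-line mapping, keeping the SNP-specific flow out of the generic path.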

Patch

diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index 3cf7a7575f5c..54374e0f0257 100644
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -37,6 +37,9 @@ 
 #include <asm/setup.h>	/* For COMMAND_LINE_SIZE */
 #undef _SETUP
 
+#define __BOOT_COMPRESSED
+#include <asm/sev.h> /* For sev_snp_enabled() + ConfidentialComputing blob */
+
 extern unsigned long get_cmd_line_ptr(void);
 
 /* Used by PAGE_KERN* macros: */
@@ -163,6 +166,21 @@  void initialize_identity_maps(void *rmode)
 	cmdline = get_cmd_line_ptr();
 	add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
 
+	/*
+	 * The ConfidentialComputing blob is used very early in the uncompressed
+	 * kernel to find the CPUID memory for cpuid instructions. Make sure an
+	 * identity mapping exists so both stay accessible after the switchover.
+	 */
+	if (sev_snp_enabled()) {
+		struct cc_blob_sev_info *cc_info =
+			(void *)(unsigned long)boot_params->cc_blob_address;
+
+		add_identity_map((unsigned long)cc_info,
+				 (unsigned long)cc_info + sizeof(*cc_info));
+		add_identity_map((unsigned long)cc_info->cpuid_phys,
+				 (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len);
+	}
+
 	/* Load the new page-table. */
 	sev_verify_cbit(top_level_pgt);
 	write_cr3(top_level_pgt);
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 910bf5cf010e..d1ecba457350 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -123,7 +123,7 @@  static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 /* Include code for early handlers */
 #include "../../kernel/sev-shared.c"
 
-static inline bool sev_snp_enabled(void)
+bool sev_snp_enabled(void)
 {
 	unsigned long low, high;
 
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index c73931548346..345740aa5559 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -127,6 +127,9 @@  void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op
 void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
 void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
 void snp_set_wakeup_secondary_cpu(void);
+#ifdef __BOOT_COMPRESSED
+bool sev_snp_enabled(void);
+#endif /* __BOOT_COMPRESSED */
 void sev_snp_cpuid_init(struct boot_params *bp);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
@@ -144,6 +147,9 @@  static inline void snp_set_memory_shared(unsigned long vaddr, unsigned int npage
 static inline void snp_set_memory_private(unsigned long vaddr, unsigned int npages) { }
 static inline void snp_set_wakeup_secondary_cpu(void) { }
 static inline void sev_snp_cpuid_init(struct boot_params *bp) { }
+#ifdef __BOOT_COMPRESSED
+static inline bool sev_snp_enabled(void) { return false; }
+#endif /* __BOOT_COMPRESSED */
 #endif
 
 #endif