
[v8,33/40] x86/compressed/64: add identity mapping for Confidential Computing blob

Message ID 20211210154332.11526-34-brijesh.singh@amd.com (mailing list archive)
State New, archived
Series: Add AMD Secure Nested Paging (SEV-SNP) Guest Support

Commit Message

Brijesh Singh Dec. 10, 2021, 3:43 p.m. UTC
From: Michael Roth <michael.roth@amd.com>

The run-time kernel will need to access the Confidential Computing
blob very early in boot in order to locate the CPUID table it points
to. At that stage of boot it will still be relying on the
identity-mapped page table set up by the boot/compressed kernel, so
make sure both the blob and the CPUID table are mapped in advance.

Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/boot/compressed/ident_map_64.c | 26 ++++++++++++++++++++++++-
 arch/x86/boot/compressed/misc.h         |  4 ++++
 arch/x86/boot/compressed/sev.c          |  2 +-
 3 files changed, 30 insertions(+), 2 deletions(-)
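
For reference, the cpuid_phys/cpuid_len fields used by this patch belong to
the Confidential Computing blob structure introduced earlier in this series.
A rough sketch of that layout, for orientation only (field names follow the
earlier patches; treat the exact sizes and ordering shown here as
illustrative rather than authoritative):

	/* Sketch: header handed to the guest via boot_params->cc_blob_address */
	struct cc_blob_sev_info {
		u32 magic;		/* CC_BLOB_SEV_HDR_MAGIC ("AMDE") */
		u16 version;
		u16 reserved;
		u64 secrets_phys;	/* physical address of the SNP secrets page */
		u32 secrets_len;
		u32 rsvd1;
		u64 cpuid_phys;		/* physical address of the CPUID table */
		u32 cpuid_len;		/* mapped below together with the blob itself */
		u32 rsvd2;
	};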

Comments

Dave Hansen Dec. 10, 2021, 7:52 p.m. UTC | #1
On 12/10/21 7:43 AM, Brijesh Singh wrote:
> +static void sev_prep_identity_maps(void)
> +{
> +	/*
> +	 * The ConfidentialComputing blob is used very early in uncompressed
> +	 * kernel to find the in-memory cpuid table to handle cpuid
> +	 * instructions. Make sure an identity-mapping exists so it can be
> +	 * accessed after switchover.
> +	 */
> +	if (sev_snp_enabled()) {
> +		struct cc_blob_sev_info *cc_info =
> +			(void *)(unsigned long)boot_params->cc_blob_address;
> +
> +		add_identity_map((unsigned long)cc_info,
> +				 (unsigned long)cc_info + sizeof(*cc_info));
> +		add_identity_map((unsigned long)cc_info->cpuid_phys,
> +				 (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len);
> +	}

The casting here is pretty ugly.  Also, isn't ->cpuid_phys already a
u64?  What's the idea behind casting it?

I also have a sneaking suspicion that a single "unsigned long cc_blob"
could remove virtually all the casting.  Does this work?

	unsigned long cc_blob = boot_params->cc_blob_address;
	struct cc_blob_sev_info *cc_info;

	add_identity_map(cc_blob, cc_blob + sizeof(*cc_info));

	cc_info = (struct cc_blob_sev_info *)cc_blob;
	add_identity_map(cc_info->cpuid_phys,
			 cc_info->cpuid_phys + cc_info->cpuid_len);
Michael Roth Dec. 13, 2021, 5:54 p.m. UTC | #2
On Fri, Dec 10, 2021 at 11:52:28AM -0800, Dave Hansen wrote:
> On 12/10/21 7:43 AM, Brijesh Singh wrote:
> > +static void sev_prep_identity_maps(void)
> > +{
> > +	/*
> > +	 * The ConfidentialComputing blob is used very early in uncompressed
> > +	 * kernel to find the in-memory cpuid table to handle cpuid
> > +	 * instructions. Make sure an identity-mapping exists so it can be
> > +	 * accessed after switchover.
> > +	 */
> > +	if (sev_snp_enabled()) {
> > +		struct cc_blob_sev_info *cc_info =
> > +			(void *)(unsigned long)boot_params->cc_blob_address;
> > +
> > +		add_identity_map((unsigned long)cc_info,
> > +				 (unsigned long)cc_info + sizeof(*cc_info));
> > +		add_identity_map((unsigned long)cc_info->cpuid_phys,
> > +				 (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len);
> > +	}
> 
> The casting here is pretty ugly.  Also, isn't ->cpuid_phys already a
> u64?  What's the idea behind casting it?
> 
> I also have a sneaking suspicion that a single "unsigned long cc_blob"
> could remove virtually all the casting.  Does this work?
> 
> 	unsigned long cc_blob = boot_params->cc_blob_address;
> 	struct cc_blob_sev_info *cc_info;
> 
> 	add_identity_map(cc_blob, cc_blob + sizeof(*cc_info));
> 
> 	cc_info = (struct cc_blob_sev_info *)cc_blob;
> 	add_identity_map(cc_info->cpuid_phys,
> 			 cc_info->cpuid_phys + cc_info->cpuid_len);

Yes, the cc_info->cpuid_phys cast is not needed, and your suggested
implementation is clearer and compiles/runs without any issues. I'll implement
it this way for the next spin. Thanks!
Borislav Petkov Jan. 25, 2022, 1:48 p.m. UTC | #3
On Fri, Dec 10, 2021 at 09:43:25AM -0600, Brijesh Singh wrote:
> +static void sev_prep_identity_maps(void)
> +{
> +	/*
> +	 * The ConfidentialComputing blob is used very early in uncompressed
> +	 * kernel to find the in-memory cpuid table to handle cpuid
> +	 * instructions. Make sure an identity-mapping exists so it can be
> +	 * accessed after switchover.
> +	 */
> +	if (sev_snp_enabled()) {
> +		struct cc_blob_sev_info *cc_info =
> +			(void *)(unsigned long)boot_params->cc_blob_address;
> +
> +		add_identity_map((unsigned long)cc_info,
> +				 (unsigned long)cc_info + sizeof(*cc_info));
> +		add_identity_map((unsigned long)cc_info->cpuid_phys,
> +				 (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len);
> +	}
> +
> +	sev_verify_cbit(top_level_pgt);
> +}
> +

Also, that function can just as well live in compressed/sev.c and
you can export add_identity_map() instead.

The latter calls kernel_ident_mapping_init(), which is already
exported. add_identity_map() doesn't do anything special, and it is
limited to the decompressor kernel, so nothing stands in the way of
exporting it in a pre-patch and renaming it there to
kernel_add_identity_map() or so...

Thx.
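
A rough sketch of what that rearrangement could look like, combined with the
casting cleanup discussed above. The kernel_add_identity_map() name follows
the comment above; passing top_level_pgt as a parameter is an assumption made
here so that sev_verify_cbit() can still see the decompressor's page table
once the helper moves into sev.c. Illustrative only, not a posted patch:

	/* boot/compressed/misc.h: declare the renamed, no-longer-static helper */
	void kernel_add_identity_map(unsigned long start, unsigned long end);

	/* boot/compressed/sev.c: SNP-specific mapping logic moved out of ident_map_64.c */
	void sev_prep_identity_maps(unsigned long top_level_pgt)
	{
		/*
		 * Map the Confidential Computing blob and the CPUID table it
		 * points to so the uncompressed kernel can still reach them
		 * after the switch to the identity-mapped page table.
		 */
		if (sev_snp_enabled()) {
			unsigned long cc_blob = boot_params->cc_blob_address;
			struct cc_blob_sev_info *cc_info;

			kernel_add_identity_map(cc_blob, cc_blob + sizeof(*cc_info));

			cc_info = (struct cc_blob_sev_info *)cc_blob;
			kernel_add_identity_map(cc_info->cpuid_phys,
						cc_info->cpuid_phys + cc_info->cpuid_len);
		}

		sev_verify_cbit(top_level_pgt);
	}

	/* boot/compressed/ident_map_64.c: caller passes its page table explicitly */
	sev_prep_identity_maps(top_level_pgt);

Passing top_level_pgt explicitly would keep the page-table variable local to
ident_map_64.c after the move.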

Patch

diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index ef77453cc629..2a99b3274ec2 100644
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -37,6 +37,8 @@ 
 #include <asm/setup.h>	/* For COMMAND_LINE_SIZE */
 #undef _SETUP
 
+#include <asm/sev.h> /* For ConfidentialComputing blob */
+
 extern unsigned long get_cmd_line_ptr(void);
 
 /* Used by PAGE_KERN* macros: */
@@ -106,6 +108,27 @@  static void add_identity_map(unsigned long start, unsigned long end)
 		error("Error: kernel_ident_mapping_init() failed\n");
 }
 
+static void sev_prep_identity_maps(void)
+{
+	/*
+	 * The ConfidentialComputing blob is used very early in uncompressed
+	 * kernel to find the in-memory cpuid table to handle cpuid
+	 * instructions. Make sure an identity-mapping exists so it can be
+	 * accessed after switchover.
+	 */
+	if (sev_snp_enabled()) {
+		struct cc_blob_sev_info *cc_info =
+			(void *)(unsigned long)boot_params->cc_blob_address;
+
+		add_identity_map((unsigned long)cc_info,
+				 (unsigned long)cc_info + sizeof(*cc_info));
+		add_identity_map((unsigned long)cc_info->cpuid_phys,
+				 (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len);
+	}
+
+	sev_verify_cbit(top_level_pgt);
+}
+
 /* Locates and clears a region for a new top level page table. */
 void initialize_identity_maps(void *rmode)
 {
@@ -163,8 +186,9 @@  void initialize_identity_maps(void *rmode)
 	cmdline = get_cmd_line_ptr();
 	add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
 
+	sev_prep_identity_maps();
+
 	/* Load the new page-table. */
-	sev_verify_cbit(top_level_pgt);
 	write_cr3(top_level_pgt);
 }
 
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index e9fde1482fbe..4b02bf5c8582 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -127,6 +127,8 @@  void sev_es_shutdown_ghcb(void);
 extern bool sev_es_check_ghcb_fault(unsigned long address);
 void snp_set_page_private(unsigned long paddr);
 void snp_set_page_shared(unsigned long paddr);
+bool sev_snp_enabled(void);
+
 #else
 static inline void sev_enable(struct boot_params *bp) { }
 static inline void sev_es_shutdown_ghcb(void) { }
@@ -136,6 +138,8 @@  static inline bool sev_es_check_ghcb_fault(unsigned long address)
 }
 static inline void snp_set_page_private(unsigned long paddr) { }
 static inline void snp_set_page_shared(unsigned long paddr) { }
+static inline bool sev_snp_enabled(void) { return false; }
+
 #endif
 
 /* acpi.c */
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 29dfb34b5907..c2bf99522e5e 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -120,7 +120,7 @@  static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 /* Include code for early handlers */
 #include "../../kernel/sev-shared.c"
 
-static inline bool sev_snp_enabled(void)
+bool sev_snp_enabled(void)
 {
 	return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
 }