Message ID | 20211210154332.11526-34-brijesh.singh@amd.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Add AMD Secure Nested Paging (SEV-SNP) Guest Support | expand |
On 12/10/21 7:43 AM, Brijesh Singh wrote: > +static void sev_prep_identity_maps(void) > +{ > + /* > + * The ConfidentialComputing blob is used very early in uncompressed > + * kernel to find the in-memory cpuid table to handle cpuid > + * instructions. Make sure an identity-mapping exists so it can be > + * accessed after switchover. > + */ > + if (sev_snp_enabled()) { > + struct cc_blob_sev_info *cc_info = > + (void *)(unsigned long)boot_params->cc_blob_address; > + > + add_identity_map((unsigned long)cc_info, > + (unsigned long)cc_info + sizeof(*cc_info)); > + add_identity_map((unsigned long)cc_info->cpuid_phys, > + (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len); > + } The casting here is pretty ugly. Also, isn't ->cpuid_phys already a u64? What's the idea behind casting it? I also have a sneaking suspicion that a single "unsigned long cc_blob" could remove virtually all the casting. Does this work? unsigned long cc_blob = boot_params->cc_blob_address; struct cc_blob_sev_info *cc_info; add_identity_map(cc_blob, cc_blob + sizeof(*cc_info)); cc_info = (struct cc_blob_sev_info *)cc_blob; add_identity_map(cc_info->cpuid_phys, cc_info->cpuid_phys + cc_info->cpuid_len);
On Fri, Dec 10, 2021 at 11:52:28AM -0800, Dave Hansen wrote: > On 12/10/21 7:43 AM, Brijesh Singh wrote: > > +static void sev_prep_identity_maps(void) > > +{ > > + /* > > + * The ConfidentialComputing blob is used very early in uncompressed > > + * kernel to find the in-memory cpuid table to handle cpuid > > + * instructions. Make sure an identity-mapping exists so it can be > > + * accessed after switchover. > > + */ > > + if (sev_snp_enabled()) { > > + struct cc_blob_sev_info *cc_info = > > + (void *)(unsigned long)boot_params->cc_blob_address; > > + > > + add_identity_map((unsigned long)cc_info, > > + (unsigned long)cc_info + sizeof(*cc_info)); > > + add_identity_map((unsigned long)cc_info->cpuid_phys, > > + (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len); > > + } > > The casting here is pretty ugly. Also, isn't ->cpuid_phys already a > u64? What's the idea behind casting it? > > I also have a sneaking suspicion that a single "unsigned long cc_blob" > could remove virtually all the casting. Does this work? > > unsigned long cc_blob = boot_params->cc_blob_address; > struct cc_blob_sev_info *cc_info; > > add_identity_map(cc_blob, cc_blob + sizeof(*cc_info)); > > cc_info = (struct cc_blob_sev_info *)cc_blob; > add_identity_map(cc_info->cpuid_phys, > cc_info->cpuid_phys + cc_info->cpuid_len); Yes, the cc_info->cpuid_phys cast is not needed, and your suggested implementation is clearer and compiles/runs without any issues. I'll implement it this way for the next spin. Thanks!
On Fri, Dec 10, 2021 at 09:43:25AM -0600, Brijesh Singh wrote: > +static void sev_prep_identity_maps(void) > +{ > + /* > + * The ConfidentialComputing blob is used very early in uncompressed > + * kernel to find the in-memory cpuid table to handle cpuid > + * instructions. Make sure an identity-mapping exists so it can be > + * accessed after switchover. > + */ > + if (sev_snp_enabled()) { > + struct cc_blob_sev_info *cc_info = > + (void *)(unsigned long)boot_params->cc_blob_address; > + > + add_identity_map((unsigned long)cc_info, > + (unsigned long)cc_info + sizeof(*cc_info)); > + add_identity_map((unsigned long)cc_info->cpuid_phys, > + (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len); > + } > + > + sev_verify_cbit(top_level_pgt); > +} > + Also, that function can just as well live in compressed/sev.c and you can export add_identity_map() instead. That latter function calls kernel_ident_mapping_init() which is already exported. add_identity_map() doesn't do anything special and it is limited to the decompressor kernel so nothing stands in the way of exporting it in a pre-patch and renaming it there to kernel_add_identity_map() or so... Thx.
diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c index ef77453cc629..2a99b3274ec2 100644 --- a/arch/x86/boot/compressed/ident_map_64.c +++ b/arch/x86/boot/compressed/ident_map_64.c @@ -37,6 +37,8 @@ #include <asm/setup.h> /* For COMMAND_LINE_SIZE */ #undef _SETUP +#include <asm/sev.h> /* For ConfidentialComputing blob */ + extern unsigned long get_cmd_line_ptr(void); /* Used by PAGE_KERN* macros: */ @@ -106,6 +108,27 @@ static void add_identity_map(unsigned long start, unsigned long end) error("Error: kernel_ident_mapping_init() failed\n"); } +static void sev_prep_identity_maps(void) +{ + /* + * The ConfidentialComputing blob is used very early in uncompressed + * kernel to find the in-memory cpuid table to handle cpuid + * instructions. Make sure an identity-mapping exists so it can be + * accessed after switchover. + */ + if (sev_snp_enabled()) { + struct cc_blob_sev_info *cc_info = + (void *)(unsigned long)boot_params->cc_blob_address; + + add_identity_map((unsigned long)cc_info, + (unsigned long)cc_info + sizeof(*cc_info)); + add_identity_map((unsigned long)cc_info->cpuid_phys, + (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len); + } + + sev_verify_cbit(top_level_pgt); +} + /* Locates and clears a region for a new top level page table. */ void initialize_identity_maps(void *rmode) { @@ -163,8 +186,9 @@ void initialize_identity_maps(void *rmode) cmdline = get_cmd_line_ptr(); add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE); + sev_prep_identity_maps(); + /* Load the new page-table. 
*/ - sev_verify_cbit(top_level_pgt); write_cr3(top_level_pgt); } diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index e9fde1482fbe..4b02bf5c8582 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -127,6 +127,8 @@ void sev_es_shutdown_ghcb(void); extern bool sev_es_check_ghcb_fault(unsigned long address); void snp_set_page_private(unsigned long paddr); void snp_set_page_shared(unsigned long paddr); +bool sev_snp_enabled(void); + #else static inline void sev_enable(struct boot_params *bp) { } static inline void sev_es_shutdown_ghcb(void) { } @@ -136,6 +138,8 @@ static inline bool sev_es_check_ghcb_fault(unsigned long address) } static inline void snp_set_page_private(unsigned long paddr) { } static inline void snp_set_page_shared(unsigned long paddr) { } +static inline bool sev_snp_enabled(void) { return false; } + #endif /* acpi.c */ diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c index 29dfb34b5907..c2bf99522e5e 100644 --- a/arch/x86/boot/compressed/sev.c +++ b/arch/x86/boot/compressed/sev.c @@ -120,7 +120,7 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, /* Include code for early handlers */ #include "../../kernel/sev-shared.c" -static inline bool sev_snp_enabled(void) +bool sev_snp_enabled(void) { return sev_status & MSR_AMD64_SEV_SNP_ENABLED; }