@@ -163,8 +163,9 @@ void initialize_identity_maps(void *rmode)
cmdline = get_cmd_line_ptr();
kernel_add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
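+ /* Set up the identity mappings needed by SEV-SNP and verify the C-bit. */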
+ sev_prep_identity_maps(top_level_pgt);
+
/* Load the new page-table. */
- sev_verify_cbit(top_level_pgt);
write_cr3(top_level_pgt);
}
@@ -127,6 +127,7 @@ void sev_es_shutdown_ghcb(void);
extern bool sev_es_check_ghcb_fault(unsigned long address);
void snp_set_page_private(unsigned long paddr);
void snp_set_page_shared(unsigned long paddr);
+void sev_prep_identity_maps(unsigned long top_level_pgt);
#else
static inline void sev_enable(struct boot_params *bp) { }
static inline void sev_es_shutdown_ghcb(void) { }
@@ -136,6 +137,7 @@ static inline bool sev_es_check_ghcb_fault(unsigned long address)
}
static inline void snp_set_page_private(unsigned long paddr) { }
static inline void snp_set_page_shared(unsigned long paddr) { }
+static inline void sev_prep_identity_maps(unsigned long top_level_pgt) { }
#endif
/* acpi.c */
@@ -496,3 +496,25 @@ bool snp_init(struct boot_params *bp)
*/
return true;
}
+
+void sev_prep_identity_maps(unsigned long top_level_pgt)
+{
+ /*
+ * The Confidential Computing blob is used very early in the uncompressed
+ * kernel to find the in-memory CPUID table used to handle CPUID
+ * instructions. Make sure an identity mapping exists so it can be
+ * accessed after the switchover.
+ */
+ if (sev_snp_enabled()) {
+ unsigned long cc_info_pa = boot_params->cc_blob_address;
+ struct cc_blob_sev_info *cc_info;
+
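+ /* Identity-map the Confidential Computing blob structure itself. */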
+ kernel_add_identity_map(cc_info_pa, cc_info_pa + sizeof(*cc_info));
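+
+ /* Identity-map the CPUID table that the blob points to. */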
+ cc_info = (struct cc_blob_sev_info *)cc_info_pa;
+ kernel_add_identity_map((unsigned long)cc_info->cpuid_phys,
+ (unsigned long)cc_info->cpuid_phys + cc_info->cpuid_len);
+ }
+
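+ /* Check the C-bit position before the caller loads the new page table. */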
+ sev_verify_cbit(top_level_pgt);
+}