@@ -361,7 +361,7 @@ void snp_cpuid_init_boot(struct boot_params *bp)
if (!cc_info)
return;
- snp_cpuid_info_create(cc_info);
+ snp_cpuid_info_create(cc_info, 0);
/* SEV-SNP CPUID table is set up now. Do some sanity checks. */
if (!snp_cpuid_active())
@@ -50,7 +50,7 @@ extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp);
extern unsigned long __startup_secondary_64(void);
-extern void startup_64_setup_env(unsigned long physbase);
+extern void startup_64_setup_env(unsigned long physbase, struct boot_params *bp);
extern void early_setup_idt(void);
extern void __init do_early_exception(struct pt_regs *regs, int trapnr);
@@ -127,17 +127,8 @@ void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op
void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
void snp_set_wakeup_secondary_cpu(void);
-/*
- * TODO: These are exported only temporarily while boot/compressed/sev.c is
- * the only user. This is to avoid unused function warnings for kernel/sev.c
- * during the build of kernel proper.
- *
- * Once the code is added to consume these in kernel proper these functions
- * can be moved back to being statically-scoped to units that pull in
- * sev-shared.c via #include and these declarations can be dropped.
- */
-void __init snp_cpuid_info_create(const struct cc_blob_sev_info *cc_info);
-struct cc_blob_sev_info *snp_find_cc_blob_setup_data(struct boot_params *bp);
+void snp_cpuid_init_startup(struct boot_params *bp, unsigned long physbase);
+void snp_cpuid_init(void);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
@@ -153,8 +144,8 @@ static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz,
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned int npages) { }
static inline void snp_set_memory_private(unsigned long vaddr, unsigned int npages) { }
static inline void snp_set_wakeup_secondary_cpu(void) { }
-void snp_cpuid_info_create(const struct cc_blob_sev_info *cc_info) { }
-struct cc_blob_sev_info *snp_find_cc_blob_setup_data(struct boot_params *bp) { }
+static inline void snp_cpuid_init_startup(struct boot_params *bp, unsigned long physbase) { }
+static inline void snp_cpuid_init(void) { }
#endif
#endif
@@ -571,7 +571,7 @@ static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
}
/* This runs while still in the direct mapping */
-static void startup_64_load_idt(unsigned long physbase)
+static void startup_64_load_idt(unsigned long physbase, struct boot_params *bp)
{
struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);
@@ -587,6 +587,9 @@ static void startup_64_load_idt(unsigned long physbase)
desc->address = (unsigned long)idt;
native_load_idt(desc);
+
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
+ snp_cpuid_init_startup(bp, physbase);
}
/* This is used when running on kernel addresses */
@@ -598,12 +601,15 @@ void early_setup_idt(void)
bringup_idt_descr.address = (unsigned long)bringup_idt_table;
native_load_idt(&bringup_idt_descr);
+
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
+ snp_cpuid_init();
}
/*
* Setup boot CPU state needed before kernel switches to virtual addresses.
*/
-void __head startup_64_setup_env(unsigned long physbase)
+void __head startup_64_setup_env(unsigned long physbase, struct boot_params *bp)
{
/* Load GDT */
startup_gdt_descr.address = (unsigned long)fixup_pointer(startup_gdt, physbase);
@@ -614,5 +620,5 @@ void __head startup_64_setup_env(unsigned long physbase)
"movl %%eax, %%ss\n"
"movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
- startup_64_load_idt(physbase);
+ startup_64_load_idt(physbase, bp);
}
@@ -976,7 +976,7 @@ static struct cc_setup_data *get_cc_setup_data(struct boot_params *bp)
* Search for a Confidential Computing blob passed in as a setup_data entry
* via the Linux Boot Protocol.
*/
-struct cc_blob_sev_info *
+static struct cc_blob_sev_info *
snp_find_cc_blob_setup_data(struct boot_params *bp)
{
struct cc_setup_data *sd;
@@ -988,6 +988,22 @@ snp_find_cc_blob_setup_data(struct boot_params *bp)
return (struct cc_blob_sev_info *)(unsigned long)sd->cc_blob_address;
}
+static const struct snp_cpuid_info *
+snp_cpuid_info_get_ptr(unsigned long physbase)
+{
+ void *ptr = &cpuid_info_copy;
+
+ /* physbase is only 0 when the caller doesn't need adjustments */
+ if (!physbase)
+ return ptr;
+
+ /*
+ * Handle relocation adjustments for global pointers, as done by
+ * fixup_pointer() in __startup_64().
+ */
+ return ptr - (void *)_text + (void *)physbase;
+}
+
/*
* Initialize the kernel's copy of the SEV-SNP CPUID table, and set up the
* pointer that will be used to access it.
@@ -997,7 +1013,8 @@ snp_find_cc_blob_setup_data(struct boot_params *bp)
* mapping needs to be updated in sync with all the changes to virtual memory
* layout and related mapping facilities throughout the boot process.
*/
-void __init snp_cpuid_info_create(const struct cc_blob_sev_info *cc_info)
+static void __init snp_cpuid_info_create(const struct cc_blob_sev_info *cc_info,
+ unsigned long physbase)
{
const struct snp_cpuid_info *cpuid_info_fw;
@@ -1008,7 +1025,7 @@ void __init snp_cpuid_info_create(const struct cc_blob_sev_info *cc_info)
if (!cpuid_info_fw->count || cpuid_info_fw->count > SNP_CPUID_COUNT_MAX)
sev_es_terminate(1, GHCB_TERM_CPUID);
- cpuid_info = &cpuid_info_copy;
+ cpuid_info = snp_cpuid_info_get_ptr(physbase);
memcpy((void *)cpuid_info, cpuid_info_fw, sizeof(*cpuid_info));
snp_cpuid_set_ranges();
}
@@ -1986,3 +1986,138 @@ bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
while (true)
halt();
}
+
+/*
+ * Initial setup of the SEV-SNP CPUID table relies on information provided
+ * by the Confidential Computing blob, which can be passed to the kernel
+ * in the following ways, depending on how it is booted:
+ *
+ * - when booted via the boot/decompress kernel:
+ * - via boot_params
+ *
+ * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
+ * - via a setup_data entry, as defined by the Linux Boot Protocol
+ *
+ * Scan for the blob in that order.
+ */
+static struct cc_blob_sev_info *snp_find_cc_blob(struct boot_params *bp)
+{
+ struct cc_blob_sev_info *cc_info;
+
+ /* Boot kernel would have passed the CC blob via boot_params. */
+ if (bp->cc_blob_address) {
+ cc_info = (struct cc_blob_sev_info *)
+ (unsigned long)bp->cc_blob_address;
+ goto found_cc_info;
+ }
+
+ /*
+ * If kernel was booted directly, without the use of the
+ * boot/decompression kernel, the CC blob may have been passed via
+ * setup_data instead.
+ */
+ cc_info = snp_find_cc_blob_setup_data(bp);
+ if (!cc_info)
+ return NULL;
+
+found_cc_info:
+ if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
+ sev_es_terminate(1, GHCB_SNP_UNSUPPORTED);
+
+ return cc_info;
+}
+
+/*
+ * Initial setup of the SEV-SNP CPUID table during early startup, while still
+ * using identity-mapped addresses.
+ *
+ * Since this is during early startup, physbase is needed to generate the
+ * correct pointer to the initialized CPUID table. This pointer will be
+ * adjusted again later via snp_cpuid_init() after the kernel switches over
+ * to virtual addresses and pointer fixups are no longer needed.
+ */
+void __init snp_cpuid_init_startup(struct boot_params *bp,
+ unsigned long physbase)
+{
+ struct cc_blob_sev_info *cc_info;
+ u32 eax;
+
+ if (!bp)
+ return;
+
+ cc_info = snp_find_cc_blob(bp);
+ if (!cc_info)
+ return;
+
+ snp_cpuid_info_create(cc_info, physbase);
+
+ /* SEV-SNP CPUID table is set up now. Do some sanity checks. */
+ if (!snp_cpuid_active())
+ sev_es_terminate(1, GHCB_TERM_CPUID);
+
+ /* SEV (bit 1) and SEV-SNP (bit 4) should be enabled in CPUID. */
+ eax = native_cpuid_eax(0x8000001f);
+ if ((eax & (BIT(4) | BIT(1))) != (BIT(4) | BIT(1)))
+ sev_es_terminate(1, GHCB_TERM_CPUID);
+
+ /* #VC generated by CPUID above will set sev_status based on SEV MSR. */
+ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+ sev_es_terminate(1, GHCB_TERM_CPUID);
+
+ /*
+ * The CC blob will be used later to access the secrets page. Cache
+ * it here like the boot kernel does.
+ */
+ bp->cc_blob_address = (u32)(unsigned long)cc_info;
+}
+
+/*
+ * This is called after the kernel switches over to virtual addresses. Fixup
+ * offsets are no longer needed at this point, so update the CPUID table
+ * pointer accordingly.
+ */
+void snp_cpuid_init(void)
+{
+ if (!cc_platform_has(CC_ATTR_SEV_SNP)) {
+ /* The CPUID table should not be in use when SEV-SNP is not enabled. */
+ if (snp_cpuid_active())
+ panic("Invalid use of SEV-SNP CPUID table.");
+ return;
+ }
+
+ /* CPUID table should always be available when SEV-SNP is enabled. */
+ if (!snp_cpuid_active())
+ sev_es_terminate(1, GHCB_TERM_CPUID);
+
+ /* Remove the fixup offset from the cpuid_info pointer. */
+ cpuid_info = snp_cpuid_info_get_ptr(0);
+}
+
+/*
+ * It is useful from an auditing/testing perspective to provide an easy way
+ * for the guest owner to know that the CPUID table has been initialized as
+ * expected, but that initialization happens too early in boot to print any
+ * sort of indicator, and there's not really any other good place to do it. So
+ * do it here, and while at it, go ahead and re-verify that nothing strange has
+ * happened between early boot and now.
+ */
+static int __init snp_cpuid_check_status(void)
+{
+ if (!cc_platform_has(CC_ATTR_SEV_SNP)) {
+ /* The CPUID table should not be in use when SEV-SNP is not enabled. */
+ if (snp_cpuid_active())
+ panic("Invalid use of SEV-SNP CPUID table.");
+ return 0;
+ }
+
+ /* CPUID table should always be available when SEV-SNP is enabled. */
+ if (!snp_cpuid_active())
+ sev_es_terminate(1, GHCB_TERM_CPUID);
+
+ pr_info("Using SEV-SNP CPUID table, %d entries present.\n",
+ cpuid_info->count);
+
+ return 0;
+}
+
+arch_initcall(snp_cpuid_check_status);