@@ -12,6 +12,7 @@
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>
+#include <asm/cpufeatures.h>
.text
.code32
@@ -31,7 +32,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
movl $0x80000000, %eax /* CPUID to check the highest leaf */
cpuid
- cmpl $0x8000001f, %eax /* See if 0x8000001f is available */
+ cmpl $CPUID_AMD_SME, %eax /* See if 0x8000001f is available */
jb .Lno_sev
/*
@@ -40,7 +41,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
* CPUID Fn8000_001F[EBX] - Bits 5:0
* Pagetable bit position used to indicate encryption
*/
- movl $0x8000001f, %eax
+ movl $CPUID_AMD_SME, %eax
cpuid
bt $1, %eax /* Check if SEV is available */
jnc .Lno_sev
@@ -10,6 +10,11 @@
#include <asm/disabled-features.h>
#endif
+/*
+ * AMD CPUID functions
+ */
+#define CPUID_AMD_SME 0x8000001f /* Secure Memory Encryption */
+
/*
* Defines x86 CPU feature bits
*/
@@ -630,7 +630,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
* will be a value above 32-bits this is still done for
* CONFIG_X86_32 so that accurate values are reported.
*/
- c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
+ c->x86_phys_bits -= (cpuid_ebx(CPUID_AMD_SME) >> 6) & 0x3f;
if (IS_ENABLED(CONFIG_X86_32))
goto clear_all;
@@ -39,8 +39,8 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
- { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
- { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
+ { X86_FEATURE_SME, CPUID_EAX, 0, CPUID_AMD_SME, 0 },
+ { X86_FEATURE_SEV, CPUID_EAX, 1, CPUID_AMD_SME, 0 },
{ 0, 0, 0, 0, 0 }
};
@@ -756,7 +756,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->edx = 0;
break;
case 0x80000000:
- entry->eax = min(entry->eax, 0x8000001f);
+ entry->eax = min(entry->eax, CPUID_AMD_SME);
break;
case 0x80000001:
cpuid_entry_override(entry, CPUID_8000_0001_EDX);
@@ -749,7 +749,7 @@ static __init void svm_adjust_mmio_mask(void)
u64 msr, mask;
/* If there is no memory encryption support, use existing mask */
- if (cpuid_eax(0x80000000) < 0x8000001f)
+ if (cpuid_eax(0x80000000) < CPUID_AMD_SME)
return;
/* If memory encryption is not enabled, use existing mask */
@@ -757,7 +757,7 @@ static __init void svm_adjust_mmio_mask(void)
if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
return;
- enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
+ enc_bit = cpuid_ebx(CPUID_AMD_SME) & 0x3f;
mask_bit = boot_cpu_data.x86_phys_bits;
/* Increment the mask bit if it is the same as the encryption bit */
@@ -498,7 +498,7 @@ void __init sme_enable(struct boot_params *bp)
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
- if (eax < 0x8000001f)
+ if (eax < CPUID_AMD_SME)
return;
#define AMD_SME_BIT BIT(0)
@@ -520,7 +520,7 @@ void __init sme_enable(struct boot_params *bp)
* CPUID Fn8000_001F[EBX]
* - Bits 5:0 - Pagetable bit position used to indicate encryption
*/
- eax = 0x8000001f;
+ eax = CPUID_AMD_SME;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (!(eax & feature_mask))
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/boot/compressed/mem_encrypt.S | 5 +++--
 arch/x86/include/asm/cpufeatures.h     | 5 +++++
 arch/x86/kernel/cpu/amd.c              | 2 +-
 arch/x86/kernel/cpu/scattered.c        | 4 ++--
 arch/x86/kvm/cpuid.c                   | 2 +-
 arch/x86/kvm/svm/svm.c                 | 4 ++--
 arch/x86/mm/mem_encrypt_identity.c     | 4 ++--
 7 files changed, 16 insertions(+), 10 deletions(-)