@@ -1265,6 +1265,8 @@ struct kvm_x86_ops {
int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
void (*enable_smi_window)(struct kvm_vcpu *vcpu);
+ unsigned long (*mask_cr3_rsvd_bits)(struct kvm_vcpu *vcpu,
+ unsigned long cr3);

int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
@@ -4070,6 +4070,11 @@ static void enable_smi_window(struct kvm_vcpu *vcpu)
}
}

+static unsigned long svm_mask_cr3_rsvd_bits(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+ return sev_guest(vcpu->kvm) ? (cr3 & ~sme_me_mask) : cr3;
+}
+
static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
{
bool smep, smap, is_user;
@@ -4285,6 +4290,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.pre_enter_smm = svm_pre_enter_smm,
.pre_leave_smm = svm_pre_leave_smm,
.enable_smi_window = enable_smi_window,
+ .mask_cr3_rsvd_bits = svm_mask_cr3_rsvd_bits,

.mem_enc_op = svm_mem_enc_op,
.mem_enc_reg_region = svm_register_enc_region,
@@ -7556,6 +7556,11 @@ static void enable_smi_window(struct kvm_vcpu *vcpu)
/* RSM will cause a vmexit anyway. */
}

+static unsigned long vmx_mask_cr3_rsvd_bits(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+ return cr3;
+}
+
static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
return to_vmx(vcpu)->nested.vmxon;
@@ -7709,6 +7714,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.pre_enter_smm = vmx_pre_enter_smm,
.pre_leave_smm = vmx_pre_leave_smm,
.enable_smi_window = enable_smi_window,
+ .mask_cr3_rsvd_bits = vmx_mask_cr3_rsvd_bits,

.can_emulate_instruction = vmx_can_emulate_instruction,
.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
@@ -1042,7 +1042,8 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
}
if (is_long_mode(vcpu) &&
- (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
+ (kvm_x86_ops.mask_cr3_rsvd_bits(vcpu, cr3) &
+ rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
return 1;
else if (is_pae_paging(vcpu) &&
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
SEV guests fail to boot on a system that supports the PCID feature.

While emulating the RSM instruction, KVM reads the guest CR3 and calls
kvm_set_cr3(). If the vCPU is in long mode, kvm_set_cr3() performs a
sanity check on the CR3 value: it verifies that none of the reserved
bits, the range 63:cpuid_maxphyaddr(), are set. When AMD memory
encryption is enabled, the memory encryption bit is set in the CR3
value. That bit may fall within the reserved bit range, so the check
fails and the emulation fails.

Introduce a generic callback that can mask bits in the CR3 value before
it is checked by kvm_set_cr3(). The SVM implementation masks off the
memory encryption bit for SEV guests; the VMX implementation returns
the value unchanged.

Fixes: a780a3ea628268b2 ("KVM: X86: Fix reserved bits check for MOV to CR3")
Signed-off-by: Babu Moger <babu.moger@amd.com>
---
 arch/x86/include/asm/kvm_host.h | 2 ++
 arch/x86/kvm/svm/svm.c          | 6 ++++++
 arch/x86/kvm/vmx/vmx.c          | 6 ++++++
 arch/x86/kvm/x86.c              | 3 ++-
 4 files changed, 16 insertions(+), 1 deletion(-)
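To make the failure mode concrete, here is a small standalone sketch
(not kernel code) that mirrors the rsvd_bits() arithmetic used by
kvm_set_cr3(). The MAXPHYADDR and C-bit values below are illustrative
assumptions only; a real SEV guest learns its C-bit position from
CPUID 0x8000001F[EBX] bits 5:0.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the kernel's rsvd_bits(s, e): bits s..e set, inclusive. */
static uint64_t rsvd_bits(int s, int e)
{
        return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
        const int maxphyaddr = 43;              /* example guest MAXPHYADDR */
        const uint64_t c_bit = 1ULL << 47;      /* example C-bit position   */
        uint64_t cr3 = 0x12345000ULL | c_bit;   /* PGD address, C-bit set   */
        uint64_t rsvd = rsvd_bits(maxphyaddr, 63);

        /* Unmasked: the C-bit lands inside 63:maxphyaddr and trips the check. */
        printf("unmasked: %s\n", (cr3 & rsvd) ? "reserved bits set" : "ok");

        /* Masked first, as the new callback does for SEV: the check passes. */
        printf("masked:   %s\n", ((cr3 & ~c_bit) & rsvd) ? "reserved bits set" : "ok");
        return 0;
}

Doing the masking in a vendor callback keeps the knowledge of which bit
is the encryption bit out of common x86 code; VMX, which has no such
bit, simply returns the CR3 value unchanged.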