Message ID | 20180828160459.14093-3-sean.j.christopherson@intel.com (mailing list archive)
---|---
State | New, archived
Series | KVM: nVMX: add option to perform early consistency checks via H/W
On Tue, Aug 28, 2018 at 9:04 AM, Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
> An invalid EPTP causes a VMFail(VMXERR_ENTRY_INVALID_CONTROL_FIELD),
> not a VMExit.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

Reviewed-by: Jim Mattson <jmattson@google.com>
On 08/28/2018 09:04 AM, Sean Christopherson wrote:
> An invalid EPTP causes a VMFail(VMXERR_ENTRY_INVALID_CONTROL_FIELD),
> not a VMExit.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/x86/kvm/vmx.c | 21 +++++++++------------
>  1 file changed, 9 insertions(+), 12 deletions(-)
>
> [...]

Reviewed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
```diff
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b217614de7ac..6451e63847d9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11277,11 +11277,9 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
 	return get_vmcs12(vcpu)->ept_pointer;
 }
 
-static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 {
 	WARN_ON(mmu_is_nested(vcpu));
-	if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu)))
-		return 1;
 
 	kvm_init_shadow_ept_mmu(vcpu,
 			to_vmx(vcpu)->nested.msrs.ept_caps &
@@ -11293,7 +11291,6 @@ static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
 
 	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
-	return 0;
 }
 
 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
@@ -12243,15 +12240,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
 	}
 
-	if (nested_cpu_has_ept(vmcs12)) {
-		if (nested_ept_init_mmu_context(vcpu)) {
-			*entry_failure_code = ENTRY_FAIL_DEFAULT;
-			return 1;
-		}
-	} else if (nested_cpu_has2(vmcs12,
-				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+	if (nested_cpu_has_ept(vmcs12))
+		nested_ept_init_mmu_context(vcpu);
+	else if (nested_cpu_has2(vmcs12,
+				 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
 		vmx_flush_tlb(vcpu, true);
-	}
 
 	/*
 	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
@@ -12458,6 +12451,10 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		}
 	}
 
+	if (nested_cpu_has_ept(vmcs12) &&
+	    !valid_ept_address(vcpu, vmcs12->ept_pointer))
+		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
 	return 0;
 }
```
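Moving the EPTP check into check_vmentry_prereqs() means it is evaluated alongside the other control-field checks, before KVM commits to the nested entry, which is what allows the failure to be reported as a VMfail rather than a failed VM-entry. As a rough illustration of the L1-visible difference (not KVM or kvm-unit-tests code; the helper and example values below are made up, only the RFLAGS bits and the architectural error number are real), a minimal sketch:

```c
/*
 * Sketch of the L1-visible outcomes after VMLAUNCH/VMRESUME: a bad EPTP
 * produces VMfail(valid) -- ZF set and the VM-instruction error field
 * populated -- rather than a VM-exit.  classify_entry_result() is a
 * hypothetical helper that just interprets values an L1 hypervisor
 * would read after attempting the entry.
 */
#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_CF				(1u << 0)
#define X86_EFLAGS_ZF				(1u << 6)
#define VMXERR_ENTRY_INVALID_CONTROL_FIELD	7	/* architectural error number */

/* Classify what L1 observes from RFLAGS and the VM-instruction error field. */
static const char *classify_entry_result(uint64_t rflags, uint32_t vm_insn_error)
{
	if (rflags & X86_EFLAGS_CF)
		return "VMfail(invalid): no current VMCS";
	if (rflags & X86_EFLAGS_ZF) {
		/* The entry never started; the error field explains why. */
		return vm_insn_error == VMXERR_ENTRY_INVALID_CONTROL_FIELD
			? "VMfail(valid): invalid control field (expected for a bad EPTP)"
			: "VMfail(valid): some other control/host-state check failed";
	}
	return "entry proceeded (a later failure would surface as a VM-exit)";
}

int main(void)
{
	/* Example values: ZF set, error field = 7, as expected for a bad EPTP. */
	puts(classify_entry_result(X86_EFLAGS_ZF, VMXERR_ENTRY_INVALID_CONTROL_FIELD));
	return 0;
}
```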
An invalid EPTP causes a VMFail(VMXERR_ENTRY_INVALID_CONTROL_FIELD),
not a VMExit.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)
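For context, the consistency check itself amounts to validating the SDM-defined EPTP field layout. The standalone sketch below approximates the kind of checks a valid_ept_address()-style helper performs; the struct, macro, and function names are illustrative stand-ins, not KVM's actual definitions, and the capability flags are a simplified model of the host's reported EPT capabilities.

```c
/*
 * Illustrative EPTP validity check, following the SDM-defined EPTP
 * layout: bits 2:0 memory type, bits 5:3 page-walk length minus 1,
 * bit 6 accessed/dirty enable, bits 11:7 reserved (in this era),
 * bits above MAXPHYADDR reserved.  Names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define EPTP_MT_MASK     0x7ull          /* bits 2:0 - memory type */
#define EPTP_MT_UC       0x0ull
#define EPTP_MT_WB       0x6ull
#define EPTP_PWL_MASK    0x38ull         /* bits 5:3 - page-walk length - 1 */
#define EPTP_PWL_4       (3ull << 3)     /* 4-level EPT */
#define EPTP_AD_ENABLE   (1ull << 6)     /* accessed/dirty flags */
#define EPTP_RSVD_MASK   (0x1full << 7)  /* bits 11:7 reserved here */

struct ept_caps {
	bool uc;       /* UC paging-structure memory type supported */
	bool wb;       /* WB paging-structure memory type supported */
	bool ad_bits;  /* accessed/dirty flags supported */
};

static bool eptp_is_valid(uint64_t eptp, const struct ept_caps *caps,
			  unsigned int maxphyaddr)
{
	uint64_t mt = eptp & EPTP_MT_MASK;

	/* Memory type must be UC or WB, and supported by the CPU. */
	if (!((mt == EPTP_MT_UC && caps->uc) || (mt == EPTP_MT_WB && caps->wb)))
		return false;

	/* Only a 4-level page walk is accepted in this sketch. */
	if ((eptp & EPTP_PWL_MASK) != EPTP_PWL_4)
		return false;

	/* Reserved bits and bits at or above MAXPHYADDR must be zero. */
	if ((eptp & EPTP_RSVD_MASK) || (eptp >> maxphyaddr))
		return false;

	/* A/D enable is only legal if the CPU advertises support. */
	if ((eptp & EPTP_AD_ENABLE) && !caps->ad_bits)
		return false;

	return true;
}
```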