[RFC,11/21] KVM: x86: Export kvm_propagate_fault (as kvm_propagate_page_fault)
diff mbox series

Message ID 20190727055214.9282-12-sean.j.christopherson@intel.com
State New
Headers show
Series
  • x86/sgx: KVM: Add SGX virtualization
Related show

Commit Message

Sean Christopherson July 27, 2019, 5:52 a.m. UTC
Support for SGX Launch Control requires KVM to trap and execute
ENCLS[EINIT] on behalf of the guest.  Interception of ENCLS leafs
occurs immediately after CPL0 checks, i.e. before any processing
of the leaf-specific operands.  As such, it's possible that KVM
could intercept an EINIT from L2 and encounter an EPT fault while
walking L1's EPT tables.  Rather than force EINIT through the
generic emulator, export kvm_propagate_fault() so that the EINIT
handler can inject the proper page fault.  Rename the function to
kvm_propagate_page_fault() to clarify that it is only for page
faults, and WARN if it is invoked with an exception vector other
than PF_VECTOR.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/x86.c              | 7 +++++--
 2 files changed, 6 insertions(+), 2 deletions(-)

Patch
diff mbox series

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1341d8390ebe..397d755bb353 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1357,6 +1357,7 @@  void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
+bool kvm_propagate_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t gfn, void *data, int offset, int len,
 			    u32 access);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b64bb854571..ec92c5534336 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -587,8 +587,10 @@  void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 }
 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
 
-static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
+bool kvm_propagate_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
+	WARN_ON(fault->vector != PF_VECTOR);
+
 	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
@@ -596,6 +598,7 @@  static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fau
 
 	return fault->nested_page_fault;
 }
+EXPORT_SYMBOL_GPL(kvm_propagate_page_fault);
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
@@ -6089,7 +6092,7 @@  static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	if (ctxt->exception.vector == PF_VECTOR)
-		return kvm_propagate_fault(vcpu, &ctxt->exception);
+		return kvm_propagate_page_fault(vcpu, &ctxt->exception);
 
 	if (ctxt->exception.error_code_valid)
 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,