[v7,2/4] KVM: async_pf: Add L1 guest async_pf #PF vmexit handler

Message ID 20170713152921.GF3442@potion
State New, archived

Commit Message

Radim Krčmář July 13, 2017, 3:29 p.m. UTC
2017-07-12 23:44+0200, Radim Krčmář:
> 2017-06-28 20:01-0700, Wanpeng Li:
> > From: Wanpeng Li <wanpeng.li@hotmail.com>
> > 
> > This patch adds an L1 guest async page fault (#PF) vmexit handler: such
> > a #PF is converted into a vmexit from L2 to L1, which L1 then handles
> > like an ordinary async page fault.
> > 
> > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > Cc: Radim Krčmář <rkrcmar@redhat.com>
> > Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
> > ---
> 
> This patch breaks SVM, so I've taken the series off kvm/queue for now;

The error is triggered by 'WARN_ON_ONCE(tdp_enabled);', because
pf_interception() handles both cases (NPT and shadow paging).  (The
bizarre part is that it doesn't warn.)
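
For context, here is a trimmed reconstruction of the handler as the v7
patch has it (pieced together from the hunk below; the async_pf
KVM_PV_REASON_PAGE_NOT_PRESENT/PAGE_READY cases are omitted):

int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address)
{
	int r = 1;

	switch (vcpu->arch.apf.host_apf_reason) {
	default:
		/* TDP won't cause page fault directly */
		WARN_ON_ONCE(tdp_enabled);	/* assumption only holds on VMX */
		trace_kvm_page_fault(fault_address, error_code);

		if (kvm_event_needs_reinjection(vcpu))
			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, NULL, 0);
		break;
	/* async_pf cases omitted */
	}

	return r;
}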

I think this hunk on top of the patch would be good.  It makes the
WARN_ON_ONCE specific to VMX and also preserves the parameters that SVM
had before.
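
For reference, the pre-series SVM behavior being preserved looked
roughly like this (a sketch from memory, not the exact pre-series
code): the decoded insn bytes were forwarded to the emulator, and
unprotect was gated on shadow paging:

	/* pf_interception() before this series (sketch) */
	if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
			svm->vmcb->control.insn_bytes,
			svm->vmcb->control.insn_len);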

(Passes basic tests, haven't done the nested async_pf test yet.)

Comments

Wanpeng Li July 14, 2017, 1:40 a.m. UTC | #1
2017-07-13 23:29 GMT+08:00 Radim Krčmář <rkrcmar@redhat.com>:
> 2017-07-12 23:44+0200, Radim Krčmář:
>> 2017-06-28 20:01-0700, Wanpeng Li:
>> > From: Wanpeng Li <wanpeng.li@hotmail.com>
>> >
>> > This patch adds an L1 guest async page fault (#PF) vmexit handler: such
>> > a #PF is converted into a vmexit from L2 to L1, which L1 then handles
>> > like an ordinary async page fault.
>> >
>> > Cc: Paolo Bonzini <pbonzini@redhat.com>
>> > Cc: Radim Krčmář <rkrcmar@redhat.com>
>> > Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
>> > ---
>>
>> This patch breaks SVM, so I've taken the series off kvm/queue for now;
>
> The error is triggered by 'WARN_ON_ONCE(tdp_enabled);', because
> pf_interception() handles both cases (NPT and shadow paging).  (The
> bizarre part is that it doesn't warn.)
>
> I think this hunk on top of the patch would be good.  It makes the

Thanks Radim! The work is really appreciated. :)

> WARN_ON_ONCE specific to VMX and also preserves the parameters that SVM
> had before.

I replaced tdp_enabled with enable_ept in VMX, since there is a
warning: "tdp_enabled" in [kvm-intel.ko] undefined!  Btw, I just sent
out v8; I hope both v8 and the vm86 stuff can catch the end of the
merge window. :)
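
A minimal sketch of that adjustment, assuming v8 only swaps the symbol
(enable_ept is a vmx.c module parameter, while tdp_enabled lives in
kvm.ko and is not exported to kvm-intel.ko):

	/* TDP won't cause page fault directly */
	WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);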

Regards,
Wanpeng Li

>
> (Passes basic tests, haven't done the nested async_pf test yet.)
>
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index f37c0307dcb0..338cb4c8cbb9 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3782,17 +3782,17 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
>  }
>
>  int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
> -                               u64 fault_address)
> +                               u64 fault_address, char *insn, int insn_len,
> +                               bool need_unprotect)
>  {
>         int r = 1;
>
>         switch (vcpu->arch.apf.host_apf_reason) {
>         default:
> -               /* TDP won't cause page fault directly */
> -               WARN_ON_ONCE(tdp_enabled);
>                 trace_kvm_page_fault(fault_address, error_code);
>
> -               if (kvm_event_needs_reinjection(vcpu))
> +               if (need_unprotect && kvm_event_needs_reinjection(vcpu))
>                         kvm_mmu_unprotect_page_virt(vcpu, fault_address);
> -               r = kvm_mmu_page_fault(vcpu, fault_address, error_code, NULL, 0);
> +               r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
> +                               insn_len);
>                 break;
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 270d9adaa039..d7d248a000dd 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -78,7 +78,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
>                              bool accessed_dirty);
>  bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
>  int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
> -                               u64 fault_address);
> +                               u64 fault_address, char *insn, int insn_len,
> +                               bool need_unprotect);
>
>  static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
>  {
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 659b610c4711..fb23497cf915 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -2123,7 +2123,9 @@ static int pf_interception(struct vcpu_svm *svm)
>         u64 fault_address = svm->vmcb->control.exit_info_2;
>         u64 error_code = svm->vmcb->control.exit_info_1;
>
> -       return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address);
> +       return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
> +                       svm->vmcb->control.insn_bytes,
> +                       svm->vmcb->control.insn_len, !npt_enabled);
>  }
>
>  static int db_interception(struct vcpu_svm *svm)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index ab33eace4f66..2e8cfb2f1371 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -5699,7 +5699,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
>
>         if (is_page_fault(intr_info)) {
>                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
> -               return kvm_handle_page_fault(vcpu, error_code, cr2);
> +               /* TDP won't cause page fault directly */
> +               WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && tdp_enabled);
> +               return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0,
> +                               true);
>         }
>
>         ex_no = intr_info & INTR_INFO_VECTOR_MASK;