
[v2,04/10] KVM: x86/mmu: Handle error PFNs in kvm_faultin_pfn()

Message ID 20220826231227.4096391-5-dmatlack@google.com
State New, archived
Series KVM: x86/mmu: Make tdp_mmu read-only and clean up TDP MMU fault handler

Commit Message

David Matlack Aug. 26, 2022, 11:12 p.m. UTC
Handle error PFNs in kvm_faultin_pfn() rather than relying on the caller
to invoke handle_abnormal_pfn() after kvm_faultin_pfn().
Opportunistically rename kvm_handle_bad_page() to kvm_handle_error_pfn()
to make it more consistent with is_error_pfn().

This commit moves KVM closer to being able to drop
handle_abnormal_pfn(), which will reduce the amount of duplicate code in
the various page fault handlers.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
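
[Editor's note: for context, a simplified sketch of the caller-side flow this
change enables. This is not part of the patch; the real direct_page_fault()
also handles mmu_lock acquisition and retry logic, and the exact surrounding
code differs across kernel versions.]

	static int direct_page_fault(struct kvm_vcpu *vcpu,
				     struct kvm_page_fault *fault)
	{
		int r;

		/*
		 * kvm_faultin_pfn() now both resolves the PFN and reports
		 * error PFNs itself, so callers no longer need their own
		 * is_error_pfn() check.
		 */
		r = kvm_faultin_pfn(vcpu, fault);
		if (r != RET_PF_CONTINUE)
			return r;

		/* Still needed for the no-slot (MMIO) case, for now. */
		r = handle_abnormal_pfn(vcpu, fault, ACC_ALL);
		if (r != RET_PF_CONTINUE)
			return r;

		/* ... map fault->pfn into the page tables ... */
		return r;
	}

Once the remaining no-slot handling is similarly moved into kvm_faultin_pfn(),
handle_abnormal_pfn() can be dropped entirely, which is the duplication
reduction the commit message refers to.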

Comments

Isaku Yamahata Aug. 30, 2022, 11:45 p.m. UTC | #1
On Fri, Aug 26, 2022 at 04:12:21PM -0700,
David Matlack <dmatlack@google.com> wrote:

> [...]
> @@ -4185,15 +4181,25 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
>  	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
>  					  fault->write, &fault->map_writable,
>  					  &fault->hva);
> +
>  	return RET_PF_CONTINUE;
>  }

nit: unnecessary code churn.
Otherwise, code looks good to me.
David Matlack Sept. 1, 2022, 4:48 p.m. UTC | #2
On Tue, Aug 30, 2022 at 4:45 PM Isaku Yamahata <isaku.yamahata@gmail.com> wrote:
>
> On Fri, Aug 26, 2022 at 04:12:21PM -0700,
> David Matlack <dmatlack@google.com> wrote:
> >
> > @@ -4185,15 +4181,25 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
> >       fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
> >                                         fault->write, &fault->map_writable,
> >                                         &fault->hva);
> > +
> >       return RET_PF_CONTINUE;
> >  }
>
> nit: unnecessary code churn.
> Otherwise, code looks good to me.

My mistake, thanks for the catch.

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 49dbe274c709..273e1771965c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3144,7 +3144,7 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
+static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 {
 	/*
 	 * Do not cache the mmio info caused by writing the readonly gfn
@@ -3165,10 +3165,6 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 			       unsigned int access)
 {
-	/* The pfn is invalid, report the error! */
-	if (unlikely(is_error_pfn(fault->pfn)))
-		return kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
-
 	if (unlikely(!fault->slot)) {
 		gva_t gva = fault->is_tdp ? 0 : fault->addr;
 
@@ -4185,15 +4181,25 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
 					  fault->write, &fault->map_writable,
 					  &fault->hva);
+
 	return RET_PF_CONTINUE;
 }
 
 static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
+	int ret;
+
 	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
 	smp_rmb();
 
-	return __kvm_faultin_pfn(vcpu, fault);
+	ret = __kvm_faultin_pfn(vcpu, fault);
+	if (ret != RET_PF_CONTINUE)
+		return ret;
+
+	if (unlikely(is_error_pfn(fault->pfn)))
+		return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn);
+
+	return RET_PF_CONTINUE;
 }
 
 /*