
[08/16] KVM: MMU: change handle_abnormal_pfn() arguments to kvm_page_fault

Message ID 20210807134936.3083984-9-pbonzini@redhat.com (mailing list archive)
State New, archived
Series KVM: x86: pass arguments on the page fault path via struct kvm_page_fault

Commit Message

Paolo Bonzini Aug. 7, 2021, 1:49 p.m. UTC
Pass struct kvm_page_fault to handle_abnormal_pfn() instead of
extracting the individual arguments from the struct at the call sites.

Suggested-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c         | 17 ++++++++---------
 arch/x86/kvm/mmu/paging_tmpl.h |  2 +-
 2 files changed, 9 insertions(+), 10 deletions(-)
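For context, struct kvm_page_fault is introduced earlier in this series. Below is a minimal sketch of only the fields that handle_abnormal_pfn() relies on after this patch, reconstructed from the diff; the real definition carries additional state (error code, fault hints, and so on):

	struct kvm_page_fault {
		gpa_t addr;      /* faulting address: a GPA for TDP faults, a GVA otherwise */
		gfn_t gfn;       /* guest frame number derived from addr */
		kvm_pfn_t pfn;   /* backing host pfn, or an error/noslot sentinel */
		bool is_tdp;     /* fault taken with two-dimensional paging (EPT/NPT) */
	};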

Comments

Sean Christopherson Sept. 1, 2021, 11:15 p.m. UTC | #1
On Sat, Aug 07, 2021, Paolo Bonzini wrote:
> Pass struct kvm_page_fault to handle_abnormal_pfn() instead of
> extracting the individual arguments from the struct at the call sites.
> 
> Suggested-by: Isaku Yamahata <isaku.yamahata@intel.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/mmu/mmu.c         | 17 ++++++++---------
>  arch/x86/kvm/mmu/paging_tmpl.h |  2 +-
>  2 files changed, 9 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index a6366f1c4197..cec59ac2e1cd 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3024,18 +3024,18 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
>  	return -EFAULT;
>  }
>  
> -static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
> -				kvm_pfn_t pfn, unsigned int access,
> -				int *ret_val)
> +static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
> +				unsigned int access, int *ret_val)
>  {
>  	/* The pfn is invalid, report the error! */
> -	if (unlikely(is_error_pfn(pfn))) {
> -		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
> +	if (unlikely(is_error_pfn(fault->pfn))) {
> +		*ret_val = kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
>  		return true;
>  	}
>  
> -	if (unlikely(is_noslot_pfn(pfn))) {
> -		vcpu_cache_mmio_info(vcpu, gva, gfn,
> +	if (unlikely(is_noslot_pfn(fault->pfn))) {
> +		gva_t gva = fault->is_tdp ? 0 : fault->addr;

Checkpatch wants a newline.  I'm also surprised you didn't abuse bitwise math:

		gva_t gva = fault->addr & ((u64)fault->is_tdp - 1);

I am _not_ suggesting you actually do that ;-)

> +		vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
>  				     access & shadow_mmio_access_mask);
>  		/*
>  		 * If MMIO caching is disabled, emulate immediately without

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a6366f1c4197..cec59ac2e1cd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3024,18 +3024,18 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 	return -EFAULT;
 }
 
-static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
-				kvm_pfn_t pfn, unsigned int access,
-				int *ret_val)
+static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+				unsigned int access, int *ret_val)
 {
 	/* The pfn is invalid, report the error! */
-	if (unlikely(is_error_pfn(pfn))) {
-		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
+	if (unlikely(is_error_pfn(fault->pfn))) {
+		*ret_val = kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
 		return true;
 	}
 
-	if (unlikely(is_noslot_pfn(pfn))) {
-		vcpu_cache_mmio_info(vcpu, gva, gfn,
+	if (unlikely(is_noslot_pfn(fault->pfn))) {
+		gva_t gva = fault->is_tdp ? 0 : fault->addr;
+		vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
 				     access & shadow_mmio_access_mask);
 		/*
 		 * If MMIO caching is disabled, emulate immediately without
@@ -3904,8 +3904,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	if (try_async_pf(vcpu, fault))
 		return RET_PF_RETRY;
 
-	if (handle_abnormal_pfn(vcpu, fault->is_tdp ? 0 : gpa,
-	                        fault->gfn, fault->pfn, ACC_ALL, &r))
+	if (handle_abnormal_pfn(vcpu, fault, ACC_ALL, &r))
 		return r;
 
 	r = RET_PF_RETRY;
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 6ead472674ad..9b90097dea22 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -882,7 +882,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	if (try_async_pf(vcpu, fault))
 		return RET_PF_RETRY;
 
-	if (handle_abnormal_pfn(vcpu, addr, fault->gfn, fault->pfn, walker.pte_access, &r))
+	if (handle_abnormal_pfn(vcpu, fault, walker.pte_access, &r))
 		return r;
 
 	/*
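A note on the behavior being preserved: with TDP, fault->addr holds a guest physical address, so there is no guest virtual address worth handing to the MMIO-info cache, which is why 0 is passed. The new line inside handle_abnormal_pfn() reproduces what direct_page_fault() used to open-code at its call site, and on the shadow-paging path, where fault->is_tdp is false, fault->addr passes through unchanged, matching the old addr argument:

	/* Previously open-coded at the direct_page_fault() call site as
	 * fault->is_tdp ? 0 : gpa; under TDP there is no GVA to cache. */
	gva_t gva = fault->is_tdp ? 0 : fault->addr;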