
[v3,05/10] KVM: x86/mmu: Avoid memslot lookup during KVM_PFN_ERR_HWPOISON handling

Message ID 20220921173546.2674386-6-dmatlack@google.com (mailing list archive)
State New, archived
Series KVM: x86/mmu: Make tdp_mmu read-only and clean up TDP MMU fault handler

Commit Message

David Matlack Sept. 21, 2022, 5:35 p.m. UTC
Pass the kvm_page_fault struct down to kvm_handle_error_pfn() to avoid a
memslot lookup when handling KVM_PFN_ERR_HWPOISON. Opportunistically
move the gfn_to_hva_memslot() call and @current down into
kvm_send_hwpoison_signal() to cut down on line lengths.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
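
For context, the memslot lookup being avoided here is the one buried inside kvm_vcpu_gfn_to_hva(), which re-resolves the gfn's memslot just to build the host virtual address handed to send_sig_mceerr(). The sketch below paraphrases the before/after hwpoison paths rather than copying the real helpers; the function names old_handle_hwpoison()/new_handle_hwpoison() are made up purely for illustration, and the authoritative code is in the diff itself.

/*
 * Before: the hva is built at the call site, and kvm_vcpu_gfn_to_hva()
 * performs a gfn -> memslot lookup internally.
 */
static int old_handle_hwpoison(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned long hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);

	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
	return RET_PF_RETRY;
}

/*
 * After: fault->slot was already resolved earlier in the fault path
 * (by kvm_faultin_pfn()), so the hva is derived from it directly and
 * no second memslot lookup is needed.
 */
static int new_handle_hwpoison(struct kvm_page_fault *fault)
{
	unsigned long hva = gfn_to_hva_memslot(fault->slot, fault->gfn);

	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
	return RET_PF_RETRY;
}

Passing the whole kvm_page_fault struct also matches how the other fault-path helpers touched by this series, such as __direct_map() and kvm_faultin_pfn(), already take their arguments.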

Comments

Isaku Yamahata Oct. 3, 2022, 7:23 p.m. UTC | #1
On Wed, Sep 21, 2022 at 10:35:41AM -0700,
David Matlack <dmatlack@google.com> wrote:

> Pass the kvm_page_fault struct down to kvm_handle_error_pfn() to avoid a
> memslot lookup when handling KVM_PFN_ERR_HWPOISON. Opportunistically
> move the gfn_to_hva_memslot() call and @current down into
> kvm_send_hwpoison_signal() to cut down on line lengths.
> 
> No functional change intended.
> 
> Signed-off-by: David Matlack <dmatlack@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 16 +++++++++-------
>  1 file changed, 9 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 49a5e38ecc5c..b6f84e470677 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3136,23 +3136,25 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
>  	return ret;
>  }
>  
> -static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
> +static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
>  {
> -	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
> +	unsigned long hva = gfn_to_hva_memslot(slot, gfn);
> +
> +	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
>  }
>  
> -static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
> +static int kvm_handle_error_pfn(struct kvm_page_fault *fault)
>  {
>  	/*
>  	 * Do not cache the mmio info caused by writing the readonly gfn
>  	 * into the spte otherwise read access on readonly gfn also can
>  	 * caused mmio page fault and treat it as mmio access.
>  	 */
> -	if (pfn == KVM_PFN_ERR_RO_FAULT)
> +	if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
>  		return RET_PF_EMULATE;
>  
> -	if (pfn == KVM_PFN_ERR_HWPOISON) {
> -		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
> +	if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
> +		kvm_send_hwpoison_signal(fault->slot, fault->gfn);
>  		return RET_PF_RETRY;
>  	}
>  
> @@ -4193,7 +4195,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
>  		return ret;
>  
>  	if (unlikely(is_error_pfn(fault->pfn)))
> -		return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn);
> +		return kvm_handle_error_pfn(fault);
>  
>  	return RET_PF_CONTINUE;
>  }
> -- 
> 2.37.3.998.g577e59143f-goog
> 

Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 49a5e38ecc5c..b6f84e470677 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3136,23 +3136,25 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	return ret;
 }
 
-static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
+static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
 {
-	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
+	unsigned long hva = gfn_to_hva_memslot(slot, gfn);
+
+	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
 }
 
-static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
+static int kvm_handle_error_pfn(struct kvm_page_fault *fault)
 {
 	/*
 	 * Do not cache the mmio info caused by writing the readonly gfn
 	 * into the spte otherwise read access on readonly gfn also can
 	 * caused mmio page fault and treat it as mmio access.
 	 */
-	if (pfn == KVM_PFN_ERR_RO_FAULT)
+	if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
 		return RET_PF_EMULATE;
 
-	if (pfn == KVM_PFN_ERR_HWPOISON) {
-		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
+	if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
+		kvm_send_hwpoison_signal(fault->slot, fault->gfn);
 		return RET_PF_RETRY;
 	}
 
@@ -4193,7 +4195,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		return ret;
 
 	if (unlikely(is_error_pfn(fault->pfn)))
-		return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn);
+		return kvm_handle_error_pfn(fault);
 
 	return RET_PF_CONTINUE;
 }