[02/14] KVM: x86/mmu: Split remote_flush+zap case out of kvm_mmu_flush_or_zap()

Message ID 20190110180706.24974-3-sean.j.christopherson@intel.com
State New
Series
  • KVM: x86/mmu: Remove fast invalidate mechanism

Commit Message

Sean Christopherson Jan. 10, 2019, 6:06 p.m. UTC
...and into a separate helper, kvm_mmu_remote_flush_or_zap(), that does
not require a vcpu so that the code can be (re)used by
kvm_mmu_invalidate_zap_pages_in_memslot().

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)
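
For context, the reason the helper drops the vcpu parameter is that memslot-wide zapping runs without any vcpu in hand. A rough sketch of how a vcpu-less caller such as kvm_mmu_invalidate_zap_pages_in_memslot() (added later in this series) could drive the helper is shown below; the signature follows the page-track flush_slot callback, and the body is a placeholder, not the code from that later patch.

	/*
	 * Illustrative sketch only: the real function is introduced by a
	 * later patch in this series; the slot walk below is elided.
	 */
	static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				struct kvm_page_track_notifier_node *node)
	{
		LIST_HEAD(invalid_list);
		bool flush = false;

		spin_lock(&kvm->mmu_lock);

		/*
		 * ... zap the shadow pages backing the memslot with
		 * kvm_mmu_prepare_zap_page(), noting in 'flush' whether a
		 * remote TLB flush is needed ...
		 */

		/* No vcpu here, hence the vcpu-less helper. */
		kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);

		spin_unlock(&kvm->mmu_lock);
	}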

Comments

Sean Christopherson Jan. 11, 2019, 3:16 p.m. UTC | #1
On Thu, Jan 10, 2019 at 10:06:54AM -0800, Sean Christopherson wrote:
> ...and into a separate helper, kvm_mmu_remote_flush_or_zap(), that does
> not require a vcpu so that the code can be (re)used by
> kvm_mmu_invalidate_zap_pages_in_memslot().
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/x86/kvm/mmu.c | 22 ++++++++++++++++------
>  1 file changed, 16 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 07ee9b727a68..0f43458f3782 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2231,18 +2231,28 @@ static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>  	return true;
>  }
>  
> +static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
> +					struct list_head *invalid_list,
> +					bool remote_flush)
> +{
> +	if (!remote_flush && !list_empty(invalid_list))
> +		return false;
> +
> +	if (remote_flush)
> +		kvm_flush_remote_tlbs(kvm);
> +	else
> +		kvm_mmu_commit_zap_page(kvm, invalid_list);

Doh, this is wrong, zapping the list should have priority, e.g.:

	if (!list_empty(invalid_list))
		kvm_mmu_commit_zap_page(kvm, invalid_list);
	else
		kvm_flush_remote_tlbs(kvm);

> +	return true;
> +}
> +
>  static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
>  				 struct list_head *invalid_list,
>  				 bool remote_flush, bool local_flush)
>  {
> -	if (!list_empty(invalid_list)) {
> -		kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
> +	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
>  		return;
> -	}
>  
> -	if (remote_flush)
> -		kvm_flush_remote_tlbs(vcpu->kvm);
> -	else if (local_flush)
> +	if (local_flush)
>  		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>  }
>  
> -- 
> 2.19.2
>

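Folding that follow-up fix in, the helper ends up looking roughly like the sketch below. Note that the early return also wants list_empty() rather than !list_empty(): otherwise a zap-only call (remote_flush false, non-empty invalid_list) would bail out before the zap is committed. This is a reconstruction of the intended end state, not necessarily the exact code that was ultimately applied.

	static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
						struct list_head *invalid_list,
						bool remote_flush)
	{
		/* Nothing to zap and no remote flush requested. */
		if (!remote_flush && list_empty(invalid_list))
			return false;

		/*
		 * Committing the zap takes priority; kvm_mmu_commit_zap_page()
		 * already does a remote TLB flush as part of freeing the pages.
		 */
		if (!list_empty(invalid_list))
			kvm_mmu_commit_zap_page(kvm, invalid_list);
		else
			kvm_flush_remote_tlbs(kvm);

		return true;
	}
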
Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 07ee9b727a68..0f43458f3782 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2231,18 +2231,28 @@ static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	return true;
 }
 
+static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
+					struct list_head *invalid_list,
+					bool remote_flush)
+{
+	if (!remote_flush && !list_empty(invalid_list))
+		return false;
+
+	if (remote_flush)
+		kvm_flush_remote_tlbs(kvm);
+	else
+		kvm_mmu_commit_zap_page(kvm, invalid_list);
+	return true;
+}
+
 static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
 				 struct list_head *invalid_list,
 				 bool remote_flush, bool local_flush)
 {
-	if (!list_empty(invalid_list)) {
-		kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
+	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
 		return;
-	}
 
-	if (remote_flush)
-		kvm_flush_remote_tlbs(vcpu->kvm);
-	else if (local_flush)
+	if (local_flush)
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }