
[14/14] KVM: x86/mmu: Consolidate kvm_mmu_zap_all() and kvm_mmu_zap_mmio_sptes()

Message ID: 20190110180706.24974-15-sean.j.christopherson@intel.com
State: New, archived
Series: KVM: x86/mmu: Remove fast invalidate mechanism

Commit Message

Sean Christopherson Jan. 10, 2019, 6:07 p.m. UTC
...via a new helper, __kvm_mmu_zap_all().  An alternative to passing a
'bool mmio_only' would be to pass a callback function to filter the
shadow page, i.e. to make __kvm_mmu_zap_all() generic and reusable, but
zapping all shadow pages is a last resort, i.e. making the helper less
extensible is a feature of sorts.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu.c | 26 ++++++++------------------
 1 file changed, 8 insertions(+), 18 deletions(-)
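
For reference, the callback-based alternative that the changelog rejects
might have looked something like the sketch below; sp_filter_t and
sp_is_mmio_cached are hypothetical names, not code from this series:

typedef bool (*sp_filter_t)(struct kvm_mmu_page *sp);

static bool sp_is_mmio_cached(struct kvm_mmu_page *sp)
{
	return sp->mmio_cached;
}

/* A NULL filter matches every shadow page, i.e. zaps everything. */
static void __kvm_mmu_zap_all(struct kvm *kvm, sp_filter_t filter)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		if (filter && !filter(sp))
			continue;
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

The 'bool mmio_only' variant avoids an indirect call per shadow page and,
as the changelog argues, deliberately keeps the helper unattractive to
new callers.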

Comments

Paolo Bonzini Jan. 30, 2019, 4:44 p.m. UTC | #1
On 10/01/19 19:07, Sean Christopherson wrote:
> [...]
> @@ -5849,6 +5834,11 @@ static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
>  	spin_unlock(&kvm->mmu_lock);
>  }
>  
> +void kvm_mmu_zap_all(struct kvm *kvm)
> +{
> +	return __kvm_mmu_zap_all(kvm, false);
> +}
> +
>  void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
>  {
>  	/*
> @@ -5857,7 +5847,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
>  	 */
>  	if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
>  		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
> -		kvm_mmu_zap_mmio_sptes(kvm);
> +		__kvm_mmu_zap_all(kvm, true);
>  	}
>  }
>  
> 

Even simpler if you just call kvm_mmu_zap_all as just suggested. :)
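
For illustration, Paolo's suggestion would shrink the final hunk to
something like the sketch below, i.e. drop the MMIO-only filtering
entirely, presumably on the theory that generation wraparound is rare
enough that zapping everything is acceptable (a sketch, not the
committed version):

void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
{
	/*
	 * MMIO generation wraparound is rare, so simply zap all shadow
	 * pages rather than maintain a dedicated MMIO-only zap path.
	 */
	if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
		kvm_mmu_zap_all(kvm);
	}
}

With that, __kvm_mmu_zap_all() and the 'bool mmio_only' parameter would
disappear altogether.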

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6707500da41a..c9134b54125f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5816,22 +5816,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
-void kvm_mmu_zap_all(struct kvm *kvm)
-{
-	struct kvm_mmu_page *sp, *node;
-	LIST_HEAD(invalid_list);
-
-	spin_lock(&kvm->mmu_lock);
-restart:
-	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
-		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-			goto restart;
-
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-	spin_unlock(&kvm->mmu_lock);
-}
-
-static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
+static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
@@ -5839,7 +5824,7 @@ static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
 	spin_lock(&kvm->mmu_lock);
 restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-		if (!sp->mmio_cached)
+		if (mmio_only && !sp->mmio_cached)
 			continue;
 		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
 			goto restart;
@@ -5849,6 +5834,11 @@ static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
 	spin_unlock(&kvm->mmu_lock);
 }
 
+void kvm_mmu_zap_all(struct kvm *kvm)
+{
+	return __kvm_mmu_zap_all(kvm, false);
+}
+
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
 {
 	/*
@@ -5857,7 +5847,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
 	 */
 	if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
-		kvm_mmu_zap_mmio_sptes(kvm);
+		__kvm_mmu_zap_all(kvm, true);
 	}
 }
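
As an aside, the 'goto restart' idiom in the consolidated helper can be
non-obvious; here is the same function again with explanatory comments
(the annotations reflect one reading of the code and are not part of the
patch):

static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		/* In MMIO-only mode, skip pages with no cached MMIO sptes. */
		if (mmio_only && !sp->mmio_cached)
			continue;
		/*
		 * A nonzero return means pages were zapped, possibly
		 * including children of @sp; the pre-fetched @node may no
		 * longer be on active_mmu_pages, so restart the walk from
		 * the head of the list.
		 */
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;
	}
	/* Flush TLBs and free everything accumulated on invalid_list. */
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}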