[V2,03/10] KVM: Remove tlbs_dirty

Message ID 20210918005636.3675-4-jiangshanlai@gmail.com (mailing list archive)
State New, archived
Series [V2,01/10] KVM: X86: Fix missed remote tlb flush in rmap_write_protect()

Commit Message

Lai Jiangshan Sept. 18, 2021, 12:56 a.m. UTC
From: Lai Jiangshan <laijs@linux.alibaba.com>

There is no user of tlbs_dirty.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
---
 include/linux/kvm_host.h | 1 -
 virt/kvm/kvm_main.c      | 9 +--------
 2 files changed, 1 insertion(+), 9 deletions(-)
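
For context, the lines this patch deletes implemented a "deferred flush" handshake: kvm_flush_remote_tlbs() snapshotted tlbs_dirty with smp_load_acquire() before broadcasting KVM_REQ_TLB_FLUSH, then reset the counter with cmpxchg() only if it still held the snapshotted value, so a PTE dirtied after the snapshot kept the counter non-zero and still forced a later flush; __kvm_handle_hva_range() likewise flushed whenever tlbs_dirty was non-zero even when no range handler asked for a flush. The standalone sketch below models that retired pattern with C11 atomics; the writer side is not part of this patch, and every identifier except tlbs_dirty is invented for illustration.

/*
 * Userspace model of the retired tlbs_dirty handshake.  C11 atomics stand
 * in for smp_load_acquire()/cmpxchg(); this is not the kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long tlbs_dirty;	/* bumped after a shadow PTE is dirtied */

static void mark_pte_dirty(void)
{
	/* A writer defers the TLB flush and records that one is pending. */
	atomic_fetch_add_explicit(&tlbs_dirty, 1, memory_order_release);
}

static void flush_remote_tlbs(void)
{
	/* Snapshot the counter before "broadcasting" the flush request. */
	long dirty_count = atomic_load_explicit(&tlbs_dirty, memory_order_acquire);

	printf("flushing remote TLBs\n");	/* stands in for KVM_REQ_TLB_FLUSH */

	/*
	 * Clear only what was observed: if another writer bumped the counter
	 * after the snapshot, the exchange fails and the pending flush is
	 * not lost.
	 */
	atomic_compare_exchange_strong(&tlbs_dirty, &dirty_count, 0);
}

int main(void)
{
	mark_pte_dirty();
	/* Mirrors the "ret || kvm->tlbs_dirty" check removed from
	 * __kvm_handle_hva_range(). */
	if (atomic_load(&tlbs_dirty))
		flush_remote_tlbs();
	return 0;
}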

Comments

Paolo Bonzini Sept. 23, 2021, 3:23 p.m. UTC | #1
On 18/09/21 02:56, Lai Jiangshan wrote:
> From: Lai Jiangshan <laijs@linux.alibaba.com>
> 
> There is no user of tlbs_dirty.
> 
> Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
> ---
>   include/linux/kvm_host.h | 1 -
>   virt/kvm/kvm_main.c      | 9 +--------
>   2 files changed, 1 insertion(+), 9 deletions(-)
> 
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index e4d712e9f760..3b7846cd0637 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -608,7 +608,6 @@ struct kvm {
>   	unsigned long mmu_notifier_range_start;
>   	unsigned long mmu_notifier_range_end;
>   #endif
> -	long tlbs_dirty;
>   	struct list_head devices;
>   	u64 manual_dirty_log_protect;
>   	struct dentry *debugfs_dentry;
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 3e67c93ca403..6d6be42ec78d 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -312,12 +312,6 @@ EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
>   #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
>   void kvm_flush_remote_tlbs(struct kvm *kvm)
>   {
> -	/*
> -	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
> -	 * kvm_make_all_cpus_request.
> -	 */
> -	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
> -
>   	/*
>   	 * We want to publish modifications to the page tables before reading
>   	 * mode. Pairs with a memory barrier in arch-specific code.
> @@ -332,7 +326,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
>   	if (!kvm_arch_flush_remote_tlb(kvm)
>   	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
>   		++kvm->stat.generic.remote_tlb_flush;
> -	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
>   }
>   EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
>   #endif
> @@ -537,7 +530,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
>   		}
>   	}
>   
> -	if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
> +	if (range->flush_on_ret && ret)
>   		kvm_flush_remote_tlbs(kvm);
>   
>   	if (locked)
> 

Queued up to here for 5.15, thanks!

Paolo
Lai Jiangshan Sept. 24, 2021, 3:40 p.m. UTC | #2
On 2021/9/23 23:23, Paolo Bonzini wrote:
> On 18/09/21 02:56, Lai Jiangshan wrote:

> 
> Queued up to here for 5.15, thanks!
> 
> Paolo

Any comments on other commits?

Thanks
Lai
Paolo Bonzini Sept. 24, 2021, 4:03 p.m. UTC | #3
On 24/09/21 17:40, Lai Jiangshan wrote:
> 
> 
> On 2021/9/23 23:23, Paolo Bonzini wrote:
>> On 18/09/21 02:56, Lai Jiangshan wrote:
> 
>>
>> Queued up to here for 5.15, thanks!
>>
>> Paolo
> 
> Any comments on other commits?

Queued now for 5.16. :)

More precisely, this is what I have queued from you for 5.16 only:

       KVM: X86: Don't flush current tlb on shadow page modification
       KVM: X86: Remove kvm_mmu_flush_or_zap()
       KVM: X86: Change kvm_sync_page() to return true when remote flush is needed
       KVM: X86: Zap the invalid list after remote tlb flushing
       KVM: X86: Remove FNAME(update_pte)
       KVM: X86: Don't unsync pagetables when speculative
       KVM: X86: Don't check unsync if the original spte is writable
       KVM: X86: Move PTE present check from loop body to __shadow_walk_next()

Paolo

Patch

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e4d712e9f760..3b7846cd0637 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -608,7 +608,6 @@ struct kvm {
 	unsigned long mmu_notifier_range_start;
 	unsigned long mmu_notifier_range_end;
 #endif
-	long tlbs_dirty;
 	struct list_head devices;
 	u64 manual_dirty_log_protect;
 	struct dentry *debugfs_dentry;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3e67c93ca403..6d6be42ec78d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -312,12 +312,6 @@ EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	/*
-	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
-	 * kvm_make_all_cpus_request.
-	 */
-	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
-
 	/*
 	 * We want to publish modifications to the page tables before reading
 	 * mode. Pairs with a memory barrier in arch-specific code.
@@ -332,7 +326,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 	if (!kvm_arch_flush_remote_tlb(kvm)
 	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.generic.remote_tlb_flush;
-	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
@@ -537,7 +530,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 		}
 	}
 
-	if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
+	if (range->flush_on_ret && ret)
 		kvm_flush_remote_tlbs(kvm);
 
 	if (locked)