diff mbox series

[V4,7/15] KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range()

Message ID 20181013145406.4911-8-Tianyu.Lan@microsoft.com (mailing list archive)
State New, archived
Headers show
Series x86/KVM/Hyper-v: Add HV ept tlb range flush hypercall support in KVM | expand

Commit Message

Tianyu Lan Oct. 13, 2018, 2:53 p.m. UTC
From: Lan Tianyu <Tianyu.Lan@microsoft.com>

Originally, the TLB flush was done by slot_handle_level_range(). This patch
flushes the TLB directly in kvm_zap_gfn_range() when range-based
flushing is available.

Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
---
 arch/x86/kvm/mmu.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

Comments

Paolo Bonzini Oct. 15, 2018, 10:04 a.m. UTC | #1
On 13/10/2018 16:53, lantianyu1986@gmail.com wrote:
> +	bool flush = false;
>  	int i;
>  
>  	spin_lock(&kvm->mmu_lock);
> @@ -5654,18 +5655,27 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
>  		slots = __kvm_memslots(kvm, i);
>  		kvm_for_each_memslot(memslot, slots) {
>  			gfn_t start, end;
> +			bool flush_tlb = true;
>  
>  			start = max(gfn_start, memslot->base_gfn);
>  			end = min(gfn_end, memslot->base_gfn + memslot->npages);
>  			if (start >= end)
>  				continue;
>  
> -			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
> -						PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
> -						start, end - 1, true);
> +			if (kvm_available_flush_tlb_with_range())
> +				flush_tlb = false;

This should be moved outside the for, because it's invariant.

> +			flush = slot_handle_level_range(kvm, memslot,
> +					kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
> +					PT_MAX_HUGEPAGE_LEVEL, start,
> +					end - 1, flush_tlb);

... and this should be "flush |= ".
>  		}
>  	}
>  
> +	if (flush && kvm_available_flush_tlb_with_range())
> +		kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
> +				gfn_end - gfn_start + 1);
> +

... and this can be just if (flush), because if flush_tlb is true then
slot_handle_level_range always returns false.

Paolo
Tianyu Lan Oct. 15, 2018, 1:08 p.m. UTC | #2
Hi Paolo:
              Thanks for your review.

On Mon, Oct 15, 2018 at 6:04 PM Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> On 13/10/2018 16:53, lantianyu1986@gmail.com wrote:
> > +     bool flush = false;
> >       int i;
> >
> >       spin_lock(&kvm->mmu_lock);
> > @@ -5654,18 +5655,27 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
> >               slots = __kvm_memslots(kvm, i);
> >               kvm_for_each_memslot(memslot, slots) {
> >                       gfn_t start, end;
> > +                     bool flush_tlb = true;
> >
> >                       start = max(gfn_start, memslot->base_gfn);
> >                       end = min(gfn_end, memslot->base_gfn + memslot->npages);
> >                       if (start >= end)
> >                               continue;
> >
> > -                     slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
> > -                                             PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
> > -                                             start, end - 1, true);
> > +                     if (kvm_available_flush_tlb_with_range())
> > +                             flush_tlb = false;
>
> This should be moved outside the for, because it's invariant.
>
> > +                     flush = slot_handle_level_range(kvm, memslot,
> > +                                     kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
> > +                                     PT_MAX_HUGEPAGE_LEVEL, start,
> > +                                     end - 1, flush_tlb);
>
> ... and this should be "flush |= ".
> >               }
> >       }
> >
> > +     if (flush && kvm_available_flush_tlb_with_range())
> > +             kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
> > +                             gfn_end - gfn_start + 1);
> > +
>
> ... and this can be just if (flush), because if flush_tlb is true then
> slot_handle_level_range always returns false.

OK. Will update.
diff mbox series

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f3742ff4ec18..c4f7679f12c3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5647,6 +5647,11 @@  void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
+	bool flush_tlb = true;
+	bool flush = false;
 	int i;
 
+	/* Loop-invariant: hoist the range-flush capability check out of the loop. */
+	if (kvm_available_flush_tlb_with_range())
+		flush_tlb = false;
+
 	spin_lock(&kvm->mmu_lock);
@@ -5654,18 +5659,23 @@  void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 		slots = __kvm_memslots(kvm, i);
 		kvm_for_each_memslot(memslot, slots) {
 			gfn_t start, end;
 
 			start = max(gfn_start, memslot->base_gfn);
 			end = min(gfn_end, memslot->base_gfn + memslot->npages);
 			if (start >= end)
 				continue;
 
-			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-						PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
-						start, end - 1, true);
+			/* Accumulate across memslots; do not overwrite. */
+			flush |= slot_handle_level_range(kvm, memslot,
+					kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
+					PT_MAX_HUGEPAGE_LEVEL, start,
+					end - 1, flush_tlb);
 		}
 	}
 
+	/*
+	 * When range flush is unavailable, flush_tlb is true and
+	 * slot_handle_level_range() flushed internally, returning false,
+	 * so checking flush alone is sufficient here.
+	 */
+	if (flush)
+		kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+				gfn_end - gfn_start + 1);
+
 	spin_unlock(&kvm->mmu_lock);
 }