
[v3,4/5] KVM: LAPIC: Delay trace advance expire delta

Message ID 1557975980-9875-5-git-send-email-wanpengli@tencent.com (mailing list archive)
State: New, archived
Series: KVM: LAPIC: Optimize timer latency further

Commit Message

Wanpeng Li May 16, 2019, 3:06 a.m. UTC
From: Wanpeng Li <wanpengli@tencent.com>

The wait_lapic_expire() call was moved above guest_enter_irqoff() because of
its tracepoint, which violated the RCU extended quiescent state entered by
guest_enter_irqoff() [1][2]. This patch simply moves the tracepoint below
guest_exit_irqoff() in vcpu_enter_guest(): snapshot the delta before VM-Enter,
but trace it only after VM-Exit. This prepares for moving wait_lapic_expire()
to just before VM-Enter in a later patch.

[1] Commit 8b89fe1f6c43 ("kvm: x86: move tracepoints outside extended quiescent state")
[2] https://patchwork.kernel.org/patch/7821111/

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
---
 arch/x86/kvm/lapic.c | 16 ++++++++--------
 arch/x86/kvm/lapic.h |  1 +
 arch/x86/kvm/x86.c   |  2 ++
 3 files changed, 11 insertions(+), 8 deletions(-)
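
For orientation, a minimal sketch of the ordering this patch produces; it is
simplified and not verbatim, and the helpers around VM-Enter are abbreviated
(the real code is in the hunks below):

	/* Simplified sketch -- see the diff for the actual changes. */
	void wait_lapic_expire(struct kvm_vcpu *vcpu)
	{
		...
		guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
		/* Snapshot only; tracing here would run inside the RCU EQS. */
		apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
		...
	}

	static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
	{
		...
		wait_lapic_expire(vcpu);	/* still called from here; moved into vendor code by the next patch */
		guest_enter_irqoff();		/* enter the RCU extended quiescent state */
		kvm_x86_ops->run(vcpu);		/* VM-Enter ... VM-Exit */
		...
		guest_exit_irqoff();		/* leave the EQS */
		/* Tracing is safe again: report the snapshotted delta. */
		trace_kvm_wait_lapic_expire(vcpu->vcpu_id,
			vcpu->arch.apic->lapic_timer.advance_expire_delta);
		...
	}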

Comments

Sean Christopherson May 17, 2019, 7:44 p.m. UTC | #1
On Thu, May 16, 2019 at 11:06:19AM +0800, Wanpeng Li wrote:
> From: Wanpeng Li <wanpengli@tencent.com>
> 
> wait_lapic_expire() call was moved above guest_enter_irqoff() because of 
> its tracepoint, which violated the RCU extended quiescent state invoked 
> by guest_enter_irqoff()[1][2]. This patch simply moves the tracepoint 
> below guest_exit_irqoff() in vcpu_enter_guest(). Snapshot the delta before 
> VM-Enter, but trace it after VM-Exit. This can help us to move 
> wait_lapic_expire() just before vmentry in the later patch.
> 
> [1] Commit 8b89fe1f6c43 ("kvm: x86: move tracepoints outside extended quiescent state")
> [2] https://patchwork.kernel.org/patch/7821111/
> 
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Radim Krčmář <rkrcmar@redhat.com>
> Cc: Sean Christopherson <sean.j.christopherson@intel.com>
> Cc: Liran Alon <liran.alon@oracle.com>
> Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> ---
>  arch/x86/kvm/lapic.c | 16 ++++++++--------
>  arch/x86/kvm/lapic.h |  1 +
>  arch/x86/kvm/x86.c   |  2 ++
>  3 files changed, 11 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 2f364fe..af38ece 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -1502,27 +1502,27 @@ static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
>  }
>  
>  static inline void adaptive_tune_timer_advancement(struct kvm_vcpu *vcpu,
> -				u64 guest_tsc, u64 tsc_deadline)
> +				s64 advance_expire_delta)
>  {
>  	struct kvm_lapic *apic = vcpu->arch.apic;
>  	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
>  	u64 ns;
>  
>  	/* too early */
> -	if (guest_tsc < tsc_deadline) {
> -		ns = (tsc_deadline - guest_tsc) * 1000000ULL;
> +	if (advance_expire_delta < 0) {
> +		ns = -advance_expire_delta * 1000000ULL;
>  		do_div(ns, vcpu->arch.virtual_tsc_khz);
>  		timer_advance_ns -= min((u32)ns,
>  			timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
>  	} else {
>  	/* too late */
> -		ns = (guest_tsc - tsc_deadline) * 1000000ULL;
> +		ns = advance_expire_delta * 1000000ULL;
>  		do_div(ns, vcpu->arch.virtual_tsc_khz);
>  		timer_advance_ns += min((u32)ns,
>  			timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
>  	}
>  
> -	if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
> +	if (abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
>  		apic->lapic_timer.timer_advance_adjust_done = true;
>  	if (unlikely(timer_advance_ns > 5000)) {
>  		timer_advance_ns = 0;
> @@ -1545,13 +1545,13 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
>  	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
>  	apic->lapic_timer.expired_tscdeadline = 0;
>  	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
> -	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
> +	apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
>  
> -	if (guest_tsc < tsc_deadline)
> +	if (apic->lapic_timer.advance_expire_delta < 0)

I'd prefer to keep "guest_tsc < tsc_deadline" here, just so that it's
obvious that the call to __wait_lapic_expire() is safe.  My eyes did a
few double takes reading this code :-)
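
(For context: __wait_lapic_expire() takes an unsigned cycle count, so the guard
has to make it obvious that "tsc_deadline - guest_tsc" cannot underflow. The two
equivalent forms, using only identifiers from the hunk above:

	/* Reads directly against the u64 subtraction it protects: */
	if (guest_tsc < tsc_deadline)
		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);

	/* Equivalent, but the reader must recall that
	 * advance_expire_delta == guest_tsc - tsc_deadline (signed):
	 */
	if (apic->lapic_timer.advance_expire_delta < 0)
		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
)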

>  		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
>  
>  	if (unlikely(!apic->lapic_timer.timer_advance_adjust_done))
> -		adaptive_tune_timer_advancement(vcpu, guest_tsc, tsc_deadline);
> +		adaptive_tune_timer_advancement(vcpu, apic->lapic_timer.advance_expire_delta);
>  }
>  
>  static void start_sw_tscdeadline(struct kvm_lapic *apic)
> diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
> index d6d049b..3e72a25 100644
> --- a/arch/x86/kvm/lapic.h
> +++ b/arch/x86/kvm/lapic.h
> @@ -32,6 +32,7 @@ struct kvm_timer {
>  	u64 tscdeadline;
>  	u64 expired_tscdeadline;
>  	u32 timer_advance_ns;
> +	s64 advance_expire_delta;
>  	atomic_t pending;			/* accumulated triggered timers */
>  	bool hv_timer_in_use;
>  	bool timer_advance_adjust_done;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index f2e3847..4a7b00c 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -7961,6 +7961,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  	++vcpu->stat.exits;
>  
>  	guest_exit_irqoff();
> +	trace_kvm_wait_lapic_expire(vcpu->vcpu_id,
> +		vcpu->arch.apic->lapic_timer.advance_expire_delta);

This needs to be guarded with lapic_in_kernel(vcpu).  But, since this is
all in the same flow, a better approach would be to return the delta from
wait_lapic_expire().  That saves 8 bytes in struct kvm_timer and avoids
additional checks for tracing the delta.

E.g.:

	s64 lapic_expire_delta;

	...

	if (lapic_in_kernel(vcpu) &&
	    vcpu->arch.apic->lapic_timer.timer_advance_ns)
		lapic_expire_delta = wait_lapic_expire(vcpu);
	else
		lapic_expire_delta = 0;

	...

	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, lapic_expire_delta);
>  
>  	local_irq_enable();
>  	preempt_enable();
> -- 
> 2.7.4
>
Wanpeng Li May 20, 2019, 6:38 a.m. UTC | #2
On Sat, 18 May 2019 at 03:44, Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> On Thu, May 16, 2019 at 11:06:19AM +0800, Wanpeng Li wrote:
> > From: Wanpeng Li <wanpengli@tencent.com>
> >
> > wait_lapic_expire() call was moved above guest_enter_irqoff() because of
> > its tracepoint, which violated the RCU extended quiescent state invoked
> > by guest_enter_irqoff()[1][2]. This patch simply moves the tracepoint
> > below guest_exit_irqoff() in vcpu_enter_guest(). Snapshot the delta before
> > VM-Enter, but trace it after VM-Exit. This can help us to move
> > wait_lapic_expire() just before vmentry in the later patch.
> >
> > [1] Commit 8b89fe1f6c43 ("kvm: x86: move tracepoints outside extended quiescent state")
> > [2] https://patchwork.kernel.org/patch/7821111/
> >
> > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > Cc: Radim Krčmář <rkrcmar@redhat.com>
> > Cc: Sean Christopherson <sean.j.christopherson@intel.com>
> > Cc: Liran Alon <liran.alon@oracle.com>
> > Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> > ---
> >  arch/x86/kvm/lapic.c | 16 ++++++++--------
> >  arch/x86/kvm/lapic.h |  1 +
> >  arch/x86/kvm/x86.c   |  2 ++
> >  3 files changed, 11 insertions(+), 8 deletions(-)
> >
> > diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> > index 2f364fe..af38ece 100644
> > --- a/arch/x86/kvm/lapic.c
> > +++ b/arch/x86/kvm/lapic.c
> > @@ -1502,27 +1502,27 @@ static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
> >  }
> >
> >  static inline void adaptive_tune_timer_advancement(struct kvm_vcpu *vcpu,
> > -                             u64 guest_tsc, u64 tsc_deadline)
> > +                             s64 advance_expire_delta)
> >  {
> >       struct kvm_lapic *apic = vcpu->arch.apic;
> >       u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
> >       u64 ns;
> >
> >       /* too early */
> > -     if (guest_tsc < tsc_deadline) {
> > -             ns = (tsc_deadline - guest_tsc) * 1000000ULL;
> > +     if (advance_expire_delta < 0) {
> > +             ns = -advance_expire_delta * 1000000ULL;
> >               do_div(ns, vcpu->arch.virtual_tsc_khz);
> >               timer_advance_ns -= min((u32)ns,
> >                       timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
> >       } else {
> >       /* too late */
> > -             ns = (guest_tsc - tsc_deadline) * 1000000ULL;
> > +             ns = advance_expire_delta * 1000000ULL;
> >               do_div(ns, vcpu->arch.virtual_tsc_khz);
> >               timer_advance_ns += min((u32)ns,
> >                       timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
> >       }
> >
> > -     if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
> > +     if (abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
> >               apic->lapic_timer.timer_advance_adjust_done = true;
> >       if (unlikely(timer_advance_ns > 5000)) {
> >               timer_advance_ns = 0;
> > @@ -1545,13 +1545,13 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
> >       tsc_deadline = apic->lapic_timer.expired_tscdeadline;
> >       apic->lapic_timer.expired_tscdeadline = 0;
> >       guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
> > -     trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
> > +     apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
> >
> > -     if (guest_tsc < tsc_deadline)
> > +     if (apic->lapic_timer.advance_expire_delta < 0)
>
> I'd prefer to keep "guest_tsc < tsc_deadline" here, just so that it's
> obvious that the call to __wait_lapic_expire() is safe.  My eyes did a
> few double takes reading this code :-)

Ok.

>
> >               __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
> >
> >       if (unlikely(!apic->lapic_timer.timer_advance_adjust_done))
> > -             adaptive_tune_timer_advancement(vcpu, guest_tsc, tsc_deadline);
> > +             adaptive_tune_timer_advancement(vcpu, apic->lapic_timer.advance_expire_delta);
> >  }
> >
> >  static void start_sw_tscdeadline(struct kvm_lapic *apic)
> > diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
> > index d6d049b..3e72a25 100644
> > --- a/arch/x86/kvm/lapic.h
> > +++ b/arch/x86/kvm/lapic.h
> > @@ -32,6 +32,7 @@ struct kvm_timer {
> >       u64 tscdeadline;
> >       u64 expired_tscdeadline;
> >       u32 timer_advance_ns;
> > +     s64 advance_expire_delta;
> >       atomic_t pending;                       /* accumulated triggered timers */
> >       bool hv_timer_in_use;
> >       bool timer_advance_adjust_done;
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index f2e3847..4a7b00c 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -7961,6 +7961,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> >       ++vcpu->stat.exits;
> >
> >       guest_exit_irqoff();
> > +     trace_kvm_wait_lapic_expire(vcpu->vcpu_id,
> > +             vcpu->arch.apic->lapic_timer.advance_expire_delta);
>
> This needs to be guarded with lapic_in_kernel(vcpu).  But, since this is
> all in the same flow, a better approach would be to return the delta from
> wait_lapic_expire().  That saves 8 bytes in struct kvm_timer and avoids
> additional checks for tracing the delta.

As you know, the function wait_lapic_expire() will be moved to vmx.c
and svm.c, so this is not suitable any more.

Regards,
Wanpeng Li

>
> E.g.:
>
>         s64 lapic_expire_delta;
>
>         ...
>
>         if (lapic_in_kernel(vcpu) &&
>             vcpu->arch.apic->lapic_timer.timer_advance_ns)
>                 lapic_expire_delta = wait_lapic_expire(vcpu);
>         else
>                 lapic_expire_delta = 0;
>
>         ...
>
>         trace_kvm_wait_lapic_expire(vcpu->vcpu_id, lapic_expire_delta);
> >
> >       local_irq_enable();
> >       preempt_enable();
> > --
> > 2.7.4
> >
Sean Christopherson May 20, 2019, 2:56 p.m. UTC | #3
On Mon, May 20, 2019 at 02:38:44PM +0800, Wanpeng Li wrote:
> On Sat, 18 May 2019 at 03:44, Sean Christopherson
> <sean.j.christopherson@intel.com> wrote:
> > This needs to be guarded with lapic_in_kernel(vcpu).  But, since this is
> > all in the same flow, a better approach would be to return the delta from
> > wait_lapic_expire().  That saves 8 bytes in struct kvm_timer and avoids
> > additional checks for tracing the delta.
> 
> As you know, the function wait_lapic_expire() will be moved to vmx.c
> and svm.c, so this is not suitable any more.

Doh, I was too excited about my cleverness and completely forgot why you
were moving the tracepoint in the first place.
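
In short, the follow-up patch moves the wait_lapic_expire() call into
vmx.c/svm.c, below the point where vcpu_enter_guest() could capture a return
value, while the tracepoint stays in vcpu_enter_guest(); hence the delta is
cached in struct kvm_timer rather than returned. A rough sketch of that end
state (the vendor-side placement is only illustrative, not taken from this
patch):

	/* vmx.c / svm.c, after the next patch in the series: */
	static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
	{
		...
		wait_lapic_expire(vcpu);	/* stores lapic_timer.advance_expire_delta */
		/* immediately followed by VM-Enter */
		...
	}

	/* x86.c: the tracepoint reads the cached delta after VM-Exit (modulo the
	 * lapic_in_kernel() guard raised above), so nothing needs to be threaded
	 * back through kvm_x86_ops->run().
	 */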

Patch

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2f364fe..af38ece 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1502,27 +1502,27 @@  static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
 }
 
 static inline void adaptive_tune_timer_advancement(struct kvm_vcpu *vcpu,
-				u64 guest_tsc, u64 tsc_deadline)
+				s64 advance_expire_delta)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
 	u64 ns;
 
 	/* too early */
-	if (guest_tsc < tsc_deadline) {
-		ns = (tsc_deadline - guest_tsc) * 1000000ULL;
+	if (advance_expire_delta < 0) {
+		ns = -advance_expire_delta * 1000000ULL;
 		do_div(ns, vcpu->arch.virtual_tsc_khz);
 		timer_advance_ns -= min((u32)ns,
 			timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
 	} else {
 	/* too late */
-		ns = (guest_tsc - tsc_deadline) * 1000000ULL;
+		ns = advance_expire_delta * 1000000ULL;
 		do_div(ns, vcpu->arch.virtual_tsc_khz);
 		timer_advance_ns += min((u32)ns,
 			timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
 	}
 
-	if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
+	if (abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
 		apic->lapic_timer.timer_advance_adjust_done = true;
 	if (unlikely(timer_advance_ns > 5000)) {
 		timer_advance_ns = 0;
@@ -1545,13 +1545,13 @@  void wait_lapic_expire(struct kvm_vcpu *vcpu)
 	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
 	apic->lapic_timer.expired_tscdeadline = 0;
 	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
-	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
+	apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
 
-	if (guest_tsc < tsc_deadline)
+	if (apic->lapic_timer.advance_expire_delta < 0)
 		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
 
 	if (unlikely(!apic->lapic_timer.timer_advance_adjust_done))
-		adaptive_tune_timer_advancement(vcpu, guest_tsc, tsc_deadline);
+		adaptive_tune_timer_advancement(vcpu, apic->lapic_timer.advance_expire_delta);
 }
 
 static void start_sw_tscdeadline(struct kvm_lapic *apic)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index d6d049b..3e72a25 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -32,6 +32,7 @@  struct kvm_timer {
 	u64 tscdeadline;
 	u64 expired_tscdeadline;
 	u32 timer_advance_ns;
+	s64 advance_expire_delta;
 	atomic_t pending;			/* accumulated triggered timers */
 	bool hv_timer_in_use;
 	bool timer_advance_adjust_done;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f2e3847..4a7b00c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7961,6 +7961,8 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	++vcpu->stat.exits;
 
 	guest_exit_irqoff();
+	trace_kvm_wait_lapic_expire(vcpu->vcpu_id,
+		vcpu->arch.apic->lapic_timer.advance_expire_delta);
 
 	local_irq_enable();
 	preempt_enable();
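
For reference, the cycles-to-nanoseconds conversion in
adaptive_tune_timer_advancement() works out as below; the numbers are
illustrative and not taken from the patch:

	ns = |advance_expire_delta| * 1000000 / virtual_tsc_khz

	e.g. virtual_tsc_khz = 2500000 (a 2.5 GHz guest TSC) and
	     advance_expire_delta = +5000 cycles (guest TSC read 5000 cycles past the deadline)
	=>   ns = 5000 * 1000000 / 2500000 = 2000
	=>   timer_advance_ns += min(2000, timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP)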