
[2/3] KVM: VMX: modify preemption timer bit only when arming timer

Message ID 20180827222112.6640-3-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: VMX: handle preemption timer value of zero

Commit Message

Sean Christopherson Aug. 27, 2018, 10:21 p.m. UTC
Provide a singular location where the VMX preemption timer bit is
set/cleared so that future usages of the preemption timer can ensure
the VMCS bit is up-to-date without having to modify unrelated code
paths.  For example, the preemption timer can be used to force an
immediate VMExit.  Cache the status of the timer to avoid redundant
VMREAD and VMWRITE, e.g. if the timer stays armed across multiple
VMEnters/VMExits.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx.c | 45 ++++++++++++++++++++++-----------------------
 1 file changed, 22 insertions(+), 23 deletions(-)
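
The gist of the change is a compare-before-write guard around the pin-based VM-execution control: the desired state is compared against a cached flag and the VMCS is only touched when the two differ. A minimal sketch of the pattern follows; the helper name is illustrative only, the real logic lives in vmx_arm_hv_timer() in the patch at the bottom of this page.

static void set_hv_timer_bit(struct vcpu_vmx *vmx, bool arm)
{
	/* VMCS already matches the desired state, skip the VMREAD/VMWRITE */
	if (arm == vmx->hv_timer_armed)
		return;

	if (arm)
		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
			      PIN_BASED_VMX_PREEMPTION_TIMER);
	else
		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
				PIN_BASED_VMX_PREEMPTION_TIMER);
	vmx->hv_timer_armed = arm;
}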

Comments

Paolo Bonzini Sept. 14, 2018, 5:46 p.m. UTC | #1
On 28/08/2018 00:21, Sean Christopherson wrote:
> Provide a singular location where the VMX preemption timer bit is
> set/cleared so that future usages of the preemption timer can ensure
> the VMCS bit is up-to-date without having to modify unrelated code
> paths.  For example, the preemption timer can be used to force an
> immediate VMExit.  Cache the status of the timer to avoid redundant
> VMREAD and VMWRITE, e.g. if the timer stays armed across multiple
> VMEnters/VMExits.

It's mostly aesthetic, but I slightly prefer this:

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8f8aa6391a47..7a66a7e817af 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10596,10 +10596,9 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host, false);
 }
 
-static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	bool arm_timer;
 	u64 tscl;
 	u32 delta_tsc;
 
@@ -10613,16 +10612,16 @@ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
 			delta_tsc = 0;
 
 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
+		if (!vmx->hv_timer_armed)
+			vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+				      PIN_BASED_VMX_PREEMPTION_TIMER);
+		vmx->hv_timer_armed = true;
+	} else {
+		if (vmx->hv_timer_armed)
+			vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+					PIN_BASED_VMX_PREEMPTION_TIMER);
+		vmx->hv_timer_armed = false;
 	}
-
-	arm_timer = (vmx->hv_deadline_tsc != -1);
-	if (arm_timer && !vmx->hv_timer_armed)
-		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
-			PIN_BASED_VMX_PREEMPTION_TIMER);
-	else if (!arm_timer && vmx->hv_timer_armed)
-		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
-			PIN_BASED_VMX_PREEMPTION_TIMER);
-	vmx->hv_timer_armed = arm_timer;
 }
 
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -10682,7 +10681,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	atomic_switch_perf_msrs(vmx);
 
-	vmx_arm_hv_timer(vcpu);
+	vmx_update_hv_timer(vcpu);
 
 	/*
 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if

The generated code is probably pretty much the same, since the compiler
can figure out most of this.

Paolo
Paolo Bonzini Sept. 14, 2018, 5:51 p.m. UTC | #2
On 28/08/2018 00:21, Sean Christopherson wrote:
> Provide a singular location where the VMX preemption timer bit is
> set/cleared so that future usages of the preemption timer can ensure
> the VMCS bit is up-to-date without having to modify unrelated code
> paths.  For example, the preemption timer can be used to force an
> immediate VMExit.  Cache the status of the timer to avoid redundant
> VMREAD and VMWRITE, e.g. if the timer stays armed across multiple
> VMEnters/VMExits.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/x86/kvm/vmx.c | 45 ++++++++++++++++++++++-----------------------
>  1 file changed, 22 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 04afaaeb27a7..5ae46af2077d 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -1025,6 +1025,7 @@ struct vcpu_vmx {
>  
>  	/* apic deadline value in host tsc */
>  	u64 hv_deadline_tsc;
> +	bool hv_timer_armed;
>  
>  	u64 current_tsc_ratio;
>  
> @@ -10601,21 +10602,30 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
>  static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +	bool arm_timer;
>  	u64 tscl;
>  	u32 delta_tsc;
>  
> -	if (vmx->hv_deadline_tsc == -1)
> -		return;
> +	if (vmx->hv_deadline_tsc != -1) {
> +		tscl = rdtsc();
> +		if (vmx->hv_deadline_tsc > tscl)
> +			/* set_hv_timer ensures the delta fits in 32-bits */
> +			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
> +				cpu_preemption_timer_multi);
> +		else
> +			delta_tsc = 0;
>  
> -	tscl = rdtsc();
> -	if (vmx->hv_deadline_tsc > tscl)
> -		/* sure to be 32 bit only because checked on set_hv_timer */
> -		delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
> -			cpu_preemption_timer_multi);
> -	else
> -		delta_tsc = 0;
> +		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
> +	}
>  
> -	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
> +	arm_timer = (vmx->hv_deadline_tsc != -1);
> +	if (arm_timer && !vmx->hv_timer_armed)
> +		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
> +			PIN_BASED_VMX_PREEMPTION_TIMER);
> +	else if (!arm_timer && vmx->hv_timer_armed)
> +		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
> +			PIN_BASED_VMX_PREEMPTION_TIMER);
> +	vmx->hv_timer_armed = arm_timer;
>  }
>  
>  static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
> @@ -13236,12 +13246,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
>  	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
>  	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
>  	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
> -	if (vmx->hv_deadline_tsc == -1)
> -		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
> -				PIN_BASED_VMX_PREEMPTION_TIMER);
> -	else
> -		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
> -			      PIN_BASED_VMX_PREEMPTION_TIMER);
> +
>  	if (kvm_has_tsc_control)
>  		decache_tsc_multiplier(vmx);
>  
> @@ -13445,18 +13450,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
>  		return -ERANGE;
>  
>  	vmx->hv_deadline_tsc = tscl + delta_tsc;
> -	vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
> -			PIN_BASED_VMX_PREEMPTION_TIMER);
> -
>  	return delta_tsc == 0;
>  }
>  
>  static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
>  {
> -	struct vcpu_vmx *vmx = to_vmx(vcpu);
> -	vmx->hv_deadline_tsc = -1;
> -	vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
> -			PIN_BASED_VMX_PREEMPTION_TIMER);
> +	to_vmx(vcpu)->hv_deadline_tsc = -1;
>  }
>  #endif
>  
> 

Thinking more about it, hv_timer_armed actually must be in struct
loaded_vmcs.  I'll resend it later after testing.

Paolo
Sean Christopherson Sept. 19, 2018, 10:13 p.m. UTC | #3
On Fri, Sep 14, 2018 at 07:46:48PM +0200, Paolo Bonzini wrote:
> On 28/08/2018 00:21, Sean Christopherson wrote:
> > Provide a singular location where the VMX preemption timer bit is
> > set/cleared so that future usages of the preemption timer can ensure
> > the VMCS bit is up-to-date without having to modify unrelated code
> > paths.  For example, the preemption timer can be used to force an
> > immediate VMExit.  Cache the status of the timer to avoid redundant
> > VMREAD and VMWRITE, e.g. if the timer stays armed across multiple
> > VMEnters/VMExits.
> 
> It's mostly aesthetic, but I slightly prefer this:
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 8f8aa6391a47..7a66a7e817af 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -10596,10 +10596,9 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
>  					msrs[i].host, false);
>  }
>  
> -static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
> +static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> -	bool arm_timer;
>  	u64 tscl;
>  	u32 delta_tsc;
>  
> @@ -10613,16 +10612,16 @@ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
>  			delta_tsc = 0;
>  
>  		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
> +		if (!vmx->hv_timer_armed)
> +			vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
> +				      PIN_BASED_VMX_PREEMPTION_TIMER);
> +		vmx->hv_timer_armed = true;
> +	} else {
> +		if (vmx->hv_timer_armed)
> +			vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
> +					PIN_BASED_VMX_PREEMPTION_TIMER);
> +		vmx->hv_timer_armed = false;
>  	}
> -
> -	arm_timer = (vmx->hv_deadline_tsc != -1);
> -	if (arm_timer && !vmx->hv_timer_armed)
> -		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
> -			PIN_BASED_VMX_PREEMPTION_TIMER);
> -	else if (!arm_timer && vmx->hv_timer_armed)
> -		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
> -			PIN_BASED_VMX_PREEMPTION_TIMER);
> -	vmx->hv_timer_armed = arm_timer;
>  }
>  
>  static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
> @@ -10682,7 +10681,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  
>  	atomic_switch_perf_msrs(vmx);
>  
> -	vmx_arm_hv_timer(vcpu);
> +	vmx_update_hv_timer(vcpu);
>  
>  	/*
>  	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
> 
> The generated code is probably pretty much the same, since the compiler
> can figure out most of this.

Works for me, I all but literally flipped a coin when choosing between
the two approaches.  Do you want me to send a v2 with this change and
hv_timer_armed moved to loaded_vmcs?
Paolo Bonzini Sept. 19, 2018, 10:44 p.m. UTC | #4
On 20/09/2018 00:13, Sean Christopherson wrote:
> Works for me, I all but literally flipped a coin when choosing between
> the two approaches.  Do you want me to send a v2 with this change and
> hv_timer_armed moved to loaded_vmcs?

No, I'm sending it now (the above doesn't actually compile, it was just
a sketch).

Paolo
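
The fixed-up version is not shown in this thread. Because the cached flag mirrors a control bit that lives in a specific VMCS (vmcs01 vs. vmcs02), it belongs in struct loaded_vmcs rather than struct vcpu_vmx, as Paolo notes above. Below is a rough, untested sketch of how the reworked helpers might look with that move, combining Paolo's restructuring; the helper split and exact field placement are assumptions, not necessarily what was eventually applied.

struct loaded_vmcs {
	/* ... existing fields (vmcs, shadow_vmcs, cpu, launched, ...) ... */
	bool hv_timer_armed;	/* cached state of the preemption timer bit */
};

static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
	if (!vmx->loaded_vmcs->hv_timer_armed)
		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
			      PIN_BASED_VMX_PREEMPTION_TIMER);
	vmx->loaded_vmcs->hv_timer_armed = true;
}

static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 tscl;
	u32 delta_tsc;

	if (vmx->hv_deadline_tsc != -1) {
		tscl = rdtsc();
		if (vmx->hv_deadline_tsc > tscl)
			/* set_hv_timer ensures the delta fits in 32 bits */
			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
				cpu_preemption_timer_multi);
		else
			delta_tsc = 0;

		vmx_arm_hv_timer(vmx, delta_tsc);
		return;
	}

	if (vmx->loaded_vmcs->hv_timer_armed)
		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
				PIN_BASED_VMX_PREEMPTION_TIMER);
	vmx->loaded_vmcs->hv_timer_armed = false;
}

Keeping the flag next to the VMCS it shadows means vmcs01 and vmcs02 each track their own copy of the bit, so switching between L1 and L2 does not leave the cached state stale.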

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 04afaaeb27a7..5ae46af2077d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1025,6 +1025,7 @@ struct vcpu_vmx {
 
 	/* apic deadline value in host tsc */
 	u64 hv_deadline_tsc;
+	bool hv_timer_armed;
 
 	u64 current_tsc_ratio;
 
@@ -10601,21 +10602,30 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	bool arm_timer;
 	u64 tscl;
 	u32 delta_tsc;
 
-	if (vmx->hv_deadline_tsc == -1)
-		return;
+	if (vmx->hv_deadline_tsc != -1) {
+		tscl = rdtsc();
+		if (vmx->hv_deadline_tsc > tscl)
+			/* set_hv_timer ensures the delta fits in 32-bits */
+			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
+				cpu_preemption_timer_multi);
+		else
+			delta_tsc = 0;
 
-	tscl = rdtsc();
-	if (vmx->hv_deadline_tsc > tscl)
-		/* sure to be 32 bit only because checked on set_hv_timer */
-		delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
-			cpu_preemption_timer_multi);
-	else
-		delta_tsc = 0;
+		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
+	}
 
-	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
+	arm_timer = (vmx->hv_deadline_tsc != -1);
+	if (arm_timer && !vmx->hv_timer_armed)
+		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+			PIN_BASED_VMX_PREEMPTION_TIMER);
+	else if (!arm_timer && vmx->hv_timer_armed)
+		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+			PIN_BASED_VMX_PREEMPTION_TIMER);
+	vmx->hv_timer_armed = arm_timer;
 }
 
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -13236,12 +13246,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
-	if (vmx->hv_deadline_tsc == -1)
-		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
-				PIN_BASED_VMX_PREEMPTION_TIMER);
-	else
-		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
-			      PIN_BASED_VMX_PREEMPTION_TIMER);
+
 	if (kvm_has_tsc_control)
 		decache_tsc_multiplier(vmx);
 
@@ -13445,18 +13450,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
 		return -ERANGE;
 
 	vmx->hv_deadline_tsc = tscl + delta_tsc;
-	vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
-			PIN_BASED_VMX_PREEMPTION_TIMER);
-
 	return delta_tsc == 0;
 }
 
 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	vmx->hv_deadline_tsc = -1;
-	vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
-			PIN_BASED_VMX_PREEMPTION_TIMER);
+	to_vmx(vcpu)->hv_deadline_tsc = -1;
 }
 #endif