diff mbox series

[RFC,2/3] KVM: Avoid re-reading kvm->max_halt_poll_ns during halt-polling

Message ID 20221117001657.1067231-3-dmatlack@google.com (mailing list archive)
State New, archived
Headers show
Series KVM: Restore original behavior of kvm.halt_poll_ns | expand

Commit Message

David Matlack Nov. 17, 2022, 12:16 a.m. UTC
Avoid re-reading kvm->max_halt_poll_ns multiple times during
halt-polling except when it is explicitly useful, e.g. to check if the
max time changed across a halt. kvm->max_halt_poll_ns can be changed at
any time by userspace via KVM_CAP_HALT_POLL.

This bug is unlikely to cause any serious side-effects. In the worst
case one halt polls for shorter or longer than it should, and then is
fixed up on the next halt. Furthermore, this is still possible since
kvm->max_halt_poll_ns is not synchronized with halts.

Fixes: acd05785e48c ("kvm: add capability for halt polling")
Signed-off-by: David Matlack <dmatlack@google.com>
---
 virt/kvm/kvm_main.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

Comments

Yanan Wang Nov. 18, 2022, 8 a.m. UTC | #1
On 2022/11/17 8:16, David Matlack wrote:
> Avoid re-reading kvm->max_halt_poll_ns multiple times during
> halt-polling except when it is explicitly useful, e.g. to check if the
> max time changed across a halt. kvm->max_halt_poll_ns can be changed at
> any time by userspace via KVM_CAP_HALT_POLL.
>
> This bug is unlikely to cause any serious side-effects. In the worst
> case one halt polls for shorter or longer than it should, and then is
> fixed up on the next halt. Furthmore, this is still possible since
s/Furthmore/Furthermore
> kvm->max_halt_poll_ns are not synchronized with halts.
>
> Fixes: acd05785e48c ("kvm: add capability for halt polling")
> Signed-off-by: David Matlack <dmatlack@google.com>
> ---
>   virt/kvm/kvm_main.c | 21 +++++++++++++++------
>   1 file changed, 15 insertions(+), 6 deletions(-)
Looks good to me:
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>

Thanks,
Yanan
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 4b868f33c45d..78caf19608eb 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -3488,6 +3488,11 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
>   	}
>   }
>   
> +static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
> +{
> +	return READ_ONCE(vcpu->kvm->max_halt_poll_ns);
> +}
> +
>   /*
>    * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
>    * polling is enabled, busy wait for a short time before blocking to avoid the
> @@ -3496,14 +3501,15 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
>    */
>   void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
>   {
> +	unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
>   	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
>   	ktime_t start, cur, poll_end;
>   	bool waited = false;
>   	bool do_halt_poll;
>   	u64 halt_ns;
>   
> -	if (vcpu->halt_poll_ns > vcpu->kvm->max_halt_poll_ns)
> -		vcpu->halt_poll_ns = vcpu->kvm->max_halt_poll_ns;
> +	if (vcpu->halt_poll_ns > max_halt_poll_ns)
> +		vcpu->halt_poll_ns = max_halt_poll_ns;
>   
>   	do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
>   
> @@ -3545,18 +3551,21 @@ void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
>   		update_halt_poll_stats(vcpu, start, poll_end, !waited);
>   
>   	if (halt_poll_allowed) {
> +		/* Recompute the max halt poll time in case it changed. */
> +		max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
> +
>   		if (!vcpu_valid_wakeup(vcpu)) {
>   			shrink_halt_poll_ns(vcpu);
> -		} else if (vcpu->kvm->max_halt_poll_ns) {
> +		} else if (max_halt_poll_ns) {
>   			if (halt_ns <= vcpu->halt_poll_ns)
>   				;
>   			/* we had a long block, shrink polling */
>   			else if (vcpu->halt_poll_ns &&
> -				 halt_ns > vcpu->kvm->max_halt_poll_ns)
> +				 halt_ns > max_halt_poll_ns)
>   				shrink_halt_poll_ns(vcpu);
>   			/* we had a short halt and our poll time is too small */
> -			else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
> -				 halt_ns < vcpu->kvm->max_halt_poll_ns)
> +			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
> +				 halt_ns < max_halt_poll_ns)
>   				grow_halt_poll_ns(vcpu);
>   		} else {
>   			vcpu->halt_poll_ns = 0;
Christian Borntraeger Nov. 21, 2022, 8:04 a.m. UTC | #2
Am 17.11.22 um 01:16 schrieb David Matlack:
> Avoid re-reading kvm->max_halt_poll_ns multiple times during
> halt-polling except when it is explicitly useful, e.g. to check if the
> max time changed across a halt. kvm->max_halt_poll_ns can be changed at
> any time by userspace via KVM_CAP_HALT_POLL.
> 
> This bug is unlikely to cause any serious side-effects. In the worst
> case one halt polls for shorter or longer than it should, and then is
> fixed up on the next halt. Furthmore, this is still possible since
> kvm->max_halt_poll_ns are not synchronized with halts.
> 
> Fixes: acd05785e48c ("kvm: add capability for halt polling")
> Signed-off-by: David Matlack <dmatlack@google.com>

Acked-by: Christian Borntraeger <borntraeger@linux.ibm.com>
> ---
>   virt/kvm/kvm_main.c | 21 +++++++++++++++------
>   1 file changed, 15 insertions(+), 6 deletions(-)
> 
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 4b868f33c45d..78caf19608eb 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -3488,6 +3488,11 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
>   	}
>   }
>   
> +static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
> +{
> +	return READ_ONCE(vcpu->kvm->max_halt_poll_ns);
> +}
> +
>   /*
>    * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
>    * polling is enabled, busy wait for a short time before blocking to avoid the
> @@ -3496,14 +3501,15 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
>    */
>   void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
>   {
> +	unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
>   	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
>   	ktime_t start, cur, poll_end;
>   	bool waited = false;
>   	bool do_halt_poll;
>   	u64 halt_ns;
>   
> -	if (vcpu->halt_poll_ns > vcpu->kvm->max_halt_poll_ns)
> -		vcpu->halt_poll_ns = vcpu->kvm->max_halt_poll_ns;
> +	if (vcpu->halt_poll_ns > max_halt_poll_ns)
> +		vcpu->halt_poll_ns = max_halt_poll_ns;
>   
>   	do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
>   
> @@ -3545,18 +3551,21 @@ void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
>   		update_halt_poll_stats(vcpu, start, poll_end, !waited);
>   
>   	if (halt_poll_allowed) {
> +		/* Recompute the max halt poll time in case it changed. */
> +		max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
> +
>   		if (!vcpu_valid_wakeup(vcpu)) {
>   			shrink_halt_poll_ns(vcpu);
> -		} else if (vcpu->kvm->max_halt_poll_ns) {
> +		} else if (max_halt_poll_ns) {
>   			if (halt_ns <= vcpu->halt_poll_ns)
>   				;
>   			/* we had a long block, shrink polling */
>   			else if (vcpu->halt_poll_ns &&
> -				 halt_ns > vcpu->kvm->max_halt_poll_ns)
> +				 halt_ns > max_halt_poll_ns)
>   				shrink_halt_poll_ns(vcpu);
>   			/* we had a short halt and our poll time is too small */
> -			else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
> -				 halt_ns < vcpu->kvm->max_halt_poll_ns)
> +			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
> +				 halt_ns < max_halt_poll_ns)
>   				grow_halt_poll_ns(vcpu);
>   		} else {
>   			vcpu->halt_poll_ns = 0;
diff mbox series

Patch

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4b868f33c45d..78caf19608eb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3488,6 +3488,11 @@  static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
 	}
 }
 
+static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+	return READ_ONCE(vcpu->kvm->max_halt_poll_ns);
+}
+
 /*
  * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
  * polling is enabled, busy wait for a short time before blocking to avoid the
@@ -3496,14 +3501,15 @@  static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
  */
 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
+	unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
 	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
 	ktime_t start, cur, poll_end;
 	bool waited = false;
 	bool do_halt_poll;
 	u64 halt_ns;
 
-	if (vcpu->halt_poll_ns > vcpu->kvm->max_halt_poll_ns)
-		vcpu->halt_poll_ns = vcpu->kvm->max_halt_poll_ns;
+	if (vcpu->halt_poll_ns > max_halt_poll_ns)
+		vcpu->halt_poll_ns = max_halt_poll_ns;
 
 	do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
 
@@ -3545,18 +3551,21 @@  void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 		update_halt_poll_stats(vcpu, start, poll_end, !waited);
 
 	if (halt_poll_allowed) {
+		/* Recompute the max halt poll time in case it changed. */
+		max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
+
 		if (!vcpu_valid_wakeup(vcpu)) {
 			shrink_halt_poll_ns(vcpu);
-		} else if (vcpu->kvm->max_halt_poll_ns) {
+		} else if (max_halt_poll_ns) {
 			if (halt_ns <= vcpu->halt_poll_ns)
 				;
 			/* we had a long block, shrink polling */
 			else if (vcpu->halt_poll_ns &&
-				 halt_ns > vcpu->kvm->max_halt_poll_ns)
+				 halt_ns > max_halt_poll_ns)
 				shrink_halt_poll_ns(vcpu);
 			/* we had a short halt and our poll time is too small */
-			else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
-				 halt_ns < vcpu->kvm->max_halt_poll_ns)
+			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
+				 halt_ns < max_halt_poll_ns)
 				grow_halt_poll_ns(vcpu);
 		} else {
 			vcpu->halt_poll_ns = 0;