[v4,4/6] KVM: x86: Bump hypercall stat prior to fully completing hypercall

Message ID: 20241128004344.4072099-5-seanjc@google.com
State: New
Series: KVM: x86: Prep KVM hypercall handling for TDX

Commit Message

Sean Christopherson Nov. 28, 2024, 12:43 a.m. UTC
Increment the "hypercalls" stat for KVM hypercalls as soon as KVM knows
it will skip the guest instruction, i.e. once KVM is committed to emulating
the hypercall.  Waiting until completion adds no known value, and creates a
discrepancy where the stat will be bumped if KVM exits to userspace as a
result of trying to skip the instruction, but not if the hypercall itself
exits.

Handling the stat in common code will also avoid the need for another
helper to dedup code when TDX comes along (TDX needs a separate completion
path due to GPR usage differences).

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/x86.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
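For context, here is a minimal, self-contained userspace sketch (not KVM code; the struct and function names are purely illustrative) of the accounting behavior the patch establishes: the counter is bumped as soon as emulation is committed, before any path that might exit to userspace, so each emulated hypercall is counted exactly once regardless of how it completes.

/*
 * Toy model of the stat-accounting change: bump the counter once
 * emulation is committed, not on completion. Illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_stats {
	unsigned long hypercalls;
};

/* Returns true if the hypercall is handled entirely "in the kernel". */
static bool emulate_hypercall(struct vcpu_stats *stat, bool exits_to_userspace)
{
	/* Count the hypercall up front, once emulation is committed. */
	stat->hypercalls++;

	if (exits_to_userspace)
		return false;	/* completion happens later; stat already bumped */

	return true;
}

int main(void)
{
	struct vcpu_stats stat = { 0 };

	emulate_hypercall(&stat, false);	/* handled inline */
	emulate_hypercall(&stat, true);		/* exits to "userspace" */

	/* Both invocations are counted: prints "hypercalls = 2". */
	printf("hypercalls = %lu\n", stat.hypercalls);
	return 0;
}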

Comments

Xiaoyao Li Nov. 28, 2024, 3:24 a.m. UTC | #1
On 11/28/2024 8:43 AM, Sean Christopherson wrote:
> Increment the "hypercalls" stat for KVM hypercalls as soon as KVM knows
> it will skip the guest instruction, i.e. once KVM is committed to emulating
> the hypercall.  Waiting until completion adds no known value, and creates a
> discrepancy where the stat will be bumped if KVM exits to userspace as a
> result of trying to skip the instruction, but not if the hypercall itself
> exits.
> 
> Handling the stat in common code will also avoid the need for another
> helper to dedup code when TDX comes along (TDX needs a separate completion
> path due to GPR usage differences).

Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>


Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 13fe5d6eb8f3..11434752b467 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9979,7 +9979,6 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
 	if (!is_64_bit_hypercall(vcpu))
 		ret = (u32)ret;
 	kvm_rax_write(vcpu, ret);
-	++vcpu->stat.hypercalls;
 	return kvm_skip_emulated_instruction(vcpu);
 }
 
@@ -9990,6 +9989,8 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
 {
 	unsigned long ret;
 
+	++vcpu->stat.hypercalls;
+
 	trace_kvm_hypercall(nr, a0, a1, a2, a3);
 
 	if (!op_64_bit) {
@@ -10070,7 +10071,6 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
 	}
 
 out:
-	++vcpu->stat.hypercalls;
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);