@@ -1170,7 +1170,7 @@ struct kvm_vcpu_stat {
u64 req_event;
u64 halt_poll_success_ns;
u64 halt_poll_fail_ns;
- u64 nested_run;
+ u64 nested_runs;
u64 directed_yield_attempted;
u64 directed_yield_successful;
};
@@ -596,8 +596,6 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
struct kvm_host_map map;
u64 vmcb12_gpa;
- ++vcpu->stat.nested_run;
-
if (is_smm(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
@@ -3872,6 +3872,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
svm->next_rip = 0;
if (is_guest_mode(vcpu)) {
nested_sync_control_from_vmcb02(svm);
+
+		/* Track VMRUNs that have made it past consistency checking */
+ if (svm->nested.nested_run_pending &&
+ svm->vmcb->control.exit_code != SVM_EXIT_ERR)
+ ++vcpu->stat.nested_runs;
+
svm->nested.nested_run_pending = 0;
}
@@ -3454,8 +3454,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
enum nested_evmptrld_status evmptrld_status;
- ++vcpu->stat.nested_run;
-
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -6839,7 +6839,18 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
kvm_load_host_xsave_state(vcpu);
- vmx->nested.nested_run_pending = 0;
+ if (is_guest_mode(vcpu)) {
+ /*
+	 * Track VMLAUNCH/VMRESUME that have made it past guest state
+ * checking.
+ */
+ if (vmx->nested.nested_run_pending &&
+ !vmx->exit_reason.failed_vmentry)
+ ++vcpu->stat.nested_runs;
+
+ vmx->nested.nested_run_pending = 0;
+ }
+
vmx->idt_vectoring_info = 0;
if (unlikely(vmx->fail)) {
@@ -243,7 +243,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("l1d_flush", l1d_flush),
VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VCPU_STAT("nested_run", nested_run),
+ VCPU_STAT("nested_runs", nested_runs),
VCPU_STAT("directed_yield_attempted", directed_yield_attempted),
VCPU_STAT("directed_yield_successful", directed_yield_successful),
VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
Currently, the 'nested_run' statistic counts all guest-entry attempts,
including those that fail during vmentry checks on Intel and during
consistency checks on AMD. Convert this statistic to count only those
guest-entries that make it past these state checks and make it to guest
code. This will tell us the number of guest-entries that actually
executed or tried to execute guest code. Also, rename this statistic to
'nested_runs' since it is a count.

Signed-off-by: Krish Sadhukhan <Krish.Sadhukhan@oracle.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/svm/nested.c       |  2 --
 arch/x86/kvm/svm/svm.c          |  6 ++++++
 arch/x86/kvm/vmx/nested.c       |  2 --
 arch/x86/kvm/vmx/vmx.c          | 13 ++++++++++++-
 arch/x86/kvm/x86.c              |  2 +-
 6 files changed, 20 insertions(+), 7 deletions(-)
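
For reference only (not part of the patch): a minimal userspace sketch for
reading the renamed counter. It assumes debugfs is mounted at
/sys/kernel/debug and that the aggregate stat is exposed as
/sys/kernel/debug/kvm/nested_runs, derived from the VCPU_STAT name above;
the exact path is an assumption and may differ on a given system.

/*
 * Minimal sketch: read the aggregate nested_runs counter from KVM's
 * debugfs directory. Assumes debugfs is mounted at /sys/kernel/debug and
 * that the stat file name matches the VCPU_STAT entry in this patch;
 * adjust the path if it differs. Requires appropriate privileges.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long nested_runs;
	FILE *f = fopen("/sys/kernel/debug/kvm/nested_runs", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%llu", &nested_runs) == 1)
		printf("guest entries that reached guest code: %llu\n",
		       nested_runs);
	fclose(f);
	return 0;
}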