diff mbox series

[3/4,v2] KVM: nVMX: nSVM: Add a new debugfs statistic to show how many VCPUs have run nested guests

Message ID 20210520005012.68377-4-krish.sadhukhan@oracle.com (mailing list archive)
State New, archived
Headers show
Series KVM: nVMX: nSVM: Add more statistics to KVM debugfs | expand

Commit Message

Krish Sadhukhan May 20, 2021, 12:50 a.m. UTC
Add a new debugfs statistic to show how many VCPUs have run nested guests.
Each VCPU is counted at most once — the first time it successfully runs
a nested guest.

Signed-off-by: Krish Sadhukhan <Krish.Sadhukhan@oracle.com>
Suggested-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/svm/svm.c          | 5 ++++-
 arch/x86/kvm/vmx/vmx.c          | 5 ++++-
 arch/x86/kvm/x86.c              | 1 +
 4 files changed, 10 insertions(+), 2 deletions(-)

Comments

Sean Christopherson May 20, 2021, 2:56 p.m. UTC | #1
On Wed, May 19, 2021, Krish Sadhukhan wrote:
> Add a new debugfs statistic to show how many VCPUs have run nested guests.
> This statistic considers only the first time a given VCPU successfully runs
> a nested guest.
> 
> Signed-off-by: Krish Sadhukhan <Krish.Sadhukhan@oracle.com>
> Suggested-by: Jim Mattson <jmattson@google.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 1 +
>  arch/x86/kvm/svm/svm.c          | 5 ++++-
>  arch/x86/kvm/vmx/vmx.c          | 5 ++++-
>  arch/x86/kvm/x86.c              | 1 +
>  4 files changed, 10 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index cf8557b2b90f..a19fe2cfaa93 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1138,6 +1138,7 @@ struct kvm_vm_stat {
>  	ulong lpages;
>  	ulong nx_lpage_splits;
>  	ulong max_mmu_page_hash_collisions;
> +	ulong vcpus_ran_nested;
>  };
>  
>  struct kvm_vcpu_stat {
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 57c351640355..d1871c51411f 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -3876,8 +3876,11 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
>  		/* Track VMRUNs that have made past consistency checking */
>  		if (svm->nested.nested_run_pending &&
>  		    svm->vmcb->control.exit_code != SVM_EXIT_ERR &&
> -		    svm->vmcb->control.exit_code != SVM_EXIT_NPF)
> +		    svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
> +			if (!vcpu->stat.nested_runs)
> +				++vcpu->kvm->stat.vcpus_ran_nested;

Using a separate counter seems unnecessary; userspace can aggregate
vcpu->stat.nested_run itself to see how many vCPUs have done nested VM-Enter.

Jim, were you thinking of something else?  Am I missing something?
Jim Mattson May 20, 2021, 4:57 p.m. UTC | #2
On Thu, May 20, 2021 at 7:56 AM Sean Christopherson <seanjc@google.com> wrote:
>
> On Wed, May 19, 2021, Krish Sadhukhan wrote:
> > Add a new debugfs statistic to show how many VCPUs have run nested guests.
> > This statistic considers only the first time a given VCPU successfully runs
> > a nested guest.
> >
> > Signed-off-by: Krish Sadhukhan <Krish.Sadhukhan@oracle.com>
> > Suggested-by: Jim Mattson <jmattson@google.com>
> > ---
> >  arch/x86/include/asm/kvm_host.h | 1 +
> >  arch/x86/kvm/svm/svm.c          | 5 ++++-
> >  arch/x86/kvm/vmx/vmx.c          | 5 ++++-
> >  arch/x86/kvm/x86.c              | 1 +
> >  4 files changed, 10 insertions(+), 2 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > index cf8557b2b90f..a19fe2cfaa93 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -1138,6 +1138,7 @@ struct kvm_vm_stat {
> >       ulong lpages;
> >       ulong nx_lpage_splits;
> >       ulong max_mmu_page_hash_collisions;
> > +     ulong vcpus_ran_nested;
> >  };
> >
> >  struct kvm_vcpu_stat {
> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > index 57c351640355..d1871c51411f 100644
> > --- a/arch/x86/kvm/svm/svm.c
> > +++ b/arch/x86/kvm/svm/svm.c
> > @@ -3876,8 +3876,11 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
> >               /* Track VMRUNs that have made past consistency checking */
> >               if (svm->nested.nested_run_pending &&
> >                   svm->vmcb->control.exit_code != SVM_EXIT_ERR &&
> > -                 svm->vmcb->control.exit_code != SVM_EXIT_NPF)
> > +                 svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
> > +                     if (!vcpu->stat.nested_runs)
> > +                             ++vcpu->kvm->stat.vcpus_ran_nested;
>
> Using a separate counter seems unnecessary, userspace can aggregate
> vcpu->stat.nested_run itself to see how many vCPUs have done nested VM-Enter.
>
> Jim, were you thinking of something else?  Am I missing something?

It was in the context of a proposed stat to indicate how many vCPUs
are *currently* running nested guests that I said I'd rather just know
how many vCPUs had *ever* run nested guests. I don't need a separate
stat. Checking vcpu->stat.nested_run for non-zero values works fine
for me.
Krish Sadhukhan May 20, 2021, 6:01 p.m. UTC | #3
On 5/20/21 9:57 AM, Jim Mattson wrote:
> On Thu, May 20, 2021 at 7:56 AM Sean Christopherson <seanjc@google.com> wrote:
>> On Wed, May 19, 2021, Krish Sadhukhan wrote:
>>> Add a new debugfs statistic to show how many VCPUs have run nested guests.
>>> This statistic considers only the first time a given VCPU successfully runs
>>> a nested guest.
>>>
>>> Signed-off-by: Krish Sadhukhan <Krish.Sadhukhan@oracle.com>
>>> Suggested-by: Jim Mattson <jmattson@google.com>
>>> ---
>>>   arch/x86/include/asm/kvm_host.h | 1 +
>>>   arch/x86/kvm/svm/svm.c          | 5 ++++-
>>>   arch/x86/kvm/vmx/vmx.c          | 5 ++++-
>>>   arch/x86/kvm/x86.c              | 1 +
>>>   4 files changed, 10 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>>> index cf8557b2b90f..a19fe2cfaa93 100644
>>> --- a/arch/x86/include/asm/kvm_host.h
>>> +++ b/arch/x86/include/asm/kvm_host.h
>>> @@ -1138,6 +1138,7 @@ struct kvm_vm_stat {
>>>        ulong lpages;
>>>        ulong nx_lpage_splits;
>>>        ulong max_mmu_page_hash_collisions;
>>> +     ulong vcpus_ran_nested;
>>>   };
>>>
>>>   struct kvm_vcpu_stat {
>>> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>>> index 57c351640355..d1871c51411f 100644
>>> --- a/arch/x86/kvm/svm/svm.c
>>> +++ b/arch/x86/kvm/svm/svm.c
>>> @@ -3876,8 +3876,11 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
>>>                /* Track VMRUNs that have made past consistency checking */
>>>                if (svm->nested.nested_run_pending &&
>>>                    svm->vmcb->control.exit_code != SVM_EXIT_ERR &&
>>> -                 svm->vmcb->control.exit_code != SVM_EXIT_NPF)
>>> +                 svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
>>> +                     if (!vcpu->stat.nested_runs)
>>> +                             ++vcpu->kvm->stat.vcpus_ran_nested;
>> Using a separate counter seems unnecessary, userspace can aggregate
>> vcpu->stat.nested_run itself to see how many vCPUs have done nested VM-Enter.
>>
>> Jim, were you thinking of something else?  Am I missing something?
> It was in the context of a proposed stat to indicate how many vCPUs
> are *currently* running nested guests that I said I'd rather just know
> how many vCPUs had *ever* run nested guests. I don't need a separate
> stat. Checking vcpu->stat.nested_run for non-zero values works fine
> for me.
I will fall back to my v1 idea then. That's at least useful if we want 
to create a time graph of VCPUs running nested guests.
diff mbox series

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cf8557b2b90f..a19fe2cfaa93 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1138,6 +1138,7 @@  struct kvm_vm_stat {
 	ulong lpages;
 	ulong nx_lpage_splits;
 	ulong max_mmu_page_hash_collisions;
+	ulong vcpus_ran_nested;
 };
 
 struct kvm_vcpu_stat {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 57c351640355..d1871c51411f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3876,8 +3876,11 @@  static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Track VMRUNs that have made past consistency checking */
 		if (svm->nested.nested_run_pending &&
 		    svm->vmcb->control.exit_code != SVM_EXIT_ERR &&
-		    svm->vmcb->control.exit_code != SVM_EXIT_NPF)
+		    svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
+			if (!vcpu->stat.nested_runs)
+				++vcpu->kvm->stat.vcpus_ran_nested;
 			++vcpu->stat.nested_runs;
+		}
 
 		svm->nested.nested_run_pending = 0;
 	}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index fa8df7ab2756..dc29aa926be6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6845,8 +6845,11 @@  static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		 * checking.
 		 */
 		if (vmx->nested.nested_run_pending &&
-		    !vmx->exit_reason.failed_vmentry)
+		    !vmx->exit_reason.failed_vmentry) {
+			if (!vcpu->stat.nested_runs)
+				++vcpu->kvm->stat.vcpus_ran_nested;
 			++vcpu->stat.nested_runs;
+		}
 
 		vmx->nested.nested_run_pending = 0;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6d1f51f6c344..cbca3609a152 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -257,6 +257,7 @@  struct kvm_stats_debugfs_item debugfs_entries[] = {
 	VM_STAT("largepages", lpages, .mode = 0444),
 	VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
 	VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
+	VM_STAT("vcpus_ran_nested", vcpus_ran_nested),
 	{ NULL }
 };