diff mbox series

[4/4,v2] KVM: x86: Add a new VM statistic to show number of VCPUs created in a given VM

Message ID 20210520005012.68377-5-krish.sadhukhan@oracle.com (mailing list archive)
State New, archived
Headers show
Series KVM: nVMX: nSVM: Add more statistics to KVM debugfs | expand

Commit Message

Krish Sadhukhan May 20, 2021, 12:50 a.m. UTC
'struct kvm' already has a member for counting the number of VCPUs created
for a given VM. Add this as a new VM statistic to KVM debugfs.

Signed-off-by: Krish Sadhukhan <Krish.Sadhukhan@oracle.com>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/svm/svm.c          | 3 +--
 arch/x86/kvm/x86.c              | 1 +
 virt/kvm/kvm_main.c             | 2 ++
 4 files changed, 5 insertions(+), 2 deletions(-)

Comments

Sean Christopherson May 20, 2021, 3:04 p.m. UTC | #1
On Wed, May 19, 2021, Krish Sadhukhan wrote:
> 'struct kvm' already has a member for counting the number of VCPUs created
> for a given VM. Add this as a new VM statistic to KVM debugfs.

Huh!??  Why?  Userspace is the one creating the vCPUs, it darn well should know
how many it's created.
 
> Signed-off-by: Krish Sadhukhan <Krish.Sadhukhan@oracle.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 1 +
>  arch/x86/kvm/svm/svm.c          | 3 +--
>  arch/x86/kvm/x86.c              | 1 +
>  virt/kvm/kvm_main.c             | 2 ++
>  4 files changed, 5 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index a19fe2cfaa93..69ca1d6f6557 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1139,6 +1139,7 @@ struct kvm_vm_stat {
>  	ulong nx_lpage_splits;
>  	ulong max_mmu_page_hash_collisions;
>  	ulong vcpus_ran_nested;
> +	u64 created_vcpus;
>  };
>  
>  struct kvm_vcpu_stat {
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index d1871c51411f..fef0baba043b 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -3875,8 +3875,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
>  
>  		/* Track VMRUNs that have made past consistency checking */
>  		if (svm->nested.nested_run_pending &&
> -		    svm->vmcb->control.exit_code != SVM_EXIT_ERR &&
> -		    svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
> +		    svm->vmcb->control.exit_code != SVM_EXIT_ERR) {

???

>  			if (!vcpu->stat.nested_runs)
>  				++vcpu->kvm->stat.vcpus_ran_nested;
>                          ++vcpu->stat.nested_runs;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index cbca3609a152..a9d27ce4cc93 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -258,6 +258,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
>  	VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
>  	VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
>  	VM_STAT("vcpus_ran_nested", vcpus_ran_nested),
> +	VM_STAT("created_vcpus", created_vcpus),
>  	{ NULL }
>  };
>  
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 6b4feb92dc79..ac8f02d8a051 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -3318,6 +3318,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
>  	}
>  
>  	kvm->created_vcpus++;
> +	kvm->stat.created_vcpus++;
>  	mutex_unlock(&kvm->lock);
>  
>  	r = kvm_arch_vcpu_precreate(kvm, id);
> @@ -3394,6 +3395,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
>  vcpu_decrement:
>  	mutex_lock(&kvm->lock);
>  	kvm->created_vcpus--;
> +	kvm->stat.created_vcpus--;
>  	mutex_unlock(&kvm->lock);
>  	return r;
>  }
> -- 
> 2.27.0
>
Krish Sadhukhan May 21, 2021, 6:06 p.m. UTC | #2
On 5/20/21 8:04 AM, Sean Christopherson wrote:
> On Wed, May 19, 2021, Krish Sadhukhan wrote:
>> 'struct kvm' already has a member for counting the number of VCPUs created
>> for a given VM. Add this as a new VM statistic to KVM debugfs.
> Huh!??  Why?  Userspace is the one creating the vCPUs, it darn well should know
> how many it's created.
>   


If I am providing a host for users to create VMs, how do I know who 
creates how many VCPUs? This statistic is intended to show the usage of 
VCPU resources on a host used by customers.

>> Signed-off-by: Krish Sadhukhan <Krish.Sadhukhan@oracle.com>
>> ---
>>   arch/x86/include/asm/kvm_host.h | 1 +
>>   arch/x86/kvm/svm/svm.c          | 3 +--
>>   arch/x86/kvm/x86.c              | 1 +
>>   virt/kvm/kvm_main.c             | 2 ++
>>   4 files changed, 5 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>> index a19fe2cfaa93..69ca1d6f6557 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -1139,6 +1139,7 @@ struct kvm_vm_stat {
>>   	ulong nx_lpage_splits;
>>   	ulong max_mmu_page_hash_collisions;
>>   	ulong vcpus_ran_nested;
>> +	u64 created_vcpus;
>>   };
>>   
>>   struct kvm_vcpu_stat {
>> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>> index d1871c51411f..fef0baba043b 100644
>> --- a/arch/x86/kvm/svm/svm.c
>> +++ b/arch/x86/kvm/svm/svm.c
>> @@ -3875,8 +3875,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
>>   
>>   		/* Track VMRUNs that have made past consistency checking */
>>   		if (svm->nested.nested_run_pending &&
>> -		    svm->vmcb->control.exit_code != SVM_EXIT_ERR &&
>> -		    svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
>> +		    svm->vmcb->control.exit_code != SVM_EXIT_ERR) {
> ???


Sorry, this one sneaked in here from the other patch!  Will remove it.

>
>>   			if (!vcpu->stat.nested_runs)
>>   				++vcpu->kvm->stat.vcpus_ran_nested;
>>                           ++vcpu->stat.nested_runs;
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index cbca3609a152..a9d27ce4cc93 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -258,6 +258,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
>>   	VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
>>   	VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
>>   	VM_STAT("vcpus_ran_nested", vcpus_ran_nested),
>> +	VM_STAT("created_vcpus", created_vcpus),
>>   	{ NULL }
>>   };
>>   
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index 6b4feb92dc79..ac8f02d8a051 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -3318,6 +3318,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
>>   	}
>>   
>>   	kvm->created_vcpus++;
>> +	kvm->stat.created_vcpus++;
>>   	mutex_unlock(&kvm->lock);
>>   
>>   	r = kvm_arch_vcpu_precreate(kvm, id);
>> @@ -3394,6 +3395,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
>>   vcpu_decrement:
>>   	mutex_lock(&kvm->lock);
>>   	kvm->created_vcpus--;
>> +	kvm->stat.created_vcpus--;
>>   	mutex_unlock(&kvm->lock);
>>   	return r;
>>   }
>> -- 
>> 2.27.0
>>
Sean Christopherson May 27, 2021, 5:46 p.m. UTC | #3
On Fri, May 21, 2021, Krish Sadhukhan wrote:
> 
> On 5/20/21 8:04 AM, Sean Christopherson wrote:
> > On Wed, May 19, 2021, Krish Sadhukhan wrote:
> > > 'struct kvm' already has a member for counting the number of VCPUs created
> > > for a given VM. Add this as a new VM statistic to KVM debugfs.
> > Huh!??  Why?  Userspace is the one creating the vCPUs, it darn well should know
> > how many it's created.
> 
> If I am providing a host for users to create VMs, how do I know who creates
> how many VCPUs ? This statistic is intended show usage of VCPU resources on
> a host used by customers.

How are reviewers supposed to know that that's the use case?  Use the changelog
to state _why_ a patch is needed/justified.

> > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > > index cbca3609a152..a9d27ce4cc93 100644
> > > --- a/arch/x86/kvm/x86.c
> > > +++ b/arch/x86/kvm/x86.c
> > > @@ -258,6 +258,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
> > >   	VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
> > >   	VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
> > >   	VM_STAT("vcpus_ran_nested", vcpus_ran_nested),
> > > +	VM_STAT("created_vcpus", created_vcpus),

IMO, the "created" part is unnecessary for the stats, i.e. just call it "vcpus",
or maybe "nr_vcpus".

> > >   	{ NULL }
> > >   };
> > > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> > > index 6b4feb92dc79..ac8f02d8a051 100644
> > > --- a/virt/kvm/kvm_main.c
> > > +++ b/virt/kvm/kvm_main.c
> > > @@ -3318,6 +3318,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
> > >   	}
> > >   	kvm->created_vcpus++;
> > > +	kvm->stat.created_vcpus++;
> > >   	mutex_unlock(&kvm->lock);
> > >   	r = kvm_arch_vcpu_precreate(kvm, id);
> > > @@ -3394,6 +3395,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
> > >   vcpu_decrement:
> > >   	mutex_lock(&kvm->lock);
> > >   	kvm->created_vcpus--;
> > > +	kvm->stat.created_vcpus--;
> > >   	mutex_unlock(&kvm->lock);
> > >   	return r;
> > >   }
> > > -- 
> > > 2.27.0
> > >
diff mbox series

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a19fe2cfaa93..69ca1d6f6557 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1139,6 +1139,7 @@  struct kvm_vm_stat {
 	ulong nx_lpage_splits;
 	ulong max_mmu_page_hash_collisions;
 	ulong vcpus_ran_nested;
+	u64 created_vcpus;
 };
 
 struct kvm_vcpu_stat {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d1871c51411f..fef0baba043b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3875,8 +3875,7 @@  static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 		/* Track VMRUNs that have made past consistency checking */
 		if (svm->nested.nested_run_pending &&
-		    svm->vmcb->control.exit_code != SVM_EXIT_ERR &&
-		    svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
+		    svm->vmcb->control.exit_code != SVM_EXIT_ERR) {
 			if (!vcpu->stat.nested_runs)
 				++vcpu->kvm->stat.vcpus_ran_nested;
                         ++vcpu->stat.nested_runs;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cbca3609a152..a9d27ce4cc93 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -258,6 +258,7 @@  struct kvm_stats_debugfs_item debugfs_entries[] = {
 	VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
 	VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
 	VM_STAT("vcpus_ran_nested", vcpus_ran_nested),
+	VM_STAT("created_vcpus", created_vcpus),
 	{ NULL }
 };
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6b4feb92dc79..ac8f02d8a051 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3318,6 +3318,7 @@  static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	}
 
 	kvm->created_vcpus++;
+	kvm->stat.created_vcpus++;
 	mutex_unlock(&kvm->lock);
 
 	r = kvm_arch_vcpu_precreate(kvm, id);
@@ -3394,6 +3395,7 @@  static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 vcpu_decrement:
 	mutex_lock(&kvm->lock);
 	kvm->created_vcpus--;
+	kvm->stat.created_vcpus--;
 	mutex_unlock(&kvm->lock);
 	return r;
 }