Message ID: 20210609011935.103017-4-krish.sadhukhan@oracle.com
State:      New, archived
Series:     KVM: nVMX: nSVM: Add more statistics to KVM debugfs
Hi Krish,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on kvm/queue]
[also build test ERROR on v5.13-rc5 next-20210608]
[cannot apply to vhost/linux-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Krish-Sadhukhan/KVM-nVMX-nSVM-Add-more-statistics-to-KVM-debugfs/20210609-101158
base:   https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
config: s390-randconfig-r034-20210608 (attached as .config)
compiler: s390-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/8b558261089468777eaf3ec89ca30eb954242e4e
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Krish-Sadhukhan/KVM-nVMX-nSVM-Add-more-statistics-to-KVM-debugfs/20210609-101158
        git checkout 8b558261089468777eaf3ec89ca30eb954242e4e
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=s390

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   arch/s390/kvm/../../../virt/kvm/kvm_main.c: In function 'kvm_vm_ioctl_create_vcpu':
>> arch/s390/kvm/../../../virt/kvm/kvm_main.c:3321:11: error: 'struct kvm_vm_stat' has no member named 'vcpus'
    3321 |   kvm->stat.vcpus++;
         |             ^
   arch/s390/kvm/../../../virt/kvm/kvm_main.c:3398:11: error: 'struct kvm_vm_stat' has no member named 'vcpus'
    3398 |   kvm->stat.vcpus--;
         |             ^

vim +3321 arch/s390/kvm/../../../virt/kvm/kvm_main.c

  3301
  3302  /*
  3303   * Creates some virtual cpus. Good luck creating more than one.
  3304   */
  3305  static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
  3306  {
  3307          int r;
  3308          struct kvm_vcpu *vcpu;
  3309          struct page *page;
  3310
  3311          if (id >= KVM_MAX_VCPU_ID)
  3312                  return -EINVAL;
  3313
  3314          mutex_lock(&kvm->lock);
  3315          if (kvm->created_vcpus == KVM_MAX_VCPUS) {
  3316                  mutex_unlock(&kvm->lock);
  3317                  return -EINVAL;
  3318          }
  3319
  3320          kvm->created_vcpus++;
> 3321          kvm->stat.vcpus++;
  3322          mutex_unlock(&kvm->lock);
  3323
  3324          r = kvm_arch_vcpu_precreate(kvm, id);
  3325          if (r)
  3326                  goto vcpu_decrement;
  3327
  3328          vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
  3329          if (!vcpu) {
  3330                  r = -ENOMEM;
  3331                  goto vcpu_decrement;
  3332          }
  3333
  3334          BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
  3335          page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
  3336          if (!page) {
  3337                  r = -ENOMEM;
  3338                  goto vcpu_free;
  3339          }
  3340          vcpu->run = page_address(page);
  3341
  3342          kvm_vcpu_init(vcpu, kvm, id);
  3343
  3344          r = kvm_arch_vcpu_create(vcpu);
  3345          if (r)
  3346                  goto vcpu_free_run_page;
  3347
  3348          if (kvm->dirty_ring_size) {
  3349                  r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
  3350                                           id, kvm->dirty_ring_size);
  3351                  if (r)
  3352                          goto arch_vcpu_destroy;
  3353          }
  3354
  3355          mutex_lock(&kvm->lock);
  3356          if (kvm_get_vcpu_by_id(kvm, id)) {
  3357                  r = -EEXIST;
  3358                  goto unlock_vcpu_destroy;
  3359          }
  3360
  3361          vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
  3362          BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
  3363
  3364          /* Now it's all set up, let userspace reach it */
  3365          kvm_get_kvm(kvm);
  3366          r = create_vcpu_fd(vcpu);
  3367          if (r < 0) {
  3368                  kvm_put_kvm_no_destroy(kvm);
  3369                  goto unlock_vcpu_destroy;
  3370          }
  3371
  3372          kvm->vcpus[vcpu->vcpu_idx] = vcpu;
  3373
  3374          /*
  3375           * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus
  3376           * before kvm->online_vcpu's incremented value.
  3377           */
  3378          smp_wmb();
  3379          atomic_inc(&kvm->online_vcpus);
  3380
  3381          mutex_unlock(&kvm->lock);
  3382          kvm_arch_vcpu_postcreate(vcpu);
  3383          kvm_create_vcpu_debugfs(vcpu);
  3384          return r;
  3385
  3386  unlock_vcpu_destroy:
  3387          mutex_unlock(&kvm->lock);
  3388          kvm_dirty_ring_free(&vcpu->dirty_ring);
  3389  arch_vcpu_destroy:
  3390          kvm_arch_vcpu_destroy(vcpu);
  3391  vcpu_free_run_page:
  3392          free_page((unsigned long)vcpu->run);
  3393  vcpu_free:
  3394          kmem_cache_free(kvm_vcpu_cache, vcpu);
  3395  vcpu_decrement:
  3396          mutex_lock(&kvm->lock);
  3397          kvm->created_vcpus--;
  3398          kvm->stat.vcpus--;
  3399          mutex_unlock(&kvm->lock);
  3400          return r;
  3401  }
  3402

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Hi Krish,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on kvm/queue]
[also build test ERROR on v5.13-rc5 next-20210608]
[cannot apply to vhost/linux-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Krish-Sadhukhan/KVM-nVMX-nSVM-Add-more-statistics-to-KVM-debugfs/20210609-101158
base:   https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
config: powerpc-pseries_defconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/8b558261089468777eaf3ec89ca30eb954242e4e
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Krish-Sadhukhan/KVM-nVMX-nSVM-Add-more-statistics-to-KVM-debugfs/20210609-101158
        git checkout 8b558261089468777eaf3ec89ca30eb954242e4e
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=powerpc

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   arch/powerpc/kvm/../../../virt/kvm/kvm_main.c: In function 'kvm_vm_ioctl_create_vcpu':
>> arch/powerpc/kvm/../../../virt/kvm/kvm_main.c:3321:11: error: 'struct kvm_vm_stat' has no member named 'vcpus'
    3321 |   kvm->stat.vcpus++;
         |             ^
   arch/powerpc/kvm/../../../virt/kvm/kvm_main.c:3398:11: error: 'struct kvm_vm_stat' has no member named 'vcpus'
    3398 |   kvm->stat.vcpus--;
         |             ^

vim +3321 arch/powerpc/kvm/../../../virt/kvm/kvm_main.c

  3301
  3302  /*
  3303   * Creates some virtual cpus. Good luck creating more than one.
  3304   */
  3305  static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
  3306  {
  3307          int r;
  3308          struct kvm_vcpu *vcpu;
  3309          struct page *page;
  3310
  3311          if (id >= KVM_MAX_VCPU_ID)
  3312                  return -EINVAL;
  3313
  3314          mutex_lock(&kvm->lock);
  3315          if (kvm->created_vcpus == KVM_MAX_VCPUS) {
  3316                  mutex_unlock(&kvm->lock);
  3317                  return -EINVAL;
  3318          }
  3319
  3320          kvm->created_vcpus++;
> 3321          kvm->stat.vcpus++;
  3322          mutex_unlock(&kvm->lock);
  3323
  3324          r = kvm_arch_vcpu_precreate(kvm, id);
  3325          if (r)
  3326                  goto vcpu_decrement;
  3327
  3328          vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
  3329          if (!vcpu) {
  3330                  r = -ENOMEM;
  3331                  goto vcpu_decrement;
  3332          }
  3333
  3334          BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
  3335          page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
  3336          if (!page) {
  3337                  r = -ENOMEM;
  3338                  goto vcpu_free;
  3339          }
  3340          vcpu->run = page_address(page);
  3341
  3342          kvm_vcpu_init(vcpu, kvm, id);
  3343
  3344          r = kvm_arch_vcpu_create(vcpu);
  3345          if (r)
  3346                  goto vcpu_free_run_page;
  3347
  3348          if (kvm->dirty_ring_size) {
  3349                  r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
  3350                                           id, kvm->dirty_ring_size);
  3351                  if (r)
  3352                          goto arch_vcpu_destroy;
  3353          }
  3354
  3355          mutex_lock(&kvm->lock);
  3356          if (kvm_get_vcpu_by_id(kvm, id)) {
  3357                  r = -EEXIST;
  3358                  goto unlock_vcpu_destroy;
  3359          }
  3360
  3361          vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
  3362          BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
  3363
  3364          /* Now it's all set up, let userspace reach it */
  3365          kvm_get_kvm(kvm);
  3366          r = create_vcpu_fd(vcpu);
  3367          if (r < 0) {
  3368                  kvm_put_kvm_no_destroy(kvm);
  3369                  goto unlock_vcpu_destroy;
  3370          }
  3371
  3372          kvm->vcpus[vcpu->vcpu_idx] = vcpu;
  3373
  3374          /*
  3375           * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus
  3376           * before kvm->online_vcpu's incremented value.
  3377           */
  3378          smp_wmb();
  3379          atomic_inc(&kvm->online_vcpus);
  3380
  3381          mutex_unlock(&kvm->lock);
  3382          kvm_arch_vcpu_postcreate(vcpu);
  3383          kvm_create_vcpu_debugfs(vcpu);
  3384          return r;
  3385
  3386  unlock_vcpu_destroy:
  3387          mutex_unlock(&kvm->lock);
  3388          kvm_dirty_ring_free(&vcpu->dirty_ring);
  3389  arch_vcpu_destroy:
  3390          kvm_arch_vcpu_destroy(vcpu);
  3391  vcpu_free_run_page:
  3392          free_page((unsigned long)vcpu->run);
  3393  vcpu_free:
  3394          kmem_cache_free(kvm_vcpu_cache, vcpu);
  3395  vcpu_decrement:
  3396          mutex_lock(&kvm->lock);
  3397          kvm->created_vcpus--;
  3398          kvm->stat.vcpus--;
  3399          mutex_unlock(&kvm->lock);
  3400          return r;
  3401  }
  3402

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
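Both reports point at the same root cause: the new 'vcpus' member is added only to the x86 definition of struct kvm_vm_stat, while the increment and decrement sit in the architecture-independent virt/kvm/kvm_main.c, so every other architecture that builds kvm_main.c (s390 and powerpc here, and presumably the rest) fails to compile. A minimal sketch of what each non-x86 architecture would additionally need, assuming the fix simply mirrors the x86 hunk in the diff below (the existing members are elided and the field type is copied from the x86 declaration):

    /*
     * Sketch only, e.g. for arch/s390/include/asm/kvm_host.h; the same
     * addition would be needed in every architecture's kvm_vm_stat,
     * because the generic kvm_vm_ioctl_create_vcpu() now touches
     * kvm->stat.vcpus.
     */
    struct kvm_vm_stat {
            /* ... existing architecture-specific counters ... */
            ulong vcpus;    /* hypothetical, mirrors the member added for x86 */
    };

Defining the member is only enough to fix the build; an architecture that also wants to expose the counter would presumably need a matching VM_STAT("vcpus", vcpus) entry in its own debugfs_entries[] table, as the x86 hunk does.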
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f6d5387bb88f..8f61a3fc3d39 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1138,6 +1138,7 @@ struct kvm_vm_stat {
 	ulong lpages;
 	ulong nx_lpage_splits;
 	ulong max_mmu_page_hash_collisions;
+	ulong vcpus;
 };
 
 struct kvm_vcpu_stat {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index baa953757911..7a1ff3052488 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -258,6 +258,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	VM_STAT("largepages", lpages, .mode = 0444),
 	VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
 	VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
+	VM_STAT("vcpus", vcpus),
 	{ NULL }
 };
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6b4feb92dc79..d910e4020a43 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3318,6 +3318,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	}
 
 	kvm->created_vcpus++;
+	kvm->stat.vcpus++;
 	mutex_unlock(&kvm->lock);
 
 	r = kvm_arch_vcpu_precreate(kvm, id);
@@ -3394,6 +3395,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 vcpu_decrement:
 	mutex_lock(&kvm->lock);
 	kvm->created_vcpus--;
+	kvm->stat.vcpus--;
 	mutex_unlock(&kvm->lock);
 	return r;
 }
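For context, one way the counting could stay x86-only (and avoid the cross-architecture build errors reported above) would be to do the accounting in the x86 vCPU hooks rather than in the generic creation path. This is only an illustrative alternative, not what the posted patch does; it counts a vCPU slightly later than kvm->created_vcpus (only after arch creation succeeds), and a real version would still have to decide what, if anything, should serialize the counter. The helpers x86_do_vcpu_create() and x86_do_vcpu_destroy() are hypothetical stand-ins for the existing bodies of the hooks:

    /*
     * Illustrative alternative in arch/x86/kvm/x86.c: if the accounting
     * never leaves arch code, only x86's struct kvm_vm_stat needs the
     * new member and other architectures keep building.
     */
    int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
    {
            int r = x86_do_vcpu_create(vcpu);       /* hypothetical: existing setup */

            if (!r)
                    vcpu->kvm->stat.vcpus++;        /* count only fully created vCPUs */
            return r;
    }

    void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
    {
            x86_do_vcpu_destroy(vcpu);              /* hypothetical: existing teardown */
            vcpu->kvm->stat.vcpus--;
    }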
'struct kvm' already has a member for tracking the number of VCPUs created
in a given VM. Add this as a new VM statistic to KVM debugfs. This statistic
can be a useful metric to track the usage of VCPUs on a host running
customer VMs.

Signed-off-by: Krish Sadhukhan <Krish.Sadhukhan@oracle.com>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/x86.c              | 1 +
 virt/kvm/kvm_main.c             | 2 ++
 3 files changed, 4 insertions(+)
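Once the build issue is addressed, the counter declared by the VM_STAT("vcpus", vcpus) entry would presumably show up alongside the other VM statistics under the KVM debugfs directory. A minimal userspace sketch for reading the aggregate value, assuming debugfs is mounted at /sys/kernel/debug and the entry keeps the name given in the VM_STAT() line:

    #include <stdio.h>

    /*
     * Sketch only: read the host-wide "vcpus" counter this patch would add.
     * The path assumes debugfs is mounted at /sys/kernel/debug; per-VM
     * values would live in the per-VM subdirectories KVM creates there.
     */
    int main(void)
    {
            unsigned long long vcpus;
            FILE *f = fopen("/sys/kernel/debug/kvm/vcpus", "r");

            if (!f) {
                    perror("fopen /sys/kernel/debug/kvm/vcpus");
                    return 1;
            }
            if (fscanf(f, "%llu", &vcpus) == 1)
                    printf("vCPUs created across all VMs: %llu\n", vcpus);
            fclose(f);
            return 0;
    }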