diff mbox

[v5,1/1] kvm: Make VM ioctl do valloc for some archs

Message ID 20180515113737.170480-1-marcorr@google.com (mailing list archive)
State New, archived
Headers show

Commit Message

Marc Orr May 15, 2018, 11:37 a.m. UTC
The kvm struct has been bloating. For example, it's tens of kilo-bytes
for x86, which turns out to be a large amount of memory to allocate
contiguously via kzalloc. Thus, this patch does the following:
1. Uses architecture-specific routines to allocate the kvm struct via
   kzalloc for x86 and also for arm when has_vhe() is true.
2. Introduces a baseline allocator for the kvm struct that uses valloc.
   This allocator can be used by an architecture by defining the
   __KVM_VALLOC_ARCH_VM macro before including include/linux/kvm_host.h.
3. Finally, continue to default to kzalloc for all other situations, as
   many architectures have taken a dependence on kzalloc.

Signed-off-by: Marc Orr <marcorr@google.com>
Change-Id: Ide392e51991142d1325e06351231ac685e11d820
---
 arch/arm/include/asm/kvm_host.h   |  4 ++++
 arch/arm64/include/asm/kvm_host.h |  4 ++++
 arch/x86/kvm/svm.c                |  4 ++--
 arch/x86/kvm/vmx.c                |  4 ++--
 include/linux/kvm_host.h          | 13 +++++++++++++
 virt/kvm/arm/arm.c                | 15 +++++++++++++++
 6 files changed, 40 insertions(+), 4 deletions(-)

Comments

David Hildenbrand May 16, 2018, 9:43 a.m. UTC | #1
On 15.05.2018 13:37, Marc Orr wrote:
> The kvm struct has been bloating. For example, it's tens of kilo-bytes
> for x86, which turns out to be a large amount of memory to allocate
> contiguously via kzalloc. Thus, this patch does the following:
> 1. Uses architecture-specific routines to allocat the kvm struct via
>    kzalloc for x86 and also for arm when has_tbe() is true.
> 2. Introduces a baseline allocator for the kvm struct that uses valloc.
>    This allocator can be used by an architecture by defining the
>    __KVM_VALLOC_ARCH_VM macro before including include/linux/kvm_host.h.
> 3. Finally, continue to default to kalloc for all other situations, as
>    many architectures have taken a dependence on kalloc.
> 
> Signed-off-by: Marc Orr <marcorr@google.com>
> Change-Id: Ide392e51991142d1325e06351231ac685e11d820
> ---
>  arch/arm/include/asm/kvm_host.h   |  4 ++++
>  arch/arm64/include/asm/kvm_host.h |  4 ++++
>  arch/x86/kvm/svm.c                |  4 ++--
>  arch/x86/kvm/vmx.c                |  4 ++--
>  include/linux/kvm_host.h          | 13 +++++++++++++
>  virt/kvm/arm/arm.c                | 15 +++++++++++++++
>  6 files changed, 40 insertions(+), 4 deletions(-)
> 

I thought we verified that s390x is also not a problem. Any reason not
to include it? (sorry if I am missing review comments from earlier
versions but if there is a reason, it should go into the cover letter)


> diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
> index c7c28c885a19..f38fd60b4d4d 100644
> --- a/arch/arm/include/asm/kvm_host.h
> +++ b/arch/arm/include/asm/kvm_host.h
> @@ -318,4 +318,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
>  static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
>  static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
>  
> +#define __KVM_HAVE_ARCH_VM_ALLOC
> +struct kvm *kvm_arch_alloc_vm(void);
> +void kvm_arch_free_vm(struct kvm *kvm);
> +
>  #endif /* __ARM_KVM_HOST_H__ */
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 469de8acd06f..358b1b4dcd92 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -455,4 +455,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
>  void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
>  void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
>  
> +#define __KVM_HAVE_ARCH_VM_ALLOC
> +struct kvm *kvm_arch_alloc_vm(void);
> +void kvm_arch_free_vm(struct kvm *kvm);
> +
>  #endif /* __ARM64_KVM_HOST_H__ */
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 1fc05e428aba..45b903ed3e5d 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1849,13 +1849,13 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
>  
>  static struct kvm *svm_vm_alloc(void)
>  {
> -	struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
> +	struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
>  	return &kvm_svm->kvm;
>  }
>  
>  static void svm_vm_free(struct kvm *kvm)
>  {
> -	kfree(to_kvm_svm(kvm));
> +	vfree(to_kvm_svm(kvm));
>  }
>  
>  static void sev_vm_destroy(struct kvm *kvm)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 3f1696570b41..ee85ba887db6 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -9936,13 +9936,13 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
>  
>  static struct kvm *vmx_vm_alloc(void)
>  {
> -	struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
> +	struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
>  	return &kvm_vmx->kvm;
>  }
>  
>  static void vmx_vm_free(struct kvm *kvm)
>  {
> -	kfree(to_kvm_vmx(kvm));
> +	vfree(to_kvm_vmx(kvm));
>  }
>  
>  static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 6d6e79c59e68..e3134283321a 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -19,6 +19,7 @@
>  #include <linux/preempt.h>
>  #include <linux/msi.h>
>  #include <linux/slab.h>
> +#include <linux/vmalloc.h>
>  #include <linux/rcupdate.h>
>  #include <linux/ratelimit.h>
>  #include <linux/err.h>
> @@ -808,6 +809,17 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
>  int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
>  
>  #ifndef __KVM_HAVE_ARCH_VM_ALLOC
> +#ifdef __KVM_VALLOC_ARCH_VM
> +static inline struct kvm *kvm_arch_alloc_vm(void)
> +{
> +	return vzalloc(sizeof(struct kvm));
> +}
> +
> +static inline void kvm_arch_free_vm(struct kvm *kvm)
> +{
> +	vfree(kvm);
> +}
> +#else
>  static inline struct kvm *kvm_arch_alloc_vm(void)
>  {
>  	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
> @@ -818,6 +830,7 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
>  	kfree(kvm);
>  }
>  #endif
> +#endif
>  
>  #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
>  void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
> index a4c1b76240df..ee96f18f852a 100644
> --- a/virt/kvm/arm/arm.c
> +++ b/virt/kvm/arm/arm.c
> @@ -249,6 +249,21 @@ long kvm_arch_dev_ioctl(struct file *filp,
>  	return -EINVAL;
>  }
>  
> +struct kvm *kvm_arch_alloc_vm(void)
> +{
> +	if (!has_vhe())
> +		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
> +
> +	return vzalloc(sizeof(struct kvm));
> +}
> +
> +void kvm_arch_free_vm(struct kvm *kvm)
> +{
> +	if (!has_vhe())
> +		kfree(kvm);
> +	else
> +		vfree(kvm);
> +}
>  
>  struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
>  {
>
Janosch Frank May 16, 2018, 10:48 a.m. UTC | #2
On 16.05.2018 11:43, David Hildenbrand wrote:
> On 15.05.2018 13:37, Marc Orr wrote:
>> The kvm struct has been bloating. For example, it's tens of kilo-bytes
>> for x86, which turns out to be a large amount of memory to allocate
>> contiguously via kzalloc. Thus, this patch does the following:
>> 1. Uses architecture-specific routines to allocat the kvm struct via
>>    kzalloc for x86 and also for arm when has_tbe() is true.
>> 2. Introduces a baseline allocator for the kvm struct that uses valloc.
>>    This allocator can be used by an architecture by defining the
>>    __KVM_VALLOC_ARCH_VM macro before including include/linux/kvm_host.h.
>> 3. Finally, continue to default to kalloc for all other situations, as
>>    many architectures have taken a dependence on kalloc.
>>
>> Signed-off-by: Marc Orr <marcorr@google.com>
>> Change-Id: Ide392e51991142d1325e06351231ac685e11d820
>> ---
>>  arch/arm/include/asm/kvm_host.h   |  4 ++++
>>  arch/arm64/include/asm/kvm_host.h |  4 ++++
>>  arch/x86/kvm/svm.c                |  4 ++--
>>  arch/x86/kvm/vmx.c                |  4 ++--
>>  include/linux/kvm_host.h          | 13 +++++++++++++
>>  virt/kvm/arm/arm.c                | 15 +++++++++++++++
>>  6 files changed, 40 insertions(+), 4 deletions(-)
>>
> 
> I thought we verified that s390x is also not a problem. Any reason not
> to include it? (sorry if I am missing review comments from earlier
> versions but if there is a reason, it should go into the cover letter)

Christian did decide against it as our struct is quite small compared to
others.

See:
Message-ID: <2401fda4-f081-0693-bcb6-3538bcea7be1@de.ibm.com>

> 
> 
>> diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
>> index c7c28c885a19..f38fd60b4d4d 100644
>> --- a/arch/arm/include/asm/kvm_host.h
>> +++ b/arch/arm/include/asm/kvm_host.h
>> @@ -318,4 +318,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
>>  static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
>>  static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
>>  
>> +#define __KVM_HAVE_ARCH_VM_ALLOC
>> +struct kvm *kvm_arch_alloc_vm(void);
>> +void kvm_arch_free_vm(struct kvm *kvm);
>> +
>>  #endif /* __ARM_KVM_HOST_H__ */
>> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
>> index 469de8acd06f..358b1b4dcd92 100644
>> --- a/arch/arm64/include/asm/kvm_host.h
>> +++ b/arch/arm64/include/asm/kvm_host.h
>> @@ -455,4 +455,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
>>  void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
>>  void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
>>  
>> +#define __KVM_HAVE_ARCH_VM_ALLOC
>> +struct kvm *kvm_arch_alloc_vm(void);
>> +void kvm_arch_free_vm(struct kvm *kvm);
>> +
>>  #endif /* __ARM64_KVM_HOST_H__ */
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index 1fc05e428aba..45b903ed3e5d 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -1849,13 +1849,13 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
>>  
>>  static struct kvm *svm_vm_alloc(void)
>>  {
>> -	struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
>> +	struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
>>  	return &kvm_svm->kvm;
>>  }
>>  
>>  static void svm_vm_free(struct kvm *kvm)
>>  {
>> -	kfree(to_kvm_svm(kvm));
>> +	vfree(to_kvm_svm(kvm));
>>  }
>>  
>>  static void sev_vm_destroy(struct kvm *kvm)
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index 3f1696570b41..ee85ba887db6 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -9936,13 +9936,13 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
>>  
>>  static struct kvm *vmx_vm_alloc(void)
>>  {
>> -	struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
>> +	struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
>>  	return &kvm_vmx->kvm;
>>  }
>>  
>>  static void vmx_vm_free(struct kvm *kvm)
>>  {
>> -	kfree(to_kvm_vmx(kvm));
>> +	vfree(to_kvm_vmx(kvm));
>>  }
>>  
>>  static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index 6d6e79c59e68..e3134283321a 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -19,6 +19,7 @@
>>  #include <linux/preempt.h>
>>  #include <linux/msi.h>
>>  #include <linux/slab.h>
>> +#include <linux/vmalloc.h>
>>  #include <linux/rcupdate.h>
>>  #include <linux/ratelimit.h>
>>  #include <linux/err.h>
>> @@ -808,6 +809,17 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
>>  int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
>>  
>>  #ifndef __KVM_HAVE_ARCH_VM_ALLOC
>> +#ifdef __KVM_VALLOC_ARCH_VM
>> +static inline struct kvm *kvm_arch_alloc_vm(void)
>> +{
>> +	return vzalloc(sizeof(struct kvm));
>> +}
>> +
>> +static inline void kvm_arch_free_vm(struct kvm *kvm)
>> +{
>> +	vfree(kvm);
>> +}
>> +#else
>>  static inline struct kvm *kvm_arch_alloc_vm(void)
>>  {
>>  	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
>> @@ -818,6 +830,7 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
>>  	kfree(kvm);
>>  }
>>  #endif
>> +#endif
>>  
>>  #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
>>  void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
>> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
>> index a4c1b76240df..ee96f18f852a 100644
>> --- a/virt/kvm/arm/arm.c
>> +++ b/virt/kvm/arm/arm.c
>> @@ -249,6 +249,21 @@ long kvm_arch_dev_ioctl(struct file *filp,
>>  	return -EINVAL;
>>  }
>>  
>> +struct kvm *kvm_arch_alloc_vm(void)
>> +{
>> +	if (!has_vhe())
>> +		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
>> +
>> +	return vzalloc(sizeof(struct kvm));
>> +}
>> +
>> +void kvm_arch_free_vm(struct kvm *kvm)
>> +{
>> +	if (!has_vhe())
>> +		kfree(kvm);
>> +	else
>> +		vfree(kvm);
>> +}
>>  
>>  struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
>>  {
>>
> 
>
Christian Borntraeger May 16, 2018, 11:01 a.m. UTC | #3
On 05/16/2018 11:43 AM, David Hildenbrand wrote:
> On 15.05.2018 13:37, Marc Orr wrote:
>> The kvm struct has been bloating. For example, it's tens of kilo-bytes
>> for x86, which turns out to be a large amount of memory to allocate
>> contiguously via kzalloc. Thus, this patch does the following:
>> 1. Uses architecture-specific routines to allocat the kvm struct via
>>    kzalloc for x86 and also for arm when has_tbe() is true.
>> 2. Introduces a baseline allocator for the kvm struct that uses valloc.
>>    This allocator can be used by an architecture by defining the
>>    __KVM_VALLOC_ARCH_VM macro before including include/linux/kvm_host.h.
>> 3. Finally, continue to default to kalloc for all other situations, as
>>    many architectures have taken a dependence on kalloc.
>>
>> Signed-off-by: Marc Orr <marcorr@google.com>
>> Change-Id: Ide392e51991142d1325e06351231ac685e11d820
>> ---
>>  arch/arm/include/asm/kvm_host.h   |  4 ++++
>>  arch/arm64/include/asm/kvm_host.h |  4 ++++
>>  arch/x86/kvm/svm.c                |  4 ++--
>>  arch/x86/kvm/vmx.c                |  4 ++--
>>  include/linux/kvm_host.h          | 13 +++++++++++++
>>  virt/kvm/arm/arm.c                | 15 +++++++++++++++
>>  6 files changed, 40 insertions(+), 4 deletions(-)
>>
> 
> I thought we verified that s390x is also not a problem. Any reason not
> to include it? (sorry if I am missing review comments from earlier
> versions but if there is a reason, it should go into the cover letter)

I asked for this. We are <16k and the allocation will always succeed. 
vmalloced memory is backed by 4k pages, kmalloced memory is backed by 1M
or 2G pages. This can avoid tlb misses, especially if backed by 2GB pages.
> 
> 
>> diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
>> index c7c28c885a19..f38fd60b4d4d 100644
>> --- a/arch/arm/include/asm/kvm_host.h
>> +++ b/arch/arm/include/asm/kvm_host.h
>> @@ -318,4 +318,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
>>  static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
>>  static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
>>  
>> +#define __KVM_HAVE_ARCH_VM_ALLOC
>> +struct kvm *kvm_arch_alloc_vm(void);
>> +void kvm_arch_free_vm(struct kvm *kvm);
>> +
>>  #endif /* __ARM_KVM_HOST_H__ */
>> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
>> index 469de8acd06f..358b1b4dcd92 100644
>> --- a/arch/arm64/include/asm/kvm_host.h
>> +++ b/arch/arm64/include/asm/kvm_host.h
>> @@ -455,4 +455,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
>>  void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
>>  void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
>>  
>> +#define __KVM_HAVE_ARCH_VM_ALLOC
>> +struct kvm *kvm_arch_alloc_vm(void);
>> +void kvm_arch_free_vm(struct kvm *kvm);
>> +
>>  #endif /* __ARM64_KVM_HOST_H__ */
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index 1fc05e428aba..45b903ed3e5d 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -1849,13 +1849,13 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
>>  
>>  static struct kvm *svm_vm_alloc(void)
>>  {
>> -	struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
>> +	struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
>>  	return &kvm_svm->kvm;
>>  }
>>  
>>  static void svm_vm_free(struct kvm *kvm)
>>  {
>> -	kfree(to_kvm_svm(kvm));
>> +	vfree(to_kvm_svm(kvm));
>>  }
>>  
>>  static void sev_vm_destroy(struct kvm *kvm)
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index 3f1696570b41..ee85ba887db6 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -9936,13 +9936,13 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
>>  
>>  static struct kvm *vmx_vm_alloc(void)
>>  {
>> -	struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
>> +	struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
>>  	return &kvm_vmx->kvm;
>>  }
>>  
>>  static void vmx_vm_free(struct kvm *kvm)
>>  {
>> -	kfree(to_kvm_vmx(kvm));
>> +	vfree(to_kvm_vmx(kvm));
>>  }
>>  
>>  static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index 6d6e79c59e68..e3134283321a 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -19,6 +19,7 @@
>>  #include <linux/preempt.h>
>>  #include <linux/msi.h>
>>  #include <linux/slab.h>
>> +#include <linux/vmalloc.h>
>>  #include <linux/rcupdate.h>
>>  #include <linux/ratelimit.h>
>>  #include <linux/err.h>
>> @@ -808,6 +809,17 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
>>  int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
>>  
>>  #ifndef __KVM_HAVE_ARCH_VM_ALLOC
>> +#ifdef __KVM_VALLOC_ARCH_VM
>> +static inline struct kvm *kvm_arch_alloc_vm(void)
>> +{
>> +	return vzalloc(sizeof(struct kvm));
>> +}
>> +
>> +static inline void kvm_arch_free_vm(struct kvm *kvm)
>> +{
>> +	vfree(kvm);
>> +}
>> +#else
>>  static inline struct kvm *kvm_arch_alloc_vm(void)
>>  {
>>  	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
>> @@ -818,6 +830,7 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
>>  	kfree(kvm);
>>  }
>>  #endif
>> +#endif
>>  
>>  #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
>>  void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
>> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
>> index a4c1b76240df..ee96f18f852a 100644
>> --- a/virt/kvm/arm/arm.c
>> +++ b/virt/kvm/arm/arm.c
>> @@ -249,6 +249,21 @@ long kvm_arch_dev_ioctl(struct file *filp,
>>  	return -EINVAL;
>>  }
>>  
>> +struct kvm *kvm_arch_alloc_vm(void)
>> +{
>> +	if (!has_vhe())
>> +		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
>> +
>> +	return vzalloc(sizeof(struct kvm));
>> +}
>> +
>> +void kvm_arch_free_vm(struct kvm *kvm)
>> +{
>> +	if (!has_vhe())
>> +		kfree(kvm);
>> +	else
>> +		vfree(kvm);
>> +}
>>  
>>  struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
>>  {
>>
> 
>
Marc Zyngier May 16, 2018, 11:02 a.m. UTC | #4
On 15/05/18 12:37, Marc Orr wrote:
> The kvm struct has been bloating. For example, it's tens of kilo-bytes
> for x86, which turns out to be a large amount of memory to allocate
> contiguously via kzalloc. Thus, this patch does the following:
> 1. Uses architecture-specific routines to allocat the kvm struct via
>    kzalloc for x86 and also for arm when has_tbe() is true.

s/has_tbe/has_vhe/.

> 2. Introduces a baseline allocator for the kvm struct that uses valloc.
>    This allocator can be used by an architecture by defining the
>    __KVM_VALLOC_ARCH_VM macro before including include/linux/kvm_host.h.
> 3. Finally, continue to default to kalloc for all other situations, as
>    many architectures have taken a dependence on kalloc.
> 
> Signed-off-by: Marc Orr <marcorr@google.com>
> Change-Id: Ide392e51991142d1325e06351231ac685e11d820

Please drop this tag.

> ---
>  arch/arm/include/asm/kvm_host.h   |  4 ++++
>  arch/arm64/include/asm/kvm_host.h |  4 ++++
>  arch/x86/kvm/svm.c                |  4 ++--
>  arch/x86/kvm/vmx.c                |  4 ++--
>  include/linux/kvm_host.h          | 13 +++++++++++++
>  virt/kvm/arm/arm.c                | 15 +++++++++++++++
>  6 files changed, 40 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
> index c7c28c885a19..f38fd60b4d4d 100644
> --- a/arch/arm/include/asm/kvm_host.h
> +++ b/arch/arm/include/asm/kvm_host.h
> @@ -318,4 +318,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
>  static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
>  static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
>  
> +#define __KVM_HAVE_ARCH_VM_ALLOC
> +struct kvm *kvm_arch_alloc_vm(void);
> +void kvm_arch_free_vm(struct kvm *kvm);
> +
>  #endif /* __ARM_KVM_HOST_H__ */
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 469de8acd06f..358b1b4dcd92 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -455,4 +455,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
>  void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
>  void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
>  
> +#define __KVM_HAVE_ARCH_VM_ALLOC
> +struct kvm *kvm_arch_alloc_vm(void);
> +void kvm_arch_free_vm(struct kvm *kvm);
> +
>  #endif /* __ARM64_KVM_HOST_H__ */
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 1fc05e428aba..45b903ed3e5d 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1849,13 +1849,13 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
>  
>  static struct kvm *svm_vm_alloc(void)
>  {
> -	struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
> +	struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
>  	return &kvm_svm->kvm;
>  }
>  
>  static void svm_vm_free(struct kvm *kvm)
>  {
> -	kfree(to_kvm_svm(kvm));
> +	vfree(to_kvm_svm(kvm));
>  }
>  
>  static void sev_vm_destroy(struct kvm *kvm)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 3f1696570b41..ee85ba887db6 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -9936,13 +9936,13 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
>  
>  static struct kvm *vmx_vm_alloc(void)
>  {
> -	struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
> +	struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
>  	return &kvm_vmx->kvm;
>  }
>  
>  static void vmx_vm_free(struct kvm *kvm)
>  {
> -	kfree(to_kvm_vmx(kvm));
> +	vfree(to_kvm_vmx(kvm));
>  }
>  
>  static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 6d6e79c59e68..e3134283321a 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -19,6 +19,7 @@
>  #include <linux/preempt.h>
>  #include <linux/msi.h>
>  #include <linux/slab.h>
> +#include <linux/vmalloc.h>
>  #include <linux/rcupdate.h>
>  #include <linux/ratelimit.h>
>  #include <linux/err.h>
> @@ -808,6 +809,17 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
>  int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
>  
>  #ifndef __KVM_HAVE_ARCH_VM_ALLOC
> +#ifdef __KVM_VALLOC_ARCH_VM
> +static inline struct kvm *kvm_arch_alloc_vm(void)
> +{
> +	return vzalloc(sizeof(struct kvm));
> +}
> +
> +static inline void kvm_arch_free_vm(struct kvm *kvm)
> +{
> +	vfree(kvm);
> +}
> +#else
>  static inline struct kvm *kvm_arch_alloc_vm(void)
>  {
>  	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
> @@ -818,6 +830,7 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
>  	kfree(kvm);
>  }
>  #endif
> +#endif
>  
>  #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
>  void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
> index a4c1b76240df..ee96f18f852a 100644
> --- a/virt/kvm/arm/arm.c
> +++ b/virt/kvm/arm/arm.c
> @@ -249,6 +249,21 @@ long kvm_arch_dev_ioctl(struct file *filp,
>  	return -EINVAL;
>  }
>  
> +struct kvm *kvm_arch_alloc_vm(void)
> +{
> +	if (!has_vhe())
> +		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
> +
> +	return vzalloc(sizeof(struct kvm));
> +}
> +
> +void kvm_arch_free_vm(struct kvm *kvm)
> +{
> +	if (!has_vhe())
> +		kfree(kvm);
> +	else
> +		vfree(kvm);
> +}
>  
>  struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
>  {
> 

Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>

	M.
Paolo Bonzini May 16, 2018, 11:11 a.m. UTC | #5
On 15/05/2018 13:37, Marc Orr wrote:
> The kvm struct has been bloating. For example, it's tens of kilo-bytes
> for x86, which turns out to be a large amount of memory to allocate
> contiguously via kzalloc. Thus, this patch does the following:
> 1. Uses architecture-specific routines to allocat the kvm struct via
>    kzalloc for x86 and also for arm when has_tbe() is true.
> 2. Introduces a baseline allocator for the kvm struct that uses valloc.
>    This allocator can be used by an architecture by defining the
>    __KVM_VALLOC_ARCH_VM macro before including include/linux/kvm_host.h.
> 3. Finally, continue to default to kalloc for all other situations, as
>    many architectures have taken a dependence on kalloc.
> 
> Signed-off-by: Marc Orr <marcorr@google.com>
> Change-Id: Ide392e51991142d1325e06351231ac685e11d820

Thanks, applied.  However__KVM_VALLOC_ARCH_VM is unused so I've dropped
it from the patch.

Paolo

> ---
>  arch/arm/include/asm/kvm_host.h   |  4 ++++
>  arch/arm64/include/asm/kvm_host.h |  4 ++++
>  arch/x86/kvm/svm.c                |  4 ++--
>  arch/x86/kvm/vmx.c                |  4 ++--
>  include/linux/kvm_host.h          | 13 +++++++++++++
>  virt/kvm/arm/arm.c                | 15 +++++++++++++++
>  6 files changed, 40 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
> index c7c28c885a19..f38fd60b4d4d 100644
> --- a/arch/arm/include/asm/kvm_host.h
> +++ b/arch/arm/include/asm/kvm_host.h
> @@ -318,4 +318,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
>  static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
>  static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
>  
> +#define __KVM_HAVE_ARCH_VM_ALLOC
> +struct kvm *kvm_arch_alloc_vm(void);
> +void kvm_arch_free_vm(struct kvm *kvm);
> +
>  #endif /* __ARM_KVM_HOST_H__ */
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 469de8acd06f..358b1b4dcd92 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -455,4 +455,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
>  void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
>  void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
>  
> +#define __KVM_HAVE_ARCH_VM_ALLOC
> +struct kvm *kvm_arch_alloc_vm(void);
> +void kvm_arch_free_vm(struct kvm *kvm);
> +
>  #endif /* __ARM64_KVM_HOST_H__ */
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 1fc05e428aba..45b903ed3e5d 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1849,13 +1849,13 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
>  
>  static struct kvm *svm_vm_alloc(void)
>  {
> -	struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
> +	struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
>  	return &kvm_svm->kvm;
>  }
>  
>  static void svm_vm_free(struct kvm *kvm)
>  {
> -	kfree(to_kvm_svm(kvm));
> +	vfree(to_kvm_svm(kvm));
>  }
>  
>  static void sev_vm_destroy(struct kvm *kvm)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 3f1696570b41..ee85ba887db6 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -9936,13 +9936,13 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
>  
>  static struct kvm *vmx_vm_alloc(void)
>  {
> -	struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
> +	struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
>  	return &kvm_vmx->kvm;
>  }
>  
>  static void vmx_vm_free(struct kvm *kvm)
>  {
> -	kfree(to_kvm_vmx(kvm));
> +	vfree(to_kvm_vmx(kvm));
>  }
>  
>  static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 6d6e79c59e68..e3134283321a 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -19,6 +19,7 @@
>  #include <linux/preempt.h>
>  #include <linux/msi.h>
>  #include <linux/slab.h>
> +#include <linux/vmalloc.h>
>  #include <linux/rcupdate.h>
>  #include <linux/ratelimit.h>
>  #include <linux/err.h>
> @@ -808,6 +809,17 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
>  int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
>  
>  #ifndef __KVM_HAVE_ARCH_VM_ALLOC
> +#ifdef __KVM_VALLOC_ARCH_VM
> +static inline struct kvm *kvm_arch_alloc_vm(void)
> +{
> +	return vzalloc(sizeof(struct kvm));
> +}
> +
> +static inline void kvm_arch_free_vm(struct kvm *kvm)
> +{
> +	vfree(kvm);
> +}
> +#else
>  static inline struct kvm *kvm_arch_alloc_vm(void)
>  {
>  	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
> @@ -818,6 +830,7 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
>  	kfree(kvm);
>  }
>  #endif
> +#endif
>  
>  #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
>  void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
> index a4c1b76240df..ee96f18f852a 100644
> --- a/virt/kvm/arm/arm.c
> +++ b/virt/kvm/arm/arm.c
> @@ -249,6 +249,21 @@ long kvm_arch_dev_ioctl(struct file *filp,
>  	return -EINVAL;
>  }
>  
> +struct kvm *kvm_arch_alloc_vm(void)
> +{
> +	if (!has_vhe())
> +		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
> +
> +	return vzalloc(sizeof(struct kvm));
> +}
> +
> +void kvm_arch_free_vm(struct kvm *kvm)
> +{
> +	if (!has_vhe())
> +		kfree(kvm);
> +	else
> +		vfree(kvm);
> +}
>  
>  struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
>  {
>
diff mbox

Patch

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c7c28c885a19..f38fd60b4d4d 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -318,4 +318,8 @@  static inline bool kvm_arm_harden_branch_predictor(void)
 static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
 
+#define __KVM_HAVE_ARCH_VM_ALLOC
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 469de8acd06f..358b1b4dcd92 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -455,4 +455,8 @@  static inline bool kvm_arm_harden_branch_predictor(void)
 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
 
+#define __KVM_HAVE_ARCH_VM_ALLOC
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1fc05e428aba..45b903ed3e5d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1849,13 +1849,13 @@  static void __unregister_enc_region_locked(struct kvm *kvm,
 
 static struct kvm *svm_vm_alloc(void)
 {
-	struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
+	struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
 	return &kvm_svm->kvm;
 }
 
 static void svm_vm_free(struct kvm *kvm)
 {
-	kfree(to_kvm_svm(kvm));
+	vfree(to_kvm_svm(kvm));
 }
 
 static void sev_vm_destroy(struct kvm *kvm)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3f1696570b41..ee85ba887db6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9936,13 +9936,13 @@  STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
 
 static struct kvm *vmx_vm_alloc(void)
 {
-	struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
+	struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
 	return &kvm_vmx->kvm;
 }
 
 static void vmx_vm_free(struct kvm *kvm)
 {
-	kfree(to_kvm_vmx(kvm));
+	vfree(to_kvm_vmx(kvm));
 }
 
 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6d6e79c59e68..e3134283321a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -19,6 +19,7 @@ 
 #include <linux/preempt.h>
 #include <linux/msi.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/rcupdate.h>
 #include <linux/ratelimit.h>
 #include <linux/err.h>
@@ -808,6 +809,17 @@  bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
+#ifdef __KVM_VALLOC_ARCH_VM
+static inline struct kvm *kvm_arch_alloc_vm(void)
+{
+	return vzalloc(sizeof(struct kvm));
+}
+
+static inline void kvm_arch_free_vm(struct kvm *kvm)
+{
+	vfree(kvm);
+}
+#else
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
 	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
@@ -818,6 +830,7 @@  static inline void kvm_arch_free_vm(struct kvm *kvm)
 	kfree(kvm);
 }
 #endif
+#endif
 
 #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a4c1b76240df..ee96f18f852a 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -249,6 +249,21 @@  long kvm_arch_dev_ioctl(struct file *filp,
 	return -EINVAL;
 }
 
+struct kvm *kvm_arch_alloc_vm(void)
+{
+	if (!has_vhe())
+		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+
+	return vzalloc(sizeof(struct kvm));
+}
+
+void kvm_arch_free_vm(struct kvm *kvm)
+{
+	if (!has_vhe())
+		kfree(kvm);
+	else
+		vfree(kvm);
+}
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {