
[2/2] kvm: vmx: use vmalloc() to allocate vcpus

Message ID 20181020211200.255171-3-marcorr@google.com (mailing list archive)
State New, archived
Series kvm: vmalloc vmx vcpus

Commit Message

Marc Orr Oct. 20, 2018, 9:12 p.m. UTC
Previously, vcpus were allocated through the kmem_cache_zalloc() API,
which requires the underlying physical memory to be contiguous.
Because the x86 vcpu struct, struct vcpu_vmx, is relatively large
(e.g., currently 47680 bytes on my setup), it can become hard to find
contiguous memory.

At the same time, the comments in the code indicate that the primary
reason for using the kmem_cache_zalloc() API is to align the memory
rather than to provide physical contiguity.

Thus, this patch updates the vcpu allocation logic for vmx to use the
vmalloc() API.

Signed-off-by: Marc Orr <marcorr@google.com>
---
 arch/x86/kvm/vmx.c  | 98 +++++++++++++++++++++++++++++++++++++++++----
 virt/kvm/kvm_main.c | 28 +++++++------
 2 files changed, 106 insertions(+), 20 deletions(-)
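
For illustration, a minimal sketch of the two allocation styles the
commit message contrasts. This is not part of the patch: kvm_vcpu_cache
is the slab cache used by the old path, and the patch itself calls the
lower-level __vmalloc_node_range() rather than plain vzalloc() in order
to pass extra flags such as __GFP_ACCOUNT.

	struct vcpu_vmx *vmx;

	/*
	 * Old path: slab allocation; the object is backed by physically
	 * contiguous pages, which can fail under fragmentation even when
	 * plenty of memory is free.
	 */
	vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);

	/*
	 * New path (simplified): vmalloc-style allocation; each backing
	 * page is allocated independently and mapped virtually
	 * contiguous, so physical fragmentation does not matter.
	 */
	vmx = vzalloc(sizeof(struct vcpu_vmx));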

Comments

Marc Orr Oct. 23, 2018, 9:13 p.m. UTC | #1
Adding everyone that's cc'd on [kvm PATCH 1/2] mm: export
__vmalloc_node_range().
Thanks,
Marc
On Sat, Oct 20, 2018 at 5:13 PM Marc Orr <marcorr@google.com> wrote:
>
> Previously, vcpus were allocated through the kmem_cache_zalloc() API,
> which requires the underlying physical memory to be contiguous.
> Because the x86 vcpu struct, struct vcpu_vmx, is relatively large
> (e.g., currently 47680 bytes on my setup), it can become hard to find
> contiguous memory.
>
> At the same time, the comments in the code indicate that the primary
> reason for using the kmem_cache_zalloc() API is to align the memory
> rather than to provide physical contiguity.
>
> Thus, this patch updates the vcpu allocation logic for vmx to use the
> vmalloc() API.
>
> Signed-off-by: Marc Orr <marcorr@google.com>
> ---
>  arch/x86/kvm/vmx.c  | 98 +++++++++++++++++++++++++++++++++++++++++----
>  virt/kvm/kvm_main.c | 28 +++++++------
>  2 files changed, 106 insertions(+), 20 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index abeeb45d1c33..d480a2cc0667 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -898,7 +898,14 @@ struct nested_vmx {
>  #define POSTED_INTR_ON  0
>  #define POSTED_INTR_SN  1
>
> -/* Posted-Interrupt Descriptor */
> +/*
> + * Posted-Interrupt Descriptor
> + *
> + * Note, the physical address of this structure is used by VMX. Furthermore, the
> + * translation code assumes that the entire pi_desc struct resides within a
> + * single page, which will be true because the struct is 64 bytes and 64-byte
> + * aligned.
> + */
>  struct pi_desc {
>         u32 pir[8];     /* Posted interrupt requested */
>         union {
> @@ -970,8 +977,25 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
>
>  struct vmx_msrs {
>         unsigned int            nr;
> -       struct vmx_msr_entry    val[NR_AUTOLOAD_MSRS];
> +       struct vmx_msr_entry    *val;
>  };
> +struct kmem_cache *vmx_msr_entry_cache;
> +
> +/*
> + * To prevent vmx_msr_entry array from crossing a page boundary, require:
> + * sizeof(*vmx_msrs.vmx_msr_entry.val) to be a power of two. This is guaranteed
> + * through compile-time asserts that:
> + *   - NR_AUTOLOAD_MSRS * sizeof(struct vmx_msr_entry) is a power of two
> + *   - NR_AUTOLOAD_MSRS * sizeof(struct vmx_msr_entry) <= PAGE_SIZE
> + *   - The allocation of vmx_msrs.vmx_msr_entry.val is aligned to its size.
> + */
> +#define CHECK_POWER_OF_TWO(val) \
> +       BUILD_BUG_ON_MSG(!((val) && !((val) & ((val) - 1))), \
> +       #val " is not a power of two.")
> +#define CHECK_INTRA_PAGE(val) do { \
> +               CHECK_POWER_OF_TWO(val); \
> +               BUILD_BUG_ON(!(val <= PAGE_SIZE)); \
> +       } while (0)
>
>  struct vcpu_vmx {
>         struct kvm_vcpu       vcpu;
> @@ -6616,6 +6640,14 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
>         }
>
>         if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
> +               /*
> +                * Note, pi_desc is contained within a single
> +                * page because the struct is 64 bytes and 64-byte aligned.
> +                */
> +               phys_addr_t pi_desc_phys =
> +                       page_to_phys(vmalloc_to_page(&vmx->pi_desc)) +
> +                       (u64)&vmx->pi_desc % PAGE_SIZE;
> +
>                 vmcs_write64(EOI_EXIT_BITMAP0, 0);
>                 vmcs_write64(EOI_EXIT_BITMAP1, 0);
>                 vmcs_write64(EOI_EXIT_BITMAP2, 0);
> @@ -6624,7 +6656,7 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
>                 vmcs_write16(GUEST_INTR_STATUS, 0);
>
>                 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
> -               vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
> +               vmcs_write64(POSTED_INTR_DESC_ADDR, pi_desc_phys);
>         }
>
>         if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
> @@ -11476,19 +11508,43 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
>         free_loaded_vmcs(vmx->loaded_vmcs);
>         kfree(vmx->guest_msrs);
>         kvm_vcpu_uninit(vcpu);
> -       kmem_cache_free(kvm_vcpu_cache, vmx);
> +       kmem_cache_free(vmx_msr_entry_cache, vmx->msr_autoload.guest.val);
> +       kmem_cache_free(vmx_msr_entry_cache, vmx->msr_autoload.host.val);
> +       vfree(vmx);
>  }
>
>  static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>  {
>         int err;
> -       struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
> +       struct vcpu_vmx *vmx = __vmalloc_node_range(
> +                       sizeof(struct vcpu_vmx),
> +                       __alignof__(struct vcpu_vmx),
> +                       VMALLOC_START,
> +                       VMALLOC_END,
> +                       GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | __GFP_ACCOUNT,
> +                       PAGE_KERNEL,
> +                       0,
> +                       NUMA_NO_NODE,
> +                       __builtin_return_address(0));
>         unsigned long *msr_bitmap;
>         int cpu;
>
>         if (!vmx)
>                 return ERR_PTR(-ENOMEM);
>
> +       vmx->msr_autoload.guest.val =
> +               kmem_cache_zalloc(vmx_msr_entry_cache, GFP_KERNEL);
> +       if (!vmx->msr_autoload.guest.val) {
> +               err = -ENOMEM;
> +               goto free_vmx;
> +       }
> +       vmx->msr_autoload.host.val =
> +               kmem_cache_zalloc(vmx_msr_entry_cache, GFP_KERNEL);
> +       if (!vmx->msr_autoload.host.val) {
> +               err = -ENOMEM;
> +               goto free_msr_autoload_guest;
> +       }
> +
>         vmx->vpid = allocate_vpid();
>
>         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
> @@ -11576,7 +11632,11 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>         kvm_vcpu_uninit(&vmx->vcpu);
>  free_vcpu:
>         free_vpid(vmx->vpid);
> -       kmem_cache_free(kvm_vcpu_cache, vmx);
> +       kmem_cache_free(vmx_msr_entry_cache, vmx->msr_autoload.host.val);
> +free_msr_autoload_guest:
> +       kmem_cache_free(vmx_msr_entry_cache, vmx->msr_autoload.guest.val);
> +free_vmx:
> +       vfree(vmx);
>         return ERR_PTR(err);
>  }
>
> @@ -15153,6 +15213,10 @@ module_exit(vmx_exit);
>  static int __init vmx_init(void)
>  {
>         int r;
> +       size_t vmx_msr_entry_size =
> +               sizeof(struct vmx_msr_entry) * NR_AUTOLOAD_MSRS;
> +
> +       CHECK_INTRA_PAGE(vmx_msr_entry_size);
>
>  #if IS_ENABLED(CONFIG_HYPERV)
>         /*
> @@ -15183,10 +15247,25 @@ static int __init vmx_init(void)
>         }
>  #endif
>
> -       r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
> -                    __alignof__(struct vcpu_vmx), THIS_MODULE);
> +       /*
> +        * Disable kmem cache; vmalloc will be used instead
> +        * to avoid OOM'ing when memory is available but not contiguous.
> +        */
> +       r = kvm_init(&vmx_x86_ops, 0, 0, THIS_MODULE);
>         if (r)
>                 return r;
> +       /*
> +        * A vmx_msr_entry array resides exclusively within the kernel. Thus,
> +        * use kmem_cache_create_usercopy(), with the usersize argument set to
> +        * ZERO, to blacklist copying vmx_msr_entry to/from user space.
> +        */
> +       vmx_msr_entry_cache =
> +               kmem_cache_create_usercopy("vmx_msr_entry", vmx_msr_entry_size,
> +                                 vmx_msr_entry_size, SLAB_ACCOUNT, 0, 0, NULL);
> +       if (!vmx_msr_entry_cache) {
> +               r = -ENOMEM;
> +               goto out;
> +       }
>
>         /*
>          * Must be called after kvm_init() so enable_ept is properly set
> @@ -15210,5 +15289,8 @@ static int __init vmx_init(void)
>         vmx_check_vmcs12_offsets();
>
>         return 0;
> +out:
> +       kvm_exit();
> +       return r;
>  }
>  module_init(vmx_init);
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 786ade1843a2..8b979e7c3ecd 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -4038,18 +4038,22 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
>                 goto out_free_2;
>         register_reboot_notifier(&kvm_reboot_notifier);
>
> -       /* A kmem cache lets us meet the alignment requirements of fx_save. */
> -       if (!vcpu_align)
> -               vcpu_align = __alignof__(struct kvm_vcpu);
> -       kvm_vcpu_cache =
> -               kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
> -                                          SLAB_ACCOUNT,
> -                                          offsetof(struct kvm_vcpu, arch),
> -                                          sizeof_field(struct kvm_vcpu, arch),
> -                                          NULL);
> -       if (!kvm_vcpu_cache) {
> -               r = -ENOMEM;
> -               goto out_free_3;
> +       /*
> +        * When vcpu_size is zero,
> +        * architecture-specific code manages its own vcpu allocation.
> +        */
> +       kvm_vcpu_cache = NULL;
> +       if (vcpu_size) {
> +               if (!vcpu_align)
> +                       vcpu_align = __alignof__(struct kvm_vcpu);
> +               kvm_vcpu_cache = kmem_cache_create_usercopy(
> +                       "kvm_vcpu", vcpu_size, vcpu_align, SLAB_ACCOUNT,
> +                       offsetof(struct kvm_vcpu, arch),
> +                       sizeof_field(struct kvm_vcpu, arch), NULL);
> +               if (!kvm_vcpu_cache) {
> +                       r = -ENOMEM;
> +                       goto out_free_3;
> +               }
>         }
>
>         r = kvm_async_pf_init();
> --
> 2.19.1.568.g152ad8e336-goog
>
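
A note on the POSTED_INTR_DESC_ADDR hunk above: __pa() is only valid for
addresses in the kernel's linear (direct) mapping, so it cannot be applied
to a vmalloc'd vcpu. A minimal sketch of the translation the patch
open-codes, using the stock vmalloc_to_page() and offset_in_page()
helpers (the wrapper name is hypothetical):

	/*
	 * Hypothetical helper: translate a vmalloc address to a physical
	 * address by looking up the backing page and adding the offset
	 * within that page.
	 */
	static phys_addr_t vmalloc_virt_to_phys(const void *addr)
	{
		return page_to_phys(vmalloc_to_page(addr)) +
		       offset_in_page(addr);
	}

This yields a single usable physical address only if the object does not
straddle a page boundary, which is why the patch documents that the
64-byte, 64-byte-aligned pi_desc fits within one page and adds
compile-time checks that each size-aligned, power-of-two-sized MSR array
does too.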
Matthew Wilcox Oct. 24, 2018, 11:41 a.m. UTC | #2
On Tue, Oct 23, 2018 at 05:13:40PM -0400, Marc Orr wrote:
> > +       struct vcpu_vmx *vmx = __vmalloc_node_range(
> > +                       sizeof(struct vcpu_vmx),
> > +                       __alignof__(struct vcpu_vmx),
> > +                       VMALLOC_START,
> > +                       VMALLOC_END,
> > +                       GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | __GFP_ACCOUNT,
> > +                       PAGE_KERNEL,
> > +                       0,
> > +                       NUMA_NO_NODE,
> > +                       __builtin_return_address(0));

I don't understand why you need to expose the lowest-level
__vmalloc_node_range to do what you need to do.

For example, __vmalloc_node would be easier for you to use while giving you
all the flexibility you think you want.

In fact, I don't think you need all the flexibility you're using.
vmalloc is always going to give you a page-aligned address, so
__alignof__(struct foo) isn't going to do anything worthwhile.

VMALLOC_START, VMALLOC_END, PAGE_KERNEL, 0, NUMA_NO_NODE, and
__builtin_return_address(0) are all the defaults.  So all you actually
need are these GFP flags.  __GFP_HIGHMEM isn't needed; vmalloc can
always allocate from highmem unless you're doing a DMA alloc.  So
it's just __GFP_ACCOUNT that's not supported by regular vzalloc().

I see __vmalloc_node_flags_caller is already non-static, so that would
be where I went and your call becomes simply:

	vmx = __vmalloc_node_flags_caller(sizeof(struct vcpu_vmx),
				NUMA_NO_NODE,
				GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT,
				__builtin_return_address(0));

I suspect a better option might be to add a vzalloc_account() call
and then your code becomes:

	vmx = vzalloc_account(sizeof(struct vcpu_vmx));

while vmalloc gains:

void *vzalloc_account(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
}
EXPORT_SYMBOL(vzalloc_account);
Marc Orr Oct. 24, 2018, 6:05 p.m. UTC | #3
On Wed, Oct 24, 2018 at 12:41 PM Matthew Wilcox <willy@infradead.org> wrote:
>
> On Tue, Oct 23, 2018 at 05:13:40PM -0400, Marc Orr wrote:
> > > +       struct vcpu_vmx *vmx = __vmalloc_node_range(
> > > +                       sizeof(struct vcpu_vmx),
> > > +                       __alignof__(struct vcpu_vmx),
> > > +                       VMALLOC_START,
> > > +                       VMALLOC_END,
> > > +                       GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | __GFP_ACCOUNT,
> > > +                       PAGE_KERNEL,
> > > +                       0,
> > > +                       NUMA_NO_NODE,
> > > +                       __builtin_return_address(0));
>
> I don't understand why you need to expose the lowest-level
> __vmalloc_node_range to do what you need to do.
>
> For example, __vmalloc_node would be easier for you to use while giving you
> all the flexibility you think you want.
>
> In fact, I don't think you need all the flexibility you're using.
> vmalloc is always going to give you a page-aligned address, so
> __alignof__(struct foo) isn't going to do anything worthwhile.
>
> VMALLOC_START, VMALLOC_END, PAGE_KERNEL, 0, NUMA_NO_NODE, and
> __builtin_return_address(0) are all the defaults.  So all you actually
> need are these GFP flags.  __GFP_HIGHMEM isn't needed; vmalloc can
> always allocate from highmem unless you're doing a DMA alloc.  So
> it's just __GFP_ACCOUNT that's not supported by regular vzalloc().
>
> I see __vmalloc_node_flags_caller is already non-static, so that would
> be where I went and your call becomes simply:
>
>         vmx = __vmalloc_node_flags_caller(sizeof(struct vcpu_vmx),
>                                 NUMA_NO_NODE,
>                                 GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT,
>                                 __builtin_return_address(0));
>
> I suspect a better option might be to add a vzalloc_account() call
> and then your code becomes:
>
>         vmx = vzalloc_account(sizeof(struct vcpu_vmx));
>
> while vmalloc gains:
>
> void *vzalloc_account(unsigned long size)
> {
>         return __vmalloc_node_flags(size, NUMA_NO_NODE,
>                                 GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
> }
> EXPORT_SYMBOL(vzalloc_account);

I 100% agree with this review! I only need the __GFP_ACCOUNT flag.
Actually, the first version of this patch that I developed downstream
resembled what you're suggesting here. But I've never touched the mm
subsystem before, and we were not confident about how to shape the patch
for upstreaming, so that's how we ended up with this version, which
makes minimal changes to the mm subsystem.

Anyway, let me refactor the patch exactly as you've suggested in your
review, and send out a new version.
Sean Christopherson Oct. 24, 2018, 10:31 p.m. UTC | #4
On Tue, Oct 23, 2018 at 05:13:40PM -0400, Marc Orr wrote:
> Adding everyone that's cc'd on [kvm PATCH 1/2] mm: export
> __vmalloc_node_range().
> Thanks,
> Marc
> On Sat, Oct 20, 2018 at 5:13 PM Marc Orr <marcorr@google.com> wrote:
> >
> > Previously, vcpus were allocated through the kmem_cache_zalloc() API,
> > which requires the underlying physical memory to be contiguous.
> > Because the x86 vcpu struct, struct vcpu_vmx, is relatively large
> > (e.g., currently 47680 bytes on my setup), it can become hard to find
> > contiguous memory.
> >
> > At the same time, the comments in the code indicate that the primary
> > reason for using the kmem_cache_zalloc() API is to align the memory
> > rather than to provide physical contiguity.
> >
> > Thus, this patch updates the vcpu allocation logic for vmx to use the
> > vmalloc() API.
> >
> > Signed-off-by: Marc Orr <marcorr@google.com>
> > ---
> >  arch/x86/kvm/vmx.c  | 98 +++++++++++++++++++++++++++++++++++++++++----
> >  virt/kvm/kvm_main.c | 28 +++++++------
> >  2 files changed, 106 insertions(+), 20 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> > index abeeb45d1c33..d480a2cc0667 100644
> > --- a/arch/x86/kvm/vmx.c
> > +++ b/arch/x86/kvm/vmx.c
> > @@ -898,7 +898,14 @@ struct nested_vmx {
> >  #define POSTED_INTR_ON  0
> >  #define POSTED_INTR_SN  1
> >
> > -/* Posted-Interrupt Descriptor */
> > +/*
> > + * Posted-Interrupt Descriptor
> > + *
> > + * Note, the physical address of this structure is used by VMX. Furthermore, the
> > + * translation code assumes that the entire pi_desc struct resides within a
> > + * single page, which will be true because the struct is 64 bytes and 64-byte
> > + * aligned.
> > + */
> >  struct pi_desc {
> >         u32 pir[8];     /* Posted interrupt requested */
> >         union {
> > @@ -970,8 +977,25 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
> >
> >  struct vmx_msrs {
> >         unsigned int            nr;
> > -       struct vmx_msr_entry    val[NR_AUTOLOAD_MSRS];
> > +       struct vmx_msr_entry    *val;
> >  };
> > +struct kmem_cache *vmx_msr_entry_cache;

The vmx_msr_entry changes should be done as a separate prereq patch,
e.g. for bisecting and/or reverting in case there is a bug.  AFAICT they
don't depend on moving to vmalloc.
Michal Hocko Oct. 25, 2018, 12:58 p.m. UTC | #5
On Wed 24-10-18 19:05:19, Marc Orr wrote:
> On Wed, Oct 24, 2018 at 12:41 PM Matthew Wilcox <willy@infradead.org> wrote:
> >
> > On Tue, Oct 23, 2018 at 05:13:40PM -0400, Marc Orr wrote:
> > > > +       struct vcpu_vmx *vmx = __vmalloc_node_range(
> > > > +                       sizeof(struct vcpu_vmx),
> > > > +                       __alignof__(struct vcpu_vmx),
> > > > +                       VMALLOC_START,
> > > > +                       VMALLOC_END,
> > > > +                       GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | __GFP_ACCOUNT,
> > > > +                       PAGE_KERNEL,
> > > > +                       0,
> > > > +                       NUMA_NO_NODE,
> > > > +                       __builtin_return_address(0));
> >
> > I don't understand why you need to expose the lowest-level
> > __vmalloc_node_range to do what you need to do.
> >
> > For example, __vmalloc_node would be easier for you to use while giving you
> > all the flexibility you think you want.
> >
> > In fact, I don't think you need all the flexibility you're using.
> > vmalloc is always going to give you a page-aligned address, so
> > __alignof__(struct foo) isn't going to do anything worthwhile.
> >
> > VMALLOC_START, VMALLOC_END, PAGE_KERNEL, 0, NUMA_NO_NODE, and
> > __builtin_return_address(0) are all the defaults.  So all you actually
> > need are these GFP flags.  __GFP_HIGHMEM isn't needed; vmalloc can
> > always allocate from highmem unless you're doing a DMA alloc.  So
> > it's just __GFP_ACCOUNT that's not supported by regular vzalloc().
> >
> > I see __vmalloc_node_flags_caller is already non-static, so that would
> > be where I went and your call becomes simply:
> >
> >         vmx = __vmalloc_node_flags_caller(sizeof(struct vcpu_vmx),
> >                                 NUMA_NO_NODE,
> >                                 GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT,
> >                                 __builtin_return_address(0));
> >
> > I suspect a better option might be to add a vzalloc_account() call
> > and then your code becomes:
> >
> >         vmx = vzalloc_account(sizeof(struct vcpu_vmx));
> >
> > while vmalloc gains:
> >
> > void *vzalloc_account(unsigned long size)
> > {
> >         return __vmalloc_node_flags(size, NUMA_NO_NODE,
> >                                 GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
> > }
> > EXPORT_SYMBOL(vzalloc_account);
> 
> I 100% agree with this review! I only need the __GFP_ACCOUNT flag.

__vmalloc is already exported. Can you use that instead?

> Actually, the first version of this patch that I developed downstream
> resembled what you're suggesting here. But I've never touched the mm
> subsystem before, and we were not confident about how to shape the patch
> for upstreaming, so that's how we ended up with this version, which
> makes minimal changes to the mm subsystem.

And now you can see why there was pushback to add the user of the
exported API in the same patch. It is much easier to review that way.
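
For reference, a sketch of what Michal's suggestion could look like at
the call site. At the time of this thread, __vmalloc() took a size, gfp
flags, and a pgprot_t, so the allocation would reduce to:

	/* Sketch only: __vmalloc() with the flags the patch needs. */
	vmx = __vmalloc(sizeof(struct vcpu_vmx),
			GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT,
			PAGE_KERNEL);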