[RFC,05/15] KVM: x86/mmu: Abstract mmu caches out to a separate struct

Message ID: 20211119235759.1304274-6-dmatlack@google.com
State: New, archived
Series: KVM: x86/mmu: Eager Page Splitting for the TDP MMU

Commit Message

David Matlack Nov. 19, 2021, 11:57 p.m. UTC
Move the kvm_mmu_memory_cache structs into a separate wrapper struct.
This is in preparation for eagerly splitting all large pages during
VM-ioctls (i.e. not in the vCPU fault path), which will require adding
kvm_mmu_memory_cache structs to struct kvm_arch.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/include/asm/kvm_host.h | 12 ++++---
 arch/x86/kvm/mmu/mmu.c          | 59 ++++++++++++++++++++++-----------
 arch/x86/kvm/mmu/tdp_mmu.c      |  7 ++--
 3 files changed, 52 insertions(+), 26 deletions(-)
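
The point of the wrapper is that the same bundle of caches can now live somewhere other than struct kvm_vcpu_arch. As a rough sketch of where the series is headed (the field name and placement below are illustrative, not taken from this patch), the wrapper could later be embedded VM-wide:

struct kvm_arch {
	/* ... existing fields ... */

	/*
	 * Hypothetical: a VM-wide instance of the wrapper, topped up from
	 * a VM-ioctl (e.g. while enabling dirty logging) rather than from
	 * the vCPU fault path.
	 */
	struct kvm_mmu_memory_caches split_caches;
};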

Comments

Ben Gardon Nov. 22, 2021, 6:55 p.m. UTC | #1
On Fri, Nov 19, 2021 at 3:58 PM David Matlack <dmatlack@google.com> wrote:
>
> Move the kvm_mmu_memory_cache structs into a separate wrapper struct.
> This is in preparation for eagerly splitting all large pages during
> VM-ioctls (i.e. not in the vCPU fault path), which will require adding
> kvm_mmu_memory_cache structs to struct kvm_arch.
>
> Signed-off-by: David Matlack <dmatlack@google.com>

Reviewed-by: Ben Gardon

I don't think this patch creates any functional change. If that's the
intent, it'd be worth noting.


Ben Gardon Nov. 22, 2021, 6:55 p.m. UTC | #2
On Mon, Nov 22, 2021 at 10:55 AM Ben Gardon <bgardon@google.com> wrote:
>
> On Fri, Nov 19, 2021 at 3:58 PM David Matlack <dmatlack@google.com> wrote:
> >
> > Move the kvm_mmu_memory_cache structs into a separate wrapper struct.
> > This is in preparation for eagerly splitting all large pages during
> > VM-ioctls (i.e. not in the vCPU fault path), which will require adding
> > kvm_mmu_memory_cache structs to struct kvm_arch.
> >
> > Signed-off-by: David Matlack <dmatlack@google.com>
>
> Reviewed-by: Ben Gardon

Woops
Reviewed-by: Ben Gardon <bgardon@google.com>

David Matlack Nov. 30, 2021, 11:28 p.m. UTC | #3
On Mon, Nov 22, 2021 at 10:55 AM Ben Gardon <bgardon@google.com> wrote:
>
> On Fri, Nov 19, 2021 at 3:58 PM David Matlack <dmatlack@google.com> wrote:
> >
> > Move the kvm_mmu_memory_cache structs into a separate wrapper struct.
> > This is in preparation for eagerly splitting all large pages during
> > VM-ioctls (i.e. not in the vCPU fault path), which will require adding
> > kvm_mmu_memory_cache structs to struct kvm_arch.
> >
> > Signed-off-by: David Matlack <dmatlack@google.com>
>
> Reviewed-by: Ben Gardon
>
> I don't think this patch creates any functional change. If that's the
> intent, it'd be worth noting.

Will do!

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1fcb345bc107..2a7564703ea6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -612,6 +612,13 @@  struct kvm_vcpu_xen {
 	u64 runstate_times[4];
 };
 
+struct kvm_mmu_memory_caches {
+	struct kvm_mmu_memory_cache pte_list_desc_cache;
+	struct kvm_mmu_memory_cache shadow_page_cache;
+	struct kvm_mmu_memory_cache gfn_array_cache;
+	struct kvm_mmu_memory_cache page_header_cache;
+};
+
 struct kvm_vcpu_arch {
 	/*
 	 * rip and regs accesses must go through
@@ -681,10 +688,7 @@  struct kvm_vcpu_arch {
 	 */
 	struct kvm_mmu *walk_mmu;
 
-	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
-	struct kvm_mmu_memory_cache mmu_shadow_page_cache;
-	struct kvm_mmu_memory_cache mmu_gfn_array_cache;
-	struct kvm_mmu_memory_cache mmu_page_header_cache;
+	struct kvm_mmu_memory_caches mmu_caches;
 
 	/*
 	 * QEMU userspace and the guest each have their own FPU state.
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1146f87044a6..537952574211 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -732,38 +732,60 @@  static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
 
 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
+	struct kvm_mmu_memory_caches *mmu_caches;
 	int r;
 
+	mmu_caches = &vcpu->arch.mmu_caches;
+
 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
-	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+	r = kvm_mmu_topup_memory_cache(&mmu_caches->pte_list_desc_cache,
 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
 	if (r)
 		return r;
-	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
+	r = kvm_mmu_topup_memory_cache(&mmu_caches->shadow_page_cache,
 				       PT64_ROOT_MAX_LEVEL);
 	if (r)
 		return r;
 	if (maybe_indirect) {
-		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+		r = kvm_mmu_topup_memory_cache(&mmu_caches->gfn_array_cache,
 					       PT64_ROOT_MAX_LEVEL);
 		if (r)
 			return r;
 	}
-	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
+	return kvm_mmu_topup_memory_cache(&mmu_caches->page_header_cache,
 					  PT64_ROOT_MAX_LEVEL);
 }
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
-	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
-	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
-	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
+	struct kvm_mmu_memory_caches *mmu_caches;
+
+	mmu_caches = &vcpu->arch.mmu_caches;
+
+	kvm_mmu_free_memory_cache(&mmu_caches->pte_list_desc_cache);
+	kvm_mmu_free_memory_cache(&mmu_caches->shadow_page_cache);
+	kvm_mmu_free_memory_cache(&mmu_caches->gfn_array_cache);
+	kvm_mmu_free_memory_cache(&mmu_caches->page_header_cache);
+}
+
+static void mmu_init_memory_caches(struct kvm_mmu_memory_caches *caches)
+{
+	caches->pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
+	caches->pte_list_desc_cache.gfp_zero = __GFP_ZERO;
+
+	caches->page_header_cache.kmem_cache = mmu_page_header_cache;
+	caches->page_header_cache.gfp_zero = __GFP_ZERO;
+
+	caches->shadow_page_cache.gfp_zero = __GFP_ZERO;
 }
 
 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
 {
-	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
+	struct kvm_mmu_memory_caches *mmu_caches;
+
+	mmu_caches = &vcpu->arch.mmu_caches;
+
+	return kvm_mmu_memory_cache_alloc(&mmu_caches->pte_list_desc_cache);
 }
 
 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
@@ -1071,7 +1093,7 @@  static bool rmap_can_add(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_memory_cache *mc;
 
-	mc = &vcpu->arch.mmu_pte_list_desc_cache;
+	mc = &vcpu->arch.mmu_caches.pte_list_desc_cache;
 	return kvm_mmu_memory_cache_nr_free_objects(mc);
 }
 
@@ -1742,12 +1764,15 @@  static void drop_parent_pte(struct kvm_mmu_page *sp,
 
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
 {
+	struct kvm_mmu_memory_caches *mmu_caches;
 	struct kvm_mmu_page *sp;
 
-	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+	mmu_caches = &vcpu->arch.mmu_caches;
+
+	sp = kvm_mmu_memory_cache_alloc(&mmu_caches->page_header_cache);
+	sp->spt = kvm_mmu_memory_cache_alloc(&mmu_caches->shadow_page_cache);
 	if (!direct)
-		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
+		sp->gfns = kvm_mmu_memory_cache_alloc(&mmu_caches->gfn_array_cache);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
 	/*
@@ -5544,13 +5569,7 @@  int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
 	int ret;
 
-	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
-	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
-
-	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
-	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
-
-	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
+	mmu_init_memory_caches(&vcpu->arch.mmu_caches);
 
 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 9ee3f4f7fdf5..b70707a7fe87 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -175,10 +175,13 @@  static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
 static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 					       int level)
 {
+	struct kvm_mmu_memory_caches *mmu_caches;
 	struct kvm_mmu_page *sp;
 
-	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+	mmu_caches = &vcpu->arch.mmu_caches;
+
+	sp = kvm_mmu_memory_cache_alloc(&mmu_caches->page_header_cache);
+	sp->spt = kvm_mmu_memory_cache_alloc(&mmu_caches->shadow_page_cache);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
 	sp->role.word = page_role_for_level(vcpu, level).word;
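
As background on the cache API these call sites use: the contract is that topping up happens in a sleepable context before mmu_lock is taken, after which allocations from the caches cannot fail. A minimal sketch of that lifecycle with the new wrapper (the function below is illustrative and not part of the patch; it condenses the call sites touched above):

/*
 * Illustrative only: mmu_topup_memory_caches() may allocate and sleep, so
 * it runs before mmu_lock is taken; once it succeeds, cache-backed
 * allocations such as kvm_mmu_alloc_page() cannot fail under the lock.
 */
static int example_alloc_shadow_page(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	int r;

	r = mmu_topup_memory_caches(vcpu, true);  /* fills vcpu->arch.mmu_caches */
	if (r)
		return r;

	write_lock(&vcpu->kvm->mmu_lock);
	sp = kvm_mmu_alloc_page(vcpu, 1);         /* draws from vcpu->arch.mmu_caches */
	write_unlock(&vcpu->kvm->mmu_lock);

	/* sp would now be initialized and linked into the paging structure. */
	return 0;
}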