[06/23] KVM: x86/mmu: Separate shadow MMU sp allocation from initialization

Message ID 20220203010051.2813563-7-dmatlack@google.com (mailing list archive)
State New, archived
Series Extend Eager Page Splitting to the shadow MMU

Commit Message

David Matlack Feb. 3, 2022, 1 a.m. UTC
Separate the code that allocates a new shadow page from the vCPU caches
from the code that initializes it. This is in preparation for creating
new shadow pages from VM ioctls for eager page splitting, where we do
not have access to the vCPU caches.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 44 +++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
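
As context, a minimal, hypothetical sketch of the kind of caller this split is preparing for: a VM-ioctl path (e.g. eager page splitting) that has no vCPU caches could allocate the shadow page from the slab/page allocator and then reuse kvm_mmu_init_sp() as-is. The helper name split_alloc_sp() and the exact allocation strategy are illustrative assumptions, not code from this series (mmu_page_header_cache is the existing kmem_cache in mmu.c):

/*
 * Hypothetical sketch (not part of this patch): allocate a shadow page
 * without the vCPU caches, e.g. from a VM ioctl, then hand it to
 * kvm_mmu_init_sp() for the gfn/role/list bookkeeping.
 */
static struct kvm_mmu_page *split_alloc_sp(gfp_t gfp)
{
	struct kvm_mmu_page *sp;

	sp = kmem_cache_zalloc(mmu_page_header_cache, gfp);
	if (!sp)
		return NULL;

	sp->spt = (void *)__get_free_page(gfp);
	if (!sp->spt) {
		kmem_cache_free(mmu_page_header_cache, sp);
		return NULL;
	}

	return sp;
}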

Comments

Ben Gardon Feb. 16, 2022, 7:37 p.m. UTC | #1
On Wed, Feb 2, 2022 at 5:02 PM David Matlack <dmatlack@google.com> wrote:
> [...]
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 49f82addf4b5..d4f90a10b652 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1718,7 +1718,7 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
>         mmu_spte_clear_no_track(parent_pte);
>  }
>
> -static struct kvm_mmu_page *kvm_mmu_alloc_sp(struct kvm_vcpu *vcpu, int direct)
> +static struct kvm_mmu_page *kvm_mmu_alloc_sp(struct kvm_vcpu *vcpu, bool direct)
>  {
>         struct kvm_mmu_page *sp;
>
> @@ -1726,16 +1726,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_sp(struct kvm_vcpu *vcpu, int direct)
>         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
>         if (!direct)
>                 sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
> -       set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

I'd be inclined to leave this in the allocation function instead of
moving it to the init function. It might not be any less code, but if
you're doing the sp -> page link here, you might as well do the page
-> sp link too.
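
For reference, a minimal sketch of the variant being suggested (illustrative, not code from the posted series; the mmu_page_header_cache allocation is assumed from the unquoted context above the hunk): keep the page -> sp back-link in the allocation helper, right next to the sp -> page (sp->spt) allocation, so kvm_mmu_init_sp() is left with only the gfn/role/list bookkeeping:

static struct kvm_mmu_page *kvm_mmu_alloc_sp(struct kvm_vcpu *vcpu, bool direct)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	if (!direct)
		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);

	/*
	 * Suggested change: establish the page -> sp back-link here, alongside
	 * the sp -> page allocation, rather than in kvm_mmu_init_sp().
	 */
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	return sp;
}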

David Matlack Feb. 16, 2022, 9:42 p.m. UTC | #2
On Wed, Feb 16, 2022 at 11:37 AM Ben Gardon <bgardon@google.com> wrote:
>
> On Wed, Feb 2, 2022 at 5:02 PM David Matlack <dmatlack@google.com> wrote:
> > [...]
> > @@ -1726,16 +1726,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_sp(struct kvm_vcpu *vcpu, int direct)
> >         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
> >         if (!direct)
> >                 sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
> > -       set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
>
> I'd be inclined to leave this in the allocation function instead of
> moving it to the init function. It might not be any less code, but if
> you're doing the sp -> page link here, you might as well do the page
> -> sp link too.

Good suggestion. I'll include that change in the next version.

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 49f82addf4b5..d4f90a10b652 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1718,7 +1718,7 @@  static void drop_parent_pte(struct kvm_mmu_page *sp,
 	mmu_spte_clear_no_track(parent_pte);
 }
 
-static struct kvm_mmu_page *kvm_mmu_alloc_sp(struct kvm_vcpu *vcpu, int direct)
+static struct kvm_mmu_page *kvm_mmu_alloc_sp(struct kvm_vcpu *vcpu, bool direct)
 {
 	struct kvm_mmu_page *sp;
 
@@ -1726,16 +1726,7 @@  static struct kvm_mmu_page *kvm_mmu_alloc_sp(struct kvm_vcpu *vcpu, int direct)
 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
 	if (!direct)
 		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
-	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
-	/*
-	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
-	 * depends on valid pages being added to the head of the list.  See
-	 * comments in kvm_zap_obsolete_pages().
-	 */
-	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
-	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
-	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
 	return sp;
 }
 
@@ -2144,27 +2135,34 @@  static struct kvm_mmu_page *kvm_mmu_get_existing_sp(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
-static struct kvm_mmu_page *kvm_mmu_create_sp(struct kvm_vcpu *vcpu,
-					      struct kvm_memory_slot *slot,
-					      gfn_t gfn,
-					      union kvm_mmu_page_role role)
+
+static void kvm_mmu_init_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
+			    struct kvm_memory_slot *slot, gfn_t gfn,
+			    union kvm_mmu_page_role role)
 {
-	struct kvm_mmu_page *sp;
 	struct hlist_head *sp_list;
 
-	++vcpu->kvm->stat.mmu_cache_miss;
+	++kvm->stat.mmu_cache_miss;
+
+	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
-	sp = kvm_mmu_alloc_sp(vcpu, role.direct);
 	sp->gfn = gfn;
 	sp->role = role;
+	sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
 
-	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
+	/*
+	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
+	 * depends on valid pages being added to the head of the list.  See
+	 * comments in kvm_zap_obsolete_pages().
+	 */
+	list_add(&sp->link, &kvm->arch.active_mmu_pages);
+	kvm_mod_used_mmu_pages(kvm, 1);
+
+	sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
 	hlist_add_head(&sp->hash_link, sp_list);
 
 	if (!role.direct)
-		account_shadowed(vcpu->kvm, slot, sp);
-
-	return sp;
+		account_shadowed(kvm, slot, sp);
 }
 
 static struct kvm_mmu_page *kvm_mmu_get_sp(struct kvm_vcpu *vcpu, gfn_t gfn,
@@ -2179,8 +2177,10 @@  static struct kvm_mmu_page *kvm_mmu_get_sp(struct kvm_vcpu *vcpu, gfn_t gfn,
 		goto out;
 
 	created = true;
+	sp = kvm_mmu_alloc_sp(vcpu, role.direct);
+
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	sp = kvm_mmu_create_sp(vcpu, slot, gfn, role);
+	kvm_mmu_init_sp(vcpu->kvm, sp, slot, gfn, role);
 
 out:
 	trace_kvm_mmu_get_page(sp, created);