Message ID | 20211213225918.672507-9-dmatlack@google.com (mailing list archive)
---|---
State | New, archived
Series | KVM: x86/mmu: Eager Page Splitting for the TDP MMU
On Mon, Dec 13, 2021 at 10:59:13PM +0000, David Matlack wrote:
> Separate the allocation of child pages from the initialization. This is
> in preparation for doing page splitting outside of the vCPU fault
> context which requires a different allocation mechanism.
>
> No functional changed intended.
>
> Signed-off-by: David Matlack <dmatlack@google.com>

Reviewed-by: Peter Xu <peterx@redhat.com>
On Mon, Dec 13, 2021, David Matlack wrote:
> Separate the allocation of child pages from the initialization. This is

"from their initialization" so that it's not a dangling sentence.

> in preparation for doing page splitting outside of the vCPU fault
> context which requires a different allocation mechanism.
>
> No functional changed intended.
>
> Signed-off-by: David Matlack <dmatlack@google.com>
> ---
>  arch/x86/kvm/mmu/tdp_mmu.c | 30 +++++++++++++++++++++++-------
>  1 file changed, 23 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 582d9a798899..a8354d8578f1 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -157,13 +157,18 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
>          if (kvm_mmu_page_as_id(_root) != _as_id) {              \
>          } else
>
> -static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
> -                                               union kvm_mmu_page_role role)
> +static struct kvm_mmu_page *alloc_tdp_mmu_page_from_caches(struct kvm_vcpu *vcpu)

Hrm, this ends up being a rather poor name because the "from_kernel" variant also
allocates from a cache, it's just a different cache:

  static struct kvm_mmu_page *alloc_tdp_mmu_page_from_kernel(gfp_t gfp)
  {
          struct kvm_mmu_page *sp;

          gfp |= __GFP_ZERO;

          sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
          if (!sp)
                  return NULL;

          ...
  }

Given that the !vcpu path is the odd one, and the only user of the from_kernel
variant is the split, maybe this?  I.e. punt on naming until another user of the
"split" variant comes along.

  static struct kvm_mmu_page *__alloc_tdp_mmu_page(struct kvm_vcpu *vcpu)

and

  static struct kvm_mmu_page *__alloc_tdp_mmu_page_for_split(gfp_t gfp)

>  {
>          struct kvm_mmu_page *sp;
>
>          sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
>          sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
> +
> +        return sp;
> +}
> +
> +static void init_tdp_mmu_page(struct kvm_mmu_page *sp, gfn_t gfn, union kvm_mmu_page_role role)

Newline.  I'm all in favor of running over when doing so improves readability, but
that's not the case here.

> +{
>          set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
>
>          sp->role = role;
> @@ -171,11 +176,9 @@ static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
>          sp->tdp_mmu_page = true;
>
>          trace_kvm_mmu_get_page(sp, true);
> -
> -        return sp;
>  }
>
> -static struct kvm_mmu_page *alloc_child_tdp_mmu_page(struct kvm_vcpu *vcpu, struct tdp_iter *iter)
> +static void init_child_tdp_mmu_page(struct kvm_mmu_page *child_sp, struct tdp_iter *iter)

Newline.

>  {
>          struct kvm_mmu_page *parent_sp;
>          union kvm_mmu_page_role role;
> @@ -185,7 +188,17 @@ static struct kvm_mmu_page *alloc_child_tdp_mmu_page(struct kvm_vcpu *vcpu, stru
>          role = parent_sp->role;
>          role.level--;
>
> -        return alloc_tdp_mmu_page(vcpu, iter->gfn, role);
> +        init_tdp_mmu_page(child_sp, iter->gfn, role);
> +}
> +
> +static struct kvm_mmu_page *alloc_child_tdp_mmu_page(struct kvm_vcpu *vcpu, struct tdp_iter *iter)

Newline.

> +{
> +        struct kvm_mmu_page *child_sp;
> +
> +        child_sp = alloc_tdp_mmu_page_from_caches(vcpu);
> +        init_child_tdp_mmu_page(child_sp, iter);
> +
> +        return child_sp;
>  }
>
>  hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
> @@ -210,7 +223,10 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
>                  goto out;
>          }
>
> -        root = alloc_tdp_mmu_page(vcpu, 0, role);
> +        root = alloc_tdp_mmu_page_from_caches(vcpu);
> +
> +        init_tdp_mmu_page(root, 0, role);
> +
>          refcount_set(&root->tdp_mmu_root_count, 1);
>
>          spin_lock(&kvm->arch.tdp_mmu_pages_lock);
> --
> 2.34.1.173.g76aa8bc2d0-goog
>
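For readers following along, this is roughly what the pair of helpers could look like under the naming Sean proposes. The vCPU variant simply mirrors the body of alloc_tdp_mmu_page_from_caches() from the patch; the body of the for_split variant is a guess that fills in the part Sean elided with a plain zeroed-page allocation, so treat it as a sketch of the idea rather than the code the series actually carries.

static struct kvm_mmu_page *__alloc_tdp_mmu_page(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *sp;

        /* Same body as the patch's alloc_tdp_mmu_page_from_caches(). */
        sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
        sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

        return sp;
}

/*
 * Sketch only: allocate directly from the kernel when no vCPU (and thus no
 * per-vCPU memory cache) is available.  The sp->spt allocation and the error
 * handling below are assumptions, not taken from the posted series.
 */
static struct kvm_mmu_page *__alloc_tdp_mmu_page_for_split(gfp_t gfp)
{
        struct kvm_mmu_page *sp;

        gfp |= __GFP_ZERO;

        sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
        if (!sp)
                return NULL;

        /* Assumed: the page table page comes from a zeroed page allocation. */
        sp->spt = (void *)__get_free_page(gfp);
        if (!sp->spt) {
                kmem_cache_free(mmu_page_header_cache, sp);
                return NULL;
        }

        return sp;
}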
On Thu, Jan 6, 2022 at 12:59 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Mon, Dec 13, 2021, David Matlack wrote:
> > Separate the allocation of child pages from the initialization. This is
>
> "from their initialization" so that it's not a dangling sentence.
>
> > in preparation for doing page splitting outside of the vCPU fault
> > context which requires a different allocation mechanism.
> >
> > No functional changed intended.
> >
> > Signed-off-by: David Matlack <dmatlack@google.com>
> > ---
> >  arch/x86/kvm/mmu/tdp_mmu.c | 30 +++++++++++++++++++++++-------
> >  1 file changed, 23 insertions(+), 7 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> > index 582d9a798899..a8354d8578f1 100644
> > --- a/arch/x86/kvm/mmu/tdp_mmu.c
> > +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> > @@ -157,13 +157,18 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> >          if (kvm_mmu_page_as_id(_root) != _as_id) {              \
> >          } else
> >
> > -static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
> > -                                               union kvm_mmu_page_role role)
> > +static struct kvm_mmu_page *alloc_tdp_mmu_page_from_caches(struct kvm_vcpu *vcpu)
>
> Hrm, this ends up being a rather poor name because the "from_kernel" variant also
> allocates from a cache, it's just a different cache:
>
> static struct kvm_mmu_page *alloc_tdp_mmu_page_from_kernel(gfp_t gfp)
> {
>         struct kvm_mmu_page *sp;
>
>         gfp |= __GFP_ZERO;
>
>         sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
>         if (!sp)
>                 return NULL;
>
>         ...
> }
>
> Given that the !vcpu path is the odd one, and the only user of the from_kernel
> variant is the split, maybe this?  I.e. punt on naming until another user of the
> "split" variant comes along.
>
> static struct kvm_mmu_page *__alloc_tdp_mmu_page(struct kvm_vcpu *vcpu)
>
> and
>
> static struct kvm_mmu_page *__alloc_tdp_mmu_page_for_split(gfp_t gfp)

Will do.

>
> > {
> >         struct kvm_mmu_page *sp;
> >
> >         sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
> >         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
> > +
> > +       return sp;
> > +}
> > +
> > +static void init_tdp_mmu_page(struct kvm_mmu_page *sp, gfn_t gfn, union kvm_mmu_page_role role)
>
> Newline.  I'm all in favor of running over when doing so improves readability, but
> that's not the case here.

Ah shoot. I had configured my editor to use a 100 char line limit for
kernel code, but reading the kernel style guide more closely I see
that 80 is still the preferred limit. I'll go back to preferring 80 and
only go over when it explicitly makes the code more readable.

>
> > +{
> >         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
> >
> >         sp->role = role;
> > @@ -171,11 +176,9 @@ static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
> >         sp->tdp_mmu_page = true;
> >
> >         trace_kvm_mmu_get_page(sp, true);
> > -
> > -       return sp;
> > }
> >
> > -static struct kvm_mmu_page *alloc_child_tdp_mmu_page(struct kvm_vcpu *vcpu, struct tdp_iter *iter)
> > +static void init_child_tdp_mmu_page(struct kvm_mmu_page *child_sp, struct tdp_iter *iter)
>
> Newline.
>
> > {
> >         struct kvm_mmu_page *parent_sp;
> >         union kvm_mmu_page_role role;
> > @@ -185,7 +188,17 @@ static struct kvm_mmu_page *alloc_child_tdp_mmu_page(struct kvm_vcpu *vcpu, stru
> >         role = parent_sp->role;
> >         role.level--;
> >
> > -       return alloc_tdp_mmu_page(vcpu, iter->gfn, role);
> > +       init_tdp_mmu_page(child_sp, iter->gfn, role);
> > +}
> > +
> > +static struct kvm_mmu_page *alloc_child_tdp_mmu_page(struct kvm_vcpu *vcpu, struct tdp_iter *iter)
>
> Newline.
>
> > +{
> > +       struct kvm_mmu_page *child_sp;
> > +
> > +       child_sp = alloc_tdp_mmu_page_from_caches(vcpu);
> > +       init_child_tdp_mmu_page(child_sp, iter);
> > +
> > +       return child_sp;
> > }
> >
> > hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
> > @@ -210,7 +223,10 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
> >                 goto out;
> >         }
> >
> > -       root = alloc_tdp_mmu_page(vcpu, 0, role);
> > +       root = alloc_tdp_mmu_page_from_caches(vcpu);
> > +
> > +       init_tdp_mmu_page(root, 0, role);
> > +
> >         refcount_set(&root->tdp_mmu_root_count, 1);
> >
> >         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
> > --
> > 2.34.1.173.g76aa8bc2d0-goog
> >
On Thu, Jan 06, 2022, David Matlack wrote:
> On Thu, Jan 6, 2022 at 12:59 PM Sean Christopherson <seanjc@google.com> wrote:
> > Newline.  I'm all in favor of running over when doing so improves readability, but
> > that's not the case here.
>
> Ah shoot. I had configured my editor to use a 100 char line limit for
> kernel code, but reading the kernel style guide more closely I see
> that 80 is still the preferred limit. I'll go back to preferring 80 and
> only go over when it explicitly makes the code more readable.

Yeah, checkpatch was modified to warn at 100 chars so that people would stop
interpreting 80 as a hard limit, e.g. wrapping due to being one character over,
but 80 is still the soft limit.
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 582d9a798899..a8354d8578f1 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -157,13 +157,18 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
         if (kvm_mmu_page_as_id(_root) != _as_id) {              \
         } else
 
-static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
-                                               union kvm_mmu_page_role role)
+static struct kvm_mmu_page *alloc_tdp_mmu_page_from_caches(struct kvm_vcpu *vcpu)
 {
         struct kvm_mmu_page *sp;
 
         sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+
+        return sp;
+}
+
+static void init_tdp_mmu_page(struct kvm_mmu_page *sp, gfn_t gfn, union kvm_mmu_page_role role)
+{
         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
         sp->role = role;
@@ -171,11 +176,9 @@ static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
         sp->tdp_mmu_page = true;
 
         trace_kvm_mmu_get_page(sp, true);
-
-        return sp;
 }
 
-static struct kvm_mmu_page *alloc_child_tdp_mmu_page(struct kvm_vcpu *vcpu, struct tdp_iter *iter)
+static void init_child_tdp_mmu_page(struct kvm_mmu_page *child_sp, struct tdp_iter *iter)
 {
         struct kvm_mmu_page *parent_sp;
         union kvm_mmu_page_role role;
@@ -185,7 +188,17 @@ static struct kvm_mmu_page *alloc_child_tdp_mmu_page(struct kvm_vcpu *vcpu, stru
         role = parent_sp->role;
         role.level--;
 
-        return alloc_tdp_mmu_page(vcpu, iter->gfn, role);
+        init_tdp_mmu_page(child_sp, iter->gfn, role);
+}
+
+static struct kvm_mmu_page *alloc_child_tdp_mmu_page(struct kvm_vcpu *vcpu, struct tdp_iter *iter)
+{
+        struct kvm_mmu_page *child_sp;
+
+        child_sp = alloc_tdp_mmu_page_from_caches(vcpu);
+        init_child_tdp_mmu_page(child_sp, iter);
+
+        return child_sp;
 }
 
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
@@ -210,7 +223,10 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
                 goto out;
         }
 
-        root = alloc_tdp_mmu_page(vcpu, 0, role);
+        root = alloc_tdp_mmu_page_from_caches(vcpu);
+
+        init_tdp_mmu_page(root, 0, role);
+
         refcount_set(&root->tdp_mmu_root_count, 1);
 
         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
Separate the allocation of child pages from their initialization. This is
in preparation for doing page splitting outside of the vCPU fault
context, which requires a different allocation mechanism.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)
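To make the "different allocation mechanism" above concrete, here is a rough sketch of how a later patch in the series might allocate a shadow page for eager splitting when no vCPU (and therefore no per-vCPU memory cache) is at hand. The function name, the GFP flags, and the lock-dropping fallback are illustrative assumptions rather than code from this patch, and it builds on the hypothetical __alloc_tdp_mmu_page_for_split() sketched earlier in the thread.

/*
 * Illustrative sketch, not part of this patch: allocate a shadow page for
 * eager splitting outside of the vCPU fault context.  First try a
 * non-sleeping allocation while holding mmu_lock; if that fails, drop the
 * lock so the allocation can reclaim, then reacquire it.  A real caller
 * would have to restart its walk after the lock was dropped.
 */
static struct kvm_mmu_page *alloc_sp_for_split(struct kvm *kvm, bool shared)
{
        struct kvm_mmu_page *sp;

        /* Opportunistic attempt without sleeping, mmu_lock still held. */
        sp = __alloc_tdp_mmu_page_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
        if (sp)
                return sp;

        /* Drop mmu_lock so a direct-reclaim allocation is allowed to sleep. */
        if (shared)
                read_unlock(&kvm->mmu_lock);
        else
                write_unlock(&kvm->mmu_lock);

        sp = __alloc_tdp_mmu_page_for_split(GFP_KERNEL_ACCOUNT);

        if (shared)
                read_lock(&kvm->mmu_lock);
        else
                write_lock(&kvm->mmu_lock);

        return sp;
}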