@@ -274,6 +274,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
 
 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
 	return sp;
 }
@@ -281,8 +282,6 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
 			    gfn_t gfn, union kvm_mmu_page_role role)
 {
-	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-
 	sp->role = role;
 	sp->gfn = gfn;
 	sp->ptep = sptep;
@@ -1435,6 +1434,8 @@ static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
 		return NULL;
 	}
 
+	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+
 	return sp;
 }
 
Link the shadow page table to the sp (via set_page_private()) during
allocation rather than initialization. This is a more logical place to
do it because allocation time is also where we do the reverse link
(setting sp->spt).

This creates one extra call to set_page_private(), but having multiple
calls to set_page_private() is unavoidable anyway: we either do
set_page_private() during allocation, which requires one call per
allocation function, or we do it during initialization, which requires
one call per initialization function.

No functional change intended.

Suggested-by: Ben Gardon <bgardon@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
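
[Editorial note, not part of the patch: for context on why the
page_private() link matters, the consumer side is what recovers the
struct kvm_mmu_page from a raw SPTE pointer. A sketch of those
consumers, modeled on the to_shadow_page()/sptep_to_sp() helpers that
already exist in arch/x86/kvm/mmu at this point in the series; exact
locations and signatures should be checked against the tree.]

	/* Recover the sp stored by set_page_private() at allocation time. */
	static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
	{
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		return (struct kvm_mmu_page *)page_private(page);
	}

	static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
	{
		/* __pa() resolves the SPTE pointer to the page holding it. */
		return to_shadow_page(__pa(sptep));
	}

[Because every spt page can be handed to these helpers, every
allocation path must install the back-pointer, which is why the patch
adds one set_page_private() call per allocation function.]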