@@ -135,4 +135,21 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
void tdp_iter_next(struct tdp_iter *iter);
void tdp_iter_restart(struct tdp_iter *iter);
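+/*
+ * Derive the role for a child shadow page of the SP that iter->sptep points
+ * into: identical to the parent's role, one level lower.
+ */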
+static inline union kvm_mmu_page_role tdp_iter_child_role(struct tdp_iter *iter)
+{
+ union kvm_mmu_page_role child_role;
+ struct kvm_mmu_page *parent_sp;
+
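+ /* iter->sptep points at an entry in the parent's page table page. */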
+ parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
+
+ child_role = parent_sp->role;
+ child_role.level--;
+ return child_role;
+}
+
#endif /* __KVM_X86_MMU_TDP_ITER_H */
@@ -177,24 +177,31 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
kvm_mmu_page_as_id(_root) != _as_id) { \
} else
-static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
+static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu,
+ union kvm_mmu_page_role role)
{
struct kvm_mmu_page *sp;
sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
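+ /* Set the role at allocation time; tdp_mmu_init_sp() expects it to be valid. */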
+ sp->role = role;
return sp;
}
static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
- gfn_t gfn, union kvm_mmu_page_role role)
+ gfn_t gfn)
{
INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
- sp->role = role;
+ /*
+ * The caller must set sp->role before calling this function; a valid
+ * role.word is never zero, since role.level is never PG_LEVEL_NONE (0).
+ */
+ WARN_ON_ONCE(!sp->role.word);
sp->gfn = gfn;
sp->ptep = sptep;
sp->tdp_mmu_page = true;
@@ -202,20 +209,6 @@ static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
trace_kvm_mmu_get_page(sp, true);
}
-static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
- struct tdp_iter *iter)
-{
- struct kvm_mmu_page *parent_sp;
- union kvm_mmu_page_role role;
-
- parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
-
- role = parent_sp->role;
- role.level--;
-
- tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
-}
-
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
@@ -234,8 +227,9 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
goto out;
}
- root = tdp_mmu_alloc_sp(vcpu);
- tdp_mmu_init_sp(root, NULL, 0, role);
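+ /* Roots have no parent SP, so the role comes directly from mmu->root_role. */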
+ root = tdp_mmu_alloc_sp(vcpu, role);
+ tdp_mmu_init_sp(root, NULL, 0);
/*
* TDP MMU roots are kept until they are explicitly invalidated, either
@@ -1068,8 +1062,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
* The SPTE is either non-present or points to a huge page that
* needs to be split.
*/
- sp = tdp_mmu_alloc_sp(vcpu);
- tdp_mmu_init_child_sp(sp, &iter);
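+ /* Allocate the child SP with the role already derived from the parent. */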
+ sp = tdp_mmu_alloc_sp(vcpu, tdp_iter_child_role(&iter));
+ tdp_mmu_init_sp(sp, iter.sptep, iter.gfn);
sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
@@ -1312,7 +1307,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
return spte_set;
}
-static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
+static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp, union kvm_mmu_page_role role)
{
struct kvm_mmu_page *sp;
@@ -1322,6 +1317,8 @@ static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
if (!sp)
return NULL;
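+ /* Record the role now; tdp_mmu_init_sp() will check that it is non-zero. */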
+ sp->role = role;
sp->spt = (void *)__get_free_page(gfp);
if (!sp->spt) {
kmem_cache_free(mmu_page_header_cache, sp);
@@ -1335,6 +1332,8 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
struct tdp_iter *iter,
bool shared)
{
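+ /* Compute the child role once, for both allocation attempts below. */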
+ union kvm_mmu_page_role role = tdp_iter_child_role(iter);
struct kvm_mmu_page *sp;
kvm_lockdep_assert_mmu_lock_held(kvm, shared);
@@ -1348,7 +1347,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
* If this allocation fails we drop the lock and retry with reclaim
* allowed.
*/
- sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
+ sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT, role);
if (sp)
return sp;
@@ -1360,7 +1359,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
write_unlock(&kvm->mmu_lock);
iter->yielded = true;
- sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
+ sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT, role);
if (shared)
read_lock(&kvm->mmu_lock);
@@ -1455,7 +1454,8 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
continue;
}
- tdp_mmu_init_child_sp(sp, &iter);
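+ /* sp->role was already set when the SP was allocated for this split. */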
+ tdp_mmu_init_sp(sp, iter.sptep, iter.gfn);
if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
goto retry;