@@ -135,4 +135,16 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
void tdp_iter_next(struct tdp_iter *iter);
void tdp_iter_restart(struct tdp_iter *iter);
+static inline union kvm_mmu_page_role tdp_iter_child_role(struct tdp_iter *iter)
+{
+ union kvm_mmu_page_role child_role;
+ struct kvm_mmu_page *parent_sp;
+
+ parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
+
+ child_role = parent_sp->role;
+ child_role.level--;
+ return child_role;
+}
+
#endif /* __KVM_X86_MMU_TDP_ITER_H */
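
As an aside for readers following the refactor, below is a minimal, self-contained sketch (plain userspace C, not kernel code) of the contract that the tdp_mmu.c hunks which follow establish: the child role is derived from the parent by decrementing the level, the role is attached to the page header at allocation time, and the init step only sanity-checks that a role was set. The mock_* names and example values are invented stand-ins for union kvm_mmu_page_role and struct kvm_mmu_page, not kernel API.

/*
 * Standalone sketch of the alloc/init ordering introduced above and used by
 * the tdp_mmu.c hunks below.  mock_page_role and mock_mmu_page are simplified
 * stand-ins for union kvm_mmu_page_role and struct kvm_mmu_page.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

union mock_page_role {
	unsigned long word;
	struct {
		unsigned int level : 4;	/* the only field this sketch needs */
	};
};

struct mock_mmu_page {
	union mock_page_role role;
	unsigned long gfn;
};

/* Mirrors tdp_iter_child_role(): copy the parent's role, drop one level. */
static union mock_page_role mock_child_role(union mock_page_role parent)
{
	union mock_page_role child = parent;

	child.level--;
	return child;
}

/* Mirrors tdp_mmu_alloc_sp(vcpu, role): the role is fixed at allocation. */
static struct mock_mmu_page *mock_alloc_sp(union mock_page_role role)
{
	struct mock_mmu_page *sp = calloc(1, sizeof(*sp));

	assert(sp);
	sp->role = role;
	return sp;
}

/* Mirrors tdp_mmu_init_sp(sp, sptep, gfn): no role argument any more, only a
 * sanity check that the caller set one (the WARN_ON_ONCE() in the patch). */
static void mock_init_sp(struct mock_mmu_page *sp, unsigned long gfn)
{
	assert(sp->role.word);
	sp->gfn = gfn;
}

int main(void)
{
	union mock_page_role parent = { .word = 0 };
	struct mock_mmu_page *sp;

	parent.level = 3;		/* arbitrary example: a 1GiB-level parent */
	sp = mock_alloc_sp(mock_child_role(parent));
	mock_init_sp(sp, 0x1000);	/* arbitrary example gfn */
	printf("child level = %u\n", (unsigned int)sp->role.level);
	free(sp);
	return 0;
}

The only point being illustrated is ordering: sp->role is valid from the moment the header is allocated, which is what the WARN_ON_ONCE(!sp->role.word) check in tdp_mmu_init_sp() relies on.
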
@@ -184,24 +184,30 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
#define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id) \
__for_each_tdp_mmu_root(_kvm, _root, _as_id, true)
-static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
+static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu,
+ union kvm_mmu_page_role role)
{
struct kvm_mmu_page *sp;
sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+ sp->role = role;
return sp;
}
static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
- gfn_t gfn, union kvm_mmu_page_role role)
+ gfn_t gfn)
{
INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
- sp->role = role;
+ /*
+ * The role must be set before calling this function.  At a minimum,
+ * role.level must not be 0 (PG_LEVEL_NONE).
+ */
+ WARN_ON_ONCE(!sp->role.word);
sp->gfn = gfn;
sp->ptep = sptep;
sp->tdp_mmu_page = true;
@@ -209,20 +215,6 @@ static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
trace_kvm_mmu_get_page(sp, true);
}
-static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
- struct tdp_iter *iter)
-{
- struct kvm_mmu_page *parent_sp;
- union kvm_mmu_page_role role;
-
- parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
-
- role = parent_sp->role;
- role.level--;
-
- tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
-}
-
int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -260,8 +252,8 @@ int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu)
goto out_spin_unlock;
}
- root = tdp_mmu_alloc_sp(vcpu);
- tdp_mmu_init_sp(root, NULL, 0, role);
+ root = tdp_mmu_alloc_sp(vcpu, role);
+ tdp_mmu_init_sp(root, NULL, 0);
/*
* TDP MMU roots are kept until they are explicitly invalidated, either
@@ -1118,8 +1110,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
* The SPTE is either non-present or points to a huge page that
* needs to be split.
*/
- sp = tdp_mmu_alloc_sp(vcpu);
- tdp_mmu_init_child_sp(sp, &iter);
+ sp = tdp_mmu_alloc_sp(vcpu, tdp_iter_child_role(&iter));
+ tdp_mmu_init_sp(sp, iter.sptep, iter.gfn);
sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
@@ -1362,7 +1354,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
return spte_set;
}
-static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
+static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp, union kvm_mmu_page_role role)
{
struct kvm_mmu_page *sp;
@@ -1372,6 +1364,7 @@ static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
if (!sp)
return NULL;
+ sp->role = role;
sp->spt = (void *)__get_free_page(gfp);
if (!sp->spt) {
kmem_cache_free(mmu_page_header_cache, sp);
@@ -1385,6 +1378,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
struct tdp_iter *iter,
bool shared)
{
+ union kvm_mmu_page_role role = tdp_iter_child_role(iter);
struct kvm_mmu_page *sp;
kvm_lockdep_assert_mmu_lock_held(kvm, shared);
@@ -1398,7 +1392,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
* If this allocation fails we drop the lock and retry with reclaim
* allowed.
*/
- sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
+ sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT, role);
if (sp)
return sp;
@@ -1410,7 +1404,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
write_unlock(&kvm->mmu_lock);
iter->yielded = true;
- sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
+ sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT, role);
if (shared)
read_lock(&kvm->mmu_lock);
@@ -1505,7 +1499,7 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
continue;
}
- tdp_mmu_init_child_sp(sp, &iter);
+ tdp_mmu_init_sp(sp, iter.sptep, iter.gfn);
if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
goto retry;
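
Similarly, here is a hedged sketch of the eager-split allocation flow after this change: the child role is computed once from the iterator and handed to both the non-blocking attempt and the drop-the-lock retry, so it is in place before the page-table page itself is allocated. The mock_* names are invented for illustration and the GFP behaviour is only modelled, not reproduced.

/*
 * Standalone sketch of tdp_mmu_alloc_sp_for_split() after this patch: the
 * child role is computed once and threaded through both allocation attempts,
 * so it is set on the header before the page-table page is allocated.  The
 * "nowait" flag only models GFP_NOWAIT possibly failing.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_split_sp {
	unsigned long role_word;	/* stand-in for sp->role.word */
	void *spt;			/* stand-in for the page-table page */
};

/* Models __tdp_mmu_alloc_sp_for_split(gfp, role). */
static struct mock_split_sp *mock_alloc_sp_for_split(bool nowait,
						     unsigned long role_word)
{
	struct mock_split_sp *sp = calloc(1, sizeof(*sp));

	if (!sp)
		return NULL;
	sp->role_word = role_word;			/* role recorded first */
	sp->spt = nowait ? NULL : calloc(1, 4096);	/* pretend NOWAIT fails */
	if (!sp->spt) {
		free(sp);
		return NULL;
	}
	return sp;
}

int main(void)
{
	unsigned long child_role = 0x2;	/* placeholder child role word */
	struct mock_split_sp *sp;

	/* First try without blocking, mirroring GFP_NOWAIT | __GFP_ACCOUNT. */
	sp = mock_alloc_sp_for_split(true, child_role);
	if (!sp) {
		/* Retry "with reclaim allowed", mirroring GFP_KERNEL_ACCOUNT. */
		sp = mock_alloc_sp_for_split(false, child_role);
	}
	if (sp) {
		printf("split sp allocated with role word %#lx\n", sp->role_word);
		free(sp->spt);
		free(sp);
	}
	return 0;
}
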