@@ -1033,6 +1033,18 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 	return true;
 }
 
+
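+/*
+ * Forward declarations: tdp_mmu_zap_leafs() may need to split a huge page
+ * rather than zap it whole; the helpers are defined later in this file.
+ */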
+static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
+							struct tdp_iter *iter,
+							bool shared);
+
+static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
+				    struct kvm_mmu_page *sp, bool shared);
+
 /*
  * If can_yield is true, will release the MMU lock and reschedule if the
  * scheduler needs the CPU or there is contention on the MMU lock. If this
@@ -1075,6 +1087,31 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
 
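+		/*
+		 * A huge page that maps a mix of private and shared memory,
+		 * or that is only partially covered by the zap range, cannot
+		 * be zapped as a whole.  Split it and let the iterator step
+		 * down to zap the smaller mappings individually.
+		 */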
+		if (kvm_gfn_shared_mask(kvm) && is_large_pte(iter.old_spte)) {
+			gfn_t gfn = iter.gfn & ~kvm_gfn_shared_mask(kvm);
+			gfn_t mask = KVM_PAGES_PER_HPAGE(iter.level) - 1;
+			struct kvm_memory_slot *slot;
+			struct kvm_mmu_page *sp;
+
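+			/* Look up the memslot for the unaliased gfn. */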
+			slot = gfn_to_memslot(kvm, gfn);
+			if (kvm_mem_attr_is_mixed(slot, gfn, iter.level) ||
+			    (gfn & ~mask) < start ||
+			    end < (gfn & ~mask) + KVM_PAGES_PER_HPAGE(iter.level)) {
+				sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, false);
+				WARN_ON(!sp);
+
+				tdp_mmu_split_huge_page(kvm, &iter, sp, false);
+				continue;
+			}
+		}
+
 		tdp_mmu_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
 		flush = true;
 	}
@@ -1642,8 +1679,6 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
 	WARN_ON(kvm_mmu_page_role_is_private(role) !=
 		is_private_sptep(iter->sptep));
-	/* TODO: Large page isn't supported for private SPTE yet. */
-	WARN_ON(kvm_mmu_page_role_is_private(role));
 	/*
 	 * Since we are allocating while under the MMU lock we have to be