@@ -1722,6 +1722,43 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
+static inline gfp_t gfp_flags_for_split(bool locked)
+{
+	/*
+	 * If under the MMU lock, use GFP_NOWAIT to avoid direct reclaim (which
+	 * is slow) and to avoid making any filesystem callbacks (which can end
+	 * up invoking KVM MMU notifiers, resulting in a deadlock).
+	 */
+	return (locked ? GFP_NOWAIT : GFP_KERNEL) | __GFP_ACCOUNT;
+}
+
+/*
+ * Allocate a new shadow page, potentially while holding the MMU lock.
+ *
+ * Huge page splitting always uses direct shadow pages since the huge page is
+ * being mapped directly with a lower level page table. Thus there's no need to
+ * allocate the gfns array.
+ */
+struct kvm_mmu_page *kvm_mmu_alloc_direct_sp_for_split(bool locked)
+{
+	gfp_t gfp = gfp_flags_for_split(locked) | __GFP_ZERO;
+	struct kvm_mmu_page *sp;
+
+	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
+	if (!sp)
+		return NULL;
+
+	sp->spt = (void *)__get_free_page(gfp);
+	if (!sp->spt) {
+		kmem_cache_free(mmu_page_header_cache, sp);
+		return NULL;
+	}
+
+	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+
+	return sp;
+}
+
 static void mark_unsync(u64 *spte);
 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
 {
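
For context only, a minimal sketch (not part of this patch) of the teardown that pairs with kvm_mmu_alloc_direct_sp_for_split(): the page table page came from __get_free_page() and the header from mmu_page_header_cache, so the inverse is free_page() plus kmem_cache_free(). The helper name free_direct_sp_example() is hypothetical, and the TDP MMU's real free path may add RCU deferral and other bookkeeping omitted here.

/* Hypothetical sketch, not from the patch: undo the two allocations above. */
static void free_direct_sp_example(struct kvm_mmu_page *sp)
{
	/* The page table page was allocated with __get_free_page(). */
	free_page((unsigned long)sp->spt);
	/* The header came from the mmu_page_header_cache slab. */
	kmem_cache_free(mmu_page_header_cache, sp);
}
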
@@ -171,4 +171,6 @@ void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 
+struct kvm_mmu_page *kvm_mmu_alloc_direct_sp_for_split(bool locked);
+
 #endif /* __KVM_X86_MMU_INTERNAL_H */
@@ -1418,43 +1418,13 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
 	return spte_set;
 }
 
-static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
-{
-	struct kvm_mmu_page *sp;
-
-	gfp |= __GFP_ZERO;
-
-	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
-	if (!sp)
-		return NULL;
-
-	sp->spt = (void *)__get_free_page(gfp);
-	if (!sp->spt) {
-		kmem_cache_free(mmu_page_header_cache, sp);
-		return NULL;
-	}
-
-	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-
-	return sp;
-}
-
 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
						       struct tdp_iter *iter,
						       bool shared)
 {
 	struct kvm_mmu_page *sp;
 
-	/*
-	 * Since we are allocating while under the MMU lock we have to be
-	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
-	 * reclaim and to avoid making any filesystem callbacks (which can end
-	 * up invoking KVM MMU notifiers, resulting in a deadlock).
-	 *
-	 * If this allocation fails we drop the lock and retry with reclaim
-	 * allowed.
-	 */
-	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
+	sp = kvm_mmu_alloc_direct_sp_for_split(true);
 	if (sp)
 		return sp;
 
@@ -1466,7 +1436,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
 	write_unlock(&kvm->mmu_lock);
 
 	iter->yielded = true;
-	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
+	sp = kvm_mmu_alloc_direct_sp_for_split(false);
 
 	if (shared)
 		read_lock(&kvm->mmu_lock);
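
Taken together, the tdp_mmu.c hunks leave the split path with a two-step allocation: a GFP_NOWAIT attempt while the MMU lock is held, and only if that fails, a lock drop followed by a reclaim-capable GFP_KERNEL retry. A condensed sketch of that flow is below; the helper name is hypothetical, and the write-lock-only locking is a simplification of the shared/read-lock handling and iter->yielded bookkeeping shown above.

/*
 * Condensed illustration of the retry pattern above; the helper name and the
 * write-lock-only locking are simplifications, not the patch's exact code.
 */
static struct kvm_mmu_page *alloc_sp_for_split_sketch(struct kvm *kvm)
{
	struct kvm_mmu_page *sp;

	/* Cheap attempt first: GFP_NOWAIT is safe under kvm->mmu_lock. */
	sp = kvm_mmu_alloc_direct_sp_for_split(true);
	if (sp)
		return sp;

	/* Drop the lock so the allocator may sleep and reclaim memory. */
	write_unlock(&kvm->mmu_lock);
	sp = kvm_mmu_alloc_direct_sp_for_split(false);
	write_lock(&kvm->mmu_lock);

	return sp;
}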