@@ -2902,59 +2902,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 	}
 }
 
-static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
-{
-	return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
-}
-
-bool need_topup_split_caches_or_resched(struct kvm *kvm)
-{
-	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
-		return true;
-
-	/*
-	 * In the worst case, SPLIT_DESC_CACHE_MIN_NR_OBJECTS descriptors are
-	 * needed to split a single huge page. Calculating how many are
-	 * actually needed is possible but not worth the complexity.
-	 */
-	return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
-	       need_topup(&kvm->arch.split_page_header_cache, 1) ||
-	       need_topup(&kvm->arch.split_shadow_page_cache, 1);
-}
-
-int topup_split_caches(struct kvm *kvm)
-{
-	/*
-	 * Allocating rmap list entries when splitting huge pages for nested
-	 * MMUs is uncommon as KVM needs to use a list if and only if there is
-	 * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be
-	 * aliased by multiple L2 gfns and/or from multiple nested roots with
-	 * different roles. Aliasing gfns when using TDP is atypical for VMMs;
-	 * a few gfns are often aliased during boot, e.g. when remapping BIOS,
-	 * but aliasing rarely occurs post-boot or for many gfns. If there is
-	 * only one rmap entry, rmap->val points directly at that one entry and
-	 * doesn't need to allocate a list. Buffer the cache by the default
-	 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
-	 * encounters an aliased gfn or two.
-	 */
-	const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS +
-			     KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
-	int r;
-
-	lockdep_assert_held(&kvm->slots_lock);
-
-	r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
-					 SPLIT_DESC_CACHE_MIN_NR_OBJECTS);
-	if (r)
-		return r;
-
-	r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
-	if (r)
-		return r;
-
-	return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
-}
-
 /* Must be called with the mmu_lock held in write-mode. */
 void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
 				  const struct kvm_memory_slot *memslot,
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -348,8 +348,6 @@ void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu);
 void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu);
 
 int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect);
-bool need_topup_split_caches_or_resched(struct kvm *kvm);
-int topup_split_caches(struct kvm *kvm);
 
 bool is_page_fault_stale(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/mmu/shadow_mmu.c b/arch/x86/kvm/mmu/shadow_mmu.c
--- a/arch/x86/kvm/mmu/shadow_mmu.c
+++ b/arch/x86/kvm/mmu/shadow_mmu.c
@@ -3219,6 +3219,59 @@ bool slot_rmap_write_protect(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 	return rmap_write_protect(rmap_head, false);
 }
 
+static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
+{
+	return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
+}
+
+static bool need_topup_split_caches_or_resched(struct kvm *kvm)
+{
+	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
+		return true;
+
+	/*
+	 * In the worst case, SPLIT_DESC_CACHE_MIN_NR_OBJECTS descriptors are
+	 * needed to split a single huge page. Calculating how many are
+	 * actually needed is possible but not worth the complexity.
+	 */
+	return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
+	       need_topup(&kvm->arch.split_page_header_cache, 1) ||
+	       need_topup(&kvm->arch.split_shadow_page_cache, 1);
+}
+
+static int topup_split_caches(struct kvm *kvm)
+{
+	/*
+	 * Allocating rmap list entries when splitting huge pages for nested
+	 * MMUs is uncommon as KVM needs to use a list if and only if there is
+	 * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be
+	 * aliased by multiple L2 gfns and/or from multiple nested roots with
+	 * different roles. Aliasing gfns when using TDP is atypical for VMMs;
+	 * a few gfns are often aliased during boot, e.g. when remapping BIOS,
+	 * but aliasing rarely occurs post-boot or for many gfns. If there is
+	 * only one rmap entry, rmap->val points directly at that one entry and
+	 * doesn't need to allocate a list. Buffer the cache by the default
+	 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
+	 * encounters an aliased gfn or two.
+	 */
+	const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS +
+			     KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
+	int r;
+
+	lockdep_assert_held(&kvm->slots_lock);
+
+	r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
+					 SPLIT_DESC_CACHE_MIN_NR_OBJECTS);
+	if (r)
+		return r;
+
+	r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
+	if (r)
+		return r;
+
+	return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
+}
+
 static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
 {
 	struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);

The split cache topup functions are only used by the Shadow MMU and were
left behind in mmu.c when splitting the Shadow MMU out to a separate
file. Move them over as well.

No functional change intended.

Suggested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c          | 53 ---------------------------------
 arch/x86/kvm/mmu/mmu_internal.h |  2 --
 arch/x86/kvm/mmu/shadow_mmu.c   | 53 +++++++++++++++++++++++++++++++++
 3 files changed, 53 insertions(+), 55 deletions(-)
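
A note for reviewers, not part of the patch: the sketch below shows the
calling pattern these helpers serve once they are static in shadow_mmu.c,
mirroring the existing huge-page split path in mmu.c (check whether a
topup or reschedule is needed, drop mmu_lock, refill the caches, retake
the lock, and have the caller restart its walk). The function name and
return convention here are illustrative assumptions, not code from this
series.

/* Illustrative sketch only -- not part of this patch. */
static int example_split_prepare_caches(struct kvm *kvm)
{
	int r = 0;

	if (need_topup_split_caches_or_resched(kvm)) {
		/*
		 * Drop mmu_lock so the caches can be refilled with
		 * sleepable allocations.  A successful topup reports
		 * -EAGAIN so the caller restarts its walk, since the
		 * lock was dropped; a failed topup returns the error.
		 */
		write_unlock(&kvm->mmu_lock);
		cond_resched();
		r = topup_split_caches(kvm) ?: -EAGAIN;
		write_lock(&kvm->mmu_lock);
	}

	return r;
}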