@@ -215,13 +215,13 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
return regs;
}

-static inline bool kvm_available_flush_tlb_with_range(void)
+inline bool kvm_available_flush_tlb_with_range(void)
{
return kvm_x86_ops.tlb_remote_flush_with_range;
}

-static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
- struct kvm_tlb_range *range)
+void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range)
{
int ret = -ENOTSUPP;

@@ -695,8 +695,8 @@ static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
return sp->role.access;
}

-static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
- gfn_t gfn, unsigned int access)
+void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
+ gfn_t gfn, unsigned int access)
{
if (sp_has_gptes(sp)) {
sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
@@ -1217,41 +1217,6 @@ static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
return false;
}

-#define RMAP_RECYCLE_THRESHOLD 1000
-
-static void __rmap_add(struct kvm *kvm,
- struct kvm_mmu_memory_cache *cache,
- const struct kvm_memory_slot *slot,
- u64 *spte, gfn_t gfn, unsigned int access)
-{
- struct kvm_mmu_page *sp;
- struct kvm_rmap_head *rmap_head;
- int rmap_count;
-
- sp = sptep_to_sp(spte);
- kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
- kvm_update_page_stats(kvm, sp->role.level, 1);
-
- rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
- rmap_count = pte_list_add(cache, spte, rmap_head);
-
- if (rmap_count > kvm->stat.max_mmu_rmap_size)
- kvm->stat.max_mmu_rmap_size = rmap_count;
- if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
- kvm_zap_all_rmap_sptes(kvm, rmap_head);
- kvm_flush_remote_tlbs_with_address(
- kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
- }
-}
-
-static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
- u64 *spte, gfn_t gfn, unsigned int access)
-{
- struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
-
- __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
-}
-
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
bool young = false;
@@ -320,4 +320,10 @@ void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep);
+void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
+ gfn_t gfn, unsigned int access);
+
+inline bool kvm_available_flush_tlb_with_range(void);
+void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range);
#endif /* __KVM_X86_MMU_INTERNAL_H */
@@ -292,7 +292,8 @@ void kvm_zap_one_rmap_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
}

/* Return true if at least one SPTE was zapped, false otherwise */
-bool kvm_zap_all_rmap_sptes(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
+ struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc, *next;
int i;
@@ -331,3 +332,37 @@ bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
{
return __kvm_zap_rmap(kvm, rmap_head, slot);
}
+
+#define RMAP_RECYCLE_THRESHOLD 1000
+
+void __rmap_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn,
+ unsigned int access)
+{
+ struct kvm_mmu_page *sp;
+ struct kvm_rmap_head *rmap_head;
+ int rmap_count;
+
+ sp = sptep_to_sp(spte);
+ kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
+ kvm_update_page_stats(kvm, sp->role.level, 1);
+
+ rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
+ rmap_count = pte_list_add(cache, spte, rmap_head);
+
+ if (rmap_count > kvm->stat.max_mmu_rmap_size)
+ kvm->stat.max_mmu_rmap_size = rmap_count;
+ if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
+ kvm_zap_all_rmap_sptes(kvm, rmap_head);
+ kvm_flush_remote_tlbs_with_address(
+ kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
+ }
+}
+
+void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
+ u64 *spte, gfn_t gfn, unsigned int access)
+{
+ struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
+
+ __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
+}
@@ -91,10 +91,16 @@ typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
void kvm_zap_one_rmap_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
u64 *sptep);
-bool kvm_zap_all_rmap_sptes(struct kvm *kvm, struct kvm_rmap_head *rmap_head);
bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot);
bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level,
pte_t unused);
+
+void __rmap_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn,
+ unsigned int access);
+void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
+ u64 *spte, gfn_t gfn, unsigned int access);
+
#endif /* __KVM_X86_MMU_RMAP_H */
Move rmap_add() to rmap.c to complete the migration of the various rmap
operations out of mmu.c. No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c          | 45 ++++-----------------------------
 arch/x86/kvm/mmu/mmu_internal.h |  6 +++++
 arch/x86/kvm/mmu/rmap.c         | 37 ++++++++++++++++++++++++++-
 arch/x86/kvm/mmu/rmap.h         |  8 +++++-
 4 files changed, 54 insertions(+), 42 deletions(-)
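Note for readers who want the relocated logic in isolation: __rmap_add() above
tracks each new SPTE on the per-gfn rmap list and, once the list grows past
RMAP_RECYCLE_THRESHOLD, zaps the whole list and requests a remote TLB flush.
Below is a minimal standalone sketch of just that recycle pattern. It is not
KVM code and every name in it is invented for illustration; it only mirrors
the control flow of __rmap_add() so the threshold behavior can be read without
the surrounding MMU machinery.

#include <stdbool.h>
#include <stdio.h>

#define TOY_RECYCLE_THRESHOLD 4	/* stand-in for RMAP_RECYCLE_THRESHOLD (1000) */

/* Toy per-gfn rmap list, reduced to a bare counter of tracked "SPTEs". */
struct toy_rmap_head {
	int count;
};

/* Stand-in for kvm_zap_all_rmap_sptes(): drop every tracked entry. */
static bool toy_zap_all(struct toy_rmap_head *head)
{
	bool zapped = head->count > 0;

	head->count = 0;
	return zapped;
}

/* Mirrors the shape of __rmap_add(): track one more entry, recycle if oversized. */
static void toy_rmap_add(struct toy_rmap_head *head)
{
	head->count++;

	if (head->count > TOY_RECYCLE_THRESHOLD) {
		toy_zap_all(head);
		/* the real code also requests a ranged remote TLB flush here */
		printf("threshold exceeded: list zapped, flush requested\n");
	}
}

int main(void)
{
	struct toy_rmap_head head = { 0 };
	int i;

	for (i = 0; i < 10; i++)
		toy_rmap_add(&head);

	printf("entries still tracked: %d\n", head.count);
	return 0;
}

Built with any C compiler, this prints a recycle message each time the toy
threshold is crossed and ends with an empty list, mirroring the
kvm_zap_all_rmap_sptes() plus kvm_flush_remote_tlbs_with_address() pairing in
the hunk above.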