[v4,18/18] KVM: x86/mmu: Reduce default mmu memory cache size

Message ID 20230306224127.1689967-19-vipinsh@google.com (mailing list archive)
State New, archived
Series NUMA aware page table allocation

Commit Message

Vipin Sharma March 6, 2023, 10:41 p.m. UTC
Reduce KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE to PT64_ROOT_MAX_LEVEL - 1.
Opportunistically, use this reduced value for topping up caches.

There was no specific reason to set this value to 40. With the addition
of multiple per-NUMA-node caches, it is good to save space and keep
these caches lean.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/x86/include/asm/kvm_types.h | 6 +++++-
 arch/x86/kvm/mmu/mmu.c           | 8 ++++----
 2 files changed, 9 insertions(+), 5 deletions(-)
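
As a rough illustration of the bound (not part of the patch): the new
minimum follows from the depth of the shadow page table walk. With
5-level paging, a single fault can instantiate at most one new shadow
page per level below the root, and the root page is allocated in a
separate flow. A minimal sketch, assuming PT64_ROOT_MAX_LEVEL == 5 as
in current KVM sources:

/*
 * Illustration only. A fault walks the root plus up to four lower
 * levels; the root is allocated separately, so at most four objects
 * are pulled from the cache per fault.
 */
#define PT64_ROOT_MAX_LEVEL 5

#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE (PT64_ROOT_MAX_LEVEL - 1)	/* == 4, was 40 */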

Patch

diff --git a/arch/x86/include/asm/kvm_types.h b/arch/x86/include/asm/kvm_types.h
index 08f1b57d3b62..80aff231b708 100644
--- a/arch/x86/include/asm/kvm_types.h
+++ b/arch/x86/include/asm/kvm_types.h
@@ -2,6 +2,10 @@ 
 #ifndef _ASM_X86_KVM_TYPES_H
 #define _ASM_X86_KVM_TYPES_H
 
-#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+/*
+ * For each fault, only PT64_ROOT_MAX_LEVEL - 1 pages are needed. The
+ * root page is allocated in a separate flow.
+ */
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE (PT64_ROOT_MAX_LEVEL - 1)
 
 #endif /* _ASM_X86_KVM_TYPES_H */
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6d44a4e08328..5463ce6e52fa 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -713,11 +713,11 @@  static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 	if (kvm_numa_aware_page_table_enabled(vcpu->kvm)) {
 		for_each_online_node(nid) {
 			r = mmu_topup_sp_memory_cache(&vcpu->arch.mmu_shadow_page_cache[nid],
-						      PT64_ROOT_MAX_LEVEL);
+						      KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE);
 		}
 	} else {
 		r = mmu_topup_sp_memory_cache(&vcpu->arch.mmu_shadow_page_cache[nid],
-					      PT64_ROOT_MAX_LEVEL);
+					      KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE);
 	}
 
 	if (r)
@@ -725,12 +725,12 @@  static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 
 	if (maybe_indirect) {
 		r = mmu_topup_sp_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
-					      PT64_ROOT_MAX_LEVEL);
+					      KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE);
 		if (r)
 			return r;
 	}
 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
-					  PT64_ROOT_MAX_LEVEL);
+					  KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE);
 }
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
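
For a sense of scale (illustrative arithmetic only, assuming 4 KiB
objects and PT64_ROOT_MAX_LEVEL == 5): the old minimum of 40 objects
kept up to 160 KiB resident per cache, while the new minimum of 4
objects keeps 16 KiB. With the per-NUMA-node shadow page caches added
earlier in this series, the old value would be paid once per online
node per vCPU, which is what makes the smaller value worthwhile. A
hypothetical helper to make the arithmetic concrete
(shadow_cache_bytes_per_vcpu is not a real KVM function):

/*
 * Hypothetical helper, for illustration only; not part of KVM. Bytes
 * kept in the per-node shadow page caches of one vCPU when NUMA-aware
 * page table allocation is enabled, assuming PAGE_SIZE (4 KiB) objects.
 */
static unsigned long shadow_cache_bytes_per_vcpu(int nr_online_nodes)
{
	/* 4 objects per cache after this patch (was 40), one cache per node. */
	return (unsigned long)KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE * PAGE_SIZE *
	       nr_online_nodes;
}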