--- a/arch/x86/include/asm/kvm_types.h
+++ b/arch/x86/include/asm/kvm_types.h
@@ -2,6 +2,10 @@
#ifndef _ASM_X86_KVM_TYPES_H
#define _ASM_X86_KVM_TYPES_H
-#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+/*
+ * For each fault, at most PT64_ROOT_MAX_LEVEL - 1 pages are needed; the
+ * root page is allocated via a separate flow.
+ */
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE (PT64_ROOT_MAX_LEVEL - 1)
#endif /* _ASM_X86_KVM_TYPES_H */
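
As a sanity check on the new bound, here is a minimal user-space sketch
(illustrative code, not part of the patch; it assumes the kernel's
PT64_ROOT_MAX_LEVEL == 5, i.e. 5-level paging) counting the worst-case
page table page allocations for a single fault:

#include <stdio.h>

/* Illustrative copy of the kernel's maximum paging depth. */
#define PT64_ROOT_MAX_LEVEL 5

int main(void)
{
	int level, pages_needed = 0;

	/*
	 * A fault may have to install a new page table page at every
	 * level below the root (levels 4..1); the root page itself is
	 * allocated on a separate path.
	 */
	for (level = PT64_ROOT_MAX_LEVEL - 1; level >= 1; level--)
		pages_needed++;

	printf("worst-case pages per fault: %d\n", pages_needed); /* 4 */
	return 0;
}
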
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -713,11 +713,11 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
if (kvm_numa_aware_page_table_enabled(vcpu->kvm)) {
for_each_online_node(nid) {
r = mmu_topup_sp_memory_cache(&vcpu->arch.mmu_shadow_page_cache[nid],
- PT64_ROOT_MAX_LEVEL);
+ KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE);
}
} else {
r = mmu_topup_sp_memory_cache(&vcpu->arch.mmu_shadow_page_cache[nid],
- PT64_ROOT_MAX_LEVEL);
+ KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE);
}
if (r)
@@ -725,12 +725,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
if (maybe_indirect) {
r = mmu_topup_sp_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
- PT64_ROOT_MAX_LEVEL);
+ KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE);
if (r)
return r;
}
return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
- PT64_ROOT_MAX_LEVEL);
+ KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE);
}
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
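
For context on how these caches are used, below is a simplified sketch
of the topup/pop pattern (obj_cache, cache_topup, and cache_pop are
illustrative stand-ins, not KVM's actual API): objects are preallocated
in a sleepable context so the fault path can pop them without
allocating under the MMU lock.

#include <stdlib.h>

/* Illustrative stand-in for struct kvm_mmu_memory_cache. */
struct obj_cache {
	int nobjs;
	void *objs[4];	/* KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE == 4 slots */
};

/* Topup: fill the cache up to @min objects; may sleep/allocate. */
static int cache_topup(struct obj_cache *mc, int min)
{
	while (mc->nobjs < min) {
		void *obj = malloc(4096);	/* stand-in for one page */

		if (!obj)
			return -1;	/* the kernel returns -ENOMEM */
		mc->objs[mc->nobjs++] = obj;
	}
	return 0;
}

/* Pop: fault path; cannot fail after a successful topup. */
static void *cache_pop(struct obj_cache *mc)
{
	return mc->nobjs ? mc->objs[--mc->nobjs] : NULL;
}

int main(void)
{
	struct obj_cache mc = { 0 };
	void *obj;

	if (cache_topup(&mc, 4))
		return 1;
	while ((obj = cache_pop(&mc)))
		free(obj);	/* the fault path would install these pages */
	return 0;
}
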
Reduce KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE to PT64_ROOT_MAX_LEVEL - 1.
Opportunistically, use this reduced value for topping up caches.

There was no specific reason to set this value to 40. With the addition
of multi-NUMA-node caches, it is good to save space and make these
caches lean.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/x86/include/asm/kvm_types.h | 6 +++++-
 arch/x86/kvm/mmu/mmu.c           | 8 ++++----
 2 files changed, 9 insertions(+), 5 deletions(-)
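
To put the space saving in numbers, a back-of-the-envelope sketch
(assumed figures: 4 KiB pages and a hypothetical 4-node machine; one
per-node shadow page cache per vCPU, as in this series):

#include <stdio.h>

#define PAGE_KIB	4	/* assumed 4 KiB page size */
#define OLD_OBJS	40	/* previous hard-coded cache size */
#define NEW_OBJS	4	/* PT64_ROOT_MAX_LEVEL - 1 */

int main(void)
{
	int nodes = 4;	/* hypothetical NUMA node count */

	printf("old: %3d KiB per vCPU\n", nodes * OLD_OBJS * PAGE_KIB);
	printf("new: %3d KiB per vCPU\n", nodes * NEW_OBJS * PAGE_KIB);
	return 0;
}

This prints 640 KiB versus 64 KiB for the shadow page caches of a
single vCPU on such a machine.
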