diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -473,9 +473,6 @@ struct kvm_vcpu_arch {
/* vcpu power state */
struct kvm_mp_state mp_state;

- /* Cache some mmu pages needed inside spinlock regions */
- struct kvm_mmu_memory_cache mmu_page_cache;
-
/* Target CPU and feature flags */
int target;
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -340,7 +340,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

- vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+ vcpu->mmu_page_table_cache.gfp_zero = __GFP_ZERO;

/*
* Default value for the FP state, will be overloaded at load
@@ -375,7 +375,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
static_branch_dec(&userspace_irqchip_in_use);

- kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_mmu_free_memory_cache(&vcpu->mmu_page_table_cache);
kvm_timer_vcpu_terminate(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1196,7 +1196,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
bool device = false;
unsigned long mmu_seq;
struct kvm *kvm = vcpu->kvm;
- struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ struct kvm_mmu_memory_cache *memcache = &vcpu->mmu_page_table_cache;
struct vm_area_struct *vma;
short vma_shift;
gfn_t gfn;
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -344,9 +344,6 @@ struct kvm_vcpu_arch {
/* Bitmask of pending exceptions to be cleared */
unsigned long pending_exceptions_clr;

- /* Cache some mmu pages needed inside spinlock regions */
- struct kvm_mmu_memory_cache mmu_page_cache;
-
/* vcpu's vzguestid is different on each host cpu in an smp system */
u32 vzguestid[NR_CPUS];
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -27,7 +27,7 @@
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
- kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_mmu_free_memory_cache(&vcpu->mmu_page_table_cache);
}

/**
@@ -589,7 +589,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
pte_t *out_entry, pte_t *out_buddy)
{
struct kvm *kvm = vcpu->kvm;
- struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ struct kvm_mmu_memory_cache *memcache = &vcpu->mmu_page_table_cache;
gfn_t gfn = gpa >> PAGE_SHIFT;
int srcu_idx, err;
kvm_pfn_t pfn;
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -219,9 +219,6 @@ struct kvm_vcpu_arch {
/* SBI context */
struct kvm_sbi_context sbi_context;

- /* Cache pages needed to program page tables with spinlock held */
- struct kvm_mmu_memory_cache mmu_page_cache;
-
/* VCPU power-off state */
bool power_off;
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -625,7 +625,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
gfn_t gfn = gpa >> PAGE_SHIFT;
struct vm_area_struct *vma;
struct kvm *kvm = vcpu->kvm;
- struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
+ struct kvm_mmu_memory_cache *pcache = &vcpu->mmu_page_table_cache;
bool logging = (memslot->dirty_bitmap &&
!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
unsigned long vma_pagesize, mmu_seq;
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -160,7 +160,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
- vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+ vcpu->mmu_page_table_cache.gfp_zero = __GFP_ZERO;
bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

/* Setup ISA features available to VCPU */
@@ -211,7 +211,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_riscv_vcpu_timer_deinit(vcpu);

/* Free unused pages pre-allocated for G-stage page table mappings */
- kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_mmu_free_memory_cache(&vcpu->mmu_page_table_cache);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -714,7 +714,6 @@ struct kvm_vcpu_arch {
struct kvm_mmu *walk_mmu;

struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
- struct kvm_mmu_memory_cache mmu_shadow_page_cache;
struct kvm_mmu_memory_cache mmu_shadowed_info_cache;
struct kvm_mmu_memory_cache mmu_page_header_cache;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -664,7 +664,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
if (r)
return r;
- r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
+ r = kvm_mmu_topup_memory_cache(&vcpu->mmu_page_table_cache,
PT64_ROOT_MAX_LEVEL);
if (r)
return r;
@@ -681,7 +681,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
- kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
+ kvm_mmu_free_memory_cache(&vcpu->mmu_page_table_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}
@@ -2218,7 +2218,7 @@ static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
{
struct shadow_page_caches caches = {
.page_header_cache = &vcpu->arch.mmu_page_header_cache,
- .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
+ .shadow_page_cache = &vcpu->mmu_page_table_cache,
.shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
};
@@ -5920,7 +5920,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;

- vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
+ vcpu->mmu_page_table_cache.gfp_zero = __GFP_ZERO;

vcpu->arch.mmu = &vcpu->arch.root_mmu;
vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -264,7 +264,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
struct kvm_mmu_page *sp;

sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
- sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+ sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->mmu_page_table_cache);

return sp;
}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -387,6 +387,11 @@ struct kvm_vcpu {
*/
struct kvm_memory_slot *last_used_slot;
u64 last_used_slot_gen;
+
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+ /* Cache used to allocate pages for use as page tables. */
+ struct kvm_mmu_memory_cache mmu_page_table_cache;
+#endif
};

/*
Add a kvm_mmu_memory_cache to struct kvm_vcpu for allocating pages for
page tables, replacing the existing architecture-specific page table
caches.

Purposely do not make management of this cache architecture-neutral,
though, to reduce churn and because not all architectures configure it
the same way (MIPS does not set __GFP_ZERO).

This eliminates a dependency of the TDP MMU on an architecture-specific
field, which a future commit will use to move the TDP MMU to common
code.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/arm64/include/asm/kvm_host.h | 3 ---
 arch/arm64/kvm/arm.c              | 4 ++--
 arch/arm64/kvm/mmu.c              | 2 +-
 arch/mips/include/asm/kvm_host.h  | 3 ---
 arch/mips/kvm/mmu.c               | 4 ++--
 arch/riscv/include/asm/kvm_host.h | 3 ---
 arch/riscv/kvm/mmu.c              | 2 +-
 arch/riscv/kvm/vcpu.c             | 4 ++--
 arch/x86/include/asm/kvm_host.h   | 1 -
 arch/x86/kvm/mmu/mmu.c            | 8 ++++----
 arch/x86/kvm/mmu/tdp_mmu.c        | 2 +-
 include/linux/kvm_host.h          | 5 +++++
 12 files changed, 18 insertions(+), 23 deletions(-)
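For reviewers less familiar with the memory-cache helpers, the sketch
below shows the pattern this per-vCPU cache exists to support: top up
outside the lock, allocate under it. It is illustrative only and not
part of the patch; example_map_fault() and EXAMPLE_MIN_PAGES are
made-up placeholders, and the plain spin_lock() is a simplification
(some architectures take mmu_lock as a rwlock). The kvm_mmu_* helpers
are the existing common-code API.

/*
 * Illustrative sketch only -- not part of this patch.
 * example_map_fault() and EXAMPLE_MIN_PAGES are placeholders.
 */
static int example_map_fault(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        struct kvm_mmu_memory_cache *mc = &vcpu->mmu_page_table_cache;
        void *new_table;
        int r;

        /*
         * Top up the cache while sleeping is still allowed; the
         * topup may allocate pages with GFP_KERNEL and block.
         */
        r = kvm_mmu_topup_memory_cache(mc, EXAMPLE_MIN_PAGES);
        if (r)
                return r;

        spin_lock(&vcpu->kvm->mmu_lock);

        /*
         * Allocations under mmu_lock must not sleep, so page-table
         * pages are popped from the pre-filled cache instead.
         */
        new_table = kvm_mmu_memory_cache_alloc(mc);

        /* ... install new_table as a page-table page covering gfn ... */

        spin_unlock(&vcpu->kvm->mmu_lock);
        return 0;
}

Unused pre-allocated pages stay in the cache across faults and are only
released by kvm_mmu_free_memory_cache() at vCPU teardown, which is what
the destroy paths above do. Note the new field is compiled in only when
the architecture defines KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, the same
guard that gates struct kvm_mmu_memory_cache itself.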