[RFC,13/17] KVM: arm64: Setup cache for stage2 page headers

Message ID 20220415215901.1737897-14-oupton@google.com (mailing list archive)
State New, archived
Series KVM: arm64: Parallelize stage 2 fault handling

Commit Message

Oliver Upton April 15, 2022, 9:58 p.m. UTC
In order to punt the last reference drop on a page to an RCU
synchronization, we need a pointer to the page that the callback can
use.

Set up a memcache for stage2 page headers, but do nothing with it for
now. Note that the kmem_cache is never destroyed, as it is currently
not possible to build KVM/arm64 as a module.

Signed-off-by: Oliver Upton <oupton@google.com>
---
 arch/arm64/include/asm/kvm_host.h |  1 +
 arch/arm64/kvm/mmu.c              | 22 ++++++++++++++++++++++
 2 files changed, 23 insertions(+)
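
For illustration only (not part of this patch): a minimal sketch, as if
added to arch/arm64/kvm/mmu.c, of how such a header might be consumed
once a later patch in the series wires it up. The function names and
the put_page() call below are assumptions, not code from the series.

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static void stage2_page_header_free_rcu(struct rcu_head *head)
{
	struct stage2_page_header *hdr =
		container_of(head, struct stage2_page_header, rcu_head);

	/* The deferred last reference drop; put_page() is an assumption. */
	put_page(hdr->page);
	kmem_cache_free(stage2_page_header_cache, hdr);
}

static void stage2_defer_page_free(struct kvm_mmu_caches *mmu_caches,
				   struct page *page)
{
	/* header_cache is topped up in user_mem_abort(), so no sleeping here. */
	struct stage2_page_header *hdr =
		kvm_mmu_memory_cache_alloc(&mmu_caches->header_cache);

	hdr->page = page;
	call_rcu(&hdr->rcu_head, stage2_page_header_free_rcu);
}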

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c8947597a619..a640d015790e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -374,6 +374,7 @@  struct kvm_vcpu_arch {
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_caches {
 		struct kvm_mmu_memory_cache page_cache;
+		struct kvm_mmu_memory_cache header_cache;
 	} mmu_caches;
 
 	/* Target CPU and feature flags */
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7a588928740a..cc6ed6b06ec2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -31,6 +31,12 @@  static phys_addr_t hyp_idmap_vector;
 
 static unsigned long io_map_base;
 
+static struct kmem_cache *stage2_page_header_cache;
+
+struct stage2_page_header {
+	struct rcu_head rcu_head;
+	struct page *page;
+};
 
 /*
  * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
@@ -1164,6 +1170,11 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 						 kvm_mmu_cache_min_pages(kvm));
 		if (ret)
 			return ret;
+
+		ret = kvm_mmu_topup_memory_cache(&mmu_caches->header_cache,
+						 kvm_mmu_cache_min_pages(kvm));
+		if (ret)
+			return ret;
 	}
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
@@ -1589,6 +1600,15 @@  int kvm_mmu_init(u32 *hyp_va_bits)
 	if (err)
 		goto out_destroy_pgtable;
 
+	stage2_page_header_cache = kmem_cache_create("stage2_page_header",
+						     sizeof(struct stage2_page_header),
+						     0, SLAB_ACCOUNT, NULL);
+
+	if (!stage2_page_header_cache) {
+		err = -ENOMEM;
+		goto out_destroy_pgtable;
+	}
+
 	io_map_base = hyp_idmap_start;
 	return 0;
 
@@ -1604,11 +1624,13 @@  int kvm_mmu_init(u32 *hyp_va_bits)
 void kvm_mmu_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.mmu_caches.page_cache.gfp_zero = __GFP_ZERO;
+	vcpu->arch.mmu_caches.header_cache.kmem_cache = stage2_page_header_cache;
 }
 
 void kvm_mmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_caches.page_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_caches.header_cache);
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
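
Not spelled out in the patch, but relevant to the design: because
header_cache.kmem_cache points at stage2_page_header_cache, the generic
kvm_mmu_topup_memory_cache() fills the cache from that slab in a
context that may sleep, while kvm_mmu_memory_cache_alloc() later pops a
pre-allocated object without sleeping. This mirrors how the existing
page_cache feeds table pages, and is what makes it safe to grab a
header under the MMU lock. A rough sketch of the intended call pattern,
with hypothetical helper names:

#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>

/* Before taking the MMU lock: may sleep while refilling the slab cache. */
static int stage2_topup_header_cache(struct kvm_vcpu *vcpu)
{
	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_caches.header_cache,
					  kvm_mmu_cache_min_pages(vcpu->kvm));
}

/* Under the MMU lock: pops a pre-allocated header, never sleeps. */
static struct stage2_page_header *stage2_grab_header(struct kvm_vcpu *vcpu)
{
	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_caches.header_cache);
}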