[RFC,11/45] KVM: arm64: pkvm: Expose pkvm_admit_host_page()

Message ID 20230201125328.2186498-12-jean-philippe@linaro.org (mailing list archive)
State: New, archived
Series: KVM: Arm SMMUv3 driver for pKVM

Commit Message

Jean-Philippe Brucker Feb. 1, 2023, 12:52 p.m. UTC
Since the IOMMU driver will need admit_host_page(), make it non-static
and rename it to pkvm_admit_host_page(). As a result we can drop
refill_memcache() and call the helper directly from
pkvm_refill_memcache().

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  2 --
 arch/arm64/kvm/hyp/include/nvhe/mm.h          |  1 +
 arch/arm64/kvm/hyp/nvhe/hyp-main.c            | 16 ++++++++---
 arch/arm64/kvm/hyp/nvhe/mm.c                  | 27 +++++--------------
 4 files changed, 20 insertions(+), 26 deletions(-)
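
For reference, the usage pattern the exported helper enables is the same
one pkvm_refill_memcache() adopts below: take a private copy of the
host-provided memcache, let __topup_hyp_memcache() pull pages through
pkvm_admit_host_page(), then write the consumed state back. A minimal
sketch for a future hyp-side user, where smmu_refill_memcache() is a
hypothetical name and not part of this patch:

static void *admit_host_page(void *arg)
{
	/* Adapt pkvm_admit_host_page() to the void * callback type. */
	return pkvm_admit_host_page(arg);
}

static int smmu_refill_memcache(struct kvm_hyp_memcache *mc,
				unsigned long min_pages,
				struct kvm_hyp_memcache *host_mc)
{
	/* Work on a copy so the host cannot modify the list mid-refill. */
	struct kvm_hyp_memcache tmp = *host_mc;
	int ret;

	ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
				   hyp_virt_to_phys, &tmp);
	/* Publish back how many pages were consumed. */
	*host_mc = tmp;
	return ret;
}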

Patch

diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 40decbe4cc70..d4f4ffbb7dbb 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -83,8 +83,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 int hyp_pin_shared_mem(void *from, void *to);
 void hyp_unpin_shared_mem(void *from, void *to);
 void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
-int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
-		    struct kvm_hyp_memcache *host_mc);
 
 void *pkvm_map_donated_memory(unsigned long host_va, size_t size);
 void pkvm_unmap_donated_memory(void *va, size_t size);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index 84db840f2057..a8c46a0ebc4a 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -26,6 +26,7 @@ int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot
 int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
 				  enum kvm_pgtable_prot prot,
 				  unsigned long *haddr);
+void *pkvm_admit_host_page(struct kvm_hyp_memcache *mc);
 int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);
 int pkvm_create_hyp_device_mapping(u64 base, u64 size, void __iomem *haddr);
 
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index e8328f54200e..29ce7b09edbb 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -766,14 +766,24 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 	cpu_reg(host_ctxt, 1) =  ret;
 }
 
+static void *admit_host_page(void *arg)
+{
+	return pkvm_admit_host_page(arg);
+}
+
 static int pkvm_refill_memcache(struct pkvm_hyp_vcpu *hyp_vcpu)
 {
+	int ret;
 	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
 	u64 nr_pages = VTCR_EL2_LVLS(hyp_vm->kvm.arch.vtcr) - 1;
-	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+	struct kvm_hyp_memcache host_mc = hyp_vcpu->host_vcpu->arch.pkvm_memcache;
+
+	ret =  __topup_hyp_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache,
+				    nr_pages, admit_host_page,
+				    hyp_virt_to_phys, &host_mc);
 
-	return refill_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache, nr_pages,
-			       &host_vcpu->arch.pkvm_memcache);
+	hyp_vcpu->host_vcpu->arch.pkvm_memcache = host_mc;
+	return ret;
 }
 
 static void handle___pkvm_host_map_guest(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 318298eb3d6b..9daaf2b2b191 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -340,35 +340,20 @@ int hyp_create_idmap(u32 hyp_va_bits)
 	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
 }
 
-static void *admit_host_page(void *arg)
+void *pkvm_admit_host_page(struct kvm_hyp_memcache *mc)
 {
-	struct kvm_hyp_memcache *host_mc = arg;
-
-	if (!host_mc->nr_pages)
+	if (!mc->nr_pages)
 		return NULL;
 
 	/*
 	 * The host still owns the pages in its memcache, so we need to go
 	 * through a full host-to-hyp donation cycle to change it. Fortunately,
 	 * __pkvm_host_donate_hyp() takes care of races for us, so if it
-	 * succeeds we're good to go.
+	 * succeeds we're good to go. Because mc is a copy of the memcache
+	 * struct, the host cannot modify mc->head between donate and pop.
 	 */
-	if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
+	if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(mc->head), 1))
 		return NULL;
 
-	return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
-}
-
-/* Refill our local memcache by poping pages from the one provided by the host. */
-int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
-		    struct kvm_hyp_memcache *host_mc)
-{
-	struct kvm_hyp_memcache tmp = *host_mc;
-	int ret;
-
-	ret =  __topup_hyp_memcache(mc, min_pages, admit_host_page,
-				    hyp_virt_to_phys, &tmp);
-	*host_mc = tmp;
-
-	return ret;
+	return pop_hyp_memcache(mc, hyp_phys_to_virt);
 }
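
The snapshot is what closes the race described in the comment above: the
host still owns, and can concurrently write to, the memcache struct it
shared, but once pkvm_refill_memcache() has copied it, mc->head cannot
change between the donation and the pop. The memcache itself is a singly
linked stack threaded through the free pages: mc->head is the physical
address of the top page, and the first word of that page holds the
physical address of the next one. A sketch of the pop helper relied on
here, assuming the upstream pop_hyp_memcache() semantics from
arch/arm64/include/asm/kvm_host.h:

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	if (!mc->nr_pages)
		return NULL;

	/* Unlink the top page: its first word points at the next page. */
	mc->head = *p;
	mc->nr_pages--;

	return p;
}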