--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -83,8 +83,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
int hyp_pin_shared_mem(void *from, void *to);
void hyp_unpin_shared_mem(void *from, void *to);
void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
-int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
- struct kvm_hyp_memcache *host_mc);
void *pkvm_map_donated_memory(unsigned long host_va, size_t size);
void pkvm_unmap_donated_memory(void *va, size_t size);
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -26,6 +26,7 @@ int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
enum kvm_pgtable_prot prot,
unsigned long *haddr);
+void *pkvm_admit_host_page(struct kvm_hyp_memcache *mc);
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);
int pkvm_create_hyp_device_mapping(u64 base, u64 size, void __iomem *haddr);
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -766,14 +766,24 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = ret;
}
+static void *admit_host_page(void *arg)
+{
+ return pkvm_admit_host_page(arg);
+}
+
static int pkvm_refill_memcache(struct pkvm_hyp_vcpu *hyp_vcpu)
{
+ int ret;
struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
u64 nr_pages = VTCR_EL2_LVLS(hyp_vm->kvm.arch.vtcr) - 1;
- struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+ struct kvm_hyp_memcache host_mc = hyp_vcpu->host_vcpu->arch.pkvm_memcache;
+
+ ret = __topup_hyp_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache,
+ nr_pages, admit_host_page,
+ hyp_virt_to_phys, &host_mc);
- return refill_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache, nr_pages,
- &host_vcpu->arch.pkvm_memcache);
+ hyp_vcpu->host_vcpu->arch.pkvm_memcache = host_mc;
+ return ret;
}
static void handle___pkvm_host_map_guest(struct kvm_cpu_context *host_ctxt)
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -340,35 +340,20 @@ int hyp_create_idmap(u32 hyp_va_bits)
return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}
-static void *admit_host_page(void *arg)
+void *pkvm_admit_host_page(struct kvm_hyp_memcache *mc)
{
- struct kvm_hyp_memcache *host_mc = arg;
-
- if (!host_mc->nr_pages)
+ if (!mc->nr_pages)
return NULL;
/*
* The host still owns the pages in its memcache, so we need to go
* through a full host-to-hyp donation cycle to change it. Fortunately,
* __pkvm_host_donate_hyp() takes care of races for us, so if it
- * succeeds we're good to go.
+ * succeeds we're good to go. Because mc is a copy of the memcache
+ * struct, the host cannot modify mc->head between donate and pop.
*/
- if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
+ if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(mc->head), 1))
return NULL;
- return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
-}
-
-/* Refill our local memcache by poping pages from the one provided by the host. */
-int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
- struct kvm_hyp_memcache *host_mc)
-{
- struct kvm_hyp_memcache tmp = *host_mc;
- int ret;
-
- ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
- hyp_virt_to_phys, &tmp);
- *host_mc = tmp;
-
- return ret;
+ return pop_hyp_memcache(mc, hyp_phys_to_virt);
}
Since the IOMMU driver will need admit_host_page(), make it non-static. As a
result we can drop refill_memcache() and call admit_host_page() directly from
pkvm_refill_memcache().

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  2 --
 arch/arm64/kvm/hyp/include/nvhe/mm.h          |  1 +
 arch/arm64/kvm/hyp/nvhe/hyp-main.c            | 16 ++++++++---
 arch/arm64/kvm/hyp/nvhe/mm.c                  | 27 +++++--------------
 4 files changed, 20 insertions(+), 26 deletions(-)
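
As a usage sketch for reviewers: a hyp-side consumer such as the IOMMU driver
could top up its own memcache from a host-provided one by mirroring what
pkvm_refill_memcache() does above. The iommu_refill_pages() helper and its
name are hypothetical, not part of this series; pkvm_admit_host_page(),
__topup_hyp_memcache(), hyp_virt_to_phys() and struct kvm_hyp_memcache are the
real interfaces this patch uses:

	/* Adapter matching the alloc_fn callback of __topup_hyp_memcache(). */
	static void *admit_host_page(void *arg)
	{
		return pkvm_admit_host_page(arg);
	}

	/*
	 * Hypothetical helper (for illustration only): donate min_pages
	 * pages from the host-filled memcache into the hyp-owned one.
	 * Work on a stack copy of the host memcache so the host cannot
	 * race with the donation, then write the updated state back.
	 */
	static int iommu_refill_pages(struct kvm_hyp_memcache *hyp_mc,
				      struct kvm_hyp_memcache *host_mc,
				      unsigned long min_pages)
	{
		struct kvm_hyp_memcache tmp = *host_mc;
		int ret;

		ret = __topup_hyp_memcache(hyp_mc, min_pages, admit_host_page,
					   hyp_virt_to_phys, &tmp);
		*host_mc = tmp;

		return ret;
	}

Operating on a copy is what lets the updated comment in pkvm_admit_host_page()
assume the host cannot modify mc->head between the donation and the pop.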