diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -28,6 +28,8 @@ hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
hyp-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
hyp-obj-y += $(lib-objs)
+hyp-obj-$(CONFIG_KVM_IOMMU) += iommu/iommu.o
+
##
## Build rules for compiling nVHE hyp code
## Output of this folder is `kvm_nvhe.o`, a partially linked object
diff --git a/arch/arm64/kvm/hyp/include/nvhe/iommu.h b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
@@ -3,6 +3,10 @@
#define __ARM64_KVM_NVHE_IOMMU_H__
#if IS_ENABLED(CONFIG_KVM_IOMMU)
+int kvm_iommu_init(void);
+void *kvm_iommu_donate_page(void);
+void kvm_iommu_reclaim_page(void *p);
+
/* Hypercall handlers */
int kvm_iommu_alloc_domain(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
unsigned long pgd_hva);
diff --git a/include/kvm/iommu.h b/include/kvm/iommu.h
new file mode 100644
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_IOMMU_H
+#define __KVM_IOMMU_H
+
+#include <asm/kvm_host.h>
+
+struct kvm_hyp_iommu_memcache {
+ struct kvm_hyp_memcache pages;
+ bool needs_page;
+} ____cacheline_aligned_in_smp;
+
+extern struct kvm_hyp_iommu_memcache *kvm_nvhe_sym(kvm_hyp_iommu_memcaches);
+#define kvm_hyp_iommu_memcaches kvm_nvhe_sym(kvm_hyp_iommu_memcaches)
+
+#endif /* __KVM_IOMMU_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
new file mode 100644
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOMMU operations for pKVM
+ *
+ * Copyright (C) 2022 Linaro Ltd.
+ */
+
+#include <asm/kvm_hyp.h>
+#include <kvm/iommu.h>
+#include <nvhe/iommu.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/mm.h>
+
+struct kvm_hyp_iommu_memcache __ro_after_init *kvm_hyp_iommu_memcaches;
+
+void *kvm_iommu_donate_page(void)
+{
+ void *p;
+ int cpu = hyp_smp_processor_id();
+ struct kvm_hyp_memcache tmp = kvm_hyp_iommu_memcaches[cpu].pages;
+
+ if (!tmp.nr_pages) {
+ kvm_hyp_iommu_memcaches[cpu].needs_page = true;
+ return NULL;
+ }
+
+ p = pkvm_admit_host_page(&tmp);
+ if (!p)
+ return NULL;
+
+ kvm_hyp_iommu_memcaches[cpu].pages = tmp;
+ memset(p, 0, PAGE_SIZE);
+ return p;
+}
+
+void kvm_iommu_reclaim_page(void *p)
+{
+ int cpu = hyp_smp_processor_id();
+
+ pkvm_teardown_donated_memory(&kvm_hyp_iommu_memcaches[cpu].pages, p,
+ PAGE_SIZE);
+}
+
+int kvm_iommu_init(void)
+{
+ enum kvm_pgtable_prot prot;
+
+ /* The memcache is shared with the host */
+ prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_OWNED);
+ return pkvm_create_mappings(kvm_hyp_iommu_memcaches,
+ kvm_hyp_iommu_memcaches + NR_CPUS, prot);
+}
The hyp driver will need to allocate pages when handling some hypercalls,
to populate page, stream and domain tables. Add a per-cpu page queue that
will contain host pages to be donated and reclaimed. When the driver needs
a new page, it sets the needs_page bit and returns to the host with an
error. The host pushes a page and retries the hypercall.

The queue is per-cpu to ensure that IOMMU map()/unmap() requests from
different CPUs don't step on each other. It is populated on demand rather
than upfront to avoid wasting memory, as these allocations should be
relatively rare.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 arch/arm64/kvm/hyp/nvhe/Makefile        |  2 +
 arch/arm64/kvm/hyp/include/nvhe/iommu.h |  4 ++
 include/kvm/iommu.h                     | 15 +++++++
 arch/arm64/kvm/hyp/nvhe/iommu/iommu.c   | 52 +++++++++++++++++++++++++
 4 files changed, 73 insertions(+)
 create mode 100644 include/kvm/iommu.h
 create mode 100644 arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
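
The host side of this handshake is not in this patch. As an illustration
only, the retry loop described in the commit message could look roughly
like the sketch below. It assumes the generic topup_hyp_memcache() helper
from <asm/kvm_host.h>; kvm_iommu_call_hyp() and its hyp_call thunk are
hypothetical names standing in for the real hypercall plumbing.

/*
 * Illustrative host-side retry loop, not part of this patch. The thunk
 * wraps the actual kvm_call_hyp_nvhe() invocation and its arguments.
 */
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/kvm_host.h>
#include <kvm/iommu.h>

static int kvm_iommu_call_hyp(int (*hyp_call)(void))
{
	struct kvm_hyp_iommu_memcache *mc;
	int ret;

	/* Stay on one CPU so we top up the memcache the hyp driver reads */
	migrate_disable();
	for (;;) {
		mc = &kvm_hyp_iommu_memcaches[smp_processor_id()];
		mc->needs_page = false;

		ret = hyp_call();
		if (!ret || !mc->needs_page)
			break;

		/* The hyp driver ran out of pages: donate one more, retry */
		ret = topup_hyp_memcache(&mc->pages, mc->pages.nr_pages + 1);
		if (ret)
			break;
	}
	migrate_enable();
	return ret;
}

A real implementation would presumably also drain the per-cpu memcaches
(e.g. with free_hyp_memcache()) once the pages have been reclaimed from
the hypervisor on teardown.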