diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -92,3 +92,6 @@ config KVM_XFER_TO_GUEST_WORK
config HAVE_KVM_PM_NOTIFIER
bool
+
+config KVM_IOMMU
+ bool
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -81,6 +81,13 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_sync_state,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_alloc_domain,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_free_domain,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_attach_dev,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_detach_dev,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_map_pages,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_unmap_pages,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_iova_to_phys,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
diff --git a/arch/arm64/kvm/hyp/include/nvhe/iommu.h b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
--- a/arch/arm64/kvm/hyp/include/nvhe/iommu.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
@@ -2,6 +2,74 @@
#ifndef __ARM64_KVM_NVHE_IOMMU_H__
#define __ARM64_KVM_NVHE_IOMMU_H__
+#if IS_ENABLED(CONFIG_KVM_IOMMU)
+/* Hypercall handlers */
+int kvm_iommu_alloc_domain(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ unsigned long pgd_hva);
+int kvm_iommu_free_domain(pkvm_handle_t iommu_id, pkvm_handle_t domain_id);
+int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ u32 endpoint_id);
+int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ u32 endpoint_id);
+int kvm_iommu_map_pages(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ unsigned long iova, phys_addr_t paddr, size_t pgsize,
+ size_t pgcount, int prot);
+int kvm_iommu_unmap_pages(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ unsigned long iova, size_t pgsize, size_t pgcount);
+phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t iommu_id,
+ pkvm_handle_t domain_id, unsigned long iova);
+#else /* !CONFIG_KVM_IOMMU */
+static inline int kvm_iommu_alloc_domain(pkvm_handle_t iommu_id,
+ pkvm_handle_t domain_id,
+ unsigned long pgd_hva)
+{
+ return -ENODEV;
+}
+
+static inline int kvm_iommu_free_domain(pkvm_handle_t iommu_id,
+ pkvm_handle_t domain_id)
+{
+ return -ENODEV;
+}
+
+static inline int kvm_iommu_attach_dev(pkvm_handle_t iommu_id,
+ pkvm_handle_t domain_id,
+ u32 endpoint_id)
+{
+ return -ENODEV;
+}
+
+static inline int kvm_iommu_detach_dev(pkvm_handle_t iommu_id,
+ pkvm_handle_t domain_id,
+ u32 endpoint_id)
+{
+ return -ENODEV;
+}
+
+static inline int kvm_iommu_map_pages(pkvm_handle_t iommu_id,
+ pkvm_handle_t domain_id,
+ unsigned long iova, phys_addr_t paddr,
+ size_t pgsize, size_t pgcount, int prot)
+{
+ return -ENODEV;
+}
+
+static inline int kvm_iommu_unmap_pages(pkvm_handle_t iommu_id,
+ pkvm_handle_t domain_id,
+ unsigned long iova, size_t pgsize,
+ size_t pgcount)
+{
+ return 0;
+}
+
+static inline phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t iommu_id,
+ pkvm_handle_t domain_id,
+ unsigned long iova)
+{
+ return 0;
+}
+#endif /* CONFIG_KVM_IOMMU */
+
struct kvm_iommu_ops {
int (*init)(void);
};
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -1059,6 +1059,76 @@ static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
}
+static void handle___pkvm_host_iommu_alloc_domain(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+ DECLARE_REG(unsigned long, pgd_hva, host_ctxt, 3);
+
+ cpu_reg(host_ctxt, 1) = kvm_iommu_alloc_domain(iommu, domain, pgd_hva);
+}
+
+static void handle___pkvm_host_iommu_free_domain(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+
+ cpu_reg(host_ctxt, 1) = kvm_iommu_free_domain(iommu, domain);
+}
+
+static void handle___pkvm_host_iommu_attach_dev(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+ DECLARE_REG(unsigned int, endpoint, host_ctxt, 3);
+
+ cpu_reg(host_ctxt, 1) = kvm_iommu_attach_dev(iommu, domain, endpoint);
+}
+
+static void handle___pkvm_host_iommu_detach_dev(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+ DECLARE_REG(unsigned int, endpoint, host_ctxt, 3);
+
+ cpu_reg(host_ctxt, 1) = kvm_iommu_detach_dev(iommu, domain, endpoint);
+}
+
+static void handle___pkvm_host_iommu_map_pages(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+ DECLARE_REG(unsigned long, iova, host_ctxt, 3);
+ DECLARE_REG(phys_addr_t, paddr, host_ctxt, 4);
+ DECLARE_REG(size_t, pgsize, host_ctxt, 5);
+ DECLARE_REG(size_t, pgcount, host_ctxt, 6);
+ DECLARE_REG(unsigned int, prot, host_ctxt, 7);
+
+ cpu_reg(host_ctxt, 1) = kvm_iommu_map_pages(iommu, domain, iova, paddr,
+ pgsize, pgcount, prot);
+}
+
+static void handle___pkvm_host_iommu_unmap_pages(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+ DECLARE_REG(unsigned long, iova, host_ctxt, 3);
+ DECLARE_REG(size_t, pgsize, host_ctxt, 4);
+ DECLARE_REG(size_t, pgcount, host_ctxt, 5);
+
+ cpu_reg(host_ctxt, 1) = kvm_iommu_unmap_pages(iommu, domain, iova,
+ pgsize, pgcount);
+}
+
+static void handle___pkvm_host_iommu_iova_to_phys(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+ DECLARE_REG(unsigned long, iova, host_ctxt, 3);
+
+ cpu_reg(host_ctxt, 1) = kvm_iommu_iova_to_phys(iommu, domain, iova);
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -1093,6 +1163,13 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_vcpu_load),
HANDLE_FUNC(__pkvm_vcpu_put),
HANDLE_FUNC(__pkvm_vcpu_sync_state),
+ HANDLE_FUNC(__pkvm_host_iommu_alloc_domain),
+ HANDLE_FUNC(__pkvm_host_iommu_free_domain),
+ HANDLE_FUNC(__pkvm_host_iommu_attach_dev),
+ HANDLE_FUNC(__pkvm_host_iommu_detach_dev),
+ HANDLE_FUNC(__pkvm_host_iommu_map_pages),
+ HANDLE_FUNC(__pkvm_host_iommu_unmap_pages),
+ HANDLE_FUNC(__pkvm_host_iommu_iova_to_phys),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)

The unprivileged host IOMMU driver forwards some of the IOMMU API calls to
the hypervisor, which installs and populates the page tables.

Note that this is not a stable ABI. Those hypercalls change with the kernel
just like internal function calls.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 virt/kvm/Kconfig                        |  3 +
 arch/arm64/include/asm/kvm_asm.h        |  7 +++
 arch/arm64/kvm/hyp/include/nvhe/iommu.h | 68 ++++++++++++++++++++++
 arch/arm64/kvm/hyp/nvhe/hyp-main.c      | 77 +++++++++++++++++++++++++
 4 files changed, 155 insertions(+)
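
For illustration only (not part of this patch): a minimal sketch of how the
host-side driver might forward one of these calls through the existing
kvm_call_hyp_nvhe() helper. The wrapper name below is hypothetical; only
__pkvm_host_iommu_map_pages and kvm_call_hyp_nvhe() come from the kernel.

/* Hypothetical host-side wrapper; traps to EL2 with the hypercall above. */
static int pkvm_iommu_map_sketch(pkvm_handle_t iommu_id,
				 pkvm_handle_t domain_id,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount, int prot)
{
	/*
	 * handle___pkvm_host_iommu_map_pages() reads these arguments from
	 * the saved host registers and returns the result of
	 * kvm_iommu_map_pages() in register 1.
	 */
	return kvm_call_hyp_nvhe(__pkvm_host_iommu_map_pages, iommu_id,
				 domain_id, iova, paddr, pgsize, pgcount,
				 prot);
}

Since the hypercalls are not a stable ABI, the caller and the hypervisor are
always built from the same tree, so the argument layout used above is free to
change between kernel releases.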