@@ -1628,4 +1628,15 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
unsigned long __pkvm_reclaim_hyp_alloc_mgt(unsigned long nr_pages);
+struct kvm_iommu_driver {
+ int (*init_driver)(void);
+ void (*remove_driver)(void);
+};
+
+struct kvm_iommu_ops;
+int kvm_iommu_register_driver(struct kvm_iommu_driver *kern_ops,
+ struct kvm_iommu_ops *el2_ops);
+int kvm_iommu_init_driver(void);
+void kvm_iommu_remove_driver(void);
+
#endif /* __ARM64_KVM_HOST_H__ */
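
A hedged sketch of how a kernel-side driver might consume this interface. The my_iommu_* names, the nVHE symbol my_iommu_el2_ops, the hyp-VA conversion and the initcall level are all illustrative assumptions, not something this hunk defines:

#include <linux/init.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <asm/kvm_mmu.h>

/*
 * Illustrative registration sketch; my_iommu_* and the nVHE symbol
 * my_iommu_el2_ops are hypothetical and not part of this patch.
 */
static int my_iommu_init_driver(void)
{
	/* Probe the hardware, donate memory to the hypervisor, etc. */
	return 0;
}

static void my_iommu_remove_driver(void)
{
	/* Undo whatever my_iommu_init_driver() set up. */
}

static struct kvm_iommu_driver my_iommu_driver = {
	.init_driver	= my_iommu_init_driver,
	.remove_driver	= my_iommu_remove_driver,
};

/* Ops provided by the driver's nVHE (EL2) object, see the header below. */
extern struct kvm_iommu_ops kvm_nvhe_sym(my_iommu_el2_ops);

static int __init my_iommu_register(void)
{
	/*
	 * The el2_ops pointer is dereferenced at EL2, so it is assumed
	 * here that the caller passes the hyp VA of the nVHE symbol.
	 */
	return kvm_iommu_register_driver(&my_iommu_driver,
			kern_hyp_va(lm_alias(&kvm_nvhe_sym(my_iommu_el2_ops))));
}
/* Assumed to run before kvm_hyp_init_protection(), hence an early initcall. */
core_initcall(my_iommu_register);

Registration has to happen before kvm_hyp_init_protection() reaches kvm_iommu_init_driver() (see the arm.c hunk below), which is why the sketch uses an early initcall rather than a regular module_init().
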
@@ -23,7 +23,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
vgic/vgic-v3.o vgic/vgic-v4.o \
vgic/vgic-mmio.o vgic/vgic-mmio-v2.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
- vgic/vgic-its.o vgic/vgic-debug.o
+ vgic/vgic-its.o vgic/vgic-debug.o iommu.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
kvm-$(CONFIG_ARM64_PTR_AUTH) += pauth.o
@@ -2510,9 +2510,15 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
if (ret)
return ret;
+ ret = kvm_iommu_init_driver();
+ if (ret < 0)
+ return ret;
+
ret = do_pkvm_init(hyp_va_bits);
- if (ret)
+ if (ret) {
+ kvm_iommu_remove_driver();
return ret;
+ }
free_hyp_pgds();
new file mode 100644
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARM64_KVM_NVHE_IOMMU_H__
+#define __ARM64_KVM_NVHE_IOMMU_H__
+
+#include <asm/kvm_host.h>
+
+struct kvm_iommu_ops {
+ int (*init)(void);
+};
+
+int kvm_iommu_init(void);
+
+#endif /* __ARM64_KVM_NVHE_IOMMU_H__ */
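
For the EL2 half, the driver's nVHE object would provide the kvm_iommu_ops instance that the registration above points at; kvm_iommu_init() (added below) invokes its init() callback when the hypervisor is finalised. A minimal sketch, with my_iommu_el2_ops and my_iommu_hyp_init as hypothetical names:

#include <nvhe/iommu.h>

/* Hypothetical EL2 ops; .init runs once, from __pkvm_init_finalise(). */
static int my_iommu_hyp_init(void)
{
	/* Take ownership of the IOMMU from the host, map its MMIO, etc. */
	return 0;
}

struct kvm_iommu_ops my_iommu_el2_ops = {
	.init = my_iommu_hyp_init,
};

The ops structure needs external linkage, since the kernel side takes its address through kvm_nvhe_sym().
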
@@ -8,7 +8,7 @@ CFLAGS_switch.nvhe.o += -Wno-override-init
hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
hyp-main.o hyp-smp.o psci-relay.o alloc.o early_alloc.o page_alloc.o \
cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o \
- serial.o alloc_mgt.o
+ serial.o alloc_mgt.o iommu/iommu.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
new file mode 100644
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOMMU operations for pKVM
+ *
+ * Copyright (C) 2022 Linaro Ltd.
+ */
+#include <nvhe/iommu.h>
+
+/* Only one set of ops is supported, similarly to the kernel */
+struct kvm_iommu_ops *kvm_iommu_ops;
+
+int kvm_iommu_init(void)
+{
+ if (!kvm_iommu_ops || !kvm_iommu_ops->init)
+ return -ENODEV;
+
+ return kvm_iommu_ops->init();
+}
@@ -14,6 +14,7 @@
#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/gfp.h>
+#include <nvhe/iommu.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
@@ -360,6 +361,10 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
+ ret = kvm_iommu_init();
+ if (ret)
+ goto out;
+
ret = fix_host_ownership();
if (ret)
goto out;
new file mode 100644
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Google LLC
+ * Author: Mostafa Saleh <smostafa@google.com>
+ */
+
+#include <asm/kvm_mmu.h>
+#include <linux/kvm_host.h>
+
+struct kvm_iommu_driver *iommu_driver;
+extern struct kvm_iommu_ops *kvm_nvhe_sym(kvm_iommu_ops);
+
+int kvm_iommu_register_driver(struct kvm_iommu_driver *kern_ops, struct kvm_iommu_ops *el2_ops)
+{
+ int ret;
+
+ if (WARN_ON(!kern_ops || !el2_ops))
+ return -EINVAL;
+
+ /*
+	 * Paired with smp_load_acquire(&iommu_driver).
+	 * Ensure memory stores happening during driver init
+	 * are observed before executing KVM IOMMU callbacks.
+ */
+ ret = cmpxchg_release(&iommu_driver, NULL, kern_ops) ? -EBUSY : 0;
+ if (ret)
+ return ret;
+
+ kvm_nvhe_sym(kvm_iommu_ops) = el2_ops;
+ return 0;
+}
+
+int kvm_iommu_init_driver(void)
+{
+ if (WARN_ON(!smp_load_acquire(&iommu_driver))) {
+ kvm_err("pKVM enabled without an IOMMU driver, do not run confidential workloads in virtual machines\n");
+ return -ENODEV;
+ }
+
+ return iommu_driver->init_driver();
+}
+
+void kvm_iommu_remove_driver(void)
+{
+ if (smp_load_acquire(&iommu_driver))
+ iommu_driver->remove_driver();
+}