@@ -107,6 +107,13 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_refill,
__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaimable,
__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaim,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_alloc_domain,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_free_domain,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_attach_dev,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_detach_dev,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_map_pages,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_unmap_pages,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_iova_to_phys,
/*
* Start of the dynamically registered hypercalls. Start a bit
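The new enum entries above become real SMCCC function IDs through macros that already live earlier in this header; adding the entries is all that is needed to allocate IDs. For reference, the pre-existing definitions look roughly like this (quoted for context, not part of this diff):

	#define KVM_HOST_SMCCC_ID(id)						\
		ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
				   ARM_SMCCC_SMC_64,				\
				   ARM_SMCCC_OWNER_VENDOR_HYP,			\
				   (id))

	#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

Because the IDs are derived from enum position, host and hypervisor must come from the same kernel image, which is already a standing pKVM assumption.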
@@ -4,6 +4,20 @@
#include <asm/kvm_host.h>
+/* Hypercall handlers */
+int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type);
+int kvm_iommu_free_domain(pkvm_handle_t domain_id);
+int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ u32 endpoint_id, u32 pasid, u32 pasid_bits);
+int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ u32 endpoint_id, u32 pasid);
+size_t kvm_iommu_map_pages(pkvm_handle_t domain_id,
+ unsigned long iova, phys_addr_t paddr, size_t pgsize,
+ size_t pgcount, int prot);
+size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
+ size_t pgsize, size_t pgcount);
+phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova);
+
struct kvm_iommu_ops {
int (*init)(void);
};
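The host-side callers of these hypercalls are not part of this change. As a rough sketch only, assuming the generic kvm_call_hyp_nvhe() wrapper from asm/kvm_host.h (the eventual host IOMMU driver may instead use a dedicated helper that also consumes the hyp_reqs data encoded alongside the return value), creating a domain and attaching an endpoint could look something like the function below. The function name and its parameters are placeholders invented for illustration:

	static int pkvm_iommu_example_attach(pkvm_handle_t iommu_id,
					     pkvm_handle_t domain_id, int type,
					     u32 endpoint_id, u32 pasid,
					     u32 pasid_bits)
	{
		int ret;

		/* Ask the hypervisor to create the domain... */
		ret = kvm_call_hyp_nvhe(__pkvm_host_iommu_alloc_domain, domain_id, type);
		if (ret)
			return ret;

		/* ...then attach one endpoint of the given IOMMU to it. */
		return kvm_call_hyp_nvhe(__pkvm_host_iommu_attach_dev, iommu_id,
					 domain_id, endpoint_id, pasid, pasid_bits);
	}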
@@ -19,6 +19,7 @@
#include <nvhe/alloc.h>
#include <nvhe/alloc_mgt.h>
#include <nvhe/ffa.h>
+#include <nvhe/iommu.h>
#include <nvhe/mem_protect.h>
#include <nvhe/modules.h>
#include <nvhe/mm.h>
@@ -1592,6 +1593,87 @@ static void handle___pkvm_hyp_alloc_mgt_reclaim(struct kvm_cpu_context *host_ctx
cpu_reg(host_ctxt, 2) = mc.nr_pages;
}
+static void handle___pkvm_host_iommu_alloc_domain(struct kvm_cpu_context *host_ctxt)
+{
+ int ret;
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
+ DECLARE_REG(int, type, host_ctxt, 2);
+
+ ret = kvm_iommu_alloc_domain(domain, type);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_free_domain(struct kvm_cpu_context *host_ctxt)
+{
+ int ret;
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
+
+ ret = kvm_iommu_free_domain(domain);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_attach_dev(struct kvm_cpu_context *host_ctxt)
+{
+ int ret;
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+ DECLARE_REG(unsigned int, endpoint, host_ctxt, 3);
+ DECLARE_REG(unsigned int, pasid, host_ctxt, 4);
+ DECLARE_REG(unsigned int, pasid_bits, host_ctxt, 5);
+
+ ret = kvm_iommu_attach_dev(iommu, domain, endpoint,
+ pasid, pasid_bits);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_detach_dev(struct kvm_cpu_context *host_ctxt)
+{
+ int ret;
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+ DECLARE_REG(unsigned int, endpoint, host_ctxt, 3);
+ DECLARE_REG(unsigned int, pasid, host_ctxt, 4);
+
+ ret = kvm_iommu_detach_dev(iommu, domain, endpoint, pasid);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_map_pages(struct kvm_cpu_context *host_ctxt)
+{
+ unsigned long ret;
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
+ DECLARE_REG(unsigned long, iova, host_ctxt, 2);
+ DECLARE_REG(phys_addr_t, paddr, host_ctxt, 3);
+ DECLARE_REG(size_t, pgsize, host_ctxt, 4);
+ DECLARE_REG(size_t, pgcount, host_ctxt, 5);
+ DECLARE_REG(unsigned int, prot, host_ctxt, 6);
+
+ ret = kvm_iommu_map_pages(domain, iova, paddr,
+ pgsize, pgcount, prot);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_unmap_pages(struct kvm_cpu_context *host_ctxt)
+{
+ unsigned long ret;
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
+ DECLARE_REG(unsigned long, iova, host_ctxt, 2);
+ DECLARE_REG(size_t, pgsize, host_ctxt, 3);
+ DECLARE_REG(size_t, pgcount, host_ctxt, 4);
+
+ ret = kvm_iommu_unmap_pages(domain, iova,
+ pgsize, pgcount);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_iova_to_phys(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
+ DECLARE_REG(unsigned long, iova, host_ctxt, 2);
+
+ cpu_reg(host_ctxt, 1) = kvm_iommu_iova_to_phys(domain, iova);
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -1649,6 +1731,13 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_hyp_alloc_mgt_refill),
HANDLE_FUNC(__pkvm_hyp_alloc_mgt_reclaimable),
HANDLE_FUNC(__pkvm_hyp_alloc_mgt_reclaim),
+ HANDLE_FUNC(__pkvm_host_iommu_alloc_domain),
+ HANDLE_FUNC(__pkvm_host_iommu_free_domain),
+ HANDLE_FUNC(__pkvm_host_iommu_attach_dev),
+ HANDLE_FUNC(__pkvm_host_iommu_detach_dev),
+ HANDLE_FUNC(__pkvm_host_iommu_map_pages),
+ HANDLE_FUNC(__pkvm_host_iommu_unmap_pages),
+ HANDLE_FUNC(__pkvm_host_iommu_iova_to_phys),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
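handle_host_hcall(), whose opening line appears as trailing context above, is the pre-existing dispatcher that routes a host HVC to these new entries; it is untouched by this patch. In simplified form (the real function also deals with pKVM initialisation ordering and with the dynamically registered range mentioned in kvm_asm.h), the lookup is roughly:

	hcall_t hfn;

	id -= KVM_HOST_SMCCC_ID(0);
	if (id >= ARRAY_SIZE(host_hcall) || !(hfn = host_hcall[id]))
		goto inval;	/* reported back as SMCCC_RET_NOT_SUPPORTED */

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
	hfn(host_ctxt);

Because HANDLE_FUNC() uses designated initializers keyed on the __KVM_HOST_SMCCC_FUNC_* value, the only requirement is that every new enum entry gains a matching HANDLE_FUNC() line; a missing one leaves a NULL slot and the call is rejected.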
@@ -16,3 +16,43 @@ int kvm_iommu_init(void)
return kvm_iommu_ops->init();
}
+
+int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type)
+{
+ return -ENODEV;
+}
+
+int kvm_iommu_free_domain(pkvm_handle_t domain_id)
+{
+ return -ENODEV;
+}
+
+int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ u32 endpoint_id, u32 pasid, u32 pasid_bits)
+{
+ return -ENODEV;
+}
+
+int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ u32 endpoint_id, u32 pasid)
+{
+ return -ENODEV;
+}
+
+size_t kvm_iommu_map_pages(pkvm_handle_t domain_id,
+ unsigned long iova, phys_addr_t paddr, size_t pgsize,
+ size_t pgcount, int prot)
+{
+ return 0;
+}
+
+size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
+ size_t pgsize, size_t pgcount)
+{
+ return 0;
+}
+
+phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova)
+{
+ return 0;
+}
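For now these are deliberate stubs: the control operations fail with -ENODEV and the map/unmap/iova_to_phys paths report that nothing was done, so the hypercall plumbing is complete but has no effect until an IOMMU driver registers itself. Once kvm_iommu_ops grows per-domain callbacks in a later patch, one plausible shape for the first stub would be the sketch below; the alloc_domain callback is a placeholder name, not something this series defines:

	int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type)
	{
		/* 'alloc_domain' is a hypothetical future kvm_iommu_ops callback. */
		if (!kvm_iommu_ops || !kvm_iommu_ops->alloc_domain)
			return -ENODEV;

		return kvm_iommu_ops->alloc_domain(domain_id, type);
	}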