@@ -331,6 +331,98 @@ static bool kvm_arm_smmu_capable(struct device *dev, enum iommu_cap cap)
}
}
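+
+/*
+ * Map a range of pages by forwarding the request to the pKVM hypervisor.
+ * On failure, @total_mapped still reflects the number of bytes mapped, so
+ * the IOMMU core can unwind the partial mapping.
+ */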
+static int kvm_arm_smmu_map_pages(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t pgsize, size_t pgcount, int prot,
+ gfp_t gfp, size_t *total_mapped)
+{
+ size_t mapped;
+ size_t size = pgsize * pgcount;
+ struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
+ struct arm_smccc_res res;
+
+ do {
+ res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_map_pages,
+ kvm_smmu_domain->id,
+ iova, paddr, pgsize, pgcount, prot);
+ mapped = res.a1;
+ iova += mapped;
+ paddr += mapped;
+ WARN_ON(mapped % pgsize);
+ WARN_ON(mapped > pgcount * pgsize);
+ pgcount -= mapped / pgsize;
+ *total_mapped += mapped;
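+
+		/*
+		 * The hypervisor can map fewer pages than asked for, e.g.
+		 * when it runs out of memcache pages to build new tables.
+		 * Retry as long as topping up the memcache succeeds.
+		 */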
+ } while (*total_mapped < size && !kvm_arm_smmu_topup_memcache(&res, gfp));
+ if (*total_mapped < size)
+ return -EINVAL;
+
+ return 0;
+}
+
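+/*
+ * Unmap a range of pages via the pKVM hypervisor. @iotlb_gather is unused
+ * here: TLB invalidation is assumed to be performed by the hypervisor as
+ * part of the unmap hypercall, since the host does not drive the SMMU
+ * directly.
+ */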
+static size_t kvm_arm_smmu_unmap_pages(struct iommu_domain *domain,
+ unsigned long iova, size_t pgsize,
+ size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather)
+{
+ size_t unmapped;
+ size_t total_unmapped = 0;
+ size_t size = pgsize * pgcount;
+ struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
+ struct arm_smccc_res res;
+
+ do {
+ res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_unmap_pages,
+ kvm_smmu_domain->id,
+ iova, pgsize, pgcount);
+ unmapped = res.a1;
+ total_unmapped += unmapped;
+ iova += unmapped;
+ WARN_ON(unmapped % pgsize);
+ pgcount -= unmapped / pgsize;
+
+ /*
+ * The page table driver can unmap less than we asked for. If it
+ * didn't unmap anything at all, then it either reached the end
+ * of the range, or it needs a page in the memcache to break a
+ * block mapping.
+ */
+ } while (total_unmapped < size &&
+ (unmapped || !kvm_arm_smmu_topup_memcache(&res, GFP_ATOMIC)));
+
+ return total_unmapped;
+}
+
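+/*
+ * The io-pgtables live in hypervisor-owned memory, so the walk is proxied
+ * to EL2 rather than performed by the host.
+ */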
+static phys_addr_t kvm_arm_smmu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
+
+	return kvm_call_hyp_nvhe(__pkvm_host_iommu_iova_to_phys,
+				 kvm_smmu_domain->id, iova);
+}
+
static struct iommu_ops kvm_arm_smmu_ops = {
.capable = kvm_arm_smmu_capable,
.device_group = arm_smmu_device_group,
@@ -344,6 +436,9 @@ static struct iommu_ops kvm_arm_smmu_ops = {
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = kvm_arm_smmu_attach_dev,
.free = kvm_arm_smmu_domain_free,
+ .map_pages = kvm_arm_smmu_map_pages,
+ .unmap_pages = kvm_arm_smmu_unmap_pages,
+ .iova_to_phys = kvm_arm_smmu_iova_to_phys,
}
};