@@ -35,10 +35,39 @@ struct kvm_iommu_ops {
int (*init)(void);
int (*alloc_domain)(struct kvm_hyp_iommu_domain *domain, int type);
void (*free_domain)(struct kvm_hyp_iommu_domain *domain);
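+ /* Driver hooks to look up an IOMMU by ID and to attach/detach an endpoint. */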
+ struct kvm_hyp_iommu *(*get_iommu_by_id)(pkvm_handle_t iommu_id);
+ int (*attach_dev)(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_domain *domain,
+ u32 endpoint_id, u32 pasid, u32 pasid_bits);
+ int (*detach_dev)(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_domain *domain,
+ u32 endpoint_id, u32 pasid);
};

int kvm_iommu_init(void);
+int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu);
+
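+/*
+ * struct kvm_hyp_iommu is shared with the host, which only reserves storage
+ * for the lock; the hypervisor has exclusive use of it.
+ */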
+static inline hyp_spinlock_t *kvm_iommu_get_lock(struct kvm_hyp_iommu *iommu)
+{
+ /* See struct kvm_hyp_iommu */
+ BUILD_BUG_ON(sizeof(u32) != sizeof(hyp_spinlock_t));
+ return (hyp_spinlock_t *)(&iommu->lock);
+}
+
+static inline void kvm_iommu_lock_init(struct kvm_hyp_iommu *iommu)
+{
+ hyp_spin_lock_init(kvm_iommu_get_lock(iommu));
+}
+
+static inline void kvm_iommu_lock(struct kvm_hyp_iommu *iommu)
+{
+ hyp_spin_lock(kvm_iommu_get_lock(iommu));
+}
+
+static inline void kvm_iommu_unlock(struct kvm_hyp_iommu *iommu)
+{
+ hyp_spin_unlock(kvm_iommu_get_lock(iommu));
+}
+
extern struct hyp_mgt_allocator_ops kvm_iommu_allocator_ops;

#endif /* __ARM64_KVM_NVHE_IOMMU_H__ */
@@ -127,6 +127,19 @@ handle_to_domain(pkvm_handle_t domain_id)
return &domains[domain_id % KVM_IOMMU_DOMAINS_PER_PAGE];
}

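+/*
+ * Track users of a domain: domain_get() requires the domain to already be
+ * allocated (refs != 0) and guards against refcount overflow. A successful
+ * attach holds a reference that is dropped again by domain_put() on detach.
+ */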
+static int domain_get(struct kvm_hyp_iommu_domain *domain)
+{
+ int old = atomic_fetch_inc_acquire(&domain->refs);
+
+ BUG_ON(!old || (old + 1 < 0));
+ return 0;
+}
+
+static void domain_put(struct kvm_hyp_iommu_domain *domain)
+{
+ BUG_ON(!atomic_dec_return_release(&domain->refs));
+}
+
int kvm_iommu_init(void)
{
int ret;
@@ -210,13 +223,44 @@ int kvm_iommu_free_domain(pkvm_handle_t domain_id)
int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
u32 endpoint_id, u32 pasid, u32 pasid_bits)
{
- return -ENODEV;
+ int ret;
+ struct kvm_hyp_iommu *iommu;
+ struct kvm_hyp_iommu_domain *domain;
+
+ iommu = kvm_iommu_ops->get_iommu_by_id(iommu_id);
+ if (!iommu)
+ return -EINVAL;
+
+ domain = handle_to_domain(domain_id);
+ if (!domain || domain_get(domain))
+ return -EINVAL;
+
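+ /* On success the domain keeps a reference until the endpoint is detached. */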
+ ret = kvm_iommu_ops->attach_dev(iommu, domain, endpoint_id, pasid, pasid_bits);
+ if (ret)
+ domain_put(domain);
+ return ret;
}

int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
u32 endpoint_id, u32 pasid)
{
- return -ENODEV;
+ int ret;
+ struct kvm_hyp_iommu *iommu;
+ struct kvm_hyp_iommu_domain *domain;
+
+ iommu = kvm_iommu_ops->get_iommu_by_id(iommu_id);
+ if (!iommu)
+ return -EINVAL;
+
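+ /* refs <= 1 means no endpoint is attached, so there is nothing to detach. */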
+ domain = handle_to_domain(domain_id);
+ if (!domain || atomic_read(&domain->refs) <= 1)
+ return -EINVAL;
+
+ ret = kvm_iommu_ops->detach_dev(iommu, domain, endpoint_id, pasid);
+ if (ret)
+ return ret;
+ domain_put(domain);
+ return ret;
}

size_t kvm_iommu_map_pages(pkvm_handle_t domain_id,
@@ -236,3 +280,11 @@ phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova)
{
return 0;
}
+
+/* Must be called by the IOMMU driver, once per IOMMU instance. */
+int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu)
+{
+ kvm_iommu_lock_init(iommu);
+
+ return 0;
+}
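+
+/*
+ * Expected driver flow (sketch only; the "foo" names are hypothetical and not
+ * part of this patch): a hypervisor IOMMU driver supplies the kvm_iommu_ops
+ * callbacks and, from its own init path, calls kvm_iommu_init_device() once
+ * per instance:
+ *
+ *   static struct kvm_hyp_iommu foo_iommu;
+ *
+ *   static struct kvm_hyp_iommu *foo_get_iommu_by_id(pkvm_handle_t id)
+ *   {
+ *           return id ? NULL : &foo_iommu;
+ *   }
+ *
+ *   int foo_init(void)
+ *   {
+ *           return kvm_iommu_init_device(&foo_iommu);
+ *   }
+ */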
@@ -45,4 +45,12 @@ extern void **kvm_nvhe_sym(kvm_hyp_iommu_domains);
#define KVM_IOMMU_DOMAINS_ROOT_ORDER_NR \
(1 << get_order(KVM_IOMMU_DOMAINS_ROOT_SIZE))

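+/*
+ * Seen by both the host and the hypervisor; the host never takes the lock,
+ * it only reserves space for it so that both views share the same layout.
+ */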
+struct kvm_hyp_iommu {
+#ifdef __KVM_NVHE_HYPERVISOR__
+ hyp_spinlock_t lock;
+#else
+ u32 unused;
+#endif
+};
+
#endif /* __KVM_IOMMU_H */