@@ -78,6 +78,8 @@
struct dma_iova_state {
struct device *dev;
+ dma_addr_t addr;
+ size_t size;
enum dma_data_direction dir;
};
@@ -115,6 +117,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
+ size_t size);
+void dma_free_iova(struct dma_iova_state *state);
+
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
size_t offset, size_t size, enum dma_data_direction dir,
unsigned long attrs);
@@ -164,6 +170,14 @@ void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
+static inline int dma_alloc_iova_unaligned(struct dma_iova_state *state,
+ phys_addr_t phys, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+static inline void dma_free_iova(struct dma_iova_state *state)
+{
+}
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
struct page *page, size_t offset, size_t size,
enum dma_data_direction dir, unsigned long attrs)
@@ -370,6 +384,10 @@ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+static inline int dma_alloc_iova(struct dma_iova_state *state, size_t size)
+{
+ return dma_alloc_iova_unaligned(state, 0, size);
+}
struct page *dma_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
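
A minimal sketch of how a driver might set up the new state before allocating IOVA space: the hunks above add the dev, addr, size and dir members but show no dedicated initializer, so this assumes the caller fills dev and dir directly; the my_ctx structure and my_init_iova_state() helper are hypothetical names.

	#include <linux/dma-mapping.h>

	/* Hypothetical driver context embedding the new IOVA state. */
	struct my_ctx {
		struct device *dev;
		struct dma_iova_state iova;
	};

	/*
	 * Assumed setup: no initializer is added by this patch, so the state
	 * is populated by direct assignment before any IOVA allocation call.
	 */
	static void my_init_iova_state(struct my_ctx *ctx,
				       enum dma_data_direction dir)
	{
		ctx->iova.dev = ctx->dev;
		ctx->iova.dir = dir;
	}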
@@ -951,3 +951,38 @@ unsigned long dma_get_merge_boundary(struct device *dev)
return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
+
+/**
+ * dma_alloc_iova_unaligned - Allocate an IOVA space
+ * @state: IOVA state
+ * @phys: physical address
+ * @size: IOVA size
+ *
+ * Allocate an IOVA space for the given IOVA state and size. The IOVA space
+ * is sized for the worst case, i.e. as if the whole range will be used.
+ */
+int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
+ size_t size)
+{
+ if (!use_dma_iommu(state->dev))
+ return 0;
+
+ WARN_ON_ONCE(!size);
+ return iommu_dma_alloc_iova(state, phys, size);
+}
+EXPORT_SYMBOL_GPL(dma_alloc_iova_unaligned);
+
+/**
+ * dma_free_iova - Free an IOVA space
+ * @state: IOVA state
+ *
+ * Free the IOVA space previously allocated for the given IOVA state.
+ */
+void dma_free_iova(struct dma_iova_state *state)
+{
+ if (!use_dma_iommu(state->dev))
+ return;
+
+ iommu_dma_free_iova(state);
+}
+EXPORT_SYMBOL_GPL(dma_free_iova);
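
A minimal sketch of a possible caller of the two new exports, assuming that a 0 return on the non-IOMMU path means "nothing reserved, keep using the regular mapping API" and that the reserved range is published through the new state->addr and state->size fields; my_reserve_iova(), my_release_iova() and their parameters are hypothetical.

	static int my_reserve_iova(struct dma_iova_state *state,
				   phys_addr_t phys, size_t size)
	{
		int ret;

		/*
		 * Reserve IOVA space up front; on non-IOMMU configurations
		 * this returns 0 without reserving anything.
		 */
		ret = dma_alloc_iova_unaligned(state, phys, size);
		if (ret)
			return ret;

		/*
		 * Pages would be linked into the reserved range here; that
		 * step is not part of this patch and is omitted.
		 */
		return 0;
	}

	static void my_release_iova(struct dma_iova_state *state)
	{
		/* No-op when no IOMMU is in use. */
		dma_free_iova(state);
	}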