@@ -86,6 +86,9 @@ struct dma_map_ops {
size_t (*max_mapping_size)(struct device *dev);
size_t (*opt_mapping_size)(void);
unsigned long (*get_merge_boundary)(struct device *dev);
+
+ dma_addr_t (*alloc_iova)(struct device *dev, size_t size);
+ void (*free_iova)(struct device *dev, dma_addr_t dma_addr, size_t size);
};
#ifdef CONFIG_DMA_OPS
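
For reference, a dma_map_ops provider would back the two new callbacks with its own IOVA allocator. The sketch below is hypothetical and not part of this patch: the mock_* names and the trivial bump allocator only illustrate the callback contract, while a real provider such as dma-iommu would draw from its per-domain IOVA caches.

#include <linux/dma-map-ops.h>
#include <linux/sizes.h>

/* Hypothetical: a bump allocator standing in for a real IOVA allocator. */
static atomic64_t mock_iova_cursor = ATOMIC64_INIT(SZ_1G);

static dma_addr_t mock_alloc_iova(struct device *dev, size_t size)
{
	/* Hand out a worst-case, page-aligned window of IOVA space. */
	return atomic64_fetch_add(ALIGN(size, PAGE_SIZE), &mock_iova_cursor);
}

static void mock_free_iova(struct device *dev, dma_addr_t dma_addr,
			   size_t size)
{
	/* A bump allocator never reclaims; a real one would return the range. */
}

static const struct dma_map_ops mock_dma_ops = {
	/* ... map_page/unmap_page and friends elided ... */
	.alloc_iova	= mock_alloc_iova,
	.free_iova	= mock_free_iova,
};
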
@@ -90,6 +90,16 @@ struct dma_memory_type {
struct dev_pagemap *p2p_pgmap;
};
+struct dma_iova_attrs {
+ /* OUT field, set by dma_alloc_iova() */
+ dma_addr_t addr;
+ /* IN fields */
+ struct device *dev;
+ size_t size;
+ enum dma_data_direction dir;
+ unsigned long attrs;
+};
+
#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
@@ -115,6 +125,9 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+int dma_alloc_iova(struct dma_iova_attrs *iova);
+void dma_free_iova(struct dma_iova_attrs *iova);
+
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
size_t offset, size_t size, enum dma_data_direction dir,
unsigned long attrs);
@@ -166,6 +179,13 @@ int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
void dma_get_memory_type(struct page *page, struct dma_memory_type *type);
#else /* CONFIG_HAS_DMA */
+static inline int dma_alloc_iova(struct dma_iova_attrs *iova)
+{
+ return -EOPNOTSUPP;
+}
+static inline void dma_free_iova(struct dma_iova_attrs *iova)
+{
+}
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
struct page *page, size_t offset, size_t size,
enum dma_data_direction dir, unsigned long attrs)
@@ -924,3 +924,47 @@ void dma_get_memory_type(struct page *page, struct dma_memory_type *type)
type->type = DMA_MEMORY_TYPE_NORMAL;
}
EXPORT_SYMBOL_GPL(dma_get_memory_type);
+
+/**
+ * dma_alloc_iova - Allocate an IOVA space
+ * @iova: IOVA attributes
+ *
+ * Allocate an IOVA space for the given IOVA attributes. The IOVA space
+ * is allocated for the worst case, assuming the whole range is used.
+ */
+int dma_alloc_iova(struct dma_iova_attrs *iova)
+{
+ struct device *dev = iova->dev;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ /* The dma_map_direct() check is for HMM range fault callers */
+ if (dma_map_direct(dev, ops) || !ops->alloc_iova) {
+ iova->addr = 0;
+ return 0;
+ }
+
+ iova->addr = ops->alloc_iova(dev, iova->size);
+ if (dma_mapping_error(dev, iova->addr))
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_alloc_iova);
+
+/**
+ * dma_free_iova - Free an IOVA space
+ * @iova: IOVA attributes
+ *
+ * Free the IOVA space previously allocated by dma_alloc_iova().
+ */
+void dma_free_iova(struct dma_iova_attrs *iova)
+{
+ struct device *dev = iova->dev;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (dma_map_direct(dev, ops) || !ops->free_iova || !iova->addr)
+ return;
+
+ ops->free_iova(dev, iova->addr, iova->size);
+}
+EXPORT_SYMBOL_GPL(dma_free_iova);
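
From the caller's side, usage is a reserve/release pair around whatever mapping the driver performs. A minimal sketch, assuming a driver-owned dev and size (both illustrative, not defined by this patch):

	struct dma_iova_attrs iova = {
		.dev	= dev,		/* illustrative: the driver's device */
		.size	= size,		/* illustrative: total transfer length */
		.dir	= DMA_BIDIRECTIONAL,
	};
	int ret;

	ret = dma_alloc_iova(&iova);
	if (ret)
		return ret;

	/*
	 * iova.addr == 0 means the device is direct-mapped and the caller
	 * stays on the regular per-page mapping path; otherwise pages are
	 * mapped into the reserved window starting at iova.addr.
	 */

	/* ... map pages and run the DMA ... */

	dma_free_iova(&iova);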