diff mbox series

[RFC,6/9] DMA-API: Add dma_sync_bvecs_for_cpu() and dma_sync_bvecs_for_device()

Message ID 169772917192.5232.2827727564287466466.stgit@klimt.1015granger.net (mailing list archive)
State Not Applicable
Headers show
Series Exploring biovec support in (R)DMA API | expand

Commit Message

Chuck Lever Oct. 19, 2023, 3:26 p.m. UTC
From: Chuck Lever <chuck.lever@oracle.com>

Add dma_sync_bvecs_for_cpu() and dma_sync_bvecs_for_device(), bio_vec
counterparts of the existing dma_sync_sg_* helpers, so callers working
with bio_vec arrays can hand buffer ownership between the CPU and the
device without first converting to a scatterlist.

Cc: iommu@lists.linux.dev
Cc: linux-rdma@vger.kernel.org
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 include/linux/dma-map-ops.h |    4 ++++
 include/linux/dma-mapping.h |    4 ++++
 kernel/dma/mapping.c        |   28 ++++++++++++++++++++++++++++
 3 files changed, 36 insertions(+)
diff mbox series

Patch

diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index f2fc203fb8a1..de2a50d9207a 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -75,6 +75,10 @@  struct dma_map_ops {
 			int nents, enum dma_data_direction dir);
 	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir);
+	void (*sync_bvecs_for_cpu)(struct device *dev, struct bio_vec *bvecs,
+			int nents, enum dma_data_direction dir);
+	void (*sync_bvecs_for_device)(struct device *dev, struct bio_vec *bvecs,
+			int nents, enum dma_data_direction dir);
 	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
 			enum dma_data_direction direction);
 	int (*dma_supported)(struct device *dev, u64 mask);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f511ec546f4d..9fb422f376b6 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -126,6 +126,10 @@  void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		    int nelems, enum dma_data_direction dir);
 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		       int nelems, enum dma_data_direction dir);
+void dma_sync_bvecs_for_cpu(struct device *dev, struct bio_vec *bvecs,
+			    int nelems, enum dma_data_direction dir);
+void dma_sync_bvecs_for_device(struct device *dev, struct bio_vec *bvecs,
+			       int nelems, enum dma_data_direction dir);
 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flag, unsigned long attrs);
 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index e323ca48f7f2..94cffc9b45a5 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -385,6 +385,48 @@  void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
+/**
+ * dma_sync_bvecs_for_cpu - make DMA'd bio_vec buffers visible to the CPU
+ * @dev: device that performed the DMA
+ * @bvecs: array of bio_vecs that was mapped for DMA
+ * @nelems: number of entries in @bvecs
+ * @dir: DMA direction used for the original mapping
+ *
+ * bio_vec counterpart of dma_sync_sg_for_cpu(): hand ownership of the
+ * buffers back to the CPU so it observes any data the device wrote.
+ */
+void dma_sync_bvecs_for_cpu(struct device *dev, struct bio_vec *bvecs,
+			    int nelems, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_sync_bvecs_for_cpu(dev, bvecs, nelems, dir);
+	else if (ops->sync_bvecs_for_cpu)
+		ops->sync_bvecs_for_cpu(dev, bvecs, nelems, dir);
+	debug_dma_sync_bvecs_for_cpu(dev, bvecs, nelems, dir);
+}
+EXPORT_SYMBOL(dma_sync_bvecs_for_cpu);
+
+/**
+ * dma_sync_bvecs_for_device - hand DMA'd bio_vec buffers back to the device
+ * @dev: device that will perform the DMA
+ * @bvecs: array of bio_vecs that was mapped for DMA
+ * @nelems: number of entries in @bvecs
+ * @dir: DMA direction used for the original mapping
+ *
+ * bio_vec counterpart of dma_sync_sg_for_device(): flush CPU-side writes
+ * so the device observes them when it next accesses the buffers.
+ */
+void dma_sync_bvecs_for_device(struct device *dev, struct bio_vec *bvecs,
+			       int nelems, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_sync_bvecs_for_device(dev, bvecs, nelems, dir);
+	else if (ops->sync_bvecs_for_device)
+		ops->sync_bvecs_for_device(dev, bvecs, nelems, dir);
+	debug_dma_sync_bvecs_for_device(dev, bvecs, nelems, dir);
+}
+EXPORT_SYMBOL(dma_sync_bvecs_for_device);
+
 /*
  * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
  * that the intention is to allow exporting memory allocated via the
 /*
  * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
  * that the intention is to allow exporting memory allocated via the