[RFC,5/9] dma-direct: Support direct mapping bio_vec arrays

Message ID: 169772916546.5232.14817964507475231582.stgit@klimt.1015granger.net
State: Not Applicable
Series: Exploring biovec support in (R)DMA API

Commit Message

Chuck Lever Oct. 19, 2023, 3:26 p.m. UTC
From: Chuck Lever <chuck.lever@oracle.com>

Add dma-direct implementations of the map, unmap, and sync
operations for arrays of bio_vecs, mirroring the existing
scatterlist paths. Peer-to-peer DMA mapping support is left for
a later patch.

Cc: iommu@lists.linux.dev
Cc: linux-rdma@vger.kernel.org
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 kernel/dma/direct.c |   93 +++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/dma/direct.h |   17 +++++++++
 2 files changed, 110 insertions(+)
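
For context, here is a rough sketch of how a consumer might drive these
helpers once the rest of the series wires them up. The dma_map_bvecs()/
dma_unmap_bvecs() wrapper names are assumptions based on the shape of this
series (the dma_direct_* functions below are internal to kernel/dma and not
called by drivers directly), and bv_dma_address()/bv_dma_len() come from an
earlier patch in the series; treat the exact signatures as illustrative
rather than settled API.

/*
 * Illustrative sketch only: dma_map_bvecs() and dma_unmap_bvecs() are
 * assumed wrappers mirroring dma_map_sg_attrs() and dma_unmap_sg_attrs();
 * they are not quoted from this patch.
 */
#include <linux/bvec.h>
#include <linux/dma-mapping.h>

static int example_send(struct device *dev, struct bio_vec *bvecs,
			int nents)
{
	int i, count;

	/* Map the whole bio_vec array for device read access */
	count = dma_map_bvecs(dev, bvecs, nents, DMA_TO_DEVICE, 0);
	if (count < 0)
		return count;

	/*
	 * Hand each mapped segment to the device, for example while
	 * building an RDMA work request.
	 */
	for (i = 0; i < count; i++)
		pr_debug("seg %d: %pad len %u\n", i,
			 &bvecs[i].bv_dma_address, bv_dma_len(&bvecs[i]));

	/* ... post the I/O and wait for it to complete ... */

	dma_unmap_bvecs(dev, bvecs, nents, DMA_TO_DEVICE, 0);
	return 0;
}

Note that, as in dma_direct_map_bvecs() below, a negative return value
reports a mapping failure, and the partially mapped prefix has already
been unwound by that point.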
Patch

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 9596ae1aa0da..7587c5c3d051 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -423,6 +423,28 @@  void dma_direct_sync_sg_for_device(struct device *dev,
 					dir);
 	}
 }
+
+void dma_direct_sync_bvecs_for_device(struct device *dev,
+		struct bio_vec *bvecs, int nents, enum dma_data_direction dir)
+{
+	struct bio_vec *bv;
+	int i;
+
+	for (i = 0; i < nents; i++) {
+		phys_addr_t paddr;
+
+		bv = &bvecs[i];
+		paddr = dma_to_phys(dev, bv_dma_address(bv));
+
+		if (unlikely(is_swiotlb_buffer(dev, paddr)))
+			swiotlb_sync_single_for_device(dev, paddr, bv->bv_len,
+						       dir);
+
+		if (!dev_is_dma_coherent(dev))
+			arch_sync_dma_for_device(paddr, bv->bv_len,
+					dir);
+	}
+}
 #endif
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
@@ -516,6 +538,77 @@  int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	return ret;
 }
 
+void dma_direct_sync_bvecs_for_cpu(struct device *dev,
+		struct bio_vec *bvecs, int nents, enum dma_data_direction dir)
+{
+	struct bio_vec *bv;
+	int i;
+
+	for (i = 0; i < nents; i++) {
+		phys_addr_t paddr;
+
+		bv = &bvecs[i];
+		paddr = dma_to_phys(dev, bv_dma_address(bv));
+
+		if (!dev_is_dma_coherent(dev))
+			arch_sync_dma_for_cpu(paddr, bv->bv_len, dir);
+
+		if (unlikely(is_swiotlb_buffer(dev, paddr)))
+			swiotlb_sync_single_for_cpu(dev, paddr, bv->bv_len,
+						    dir);
+
+		if (dir == DMA_FROM_DEVICE)
+			arch_dma_mark_clean(paddr, bv->bv_len);
+	}
+
+	if (!dev_is_dma_coherent(dev))
+		arch_sync_dma_for_cpu_all();
+}
+
+/*
+ * Unmaps segments, except for ones marked as pci_p2pdma which do not
+ * require any further action as they contain a bus address.
+ */
+void dma_direct_unmap_bvecs(struct device *dev, struct bio_vec *bvecs,
+			    int nents, enum dma_data_direction dir,
+			    unsigned long attrs)
+{
+	struct bio_vec *bv;
+	int i;
+
+	for (i = 0; i < nents; i++) {
+		bv = &bvecs[i];
+		if (bv_dma_is_bus_address(bv))
+			bv_dma_unmark_bus_address(bv);
+		else
+			dma_direct_unmap_page(dev, bv_dma_address(bv),
+					      bv_dma_len(bv), dir, attrs);
+	}
+}
+
+int dma_direct_map_bvecs(struct device *dev, struct bio_vec *bvecs, int nents,
+			 enum dma_data_direction dir, unsigned long attrs)
+{
+	struct bio_vec *bv;
+	int i;
+
+	/* p2p DMA mapping support can be added later */
+	for (i = 0; i < nents; i++) {
+		bv = &bvecs[i];
+		bv->bv_dma_address = dma_direct_map_page(dev, bv->bv_page,
+				bv->bv_offset, bv->bv_len, dir, attrs);
+		if (bv->bv_dma_address == DMA_MAPPING_ERROR)
+			goto out_unmap;
+		bv_dma_len(bv) = bv->bv_len;
+	}
+
+	return nents;
+
+out_unmap:
+	dma_direct_unmap_bvecs(dev, bvecs, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+	return -EIO;
+}
+
 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index 97ec892ea0b5..6db1ccd04d21 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -20,17 +20,26 @@  int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		enum dma_data_direction dir, unsigned long attrs);
+int dma_direct_map_bvecs(struct device *dev, struct bio_vec *bvecs, int nents,
+		enum dma_data_direction dir, unsigned long attrs);
 size_t dma_direct_max_mapping_size(struct device *dev);
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
     defined(CONFIG_SWIOTLB)
 void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction dir);
+void dma_direct_sync_bvecs_for_device(struct device *dev, struct bio_vec *bvecs,
+		int nents, enum dma_data_direction dir);
 #else
 static inline void dma_direct_sync_sg_for_device(struct device *dev,
 		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
 {
 }
+
+static inline void dma_direct_sync_bvecs_for_device(struct device *dev,
+		struct bio_vec *bvecs, int nents, enum dma_data_direction dir)
+{
+}
 #endif
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
@@ -40,6 +49,10 @@  void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction dir, unsigned long attrs);
 void dma_direct_sync_sg_for_cpu(struct device *dev,
 		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+void dma_direct_unmap_bvecs(struct device *dev, struct bio_vec *bvecs,
+		int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_sync_bvecs_for_cpu(struct device *dev,
+		struct bio_vec *bvecs, int nents, enum dma_data_direction dir);
 #else
 static inline void dma_direct_unmap_sg(struct device *dev,
 		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
@@ -50,6 +63,10 @@  static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
 		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
 {
 }
+static inline void dma_direct_sync_bvecs_for_cpu(struct device *dev,
+		struct bio_vec *bvecs, int nents, enum dma_data_direction dir)
+{
+}
 #endif
 
 static inline void dma_direct_sync_single_for_device(struct device *dev,
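
To make the sync pairing concrete, here is a matching sketch for a receive
path on a non-coherent device, where ownership of a long-lived mapped
buffer bounces between device and CPU. Again, dma_sync_bvecs_for_cpu() and
dma_sync_bvecs_for_device() are assumed public wrappers over the
dma_direct_sync_bvecs_for_*() helpers added above, following the existing
dma_sync_sg_for_*() convention; the names are illustrative.

/*
 * Illustrative sketch only: the dma_sync_bvecs_for_*() wrappers are
 * assumed to follow the dma_sync_sg_for_*() convention.
 */
static void example_recv_complete(struct device *dev,
				  struct bio_vec *bvecs, int nents)
{
	/*
	 * The device has finished writing: pass ownership to the CPU.
	 * On the dma-direct path this performs the swiotlb bounce-buffer
	 * copy-out and arch_sync_dma_for_cpu(), per segment.
	 */
	dma_sync_bvecs_for_cpu(dev, bvecs, nents, DMA_FROM_DEVICE);

	/* ... the CPU parses the received data ... */

	/* Hand the buffer back to the device for the next receive */
	dma_sync_bvecs_for_device(dev, bvecs, nents, DMA_FROM_DEVICE);
}

This mirrors how the scatterlist sync helpers are used today; the only
difference is that the DMA addresses and lengths live in the bio_vec
array itself.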