
[v3,01/25] dma-mapping: add generic helpers for mapping sgtable objects

Message ID 20200505084614.30424-1-m.szyprowski@samsung.com (mailing list archive)
State New, archived
Series DRM: fix struct sg_table nents vs. orig_nents misuse

Commit Message

Marek Szyprowski May 5, 2020, 8:45 a.m. UTC
struct sg_table is a common structure used to describe a memory
buffer. It consists of a scatterlist with memory pages and DMA
addresses (the sgl entry), as well as two scatterlist entry counts: the
number of CPU pages (orig_nents) and the number of DMA-mapped entries
(nents).
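
For reference, struct sg_table (declared in include/linux/scatterlist.h)
looks roughly like this:

	struct sg_table {
		struct scatterlist *sgl;	/* the scatterlist itself */
		unsigned int nents;		/* number of DMA-mapped entries */
		unsigned int orig_nents;	/* number of CPU entries (pages) */
	};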

It turned out that it was a common mistake to misuse the nents and
orig_nents entries: to call dma-mapping functions with the wrong number
of entries, or to ignore the number of mapped entries returned by the
dma_map_sg function.

To avoid such issues, let's introduce common wrappers that operate
directly on struct sg_table objects and take care of the proper use of
the nents and orig_nents entries.
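
A minimal usage sketch (hypothetical driver code; allocating and
populating the sg_table is assumed to be done elsewhere) of the intended
calling convention:

	static int example_map_buffer(struct device *dev, struct sg_table *sgt)
	{
		int ret;

		/*
		 * Open-coded pattern this series replaces: the caller had to
		 * pass orig_nents and remember to store the returned count in
		 * sgt->nents:
		 *
		 *	sgt->nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents,
		 *				DMA_TO_DEVICE);
		 *	if (sgt->nents == 0)
		 *		return -EINVAL;
		 */

		/* With the new wrapper the nents/orig_nents handling is hidden: */
		ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE);
		if (ret)
			return ret;

		/* ... program the hardware using the sgt->nents mapped entries ... */

		dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE);
		return 0;
	}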

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
---
 include/linux/dma-mapping.h | 32 ++++++++++++++++++++++++++++++++
 include/linux/iommu.h       |  6 ++++++
 2 files changed, 38 insertions(+)

Comments

Marek Szyprowski May 5, 2020, 10:44 a.m. UTC | #1
Hi Christoph,

On 05.05.2020 12:22, Christoph Hellwig wrote:
>> +static inline int dma_map_sgtable_attrs(struct device *dev,
>> +	struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs)
> Two tab indents for parameter continuation, please.
>
> Can we also skip the separate _attrs version?  The existing ones have the
> separate _attrs variant as there were pre-existing versions without the
> attrs argument and lots of users, but that doesn't really apply here as
> an extra 0 argument isn't really an issue.

Okay.

>> +static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
>> +			unsigned long iova, struct sg_table *sgt, int prot)
>> +{
>> +	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
>> +}
> Should this be a separate patch due to the different subsystems?
>
> FYI, I'll happily pick up the prep patches in an immutable branch of
> the dma-mapping tree once we have settled on the details.

Okay.

Best regards
Hans Verkuil May 7, 2020, 8:47 a.m. UTC | #2
Hi Marek,

On 05/05/2020 10:45, Marek Szyprowski wrote:
> struct sg_table is a common structure used to describe a memory
> buffer. It consists of a scatterlist with memory pages and DMA
> addresses (the sgl entry), as well as two scatterlist entry counts:
> the number of CPU pages (orig_nents) and the number of DMA-mapped
> entries (nents).
> 
> It turned out that it was a common mistake to misuse the nents and
> orig_nents entries: to call dma-mapping functions with the wrong
> number of entries, or to ignore the number of mapped entries returned
> by the dma_map_sg function.
> 
> To avoid such issues, let's introduce common wrappers that operate
> directly on struct sg_table objects and take care of the proper use
> of the nents and orig_nents entries.
> 
> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
> ---
>  include/linux/dma-mapping.h | 32 ++++++++++++++++++++++++++++++++
>  include/linux/iommu.h       |  6 ++++++
>  2 files changed, 38 insertions(+)
> 
> diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
> index b43116a..8364c20d 100644
> --- a/include/linux/dma-mapping.h
> +++ b/include/linux/dma-mapping.h
> @@ -609,6 +609,36 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
>  	return dma_sync_single_for_device(dev, addr + offset, size, dir);
>  }
>  
> +static inline int dma_map_sgtable_attrs(struct device *dev,
> +	struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs)

Please document these new inline functions!

The core reason IMHO why so many drivers get this wrong is that the struct sg_table
documentation is very poor, in particular w.r.t. how the nents and orig_nents
fields should be used.

It confused me, and clearly I am not alone in that.

So while this is a very nice improvement, it doesn't address the core issue, i.e.
the documentation.

Some examples of how to use these new functions would be especially welcome.

Regards,

	Hans

> +{
> +	int n = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
> +
> +	if (n > 0) {
> +		sgt->nents = n;
> +		return 0;
> +	}
> +	return -EINVAL;
> +}
> +
> +static inline void dma_unmap_sgtable_attrs(struct device *dev,
> +	struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs)
> +{
> +	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
> +}
> +
> +static inline void dma_sync_sgtable_for_cpu(struct device *dev,
> +	struct sg_table *sgt, enum dma_data_direction dir)
> +{
> +	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
> +}
> +
> +static inline void dma_sync_sgtable_for_device(struct device *dev,
> +	struct sg_table *sgt, enum dma_data_direction dir)
> +{
> +	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
> +}
> +
>  #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
>  #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
>  #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
> @@ -617,6 +647,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
>  #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
>  #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
>  #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
> +#define dma_map_sgtable(d, s, r) dma_map_sgtable_attrs(d, s, r, 0)
> +#define dma_unmap_sgtable(d, s, r) dma_unmap_sgtable_attrs(d, s, r, 0)
>  
>  extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
>  		void *cpu_addr, dma_addr_t dma_addr, size_t size,
> diff --git a/include/linux/iommu.h b/include/linux/iommu.h
> index 7ef8b0b..5106b65 100644
> --- a/include/linux/iommu.h
> +++ b/include/linux/iommu.h
> @@ -466,6 +466,12 @@ extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
>  extern void iommu_set_fault_handler(struct iommu_domain *domain,
>  			iommu_fault_handler_t handler, void *token);
>  
> +static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
> +			unsigned long iova, struct sg_table *sgt, int prot)
> +{
> +	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
> +}
> +
>  extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
>  extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
>  extern void generic_iommu_put_resv_regions(struct device *dev,
>

Patch

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index b43116a..8364c20d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -609,6 +609,36 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	return dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
 
+static inline int dma_map_sgtable_attrs(struct device *dev,
+	struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs)
+{
+	int n = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+
+	if (n > 0) {
+		sgt->nents = n;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static inline void dma_unmap_sgtable_attrs(struct device *dev,
+	struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs)
+{
+	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+}
+
+static inline void dma_sync_sgtable_for_cpu(struct device *dev,
+	struct sg_table *sgt, enum dma_data_direction dir)
+{
+	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
+}
+
+static inline void dma_sync_sgtable_for_device(struct device *dev,
+	struct sg_table *sgt, enum dma_data_direction dir)
+{
+	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
+}
+
 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
@@ -617,6 +647,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
+#define dma_map_sgtable(d, s, r) dma_map_sgtable_attrs(d, s, r, 0)
+#define dma_unmap_sgtable(d, s, r) dma_unmap_sgtable_attrs(d, s, r, 0)
 
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7ef8b0b..5106b65 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -466,6 +466,12 @@ extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
 
+static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+			unsigned long iova, struct sg_table *sgt, int prot)
+{
+	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+}
+
 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern void generic_iommu_put_resv_regions(struct device *dev,
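
A short sketch (hypothetical caller code; the IOMMU domain setup and the
initial dma_map_sgtable() call are assumed to happen elsewhere) of how the
sync helpers and the iommu_map_sgtable() wrapper above might be used:

	static void example_cpu_access(struct device *dev, struct sg_table *sgt)
	{
		/* Give the CPU ownership of a buffer mapped with dma_map_sgtable(). */
		dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);

		/* ... the CPU reads or updates the buffer contents here ... */

		/* Hand the buffer back to the device before restarting DMA. */
		dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
	}

	static size_t example_iommu_map(struct iommu_domain *domain,
					unsigned long iova, struct sg_table *sgt)
	{
		/*
		 * iommu_map_sgtable() always walks the CPU-side entries
		 * (orig_nents), so the caller no longer has to choose between
		 * nents and orig_nents.
		 */
		return iommu_map_sgtable(domain, iova, sgt,
					 IOMMU_READ | IOMMU_WRITE);
	}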