diff mbox

[19/31] arc: handle page-less SG entries

Message ID 1439363150-8661-20-git-send-email-hch@lst.de (mailing list archive)
State Awaiting Upstream
Headers show

Commit Message

Christoph Hellwig Aug. 12, 2015, 7:05 a.m. UTC
Make all cache invalidation conditional on sg_has_page() and use
sg_phys to get the physical address directly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/arc/include/asm/dma-mapping.h | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

Comments

Vineet Gupta Aug. 12, 2015, 10:28 a.m. UTC | #1
On Wednesday 12 August 2015 12:39 PM, Christoph Hellwig wrote:
> Make all cache invalidation conditional on sg_has_page() and use
> sg_phys to get the physical address directly.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>

With a minor nit below.

Acked-by: Vineet Gupta <vgupta@synopsys.com>

> ---
>  arch/arc/include/asm/dma-mapping.h | 26 +++++++++++++++++++-------
>  1 file changed, 19 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
> index 2d28ba9..42eb526 100644
> --- a/arch/arc/include/asm/dma-mapping.h
> +++ b/arch/arc/include/asm/dma-mapping.h
> @@ -108,9 +108,13 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
>  	struct scatterlist *s;
>  	int i;
>  
> -	for_each_sg(sg, s, nents, i)
> -		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
> -					       s->length, dir);
> +	for_each_sg(sg, s, nents, i) {
> +		if (sg_has_page(s)) {
> +			_dma_cache_sync((unsigned long)sg_virt(s), s->length,
> +					dir);
> +		}
> +		s->dma_address = sg_phys(s);
> +	}
>  
>  	return nents;
>  }
> @@ -163,8 +167,12 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
>  	int i;
>  	struct scatterlist *sg;
>  
> -	for_each_sg(sglist, sg, nelems, i)
> -		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
> +	for_each_sg(sglist, sg, nelems, i) {
> +		if (sg_has_page(sg)) {
> +			_dma_cache_sync((unsigned int)sg_virt(sg), sg->length,
> +					dir);
> +		}
> +	}
>  }
>  
>  static inline void
> @@ -174,8 +182,12 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
>  	int i;
>  	struct scatterlist *sg;
>  
> -	for_each_sg(sglist, sg, nelems, i)
> -		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
> +	for_each_sg(sglist, sg, nelems, i) {
> +		if (sg_has_page(sg)) {
> +			_dma_cache_sync((unsigned int)sg_virt(sg), sg->length,
> +				dir);

For consistency, could you please fix the left alignment of @dir above — another tab
perhaps?

> +		}
> +	}
>  }
>  
>  static inline int dma_supported(struct device *dev, u64 dma_mask)

--
To unsubscribe from this list: send the line "unsubscribe linux-parisc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 2d28ba9..42eb526 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -108,9 +108,13 @@  dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int i;
 
-	for_each_sg(sg, s, nents, i)
-		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
-					       s->length, dir);
+	for_each_sg(sg, s, nents, i) {
+		if (sg_has_page(s)) {
+			_dma_cache_sync((unsigned long)sg_virt(s), s->length,
+					dir);
+		}
+		s->dma_address = sg_phys(s);
+	}
 
 	return nents;
 }
@@ -163,8 +167,12 @@  dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
 	int i;
 	struct scatterlist *sg;
 
-	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg_has_page(sg)) {
+			_dma_cache_sync((unsigned int)sg_virt(sg), sg->length,
+					dir);
+		}
+	}
 }
 
 static inline void
@@ -174,8 +182,12 @@  dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
 	int i;
 	struct scatterlist *sg;
 
-	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg_has_page(sg)) {
+			_dma_cache_sync((unsigned int)sg_virt(sg), sg->length,
+				dir);
+		}
+	}
 }
 
 static inline int dma_supported(struct device *dev, u64 dma_mask)