
[4/4] swiotlb-xen: ensure we have a single callsite for xen_dma_map_page

Message ID 20190411072000.4306-5-hch@lst.de (mailing list archive)
State Accepted
Commit 063b8271ec8f706d833e61dfca40c512504a62c1
Series [1/4] swiotlb-xen: make instances match their method names

Commit Message

Christoph Hellwig April 11, 2019, 7:20 a.m. UTC
Refactor the code a bit to make further changes easier.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/xen/swiotlb-xen.c | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)
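
The patch collapses the two xen_dma_map_page callsites into a single shared exit path, a common kernel idiom: the fast path jumps over the bounce-buffer setup with a goto, and both paths then fall through to the one remaining tail call. A minimal, self-contained C sketch of that pattern follows; all names are invented for illustration and are not the swiotlb-xen API.

#include <stdbool.h>
#include <stdio.h>

struct ctx {
	bool fast;		/* fast path usable? */
	bool bounce_ok;		/* does bounce-buffer setup succeed? */
};

static bool fast_path_ok(const struct ctx *c)
{
	return c->fast;
}

static bool setup_bounce_buffer(const struct ctx *c)
{
	return c->bounce_ok;
}

static void shared_tail_work(void)
{
	puts("tail work runs exactly once per call");
}

static int map_thing(const struct ctx *c)
{
	if (fast_path_ok(c))
		goto done;	/* fast path: skip the bounce-buffer setup */

	if (!setup_bounce_buffer(c))
		return -1;	/* failure paths still return early */
done:
	/* The one and only callsite for the formerly duplicated call. */
	shared_tail_work();
	return 0;
}

int main(void)
{
	const struct ctx fast = { .fast = true,  .bounce_ok = false };
	const struct ctx slow = { .fast = false, .bounce_ok = true  };

	return map_thing(&fast) | map_thing(&slow);
}

Both callers reach shared_tail_work() through the same line, which is exactly what the patch arranges for xen_dma_map_page so that later changes only need to touch one spot.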

Comments

Stefano Stabellini April 15, 2019, 10:56 p.m. UTC | #1
On Thu, 11 Apr 2019, Christoph Hellwig wrote:
> Refactor the code a bit to make further changes easier.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

Patch

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 9a951504dc12..5dcb06fe9667 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -391,13 +391,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) &&
 	    !range_straddles_page_boundary(phys, size) &&
 		!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-		(swiotlb_force != SWIOTLB_FORCE)) {
-		/* we are not interested in the dma_addr returned by
-		 * xen_dma_map_page, only in the potential cache flushes executed
-		 * by the function. */
-		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
-		return dev_addr;
-	}
+		swiotlb_force != SWIOTLB_FORCE)
+		goto done;
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
@@ -410,19 +405,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
-	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-					dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+	if (unlikely(!dma_capable(dev, dev_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir,
+				attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DMA_MAPPING_ERROR;
+	}
 
-	return DMA_MAPPING_ERROR;
+	page = pfn_to_page(map >> PAGE_SHIFT);
+	offset = map & ~PAGE_MASK;
+done:
+	/*
+	 * we are not interested in the dma_addr returned by xen_dma_map_page,
+	 * only in the potential cache flushes executed by the function.
+	 */
+	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+	return dev_addr;
 }
 
 /*
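
A second, smaller cleanup rides along in the last hunk: the failure path no longer mutates attrs before unmapping; DMA_ATTR_SKIP_CPU_SYNC is ORed in at its single point of use, so attrs keeps its caller-supplied value. A self-contained, hypothetical sketch of that idiom follows; the helpers are userspace stand-ins, and the flag value is repeated from the kernel header only so the sketch compiles on its own.

#include <stdio.h>

/* Matches the kernel's definition; duplicated here only for the sketch. */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)

static void unmap_single(unsigned long attrs)
{
	printf("unmap with attrs=%#lx\n", attrs);
}

static void failure_path(unsigned long attrs)
{
	/*
	 * Before the patch: attrs |= DMA_ATTR_SKIP_CPU_SYNC; followed by
	 * the unmap call. After: the flag is ORed in at the call itself,
	 * leaving attrs untouched for any later user.
	 */
	unmap_single(attrs | DMA_ATTR_SKIP_CPU_SYNC);
}

int main(void)
{
	failure_path(0);
	return 0;
}

The same hunk also wraps the failure check in unlikely(), the standard branch-prediction hint, which keeps the common case (the bounce buffer is DMA-capable) on the straight-line path to the shared xen_dma_map_page call.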