diff mbox series

scsi: ibmvscsi_tgt: Use dma_alloc_coherent() instead of get_zeroed_page/dma_map_single()

Message ID 20211010160055.488-1-caihuoqing@baidu.com (mailing list archive)
State Superseded
Headers show
Series scsi: ibmvscsi_tgt: Use dma_alloc_coherent() instead of get_zeroed_page/dma_map_single() | expand

Commit Message

Cai,Huoqing Oct. 10, 2021, 4 p.m. UTC
Replacing get_zeroed_page/free_page/dma_map_single/dma_unmap_single()
with dma_alloc_coherent/dma_free_coherent() helps to reduce
code size, and simplify the code, and coherent DMA will not
clear the cache every time.

Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
---
 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 41 ++++++------------------
 1 file changed, 10 insertions(+), 31 deletions(-)

Comments

Christoph Hellwig Oct. 11, 2021, 6:42 a.m. UTC | #1
On Mon, Oct 11, 2021 at 12:00:53AM +0800, Cai Huoqing wrote:
> Replacing get_zeroed_page/free_page/dma_map_single/dma_unmap_single()
> with dma_alloc_coherent/dma_free_coherent() helps to reduce
> code size, and simplify the code, and coherent DMA will not
> clear the cache every time.

This explanation does not make any sense whatsoever.  Please explain
why it would, and show numbers.
Cai,Huoqing Oct. 11, 2021, 7:04 a.m. UTC | #2
On 11 10月 21 07:42:28, Christoph Hellwig wrote:
> On Mon, Oct 11, 2021 at 12:00:53AM +0800, Cai Huoqing wrote:
> > Replacing get_zeroed_page/free_page/dma_map_single/dma_unmap_single()
> > with dma_alloc_coherent/dma_free_coherent() helps to reduce
> > code size, and simplify the code, and coherent DMA will not
> > clear the cache every time.
> 
> This explanation does not make any sense whatsoever.  Please explain
> why it would, and show numbers.
Hi Christoph,
thanks for your feedback.

In this case, just simplify the code.
10 insertions(+), 31 deletions(-)

The dma_sync_ API is not called, so I think the hardware may keep the cache
coherent directly, or it is a no-cache system. No need to make a performance comparison.

thanks
Cai
Christoph Hellwig Oct. 11, 2021, 7:26 a.m. UTC | #3
On Mon, Oct 11, 2021 at 03:04:05PM +0800, Cai Huoqing wrote:
> The dma_sync_ API is not called, so I think the hardware may keep the cache
> coherent directly, or it is a no-cache system. No need to make a performance comparison.

On a device that is not attached in a cache coherent way (and that is
the only one that matters here), dma_alloc_coherent will force every
access to the memory to be uncached, while using dma_sync will only
do a cache maintenance operation for each DMA submission and
completion.  So yes, it matters.  And Bart, who has actually looked into
the numbers, has seen the sync case to be faster consistently for a SCSI
ULP.

Note that you can simplify and improve this case by using
dma_alloc_noncoherent instead of a kernel allocator + dma_map_*.
diff mbox series

Patch

diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 61f06f6885a5..24aa0a0d49a6 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3007,20 +3007,12 @@  static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
 
 	vscsi->cmd_q.size = pages;
 
-	vscsi->cmd_q.base_addr =
-		(struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
-	if (!vscsi->cmd_q.base_addr)
-		return -ENOMEM;
-
 	vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
 
-	vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
-						vscsi->cmd_q.base_addr,
-						PAGE_SIZE, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
-		free_page((unsigned long)vscsi->cmd_q.base_addr);
+	vscsi->cmd_q.base_addr = dma_alloc_coherent(&vdev->dev, PAGE_SIZE,
+						    &vscsi->cmd_q.crq_token, GFP_KERNEL);
+	if (!vscsi->cmd_q.base_addr)
 		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -3036,9 +3028,8 @@  static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
  */
 static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
 {
-	dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
-			 PAGE_SIZE, DMA_BIDIRECTIONAL);
-	free_page((unsigned long)vscsi->cmd_q.base_addr);
+	dma_free_coherent(&vscsi->dma_dev->dev, PAGE_SIZE,
+			  vscsi->cmd_q.base_addr, vscsi->cmd_q.crq_token);
 	vscsi->cmd_q.base_addr = NULL;
 	vscsi->state = NO_QUEUE;
 }
@@ -3504,18 +3495,11 @@  static int ibmvscsis_probe(struct vio_dev *vdev,
 		goto free_timer;
 	}
 
-	vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	vscsi->map_buf = dma_alloc_coherent(&vdev->dev, PAGE_SIZE,
+					    &vscsi->map_ioba, GFP_KERNEL);
 	if (!vscsi->map_buf) {
 		rc = -ENOMEM;
 		dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
-		goto destroy_queue;
-	}
-
-	vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
-					 DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
-		rc = -ENOMEM;
-		dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
 		goto free_buf;
 	}
 
@@ -3544,7 +3528,7 @@  static int ibmvscsis_probe(struct vio_dev *vdev,
 	if (!vscsi->work_q) {
 		rc = -ENOMEM;
 		dev_err(&vscsi->dev, "create_workqueue failed\n");
-		goto unmap_buf;
+		goto destroy_queue;
 	}
 
 	rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
@@ -3562,11 +3546,8 @@  static int ibmvscsis_probe(struct vio_dev *vdev,
 
 destroy_WQ:
 	destroy_workqueue(vscsi->work_q);
-unmap_buf:
-	dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
-			 DMA_BIDIRECTIONAL);
 free_buf:
-	kfree(vscsi->map_buf);
+	dma_free_coherent(&vdev->dev, PAGE_SIZE, vscsi->map_buf, vscsi->map_ioba);
 destroy_queue:
 	tasklet_kill(&vscsi->work_task);
 	ibmvscsis_unregister_command_q(vscsi);
@@ -3602,9 +3583,7 @@  static void ibmvscsis_remove(struct vio_dev *vdev)
 	vio_disable_interrupts(vdev);
 	free_irq(vdev->irq, vscsi);
 	destroy_workqueue(vscsi->work_q);
-	dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
-			 DMA_BIDIRECTIONAL);
-	kfree(vscsi->map_buf);
+	dma_free_coherent(&vdev->dev, PAGE_SIZE, vscsi->map_buf, vscsi->map_ioba);
 	tasklet_kill(&vscsi->work_task);
 	ibmvscsis_destroy_command_q(vscsi);
 	ibmvscsis_freetimer(vscsi);