Message ID | 20220110103739.118426-1-alain.volmat@foss.st.com (mailing list archive)
State      | New, archived
Series     | media: stm32: dcmi: create a dma scatterlist based on DMA max_sg_burst value
Hi,

could you put this on hold for the moment: I've noticed that an
sg_free_table() call is actually missing in this patch, leading to a
memory leak. I'll thus post a v2 of the patch shortly.

Cheers,
Alain

On Mon, Jan 10, 2022 at 11:37:39AM +0100, Alain Volmat wrote:
> Prior to submitting a transfer to the DMA engine, the client should
> first check the capabilities of the DMA channel in terms of the maximum
> size of each segment. This is given by the max_sg_burst value reported
> by the dma_get_slave_caps API. Based on that, if the transfer is larger
> than what the DMA channel can handle, we split the transfer into
> several scatterlist elements.
>
> Signed-off-by: Alain Volmat <alain.volmat@foss.st.com>
> ---
>  drivers/media/platform/stm32/stm32-dcmi.c | 47 ++++++++++++++++++-----
>  1 file changed, 37 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
> index e1b17c05229c..ee170e999a88 100644
> --- a/drivers/media/platform/stm32/stm32-dcmi.c
> +++ b/drivers/media/platform/stm32/stm32-dcmi.c
> @@ -113,7 +113,7 @@ struct dcmi_framesize {
>  struct dcmi_buf {
>          struct vb2_v4l2_buffer  vb;
>          bool                    prepared;
> -        dma_addr_t              paddr;
> +        struct sg_table         sgt;
>          size_t                  size;
>          struct list_head        list;
>  };
> @@ -157,6 +157,7 @@ struct stm32_dcmi {
>          enum state              state;
>          struct dma_chan         *dma_chan;
>          dma_cookie_t            dma_cookie;
> +        u32                     dma_max_burst;
>          u32                     misr;
>          int                     errors_count;
>          int                     overrun_count;
> @@ -326,13 +327,11 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
>          mutex_lock(&dcmi->dma_lock);
>
>          /* Prepare a DMA transaction */
> -        desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
> -                                           buf->size,
> +        desc = dmaengine_prep_slave_sg(dcmi->dma_chan, buf->sgt.sgl, buf->sgt.nents,
>                                         DMA_DEV_TO_MEM,
>                                         DMA_PREP_INTERRUPT);
>          if (!desc) {
> -                dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
> -                        __func__, &buf->paddr, buf->size);
> +                dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_sg failed\n", __func__);
>                  mutex_unlock(&dcmi->dma_lock);
>                  return -EINVAL;
>          }
> @@ -524,6 +523,10 @@ static int dcmi_buf_prepare(struct vb2_buffer *vb)
>          struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
>          struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
>          unsigned long size;
> +        unsigned int num_sgs = 1;
> +        dma_addr_t dma_buf;
> +        struct scatterlist *sg;
> +        int i, ret;
>
>          size = dcmi->fmt.fmt.pix.sizeimage;
>
> @@ -537,15 +540,33 @@ static int dcmi_buf_prepare(struct vb2_buffer *vb)
>
>          if (!buf->prepared) {
>                  /* Get memory addresses */
> -                buf->paddr =
> -                        vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
>                  buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
> -                buf->prepared = true;
> +                if (buf->size > dcmi->dma_max_burst)
> +                        num_sgs = DIV_ROUND_UP(buf->size, dcmi->dma_max_burst);
>
> -                vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
> +                ret = sg_alloc_table(&buf->sgt, num_sgs, GFP_ATOMIC);
> +                if (ret) {
> +                        dev_err(dcmi->dev, "sg table alloc failed\n");
> +                        return ret;
> +                }
> +
> +                dma_buf = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
>
>                  dev_dbg(dcmi->dev, "buffer[%d] phy=%pad size=%zu\n",
> -                        vb->index, &buf->paddr, buf->size);
> +                        vb->index, &dma_buf, buf->size);
> +
> +                for_each_sg(buf->sgt.sgl, sg, num_sgs, i) {
> +                        size_t bytes = min_t(size_t, size, dcmi->dma_max_burst);
> +
> +                        sg_dma_address(sg) = dma_buf;
> +                        sg_dma_len(sg) = bytes;
> +                        dma_buf += bytes;
> +                        size -= bytes;
> +                }
> +
> +                buf->prepared = true;
> +
> +                vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
>          }
>
>          return 0;
> @@ -1866,6 +1887,7 @@ static int dcmi_probe(struct platform_device *pdev)
>          struct stm32_dcmi *dcmi;
>          struct vb2_queue *q;
>          struct dma_chan *chan;
> +        struct dma_slave_caps caps;
>          struct clk *mclk;
>          int irq;
>          int ret = 0;
> @@ -1953,6 +1975,11 @@ static int dcmi_probe(struct platform_device *pdev)
>                  return ret;
>          }
>
> +        dcmi->dma_max_burst = UINT_MAX;
> +        ret = dma_get_slave_caps(chan, &caps);
> +        if (!ret && caps.max_sg_burst)
> +                dcmi->dma_max_burst = caps.max_sg_burst * DMA_SLAVE_BUSWIDTH_4_BYTES;
> +
>          spin_lock_init(&dcmi->irqlock);
>          mutex_init(&dcmi->lock);
>          mutex_init(&dcmi->dma_lock);
> --
> 2.25.1
>
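The leak Alain refers to comes from dcmi_buf_prepare() calling sg_alloc_table()
without any matching sg_free_table() in the patch. Below is a minimal sketch of
one possible fix, assuming the table is released through vb2's .buf_cleanup
hook; the handler name and its placement are hypothetical, and v2 may well do
this differently:

        /*
         * Sketch only -- not part of the posted patch. Releases the sg
         * table allocated in dcmi_buf_prepare() when vb2 cleans the
         * buffer up; the missing call is sg_free_table().
         */
        static void dcmi_buf_cleanup(struct vb2_buffer *vb)
        {
                struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
                struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);

                if (buf->prepared) {
                        sg_free_table(&buf->sgt);
                        buf->prepared = false;
                }
        }

Registering such a handler as .buf_cleanup in the driver's vb2_ops would pair
every sg_alloc_table() with exactly one sg_free_table().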
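To make the splitting rule in the commit message concrete, here is a small
standalone illustration (plain userspace C, not driver code) of the num_sgs
computation and the per-segment sizing loop from dcmi_buf_prepare(). All
numbers are invented: a 614400-byte frame (640x480, 2 bytes per pixel) and a
channel whose max_sg_burst allows 65535 words of 4 bytes per segment:

        #include <stdio.h>

        /* Same rounding helper the kernel uses. */
        #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

        int main(void)
        {
                unsigned long size = 614400;            /* 640x480x2, made up */
                unsigned long max_burst = 65535UL * 4;  /* max_sg_burst * bus width */
                unsigned int num_sgs = 1;

                if (size > max_burst)
                        num_sgs = DIV_ROUND_UP(size, max_burst);

                /* Mirror the driver loop: each element takes min(size, max_burst). */
                for (unsigned int i = 0; i < num_sgs; i++) {
                        unsigned long bytes = size < max_burst ? size : max_burst;

                        printf("sg[%u]: %lu bytes\n", i, bytes);
                        size -= bytes;
                }
                return 0;
        }

With these numbers the frame splits into three scatterlist elements of 262140,
262140 and 90120 bytes; a buffer at or below max_burst stays a single element,
matching the num_sgs = 1 default in the patch.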