Message ID | 20201027063858.4877-10-jee.heng.sia@intel.com (mailing list archive)
---|---
State | Changes Requested |
Series | dmaengine: dw-axi-dmac: support Intel KeemBay AxiDMA
On 27-10-20, 14:38, Sia Jee Heng wrote:
> Add support for DMA_RESIDUE_GRANULARITY_BURST so that AxiDMA can report
> DMA residue.
>
> Existing AxiDMA driver only support data transfer between
> memory to memory operation, therefore reporting DMA residue
> to the DMA clients is not supported.
>
> Reporting DMA residue to the DMA clients is important as DMA clients
> shall invoke dmaengine_tx_status() to understand the number of bytes
> been transferred so that the buffer pointer can be updated accordingly.
>
> Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
> Signed-off-by: Sia Jee Heng <jee.heng.sia@intel.com>
> ---
>  .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 44 ++++++++++++++++---
>  drivers/dma/dw-axi-dmac/dw-axi-dmac.h      |  2 +
>  2 files changed, 39 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> index 011cf7134f25..cd99557a716c 100644
> --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> @@ -265,14 +265,36 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
>  		    struct dma_tx_state *txstate)
>  {
>  	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
> -	enum dma_status ret;
> +	struct virt_dma_desc *vdesc;
> +	enum dma_status status;
> +	u32 completed_length;
> +	unsigned long flags;
> +	u32 completed_blocks;
> +	size_t bytes = 0;
> +	u32 length;
> +	u32 len;
>
> -	ret = dma_cookie_status(dchan, cookie, txstate);
> +	status = dma_cookie_status(dchan, cookie, txstate);

txstate can be null, so please check that as well in the below condition
and return if that is the case
> -----Original Message-----
> From: Vinod Koul <vkoul@kernel.org>
> Sent: 09 November 2020 5:51 PM
> To: Sia, Jee Heng <jee.heng.sia@intel.com>
> Cc: Eugeniy.Paltsev@synopsys.com; andriy.shevchenko@linux.intel.com;
> dmaengine@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: Re: [PATCH v2 09/15] dmaengine: dw-axi-dmac: Support burst residue
> granularity
>
> On 27-10-20, 14:38, Sia Jee Heng wrote:
> > Add support for DMA_RESIDUE_GRANULARITY_BURST so that AxiDMA can
> > report DMA residue.
> >
> > Existing AxiDMA driver only support data transfer between memory to
> > memory operation, therefore reporting DMA residue to the DMA clients
> > is not supported.
> >
> > Reporting DMA residue to the DMA clients is important as DMA clients
> > shall invoke dmaengine_tx_status() to understand the number of bytes
> > been transferred so that the buffer pointer can be updated accordingly.
> >
> > Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
> > Signed-off-by: Sia Jee Heng <jee.heng.sia@intel.com>
> > ---
> >  .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 44 ++++++++++++++++---
> >  drivers/dma/dw-axi-dmac/dw-axi-dmac.h      |  2 +
> >  2 files changed, 39 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> > b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> > index 011cf7134f25..cd99557a716c 100644
> > --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> > +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> > @@ -265,14 +265,36 @@ dma_chan_tx_status(struct dma_chan *dchan,
> dma_cookie_t cookie,
> >  		    struct dma_tx_state *txstate)
> >  {
> >  	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
> > -	enum dma_status ret;
> > +	struct virt_dma_desc *vdesc;
> > +	enum dma_status status;
> > +	u32 completed_length;
> > +	unsigned long flags;
> > +	u32 completed_blocks;
> > +	size_t bytes = 0;
> > +	u32 length;
> > +	u32 len;
> >
> > -	ret = dma_cookie_status(dchan, cookie, txstate);
> > +	status = dma_cookie_status(dchan, cookie, txstate);
>
> txstate can be null, so please check that as well in the below condition and
> return if that is the case
[>>] noted. Will factor in the null condition check in v3
>
> --
> ~Vinod
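A minimal sketch of the early return being requested, reusing the identifiers from the patch; this only illustrates the reviewer's suggestion and is not the actual v3 change:

```c
/*
 * Sketch only: one way dma_chan_tx_status() could fold in the reviewer's
 * request. The real v3 patch may differ.
 */
static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	enum dma_status status;

	status = dma_cookie_status(dchan, cookie, txstate);
	/* Bail out when complete, or when there is no txstate to fill in. */
	if (status == DMA_COMPLETE || !txstate)
		return status;

	/* ... residue calculation from the patch below continues here ... */

	return status;
}
```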
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 011cf7134f25..cd99557a716c 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -265,14 +265,36 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
 		    struct dma_tx_state *txstate)
 {
 	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
-	enum dma_status ret;
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	u32 completed_length;
+	unsigned long flags;
+	u32 completed_blocks;
+	size_t bytes = 0;
+	u32 length;
+	u32 len;
 
-	ret = dma_cookie_status(dchan, cookie, txstate);
+	status = dma_cookie_status(dchan, cookie, txstate);
+	if (status == DMA_COMPLETE)
+		return status;
 
-	if (chan->is_paused && ret == DMA_IN_PROGRESS)
-		ret = DMA_PAUSED;
+	spin_lock_irqsave(&chan->vc.lock, flags);
 
-	return ret;
+	vdesc = vchan_find_desc(&chan->vc, cookie);
+	if (vdesc) {
+		length = vd_to_axi_desc(vdesc)->length;
+		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
+		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
+		completed_length = completed_blocks * len;
+		bytes = length - completed_length;
+	} else {
+		bytes = vd_to_axi_desc(vdesc)->length;
+	}
+
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+	dma_set_residue(txstate, bytes);
+
+	return status;
 }
 
 static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
@@ -497,6 +519,7 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
 
 	desc->chan = chan;
 	num = 0;
+	desc->length = 0;
 	while (len) {
 		xfer_len = len;
 
@@ -549,7 +572,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
 
 		set_desc_src_master(hw_desc);
 		set_desc_dest_master(hw_desc, desc);
-
+		hw_desc->len = xfer_len;
+		desc->length += hw_desc->len;
 		/* update the length and addresses for the next loop cycle */
 		len -= xfer_len;
 		dst_adr += xfer_len;
@@ -612,6 +636,7 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
 	chan->direction = direction;
 	desc->chan = chan;
 	chan->cyclic = true;
+	desc->length = 0;
 
 	switch (direction) {
 	case DMA_MEM_TO_DEV:
@@ -677,6 +702,8 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
 
 		set_desc_src_master(hw_desc);
 
+		hw_desc->len = period_len;
+		desc->length += hw_desc->len;
 		/*
 		 * Set end-of-link to the linked descriptor, so that cyclic
 		 * callback function can be triggered during interrupt.
@@ -757,6 +784,7 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 	}
 
 	desc->chan = chan;
+	desc->length = 0;
 
 	for_each_sg(sgl, sg, sg_len, i) {
 		mem = sg_dma_address(sg);
@@ -806,6 +834,8 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 		hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);
 
 		set_desc_src_master(hw_desc);
+		hw_desc->len = len;
+		desc->length += hw_desc->len;
 	}
 
 	if (unlikely(!desc))
@@ -1269,7 +1299,7 @@ static int dw_probe(struct platform_device *pdev)
 	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
 	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
 	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
-	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 
 	dw->dma.dev = chip->dev;
 	dw->dma.device_tx_status = dma_chan_tx_status;
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index 651874e5c88f..bdb66d775125 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -88,6 +88,7 @@ struct __packed axi_dma_lli {
 struct axi_dma_hw_desc {
 	struct axi_dma_lli	*lli;
 	dma_addr_t		llp;
+	u32			len;
 };
 
 struct axi_dma_desc {
@@ -96,6 +97,7 @@ struct axi_dma_desc {
 	struct virt_dma_desc		vd;
 	struct axi_dma_chan		*chan;
 	u32				completed_blocks;
+	u32				length;
 };
 
 static inline struct device *dchan2dev(struct dma_chan *dchan)
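As a usage note on the commit message's point about clients calling dmaengine_tx_status(): the sketch below shows how a hypothetical DMA client could read the burst-granularity residue this patch reports. The channel and cookie handles, and the function name, are placeholders, not part of this series.

```c
#include <linux/dmaengine.h>

/*
 * Hypothetical client-side sketch (not part of this series): with
 * DMA_RESIDUE_GRANULARITY_BURST the driver refines the residue as blocks
 * complete, so a client can see how many bytes are still outstanding and
 * advance its buffer pointer accordingly.
 */
static size_t client_bytes_remaining(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		return 0;

	/* Residue filled in by dma_chan_tx_status() via dma_set_residue(). */
	return state.residue;
}
```

In the patch above the residue is computed as desc->length minus completed_blocks * hw_desc[0].len, so the value a client sees decreases in block-sized steps rather than only when a whole descriptor finishes.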