Message ID | 20201229044713.28464-14-jee.heng.sia@intel.com (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | dmaengine: dw-axi-dmac: support Intel KeemBay AxiDMA | expand |
Hi Sia Jee Heng, see my comments inlined: > From: Sia Jee Heng <jee.heng.sia@intel.com> > Sent: Tuesday, December 29, 2020 07:47 > To: vkoul@kernel.org; Eugeniy Paltsev; robh+dt@kernel.org > Cc: andriy.shevchenko@linux.intel.com; dmaengine@vger.kernel.org; linux-kernel@vger.kernel.org; devicetree@vger.kernel.org > Subject: [PATCH v8 13/16] dmaengine: dw-axi-dmac: Add Intel KeemBay AxiDMA handshake > > Add support for Intel KeemBay AxiDMA device handshake programming. > Device handshake number passed in to the AxiDMA shall be written to > the Intel KeemBay AxiDMA hardware handshake registers before DMA > operations are started. > > Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com> > Signed-off-by: Sia Jee Heng <jee.heng.sia@intel.com> > --- > .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 52 +++++++++++++++++++ > 1 file changed, 52 insertions(+) > > diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c > index 062d27c61983..5e77eb3d040f 100644 > --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c > +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c [snip] > + > + return 0; > +} > + > /* > * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI > * as 1, it understands that the current block is the final block in the > @@ -626,6 +668,9 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr, > llp = hw_desc->llp; > } while (num_periods); > > + if (dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true)) > + goto err_desc_get; > + In this implementation 'dw_axi_dma_chan_prep_cyclic' callback will fail if we don't have APB registers which are only specific for KeemBay. Looking at the code of 'dw_axi_dma_chan_prep_cyclic' I don't see the reason why it shouldn't work for vanilla DW AXI DMAC without this extension. So, could you please change this so we wouldn't reject dw_axi_dma_chan_prep_cyclic in case the APB registers are missing. 
> return vchan_tx_prep(&chan->vc, &desc->vd, flags); > > err_desc_get: > @@ -684,6 +729,9 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, > llp = hw_desc->llp; > } while (sg_len); > > + if (dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true)) > + goto err_desc_get; > + Same here. > return vchan_tx_prep(&chan->vc, &desc->vd, flags); > > err_desc_get: > @@ -959,6 +1007,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) > dev_warn(dchan2dev(dchan), > "%s failed to stop\n", axi_chan_name(chan)); > > + if (chan->direction != DMA_MEM_TO_MEM) > + dw_axi_dma_set_hw_channel(chan->chip, > + chan->hw_handshake_num, false); > + > spin_lock_irqsave(&chan->vc.lock, flags); > > vchan_get_all_descriptors(&chan->vc, &head); > -- > 2.18.0 >
> -----Original Message----- > From: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> > Sent: 31 December 2020 8:44 PM > To: Sia, Jee Heng <jee.heng.sia@intel.com>; Vinod Koul > <vkoul@kernel.org> > Cc: andriy.shevchenko@linux.intel.com; dmaengine@vger.kernel.org; > linux-kernel@vger.kernel.org; devicetree@vger.kernel.org; > robh+dt@kernel.org > Subject: Re: [PATCH v8 13/16] dmaengine: dw-axi-dmac: Add Intel > KeemBay AxiDMA handshake > > Hi Sia Jee Heng, > > see my comments inlined: > > > From: Sia Jee Heng <jee.heng.sia@intel.com> > > Sent: Tuesday, December 29, 2020 07:47 > > To: vkoul@kernel.org; Eugeniy Paltsev; robh+dt@kernel.org > > Cc: andriy.shevchenko@linux.intel.com; > dmaengine@vger.kernel.org; > > linux-kernel@vger.kernel.org; devicetree@vger.kernel.org > > Subject: [PATCH v8 13/16] dmaengine: dw-axi-dmac: Add Intel > KeemBay > > AxiDMA handshake > > > > Add support for Intel KeemBay AxiDMA device handshake > programming. > > Device handshake number passed in to the AxiDMA shall be written > to > > the Intel KeemBay AxiDMA hardware handshake registers before > DMA > > operations are started. 
> > > > Reviewed-by: Andy Shevchenko > <andriy.shevchenko@linux.intel.com> > > Signed-off-by: Sia Jee Heng <jee.heng.sia@intel.com> > > --- > > .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 52 > +++++++++++++++++++ > > 1 file changed, 52 insertions(+) > > > > diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c > > b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c > > index 062d27c61983..5e77eb3d040f 100644 > > --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c > > +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c > [snip] > > + > > + return 0; > > +} > > + > > /* > > * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of > the fetched LLI > > * as 1, it understands that the current block is the final block in > > the @@ -626,6 +668,9 @@ dw_axi_dma_chan_prep_cyclic(struct > dma_chan *dchan, dma_addr_t dma_addr, > > llp = hw_desc->llp; > > } while (num_periods); > > > > + if (dw_axi_dma_set_hw_channel(chan->chip, chan- > >hw_handshake_num, true)) > > + goto err_desc_get; > > + > > In this implementation 'dw_axi_dma_chan_prep_cyclic' callback will > fail if we don't have APB registers which are only specific for KeemBay. > Looking at the code of 'dw_axi_dma_chan_prep_cyclic' I don't see > the reason why it shouldn't work for vanilla DW AXI DMAC without this > extension. So, could you please change this so we wouldn't reject > dw_axi_dma_chan_prep_cyclic in case the APB registers are missing. [>>] OK, I can change the code in such a way that dw_axi_dma_set_hw_channel() will be executed only if apb_reg is valid. > > > return vchan_tx_prep(&chan->vc, &desc->vd, flags); > > > > err_desc_get: > > @@ -684,6 +729,9 @@ dw_axi_dma_chan_prep_slave_sg(struct > dma_chan *dchan, struct scatterlist *sgl, > > llp = hw_desc->llp; > > } while (sg_len); > > > > + if (dw_axi_dma_set_hw_channel(chan->chip, chan- > >hw_handshake_num, true)) > > + goto err_desc_get; > > + > > Same here. [>>] Sure, same method described above will be used. 
> > > > return vchan_tx_prep(&chan->vc, &desc->vd, flags); > > > > err_desc_get: > > @@ -959,6 +1007,10 @@ static int dma_chan_terminate_all(struct > dma_chan *dchan) > > dev_warn(dchan2dev(dchan), > > "%s failed to stop\n", axi_chan_name(chan)); > > > > + if (chan->direction != DMA_MEM_TO_MEM) > > + dw_axi_dma_set_hw_channel(chan->chip, > > + chan->hw_handshake_num, > > + false); > > + > > spin_lock_irqsave(&chan->vc.lock, flags); > > > > vchan_get_all_descriptors(&chan->vc, &head); > > -- > > 2.18.0 > >
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index 062d27c61983..5e77eb3d040f 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -445,6 +445,48 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan) pm_runtime_put(chan->chip->dev); } +static int dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip, + u32 handshake_num, bool set) +{ + unsigned long start = 0; + unsigned long reg_value; + unsigned long reg_mask; + unsigned long reg_set; + unsigned long mask; + unsigned long val; + + if (!chip->apb_regs) + return -ENODEV; + + /* + * An unused DMA channel has a default value of 0x3F. + * Lock the DMA channel by assign a handshake number to the channel. + * Unlock the DMA channel by assign 0x3F to the channel. + */ + if (set) { + reg_set = UNUSED_CHANNEL; + val = handshake_num; + } else { + reg_set = handshake_num; + val = UNUSED_CHANNEL; + } + + reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0); + + for_each_set_clump8(start, reg_mask, ®_value, 64) { + if (reg_mask == reg_set) { + mask = GENMASK_ULL(start + 7, start); + reg_value &= ~mask; + reg_value |= rol64(val, start); + lo_hi_writeq(reg_value, + chip->apb_regs + DMAC_APB_HW_HS_SEL_0); + break; + } + } + + return 0; +} + /* * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI * as 1, it understands that the current block is the final block in the @@ -626,6 +668,9 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr, llp = hw_desc->llp; } while (num_periods); + if (dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true)) + goto err_desc_get; + return vchan_tx_prep(&chan->vc, &desc->vd, flags); err_desc_get: @@ -684,6 +729,9 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, llp = hw_desc->llp; } while (sg_len); + if (dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, 
true)) + goto err_desc_get; + return vchan_tx_prep(&chan->vc, &desc->vd, flags); err_desc_get: @@ -959,6 +1007,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) dev_warn(dchan2dev(dchan), "%s failed to stop\n", axi_chan_name(chan)); + if (chan->direction != DMA_MEM_TO_MEM) + dw_axi_dma_set_hw_channel(chan->chip, + chan->hw_handshake_num, false); + spin_lock_irqsave(&chan->vc.lock, flags); vchan_get_all_descriptors(&chan->vc, &head);