Message ID | 20201117022215.2461-14-jee.heng.sia@intel.com (mailing list archive) |
---|---|
State | Changes Requested |
Series | dmaengine: dw-axi-dmac: support Intel KeemBay AxiDMA |
Hi Sia,

> Subject: [PATCH v4 13/15] dmaengine: dw-axi-dmac: Add Intel KeemBay AxiDMA handshake
>
> Add support for Intel KeemBay AxiDMA device handshake programming.
> Device handshake number passed in to the AxiDMA shall be written to
> the Intel KeemBay AxiDMA hardware handshake registers before DMA
> operations are started.
>
> Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
> Signed-off-by: Sia Jee Heng <jee.heng.sia@intel.com>
> ---
>  .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 52 +++++++++++++++++++
>  1 file changed, 52 insertions(+)
>
> diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> index c2ffc5d44b6e..d44a5c9eb9c1 100644
> --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
> @@ -445,6 +445,48 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
>  	pm_runtime_put(chan->chip->dev);
>  }
>
> +static int dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip, u32 hs_number,
> +				     bool set)
> +{
> +	unsigned long start = 0;
> +	unsigned long reg_value;
> +	unsigned long reg_mask;
> +	unsigned long reg_set;
> +	unsigned long mask;
> +	unsigned long val;
> +
> +	if (!chip->apb_regs)
> +		return -ENODEV;

In some places you check for this region's existence using if (IS_ERR(chip->regs))
and in other places you use if (!chip->apb_regs)

I guess it isn't correct. NOTE that this comment is valid for other patches as well.

> +
> +	/*
> +	 * An unused DMA channel has a default value of 0x3F.
> +	 * Lock the DMA channel by assign a handshake number to the channel.
> +	 * Unlock the DMA channel by assign 0x3F to the channel.
> +	 */
> +	if (set) {
> +		reg_set = UNUSED_CHANNEL;
> +		val = hs_number;
> +	} else {
> +		reg_set = hs_number;
> +		val = UNUSED_CHANNEL;
> +	}
> +
> +	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
> +
> +	for_each_set_clump8(start, reg_mask, &reg_value, 64) {
> +		if (reg_mask == reg_set) {
> +			mask = GENMASK_ULL(start + 7, start);
> +			reg_value &= ~mask;
> +			reg_value |= rol64(val, start);
> +			lo_hi_writeq(reg_value,
> +				     chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
> +			break;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
>  /*
>   * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
>   * as 1, it understands that the current block is the final block in the
> @@ -626,6 +668,9 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
>  		llp = hw_desc->llp;
>  	} while (num_periods);
>
> +	if (dw_axi_dma_set_hw_channel(chan->chip, chan->hw_hs_num, true))
> +		goto err_desc_get;
> +
>  	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
>
>  err_desc_get:
> @@ -684,6 +729,9 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
>  		llp = hw_desc->llp;
>  	} while (sg_len);
>
> +	if (dw_axi_dma_set_hw_channel(chan->chip, chan->hw_hs_num, true))
> +		goto err_desc_get;
> +
>  	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
>
>  err_desc_get:
> @@ -959,6 +1007,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
>  		dev_warn(dchan2dev(dchan),
>  			 "%s failed to stop\n", axi_chan_name(chan));
>
> +	if (chan->direction != DMA_MEM_TO_MEM)
> +		dw_axi_dma_set_hw_channel(chan->chip,
> +					  chan->hw_hs_num, false);
> +
>  	spin_lock_irqsave(&chan->vc.lock, flags);
>
>  	vchan_get_all_descriptors(&chan->vc, &head);
> --
> 2.18.0
>
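To make the lock/unlock scheme above easier to follow in isolation: the 64-bit DMAC_APB_HW_HS_SEL_0 word is walked in 8-bit fields, and a field holding 0x3F (UNUSED_CHANNEL in the patch) is free. The sketch below is a plain userspace model of that bookkeeping, assuming the 0x3F convention from the patch comment; it replaces the kernel-only for_each_set_clump8()/GENMASK_ULL()/rol64() helpers with an explicit byte loop and is not the driver code itself.

```c
/*
 * Userspace model of the DMAC_APB_HW_HS_SEL_0 bookkeeping in
 * dw_axi_dma_set_hw_channel().  The 64-bit word is treated as eight
 * 8-bit fields; 0x3F (UNUSED_CHANNEL in the patch) marks a free field.
 * This is a sketch of the logic only, not the kernel helper.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UNUSED_CHANNEL	0x3F

/* Claim (set == true) or release (set == false) a field for hs_number. */
static int hw_hs_sel_update(uint64_t *reg, uint8_t hs_number, bool set)
{
	uint8_t match = set ? UNUSED_CHANNEL : hs_number;
	uint8_t value = set ? hs_number : UNUSED_CHANNEL;
	unsigned int start;

	for (start = 0; start < 64; start += 8) {
		if (((*reg >> start) & 0xFF) == match) {
			*reg &= ~(0xFFULL << start);	   /* clear the field   */
			*reg |= (uint64_t)value << start;  /* write the new one */
			return 0;
		}
	}
	return -1;	/* no free/matching field found */
}

int main(void)
{
	uint64_t reg = 0x3F3F3F3F3F3F3F3FULL;	/* all eight fields unused */

	hw_hs_sel_update(&reg, 0x05, true);	/* lock a field for handshake 5 */
	printf("after lock:   %#018llx\n", (unsigned long long)reg);

	hw_hs_sel_update(&reg, 0x05, false);	/* release it again */
	printf("after unlock: %#018llx\n", (unsigned long long)reg);
	return 0;
}
```

On lock the helper searches for a field equal to 0x3F and writes the handshake number into it; on unlock it searches for the field holding that handshake number and writes 0x3F back, which is why the same routine can serve both directions.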
> -----Original Message-----
> From: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
> Sent: 19 November 2020 7:59 AM
> To: Sia, Jee Heng <jee.heng.sia@intel.com>
> Cc: andriy.shevchenko@linux.intel.com; dmaengine@vger.kernel.org;
> linux-kernel@vger.kernel.org; devicetree@vger.kernel.org
> Subject: Re: [PATCH v4 13/15] dmaengine: dw-axi-dmac: Add Intel KeemBay AxiDMA
> handshake
>
> Hi Sia,
>
> > +static int dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip, u32 hs_number,
> > +				     bool set)
> > +{
> > +	unsigned long start = 0;
> > +	unsigned long reg_value;
> > +	unsigned long reg_mask;
> > +	unsigned long reg_set;
> > +	unsigned long mask;
> > +	unsigned long val;
> > +
> > +	if (!chip->apb_regs)
> > +		return -ENODEV;
>
> In some places you check for this region's existence using if (IS_ERR(chip->regs))
> and in other places you use if (!chip->apb_regs)
>
> I guess it isn't correct. NOTE that this comment is valid for other patches as well.

[>>] Thanks for the invaluable comment, will make sure the checks are kept consistent in the code.
Hi Eugeniy,

With regards to the comment below:

> > In some places you check for this region's existence using if
> > (IS_ERR(chip->regs)) and in other places you use if (!chip->apb_regs)

The main reason for using IS_ERR() is that the ioremap() function returns an
error value if the mapping fails. Now, with your suggestion to add a
conditional check on the compatible property, chip->apb_regs will remain
NULL on non Intel KeemBay SoCs. Therefore, the "if (!chip->apb_regs)"
condition will be used instead.

Please let me know if you have any other concerns.

> -----Original Message-----
> From: Sia, Jee Heng
> Sent: 20 November 2020 8:47 AM
> To: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
> Cc: andriy.shevchenko@linux.intel.com; dmaengine@vger.kernel.org;
> linux-kernel@vger.kernel.org; devicetree@vger.kernel.org
> Subject: RE: [PATCH v4 13/15] dmaengine: dw-axi-dmac: Add Intel KeemBay AxiDMA
> handshake
>
> > -----Original Message-----
> > From: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
> > Sent: 19 November 2020 7:59 AM
> > To: Sia, Jee Heng <jee.heng.sia@intel.com>
> > Cc: andriy.shevchenko@linux.intel.com; dmaengine@vger.kernel.org;
> > linux-kernel@vger.kernel.org; devicetree@vger.kernel.org
> > Subject: Re: [PATCH v4 13/15] dmaengine: dw-axi-dmac: Add Intel
> > KeemBay AxiDMA handshake
> >
> > Hi Sia,
> >
> > > +	if (!chip->apb_regs)
> > > +		return -ENODEV;
> >
> > In some places you check for this region's existence using
> > if (IS_ERR(chip->regs)) and in other places you use if (!chip->apb_regs)
> >
> > I guess it isn't correct. NOTE that this comment is valid for other patches as well.
> [>>] Thanks for the invaluable comment, will make sure the checks are kept consistent in the code.
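For reference, here is a minimal probe-time sketch of the arrangement described above: the APB block is only mapped when the node matches the Intel KeemBay compatible, so chip->apb_regs stays NULL on every other SoC and "if (!chip->apb_regs)" becomes the natural presence check at runtime. The helper name, compatible string and resource index below are assumptions for illustration, not necessarily what the final series uses.

```c
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "dw-axi-dmac.h"	/* struct axi_dma_chip, ->apb_regs */

/* Hypothetical helper; the same logic could sit directly in dw_probe(). */
static int kmb_map_apb_regs(struct platform_device *pdev,
			    struct axi_dma_chip *chip)
{
	/*
	 * Non-KeemBay devices have no APB handshake register block, so
	 * chip->apb_regs is simply left NULL for them and every user
	 * checks "if (!chip->apb_regs)" before touching it.
	 */
	if (!of_device_is_compatible(pdev->dev.of_node, "intel,kmb-axi-dma"))
		return 0;

	/* Assumption: the APB block is the second MMIO resource. */
	chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(chip->apb_regs))
		return PTR_ERR(chip->apb_regs);

	return 0;
}
```

devm_platform_ioremap_resource() reports a mapping failure through an ERR_PTR, which is why the probe path tests IS_ERR() while the runtime paths only need the NULL test.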
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index c2ffc5d44b6e..d44a5c9eb9c1 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -445,6 +445,48 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
 	pm_runtime_put(chan->chip->dev);
 }
 
+static int dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip, u32 hs_number,
+				     bool set)
+{
+	unsigned long start = 0;
+	unsigned long reg_value;
+	unsigned long reg_mask;
+	unsigned long reg_set;
+	unsigned long mask;
+	unsigned long val;
+
+	if (!chip->apb_regs)
+		return -ENODEV;
+
+	/*
+	 * An unused DMA channel has a default value of 0x3F.
+	 * Lock the DMA channel by assign a handshake number to the channel.
+	 * Unlock the DMA channel by assign 0x3F to the channel.
+	 */
+	if (set) {
+		reg_set = UNUSED_CHANNEL;
+		val = hs_number;
+	} else {
+		reg_set = hs_number;
+		val = UNUSED_CHANNEL;
+	}
+
+	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
+
+	for_each_set_clump8(start, reg_mask, &reg_value, 64) {
+		if (reg_mask == reg_set) {
+			mask = GENMASK_ULL(start + 7, start);
+			reg_value &= ~mask;
+			reg_value |= rol64(val, start);
+			lo_hi_writeq(reg_value,
+				     chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
+			break;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
  * as 1, it understands that the current block is the final block in the
@@ -626,6 +668,9 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
 		llp = hw_desc->llp;
 	} while (num_periods);
 
+	if (dw_axi_dma_set_hw_channel(chan->chip, chan->hw_hs_num, true))
+		goto err_desc_get;
+
 	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
 
 err_desc_get:
@@ -684,6 +729,9 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 		llp = hw_desc->llp;
 	} while (sg_len);
 
+	if (dw_axi_dma_set_hw_channel(chan->chip, chan->hw_hs_num, true))
+		goto err_desc_get;
+
 	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
 
 err_desc_get:
@@ -959,6 +1007,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
 		dev_warn(dchan2dev(dchan),
 			 "%s failed to stop\n", axi_chan_name(chan));
 
+	if (chan->direction != DMA_MEM_TO_MEM)
+		dw_axi_dma_set_hw_channel(chan->chip,
+					  chan->hw_hs_num, false);
+
 	spin_lock_irqsave(&chan->vc.lock, flags);
 
 	vchan_get_all_descriptors(&chan->vc, &head);
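For context, the new dw_axi_dma_set_hw_channel() calls sit behind the normal dmaengine client flow: a handshake slot is claimed from the prep_slave_sg()/prep_dma_cyclic() callbacks and released again by terminate_all() for non memory-to-memory channels. Below is a generic dmaengine client sketch, not code from this series; the channel name "tx", the FIFO address and the burst settings are placeholders.

```c
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical client helper: names and settings are illustrative only. */
static int example_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			    dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	/* For this series the handshake number is expected to come from DT. */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	/*
	 * dmaengine_prep_slave_single() wraps a one-entry scatterlist and
	 * ends up in the driver's prep_slave_sg path, which is where the
	 * handshake slot gets locked.
	 */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EBUSY;
		goto release;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* Later (e.g. on shutdown), terminate_all() releases the slot. */
	dmaengine_terminate_sync(chan);
release:
	dma_release_channel(chan);
	return ret;
}
```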