Message ID | 1638459806-27600-8-git-send-email-srivasam@codeaurora.com (mailing list archive)
State      | Superseded
Series     | Add support for audio on SC7280 based targets
On 02/12/2021 15:43, Srinivasa Rao Mandadapu wrote:
> From: Srinivasa Rao Mandadapu <srivasam@codeaurora.org>
>
> Update regmap configuration for supporting headset playback and
> capture and DMIC capture using codec dma interface
>
> Signed-off-by: Srinivasa Rao Mandadapu <srivasam@codeaurora.org>
> Co-developed-by: Venkata Prasad Potturu <potturu@codeaurora.org>
> Signed-off-by: Venkata Prasad Potturu <potturu@codeaurora.org>

LGTM,

Reviewed-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>

> ---
>  sound/soc/qcom/lpass-cpu.c | 185 +++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 185 insertions(+)
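The patch below adds only the regmap_config tables and their per-register accessibility callbacks; creating the regmaps from those configs belongs to the probe path, which is handled elsewhere in this series. As a rough sketch of how a config like lpass_rxtx_regmap_config would typically be consumed, the following is illustrative only: the resource name, the rxtx_lpaif_map field and the max_register calculation are assumptions, not taken from this patch.

/*
 * Illustrative sketch only -- not part of this patch.  The resource name,
 * the rxtx_lpaif_map field and the max_register calculation are assumed
 * here; the real probe wiring lives in other patches of this series.
 */
static int lpass_cpu_init_rxtx_regmap(struct platform_device *pdev,
                                      struct lpass_data *drvdata,
                                      struct lpass_variant *v)
{
        void __iomem *base;
        struct regmap *map;

        base = devm_platform_ioremap_resource_byname(pdev, "lpass-rxtx-lpaif");
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* Bound the map so the flat register cache can be sized up front. */
        lpass_rxtx_regmap_config.max_register =
                LPAIF_CDC_RXTX_WRDMAPER_REG(v, v->rxtx_wrdma_channel_start +
                                            v->rxtx_wrdma_channels - 1,
                                            LPASS_CDC_DMA_TX3);

        map = devm_regmap_init_mmio(&pdev->dev, base, &lpass_rxtx_regmap_config);
        if (IS_ERR(map))
                return PTR_ERR(map);

        drvdata->rxtx_lpaif_map = map;
        return 0;
}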
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 81818f0..a5a46bc 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -28,6 +28,8 @@
 #define LPASS_CPU_I2S_SD2_3_MASK	GENMASK(3, 2)
 #define LPASS_CPU_I2S_SD0_1_2_MASK	GENMASK(2, 0)
 #define LPASS_CPU_I2S_SD0_1_2_3_MASK	GENMASK(3, 0)
+#define LPASS_REG_READ 1
+#define LPASS_REG_WRITE 0
 
 /*
  * Channel maps for Quad channel playbacks on MI2S Secondary
@@ -798,6 +800,189 @@ static struct regmap_config lpass_hdmi_regmap_config = {
 	.cache_type = REGCACHE_FLAT,
 };
 
+static bool __lpass_rxtx_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
+{
+	struct lpass_data *drvdata = dev_get_drvdata(dev);
+	struct lpass_variant *v = drvdata->variant;
+	int i;
+
+	for (i = 0; i < v->rxtx_irq_ports; ++i) {
+		if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
+			return true;
+		if (reg == LPAIF_RXTX_IRQEN_REG(v, i))
+			return true;
+		if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
+			return true;
+	}
+
+	for (i = 0; i < v->rxtx_rdma_channels; ++i) {
+		if (reg == LPAIF_CDC_RXTX_RDMACTL_REG(v, i, LPASS_CDC_DMA_RX0))
+			return true;
+		if (reg == LPAIF_CDC_RXTX_RDMABASE_REG(v, i, LPASS_CDC_DMA_RX0))
+			return true;
+		if (reg == LPAIF_CDC_RXTX_RDMABUFF_REG(v, i, LPASS_CDC_DMA_RX0))
+			return true;
+		if (rw == LPASS_REG_READ) {
+			if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
+				return true;
+		}
+		if (reg == LPAIF_CDC_RXTX_RDMAPER_REG(v, i, LPASS_CDC_DMA_RX0))
+			return true;
+		if (reg == LPAIF_CDC_RXTX_RDMA_INTF_REG(v, i, LPASS_CDC_DMA_RX0))
+			return true;
+	}
+
+	for (i = 0; i < v->rxtx_wrdma_channels; ++i) {
+		if (reg == LPAIF_CDC_RXTX_WRDMACTL_REG(v, i + v->rxtx_wrdma_channel_start,
+							LPASS_CDC_DMA_TX3))
+			return true;
+		if (reg == LPAIF_CDC_RXTX_WRDMABASE_REG(v, i + v->rxtx_wrdma_channel_start,
+							LPASS_CDC_DMA_TX3))
+			return true;
+		if (reg == LPAIF_CDC_RXTX_WRDMABUFF_REG(v, i + v->rxtx_wrdma_channel_start,
+							LPASS_CDC_DMA_TX3))
+			return true;
+		if (rw == LPASS_REG_READ) {
+			if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
+				return true;
+		}
+		if (reg == LPAIF_CDC_RXTX_WRDMAPER_REG(v, i + v->rxtx_wrdma_channel_start,
+							LPASS_CDC_DMA_TX3))
+			return true;
+		if (reg == LPAIF_CDC_RXTX_WRDMA_INTF_REG(v, i + v->rxtx_wrdma_channel_start,
+							LPASS_CDC_DMA_TX3))
+			return true;
+	}
+	return false;
+}
+
+static bool lpass_rxtx_regmap_writeable(struct device *dev, unsigned int reg)
+{
+	return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_WRITE);
+}
+
+static bool lpass_rxtx_regmap_readable(struct device *dev, unsigned int reg)
+{
+	return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_READ);
+}
+
+static bool lpass_rxtx_regmap_volatile(struct device *dev, unsigned int reg)
+{
+	struct lpass_data *drvdata = dev_get_drvdata(dev);
+	struct lpass_variant *v = drvdata->variant;
+	int i;
+
+	for (i = 0; i < v->rxtx_irq_ports; ++i) {
+		if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
+			return true;
+		if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
+			return true;
+	}
+
+	for (i = 0; i < v->rxtx_rdma_channels; ++i)
+		if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
+			return true;
+
+	for (i = 0; i < v->rxtx_wrdma_channels; ++i)
+		if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start,
+							LPASS_CDC_DMA_TX3))
+			return true;
+
+	return false;
+}
+
+static bool __lpass_va_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
+{
+	struct lpass_data *drvdata = dev_get_drvdata(dev);
+	struct lpass_variant *v = drvdata->variant;
+	int i;
+
+	for (i = 0; i < v->va_irq_ports; ++i) {
+		if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
+			return true;
+		if (reg == LPAIF_VA_IRQEN_REG(v, i))
+			return true;
+		if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
+			return true;
+	}
+
+	for (i = 0; i < v->va_wrdma_channels; ++i) {
+		if (reg == LPAIF_CDC_VA_WRDMACTL_REG(v, i + v->va_wrdma_channel_start,
+							LPASS_CDC_DMA_VA_TX0))
+			return true;
+		if (reg == LPAIF_CDC_VA_WRDMABASE_REG(v, i + v->va_wrdma_channel_start,
+							LPASS_CDC_DMA_VA_TX0))
+			return true;
+		if (reg == LPAIF_CDC_VA_WRDMABUFF_REG(v, i + v->va_wrdma_channel_start,
+							LPASS_CDC_DMA_VA_TX0))
+			return true;
+		if (rw == LPASS_REG_READ) {
+			if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
+							LPASS_CDC_DMA_VA_TX0))
+				return true;
+		}
+		if (reg == LPAIF_CDC_VA_WRDMAPER_REG(v, i + v->va_wrdma_channel_start,
+							LPASS_CDC_DMA_VA_TX0))
+			return true;
+		if (reg == LPAIF_CDC_VA_WRDMA_INTF_REG(v, i + v->va_wrdma_channel_start,
+							LPASS_CDC_DMA_VA_TX0))
+			return true;
+	}
+	return false;
+}
+
+static bool lpass_va_regmap_writeable(struct device *dev, unsigned int reg)
+{
+	return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_WRITE);
+}
+
+static bool lpass_va_regmap_readable(struct device *dev, unsigned int reg)
+{
+	return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_READ);
+}
+
+static bool lpass_va_regmap_volatile(struct device *dev, unsigned int reg)
+{
+	struct lpass_data *drvdata = dev_get_drvdata(dev);
+	struct lpass_variant *v = drvdata->variant;
+	int i;
+
+	for (i = 0; i < v->va_irq_ports; ++i) {
+		if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
+			return true;
+		if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
+			return true;
+	}
+
+	for (i = 0; i < v->va_wrdma_channels; ++i) {
+		if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
+							LPASS_CDC_DMA_VA_TX0))
+			return true;
+	}
+
+	return false;
+}
+
+static struct regmap_config lpass_rxtx_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.writeable_reg = lpass_rxtx_regmap_writeable,
+	.readable_reg = lpass_rxtx_regmap_readable,
+	.volatile_reg = lpass_rxtx_regmap_volatile,
+	.cache_type = REGCACHE_FLAT,
+};
+
+static struct regmap_config lpass_va_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.writeable_reg = lpass_va_regmap_writeable,
+	.readable_reg = lpass_va_regmap_readable,
+	.volatile_reg = lpass_va_regmap_volatile,
+	.cache_type = REGCACHE_FLAT,
+};
+
 static unsigned int of_lpass_cpu_parse_sd_lines(struct device *dev,
 						struct device_node *node,
 						const char *name)
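A note on the volatile_reg hooks in the tables above: with .cache_type = REGCACHE_FLAT, regmap may satisfy reads of non-volatile registers from its cache, so only the IRQ status/clear registers and the *DMACURR position registers, which the hardware updates on its own, are forced to hit the hardware on every read. A minimal, hypothetical consumer sketch of that behaviour follows; the map pointer and the channel/interface arguments are placeholders, not from this patch.

/*
 * Hypothetical sketch: shows the cache behaviour implied by the volatile_reg
 * callbacks above.  "map" is assumed to be a regmap built from
 * lpass_rxtx_regmap_config; channel 0 is used as a placeholder.
 */
static int lpass_rxtx_snapshot(struct regmap *map, struct lpass_variant *v)
{
        unsigned int ctl, curr;
        int ret;

        /* RDMACTL is not volatile: with REGCACHE_FLAT this read may be
         * served from the register cache instead of the hardware. */
        ret = regmap_read(map, LPAIF_CDC_RXTX_RDMACTL_REG(v, 0, LPASS_CDC_DMA_RX0), &ctl);
        if (ret)
                return ret;

        /* RDMACURR is volatile: regmap always reads the hardware, so this
         * reflects the DMA engine's current buffer position. */
        ret = regmap_read(map, LPAIF_CDC_RXTX_RDMACURR_REG(v, 0, LPASS_CDC_DMA_RX0), &curr);
        if (ret)
                return ret;

        dev_dbg(regmap_get_device(map), "ctl %#x curr %#x\n", ctl, curr);
        return 0;
}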