Message ID | 1595063974-24228-4-git-send-email-amittomer25@gmail.com (mailing list archive)
State      | New, archived
Series     | Add MMC and DMA support for Actions S700
Hi Amit,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on robh/for-next]
[also build test WARNING on clk/clk-next pza/reset/next linus/master v5.8-rc5 next-20200717]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:      https://github.com/0day-ci/linux/commits/Amit-Singh-Tomar/Add-MMC-and-DMA-support-for-Actions-S700/20200718-172310
base:     https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
config:   arm64-randconfig-r014-20200717 (attached as .config)
compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project ed6b578040a85977026c93bf4188f996148f3218)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm64 cross compiling tool for clang build
        # apt-get install binutils-aarch64-linux-gnu
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm64

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> drivers/dma/owl-dma.c:1117:14: warning: cast to smaller integer type 'enum owl_dma_id' from 'const void *' [-Wvoid-pointer-to-enum-cast]
           od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
                       ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   1 warning generated.

vim +1117 drivers/dma/owl-dma.c

  1087
  1088  static int owl_dma_probe(struct platform_device *pdev)
  1089  {
  1090          struct device_node *np = pdev->dev.of_node;
  1091          struct owl_dma *od;
  1092          int ret, i, nr_channels, nr_requests;
  1093
  1094          od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
  1095          if (!od)
  1096                  return -ENOMEM;
  1097
  1098          od->base = devm_platform_ioremap_resource(pdev, 0);
  1099          if (IS_ERR(od->base))
  1100                  return PTR_ERR(od->base);
  1101
  1102          ret = of_property_read_u32(np, "dma-channels", &nr_channels);
  1103          if (ret) {
  1104                  dev_err(&pdev->dev, "can't get dma-channels\n");
  1105                  return ret;
  1106          }
  1107
  1108          ret = of_property_read_u32(np, "dma-requests", &nr_requests);
  1109          if (ret) {
  1110                  dev_err(&pdev->dev, "can't get dma-requests\n");
  1111                  return ret;
  1112          }
  1113
  1114          dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
  1115                   nr_channels, nr_requests);
  1116
> 1117          od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
  1118
  1119          od->nr_pchans = nr_channels;
  1120          od->nr_vchans = nr_requests;
  1121
  1122          pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
  1123
  1124          platform_set_drvdata(pdev, od);
  1125          spin_lock_init(&od->lock);
  1126
  1127          dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
  1128          dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
  1129          dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
  1130
  1131          od->dma.dev = &pdev->dev;
  1132          od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
  1133          od->dma.device_tx_status = owl_dma_tx_status;
  1134          od->dma.device_issue_pending = owl_dma_issue_pending;
  1135          od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
  1136          od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
  1137          od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
  1138          od->dma.device_config = owl_dma_config;
  1139          od->dma.device_pause = owl_dma_pause;
  1140          od->dma.device_resume = owl_dma_resume;
  1141          od->dma.device_terminate_all = owl_dma_terminate_all;
  1142          od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
  1143          od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
  1144          od->dma.directions = BIT(DMA_MEM_TO_MEM);
  1145          od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
  1146
  1147          INIT_LIST_HEAD(&od->dma.channels);
  1148
  1149          od->clk = devm_clk_get(&pdev->dev, NULL);
  1150          if (IS_ERR(od->clk)) {
  1151                  dev_err(&pdev->dev, "unable to get clock\n");
  1152                  return PTR_ERR(od->clk);
  1153          }
  1154
  1155          /*
  1156           * Eventhough the DMA controller is capable of generating 4
  1157           * IRQ's for DMA priority feature, we only use 1 IRQ for
  1158           * simplification.
  1159           */
  1160          od->irq = platform_get_irq(pdev, 0);
  1161          ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
  1162                                 dev_name(&pdev->dev), od);
  1163          if (ret) {
  1164                  dev_err(&pdev->dev, "unable to request IRQ\n");
  1165                  return ret;
  1166          }
  1167
  1168          /* Init physical channel */
  1169          od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
  1170                                    sizeof(struct owl_dma_pchan), GFP_KERNEL);
  1171          if (!od->pchans)
  1172                  return -ENOMEM;
  1173
  1174          for (i = 0; i < od->nr_pchans; i++) {
  1175                  struct owl_dma_pchan *pchan = &od->pchans[i];
  1176
  1177                  pchan->id = i;
  1178                  pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
  1179          }
  1180
  1181          /* Init virtual channel */
  1182          od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
  1183                                    sizeof(struct owl_dma_vchan), GFP_KERNEL);
  1184          if (!od->vchans)
  1185                  return -ENOMEM;
  1186
  1187          for (i = 0; i < od->nr_vchans; i++) {
  1188                  struct owl_dma_vchan *vchan = &od->vchans[i];
  1189
  1190                  vchan->vc.desc_free = owl_dma_desc_free;
  1191                  vchan_init(&vchan->vc, &od->dma);
  1192          }
  1193
  1194          /* Create a pool of consistent memory blocks for hardware descriptors */
  1195          od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
  1196                                         sizeof(struct owl_dma_lli),
  1197                                         __alignof__(struct owl_dma_lli),
  1198                                         0);
  1199          if (!od->lli_pool) {
  1200                  dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
  1201                  return -ENOMEM;
  1202          }
  1203
  1204          clk_prepare_enable(od->clk);
  1205
  1206          ret = dma_async_device_register(&od->dma);
  1207          if (ret) {
  1208                  dev_err(&pdev->dev, "failed to register DMA engine device\n");
  1209                  goto err_pool_free;
  1210          }
  1211
  1212          /* Device-tree DMA controller registration */
  1213          ret = of_dma_controller_register(pdev->dev.of_node,
  1214                                           owl_dma_of_xlate, od);
  1215          if (ret) {
  1216                  dev_err(&pdev->dev, "of_dma_controller_register failed\n");
  1217                  goto err_dma_unregister;
  1218          }
  1219
  1220          return 0;
  1221
  1222  err_dma_unregister:
  1223          dma_async_device_unregister(&od->dma);
  1224  err_pool_free:
  1225          clk_disable_unprepare(od->clk);
  1226          dma_pool_destroy(od->lli_pool);
  1227
  1228          return ret;
  1229  }
  1230

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
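For context, the warning fires because of_device_get_match_data() returns a const void *, and casting that pointer straight to the 32-bit enum on arm64 narrows a 64-bit pointer. One common way to keep clang quiet is to go through uintptr_t first, which is the cast the v4 changelog notes was removed. The snippet below is a minimal sketch of that pattern, not necessarily the fix the author will choose; owl_dma_get_devid() is a hypothetical helper name, and enum owl_dma_id comes from the patch.

#include <linux/of_device.h>
#include <linux/platform_device.h>

/*
 * Sketch only: narrow the const void * match data through the
 * pointer-sized uintptr_t before converting to the enum, so clang's
 * -Wvoid-pointer-to-enum-cast does not trigger.
 */
static enum owl_dma_id owl_dma_get_devid(struct platform_device *pdev)
{
	return (enum owl_dma_id)(uintptr_t)of_device_get_match_data(&pdev->dev);
}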
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 948d1bead860..331c8d8b10a3 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -149,6 +149,11 @@ enum owl_dmadesc_offsets {
 	OWL_DMADESC_SIZE
 };
 
+enum owl_dma_id {
+	S900_DMA,
+	S700_DMA,
+};
+
 /**
  * struct owl_dma_lli - Link list for dma transfer
  * @hw: hardware link list
@@ -213,6 +218,7 @@ struct owl_dma_vchan {
  * @pchans: array of data for the physical channels
  * @nr_vchans: the number of physical channels
  * @vchans: array of data for the physical channels
+ * @devid: device id based on OWL SoC
  */
 struct owl_dma {
 	struct dma_device	dma;
@@ -227,6 +233,7 @@ struct owl_dma {
 	unsigned int		nr_vchans;
 	struct owl_dma_vchan	*vchans;
+	enum owl_dma_id		devid;
 };
 
 static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
@@ -316,6 +323,10 @@ static inline u32 llc_hw_ctrlb(u32 int_ctl)
 {
 	u32 ctl;
 
+	/*
+	 * Irrespective of the SoC, ctrlb value starts filling from
+	 * bit 18.
+	 */
 	ctl = BIT_FIELD(int_ctl, 7, 0, 18);
 
 	return ctl;
@@ -372,6 +383,7 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 				  struct dma_slave_config *sconfig,
 				  bool is_cyclic)
 {
+	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
 	u32 mode, ctrlb;
 
 	mode = OWL_DMA_MODE_PW(0);
@@ -427,14 +439,26 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 	lli->hw[OWL_DMADESC_DADDR] = dst;
 	lli->hw[OWL_DMADESC_SRC_STRIDE] = 0;
 	lli->hw[OWL_DMADESC_DST_STRIDE] = 0;
-	/*
-	 * Word starts from offset 0xC is shared between frame length
-	 * (max frame length is 1MB) and frame count, where first 20
-	 * bits are for frame length and rest of 12 bits are for frame
-	 * count.
-	 */
-	lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
-	lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
+
+	if (od->devid == S700_DMA) {
+		/* Max frame length is 1MB */
+		lli->hw[OWL_DMADESC_FLEN] = len;
+		/*
+		 * On S700, word starts from offset 0x1C is shared between
+		 * frame count and ctrlb, where first 12 bits are for frame
+		 * count and rest of 20 bits are for ctrlb.
+		 */
+		lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb;
+	} else {
+		/*
+		 * On S900, word starts from offset 0xC is shared between
+		 * frame length (max frame length is 1MB) and frame count,
+		 * where first 20 bits are for frame length and rest of
+		 * 12 bits are for frame count.
+		 */
+		lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
+		lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
+	}
 
 	return 0;
 }
@@ -596,7 +620,7 @@ static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
 
 		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);
 
-		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
+		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
 			dev_dbg(od->dma.dev,
 				"global and channel IRQ pending match err\n");
 
@@ -1054,6 +1078,13 @@ static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
 	return chan;
 }
 
+static const struct of_device_id owl_dma_match[] = {
+	{ .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
+	{ .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, owl_dma_match);
+
 static int owl_dma_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
@@ -1083,6 +1114,8 @@ static int owl_dma_probe(struct platform_device *pdev)
 	dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
 		 nr_channels, nr_requests);
 
+	od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
+
 	od->nr_pchans = nr_channels;
 	od->nr_vchans = nr_requests;
 
@@ -1215,12 +1248,6 @@ static int owl_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct of_device_id owl_dma_match[] = {
-	{ .compatible = "actions,s900-dma", },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, owl_dma_match);
-
 static struct platform_driver owl_dma_driver = {
 	.probe	= owl_dma_probe,
 	.remove	= owl_dma_remove,
The DMA controller present on the S700 SoC is compatible with the one on the S900 (most of the registers are the same), but it has a different DMA descriptor layout in which the "fcnt" and "ctrlb" fields use different encodings. For instance, on the S900 "fcnt" starts at offset 0x0c and uses the upper 12 bits, whereas on the S700 it starts at offset 0x1c and uses the lower 12 bits.

This commit adds support for the DMA controller present on the S700.

Signed-off-by: Amit Singh Tomar <amittomer25@gmail.com>
---
Changes since v5:
	* No change.
Changes since v4:
	* Reordered it from 02/10 to 03/10.
	* Used of_device_get_match_data() instead of of_match_device().
	* Removed the uintptr_t used for typecast.
Changes since v3:
	* Added description for enum fields.
	* Restored the old comment.
	* Added a detailed comment about the way FLEN and FCNT values are filled.
Changes since v2:
	* No change.
Changes since v1:
	* Defined macro for frame count value.
	* Introduced llc_hw_flen() from patch 2/9.
	* Removed the unnecessary line break.
Changes since rfc:
	* No change.
---
 drivers/dma/owl-dma.c | 57 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 42 insertions(+), 15 deletions(-)
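The encoding difference is easiest to see side by side. The following stand-alone sketch (plain user-space C, not driver code) packs the same frame length, frame count, and ctrlb values the way the patch does for each SoC; pack_s900() and pack_s700() are hypothetical names used only for illustration, and FCNT_VAL is assumed to be 1 (one frame per descriptor) as in the patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed value from the patch: one frame per descriptor. */
#define FCNT_VAL 0x1u

/* S900: word at 0x0c = frame length (bits 0-19) | frame count (from bit 20);
 * word at 0x1c = ctrlb only. */
static void pack_s900(uint32_t len, uint32_t ctrlb,
		      uint32_t *flen_word, uint32_t *ctrlb_word)
{
	*flen_word  = len | FCNT_VAL << 20;
	*ctrlb_word = ctrlb;
}

/* S700: word at 0x0c = frame length only;
 * word at 0x1c = frame count (low bits) | ctrlb. */
static void pack_s700(uint32_t len, uint32_t ctrlb,
		      uint32_t *flen_word, uint32_t *ctrlb_word)
{
	*flen_word  = len;
	*ctrlb_word = FCNT_VAL | ctrlb;
}

int main(void)
{
	/* Example values: 64 KiB frame, ctrlb with only bit 18 set. */
	uint32_t len = 0x10000, ctrlb = 1u << 18;
	uint32_t flen, cb;

	pack_s900(len, ctrlb, &flen, &cb);
	printf("S900: FLEN=0x%08x CTRLB=0x%08x\n", flen, cb);

	pack_s700(len, ctrlb, &flen, &cb);
	printf("S700: FLEN=0x%08x CTRLB=0x%08x\n", flen, cb);

	return 0;
}

On the S900 the frame count shares a word with the frame length, while on the S700 it shares a word with ctrlb, which is why owl_dma_cfg_lli() in the patch branches on od->devid when filling the descriptor.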