
[v3,02/10] dmaengine: Actions: Add support for S700 DMA engine

Message ID 1591119192-18538-3-git-send-email-amittomer25@gmail.com (mailing list archive)
State New, archived
Series Add MMC and DMA support for Actions S700

Commit Message

Amit Tomer June 2, 2020, 5:33 p.m. UTC
The DMA controller present on the S700 SoC is compatible with the one on the
S900 (most of the registers are the same), but it has a different DMA
descriptor layout in which the "fcnt" and "ctrlb" fields use a different encoding.

For instance, on the S900 "fcnt" starts at offset 0x0c and uses the upper 12
bits, whereas on the S700 it starts at offset 0x1c and uses the lower 12 bits.
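
In code terms, this is the split made in owl_dma_cfg_lli() by the hunk further
below; a condensed view of the two descriptor layouts, using the field and
macro names from this driver:

	if (od->devid == S700_DMA) {
		/* S700: flen alone in the word at 0x0c, fcnt in the low bits of 0x1c */
		lli->hw[OWL_DMADESC_FLEN] = len;
		lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb;
	} else {
		/* S900: fcnt shares the word at 0x0c, in its upper 12 bits */
		lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
		lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
	}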

This commit adds support for the DMA controller present on the S700.

Signed-off-by: Amit Singh Tomar <amittomer25@gmail.com>
---
Changes since v2:
	* No changes.
Changes since v1:
        * Moved llc_hw_flen() to patch 1/9.
        * Provided comments about the DMA descriptor differences
          between S700 and S900.
Changes since RFC:
        * Added an accessor function to get the frame length (sketched below).
        * Removed the SoC-specific check from the IRQ routine.
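
A minimal sketch of what such a frame-length accessor could look like (the
real helper was moved to patch 1/9 of this series; the 20-bit mask is an
assumption based on how "flen" is packed into the FLEN word):

	static inline u32 llc_hw_flen(struct owl_dma_lli *lli)
	{
		/* On both S900 and S700 the frame length sits in the low 20 bits */
		return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0);
	}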
---
 drivers/dma/owl-dma.c | 46 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 37 insertions(+), 9 deletions(-)

Comments

kernel test robot June 6, 2020, 3:17 a.m. UTC | #1
Hi Amit,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on robh/for-next]
[also build test WARNING on clk/clk-next pza/reset/next linus/master v5.7 next-20200605]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest using the '--base' option to specify the
base tree in git format-patch; please see https://stackoverflow.com/a/37406982]

url:    https://github.com/0day-ci/linux/commits/Amit-Singh-Tomar/Add-MMC-and-DMA-support-for-Actions-S700/20200603-013935
base:   https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
config: arm64-randconfig-r024-20200605 (attached as .config)
compiler: clang version 11.0.0 (https://github.com/llvm/llvm-project 6dd738e2f0609f7d3313b574a1d471263d2d3ba1)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm64 cross compiling tool for clang build
        # apt-get install binutils-aarch64-linux-gnu
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm64 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>, old ones prefixed by <<):

>> drivers/dma/owl-dma.c:1102:14: warning: cast to smaller integer type 'enum owl_dma_id' from 'const void *' [-Wvoid-pointer-to-enum-cast]
           od->devid = (enum owl_dma_id)of_id->data;
                       ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
1 warning generated.
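
A common way to address this clang warning is to cast through an unsigned
integer type first (or to fetch the data with device_get_match_data()); a
possible fix, not part of the posted patch, could be:

	/* Cast via uintptr_t so narrowing the pointer to an enum is explicit */
	od->devid = (enum owl_dma_id)(uintptr_t)of_id->data;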

vim +1102 drivers/dma/owl-dma.c

  1070	
  1071	static int owl_dma_probe(struct platform_device *pdev)
  1072	{
  1073		struct device_node *np = pdev->dev.of_node;
  1074		struct owl_dma *od;
  1075		int ret, i, nr_channels, nr_requests;
  1076		const struct of_device_id *of_id =
  1077					of_match_device(owl_dma_match, &pdev->dev);
  1078	
  1079		od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
  1080		if (!od)
  1081			return -ENOMEM;
  1082	
  1083		od->base = devm_platform_ioremap_resource(pdev, 0);
  1084		if (IS_ERR(od->base))
  1085			return PTR_ERR(od->base);
  1086	
  1087		ret = of_property_read_u32(np, "dma-channels", &nr_channels);
  1088		if (ret) {
  1089			dev_err(&pdev->dev, "can't get dma-channels\n");
  1090			return ret;
  1091		}
  1092	
  1093		ret = of_property_read_u32(np, "dma-requests", &nr_requests);
  1094		if (ret) {
  1095			dev_err(&pdev->dev, "can't get dma-requests\n");
  1096			return ret;
  1097		}
  1098	
  1099		dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
  1100			 nr_channels, nr_requests);
  1101	
> 1102		od->devid = (enum owl_dma_id)of_id->data;
  1103	
  1104		od->nr_pchans = nr_channels;
  1105		od->nr_vchans = nr_requests;
  1106	
  1107		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
  1108	
  1109		platform_set_drvdata(pdev, od);
  1110		spin_lock_init(&od->lock);
  1111	
  1112		dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
  1113		dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
  1114		dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
  1115	
  1116		od->dma.dev = &pdev->dev;
  1117		od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
  1118		od->dma.device_tx_status = owl_dma_tx_status;
  1119		od->dma.device_issue_pending = owl_dma_issue_pending;
  1120		od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
  1121		od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
  1122		od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
  1123		od->dma.device_config = owl_dma_config;
  1124		od->dma.device_pause = owl_dma_pause;
  1125		od->dma.device_resume = owl_dma_resume;
  1126		od->dma.device_terminate_all = owl_dma_terminate_all;
  1127		od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
  1128		od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
  1129		od->dma.directions = BIT(DMA_MEM_TO_MEM);
  1130		od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
  1131	
  1132		INIT_LIST_HEAD(&od->dma.channels);
  1133	
  1134		od->clk = devm_clk_get(&pdev->dev, NULL);
  1135		if (IS_ERR(od->clk)) {
  1136			dev_err(&pdev->dev, "unable to get clock\n");
  1137			return PTR_ERR(od->clk);
  1138		}
  1139	
  1140		/*
  1141		 * Eventhough the DMA controller is capable of generating 4
  1142		 * IRQ's for DMA priority feature, we only use 1 IRQ for
  1143		 * simplification.
  1144		 */
  1145		od->irq = platform_get_irq(pdev, 0);
  1146		ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
  1147				       dev_name(&pdev->dev), od);
  1148		if (ret) {
  1149			dev_err(&pdev->dev, "unable to request IRQ\n");
  1150			return ret;
  1151		}
  1152	
  1153		/* Init physical channel */
  1154		od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
  1155					  sizeof(struct owl_dma_pchan), GFP_KERNEL);
  1156		if (!od->pchans)
  1157			return -ENOMEM;
  1158	
  1159		for (i = 0; i < od->nr_pchans; i++) {
  1160			struct owl_dma_pchan *pchan = &od->pchans[i];
  1161	
  1162			pchan->id = i;
  1163			pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
  1164		}
  1165	
  1166		/* Init virtual channel */
  1167		od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
  1168					  sizeof(struct owl_dma_vchan), GFP_KERNEL);
  1169		if (!od->vchans)
  1170			return -ENOMEM;
  1171	
  1172		for (i = 0; i < od->nr_vchans; i++) {
  1173			struct owl_dma_vchan *vchan = &od->vchans[i];
  1174	
  1175			vchan->vc.desc_free = owl_dma_desc_free;
  1176			vchan_init(&vchan->vc, &od->dma);
  1177		}
  1178	
  1179		/* Create a pool of consistent memory blocks for hardware descriptors */
  1180		od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
  1181					       sizeof(struct owl_dma_lli),
  1182					       __alignof__(struct owl_dma_lli),
  1183					       0);
  1184		if (!od->lli_pool) {
  1185			dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
  1186			return -ENOMEM;
  1187		}
  1188	
  1189		clk_prepare_enable(od->clk);
  1190	
  1191		ret = dma_async_device_register(&od->dma);
  1192		if (ret) {
  1193			dev_err(&pdev->dev, "failed to register DMA engine device\n");
  1194			goto err_pool_free;
  1195		}
  1196	
  1197		/* Device-tree DMA controller registration */
  1198		ret = of_dma_controller_register(pdev->dev.of_node,
  1199						 owl_dma_of_xlate, od);
  1200		if (ret) {
  1201			dev_err(&pdev->dev, "of_dma_controller_register failed\n");
  1202			goto err_dma_unregister;
  1203		}
  1204	
  1205		return 0;
  1206	
  1207	err_dma_unregister:
  1208		dma_async_device_unregister(&od->dma);
  1209	err_pool_free:
  1210		clk_disable_unprepare(od->clk);
  1211		dma_pool_destroy(od->lli_pool);
  1212	
  1213		return ret;
  1214	}
  1215	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

Patch

diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index dd85c205454e..17d2fc2d568b 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -137,6 +137,11 @@  enum owl_dmadesc_offsets {
 	OWL_DMADESC_SIZE
 };
 
+enum owl_dma_id {
+	S900_DMA,
+	S700_DMA,
+};
+
 /**
  * struct owl_dma_lli - Link list for dma transfer
  * @hw: hardware link list
@@ -203,6 +208,7 @@  struct owl_dma_vchan {
  * @pchans: array of data for the physical channels
  * @nr_vchans: the number of physical channels
  * @vchans: array of data for the physical channels
+ * @devid: device id based on OWL SoC
  */
 struct owl_dma {
 	struct dma_device	dma;
@@ -217,6 +223,7 @@  struct owl_dma {
 
 	unsigned int		nr_vchans;
 	struct owl_dma_vchan	*vchans;
+	enum owl_dma_id		devid;
 };
 
 static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
@@ -306,6 +313,11 @@  static inline u32 llc_hw_ctrlb(u32 int_ctl)
 {
 	u32 ctl;
 
+	/*
+	 * Irrespective of the SoC, the ctrlb value starts at bit 18.
+	 */
+
 	ctl = BIT_FIELD(int_ctl, 7, 0, 18);
 
 	return ctl;
@@ -362,6 +374,7 @@  static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 				  struct dma_slave_config *sconfig,
 				  bool is_cyclic)
 {
+	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
 	u32 mode, ctrlb;
 
 	mode = OWL_DMA_MODE_PW(0);
@@ -417,8 +430,18 @@  static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 	lli->hw[OWL_DMADESC_DADDR] = dst;
 	lli->hw[OWL_DMADESC_SRC_STRIDE] = 0;
 	lli->hw[OWL_DMADESC_DST_STRIDE] = 0;
-	lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
-	lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
+
+	/*
+	 * S700 puts flen and fcnt at offsets 0x0c and 0x1c respectively,
+	 * whereas S900 packs both into the word at offset 0x0c.
+	 */
+	if (od->devid == S700_DMA) {
+		lli->hw[OWL_DMADESC_FLEN] = len;
+		lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb;
+	} else {
+		lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
+		lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
+	}
 
 	return 0;
 }
@@ -580,7 +603,7 @@  static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
 
 		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);
 
-		if (chan_irq_pending && !(global_irq_pending & BIT(i)))	{
+		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
 			dev_dbg(od->dma.dev,
 				"global and channel IRQ pending match err\n");
 
@@ -1038,11 +1061,20 @@  static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
 	return chan;
 }
 
+static const struct of_device_id owl_dma_match[] = {
+	{ .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
+	{ .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, owl_dma_match);
+
 static int owl_dma_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct owl_dma *od;
 	int ret, i, nr_channels, nr_requests;
+	const struct of_device_id *of_id =
+				of_match_device(owl_dma_match, &pdev->dev);
 
 	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
 	if (!od)
@@ -1067,6 +1099,8 @@  static int owl_dma_probe(struct platform_device *pdev)
 	dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
 		 nr_channels, nr_requests);
 
+	od->devid = (enum owl_dma_id)of_id->data;
+
 	od->nr_pchans = nr_channels;
 	od->nr_vchans = nr_requests;
 
@@ -1199,12 +1233,6 @@  static int owl_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct of_device_id owl_dma_match[] = {
-	{ .compatible = "actions,s900-dma", },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, owl_dma_match);
-
 static struct platform_driver owl_dma_driver = {
 	.probe	= owl_dma_probe,
 	.remove	= owl_dma_remove,