
[v2,2/2] ufs: mcq: Convert MCQ_CFG_n to an inline function

Message ID 20240519221457.772346-3-minwoo.im@samsung.com (mailing list archive)
State Accepted
Series ufs: mcq: Fix and cleanup unsafe macros

Commit Message

Minwoo Im May 19, 2024, 10:14 p.m. UTC
Unlike the previous patch, this patch does not fix any issue. However,
inline functions are preferred over macros, so convert the MCQ_CFG_n
macro in ufs-mcq to an inline function as a follow-up to the previous
patch.

Signed-off-by: Minwoo Im <minwoo.im@samsung.com>
---
 drivers/ufs/core/ufs-mcq.c | 25 ++++++++++---------------
 include/ufs/ufshcd.h       |  7 +++++++
 2 files changed, 17 insertions(+), 15 deletions(-)
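
For readers less familiar with the idiom, here is a minimal stand-alone
sketch (names and values are illustrative, not taken from the patch) of why
the macro-to-inline conversion counts as a cleanup: the inline function
performs the same offset arithmetic, but the compiler type-checks its
arguments rather than substituting them textually.

	/* Illustrative only: the general macro-to-inline pattern this patch applies. */
	#include <stdio.h>

	#define QCFG_SIZE	0x40

	/* Macro form: textual substitution, no type checking of 'r' or 'i'. */
	#define CFG_N_MACRO(r, i)	((r) + QCFG_SIZE * (i))

	/* Inline form: identical arithmetic, but with typed parameters. */
	static inline unsigned int cfg_n_inline(unsigned int reg, int idx)
	{
		return reg + QCFG_SIZE * idx;
	}

	int main(void)
	{
		/* Both compute the queue-3 copy of a register at base offset 0x100. */
		printf("macro:  0x%x\n", CFG_N_MACRO(0x100, 3));
		printf("inline: 0x%x\n", cfg_n_inline(0x100, 3));
		return 0;
	}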

Comments

Bart Van Assche May 20, 2024, 6:17 p.m. UTC | #1
On 5/19/24 15:14, Minwoo Im wrote:
> Unlike the previous patch, this patch does not fix any issue. However,
> inline functions are preferred over macros, so convert the MCQ_CFG_n
> macro in ufs-mcq to an inline function as a follow-up to the previous
> patch.

Reviewed-by: Bart Van Assche <bvanassche@acm.org>

Patch

diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index b93ec147641c..d15817a3900b 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -25,7 +25,6 @@ 
 #define QUEUE_ID_OFFSET 16
 
 #define MCQ_CFG_MAC_MASK	GENMASK(16, 8)
-#define MCQ_QCFG_SIZE		0x40
 #define MCQ_ENTRY_SIZE_IN_DWORD	8
 #define CQE_UCD_BA GENMASK_ULL(63, 7)
 
@@ -228,10 +227,6 @@  int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
 	return 0;
 }
 
-
-/* Operation and runtime registers configuration */
-#define MCQ_CFG_n(r, i)	((r) + MCQ_QCFG_SIZE * (i))
-
 static void __iomem *mcq_opr_base(struct ufs_hba *hba,
 					 enum ufshcd_mcq_opr n, int i)
 {
@@ -336,29 +331,29 @@  void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 
 		/* Submission Queue Lower Base Address */
 		ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
-			      MCQ_CFG_n(REG_SQLBA, i));
+			      ufshcd_mcq_cfg_offset(REG_SQLBA, i));
 		/* Submission Queue Upper Base Address */
 		ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
-			      MCQ_CFG_n(REG_SQUBA, i));
+			      ufshcd_mcq_cfg_offset(REG_SQUBA, i));
 		/* Submission Queue Doorbell Address Offset */
 		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
-			      MCQ_CFG_n(REG_SQDAO, i));
+			      ufshcd_mcq_cfg_offset(REG_SQDAO, i));
 		/* Submission Queue Interrupt Status Address Offset */
 		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
-			      MCQ_CFG_n(REG_SQISAO, i));
+			      ufshcd_mcq_cfg_offset(REG_SQISAO, i));
 
 		/* Completion Queue Lower Base Address */
 		ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
-			      MCQ_CFG_n(REG_CQLBA, i));
+			      ufshcd_mcq_cfg_offset(REG_CQLBA, i));
 		/* Completion Queue Upper Base Address */
 		ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
-			      MCQ_CFG_n(REG_CQUBA, i));
+			      ufshcd_mcq_cfg_offset(REG_CQUBA, i));
 		/* Completion Queue Doorbell Address Offset */
 		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
-			      MCQ_CFG_n(REG_CQDAO, i));
+			      ufshcd_mcq_cfg_offset(REG_CQDAO, i));
 		/* Completion Queue Interrupt Status Address Offset */
 		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
-			      MCQ_CFG_n(REG_CQISAO, i));
+			      ufshcd_mcq_cfg_offset(REG_CQISAO, i));
 
 		/* Save the base addresses for quicker access */
 		hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
@@ -375,7 +370,7 @@  void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 
 		/* Completion Queue Enable|Size to Completion Queue Attribute */
 		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
-			      MCQ_CFG_n(REG_CQATTR, i));
+			      ufshcd_mcq_cfg_offset(REG_CQATTR, i));
 
 		/*
 		 * Submission Qeueue Enable|Size|Completion Queue ID to
@@ -383,7 +378,7 @@  void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 		 */
 		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
 			      (i << QUEUE_ID_OFFSET),
-			      MCQ_CFG_n(REG_SQATTR, i));
+			      ufshcd_mcq_cfg_offset(REG_SQATTR, i));
 	}
 }
 EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index eec7c97e3dbe..94fa400b646e 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -1127,6 +1127,8 @@  struct ufs_hw_queue {
 	struct mutex sq_mutex;
 };
 
+#define MCQ_QCFG_SIZE		0x40
+
 static inline bool is_mcq_enabled(struct ufs_hba *hba)
 {
 	return hba->mcq_enabled;
@@ -1138,6 +1140,11 @@  static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba,
 	return hba->mcq_opr[opr].offset + hba->mcq_opr[opr].stride * idx;
 }
 
+static inline unsigned int ufshcd_mcq_cfg_offset(unsigned int reg, int idx)
+{
+	return reg + MCQ_QCFG_SIZE * idx;
+}
+
 #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
 static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
 {