@@ -27,6 +27,7 @@
((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
#define MCQ_QCFG_SIZE 0x40
#define MCQ_ENTRY_SIZE_IN_DWORD 8
+#define CQE_UCD_BA GENMASK_ULL(63, 7)
static unsigned int dev_cmd_queue = 1;
@@ -340,6 +341,57 @@ static void __iomem *mcq_opr_base(struct ufs_hba *hba,
return opr->base + opr->stride * i;
}
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
+{
+ return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
+{
+ writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+
+static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq,
+ struct cq_entry *cqe)
+{
+ dma_addr_t dma_addr;
+
+ /* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
+ dma_addr = le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA;
+
+ return (dma_addr - hba->ucdl_dma_addr) /
+ sizeof(struct utp_transfer_cmd_desc);
+}
+
+static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
+ int tag;
+
+ tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
+ ufshcd_compl_one_cqe(hba, tag, cqe);
+}
+
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs = 0;
+
+ ufshcd_mcq_update_cq_tail_slot(hwq);
+ while (!ufshcd_mcq_is_cq_empty(hwq)) {
+ ufshcd_mcq_process_cqe(hba, hwq);
+ ufshcd_mcq_inc_cq_head_slot(hwq);
+ completed_reqs++;
+ }
+
+ if (completed_reqs)
+ ufshcd_mcq_update_cq_head(hwq);
+
+ return completed_reqs;
+}
+
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
struct ufs_hw_queue *hwq;
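
For orientation, a minimal sketch of what a non-IRQ caller of ufshcd_mcq_poll_cqe_nolock() could look like; the mq_poll-style hook, the direct queue_num-to-hwq mapping, and the name ufshcd_example_mcq_poll are illustrative assumptions only. The interrupt path added to ufshcd.c below is the only caller introduced in this section.

/*
 * Hypothetical polling caller, not part of this patch. The "nolock"
 * suffix means the caller is responsible for serializing CQ head/tail
 * updates for a given hwq; the queue_num-to-hwq mapping is simplified.
 */
static int ufshcd_example_mcq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct ufs_hba *hba = shost_priv(shost);
	struct ufs_hw_queue *hwq = &hba->uhq[queue_num];

	/* Returns the number of CQ entries drained from this queue */
	return ufshcd_mcq_poll_cqe_nolock(hba, hwq);
}
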
@@ -58,6 +58,10 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req);
@@ -242,6 +246,15 @@ static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
+static inline int ufshcd_vops_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ if (hba->vops && hba->vops->get_outstanding_cqs)
+ return hba->vops->get_outstanding_cqs(hba, ocqs);
+
+ return -EOPNOTSUPP;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
@@ -332,4 +345,34 @@ static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
writel(val, q->mcq_sq_tail);
}
+static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q)
+{
+ u32 val = readl(q->mcq_cq_tail);
+
+ q->cq_tail_slot = val / sizeof(struct cq_entry);
+}
+
+static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q)
+{
+ return q->cq_head_slot == q->cq_tail_slot;
+}
+
+static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q)
+{
+ q->cq_head_slot++;
+ if (q->cq_head_slot == q->max_entries)
+ q->cq_head_slot = 0;
+}
+
+static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q)
+{
+ writel(q->cq_head_slot * sizeof(struct cq_entry), q->mcq_cq_head);
+}
+
+static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
+{
+ struct cq_entry *cqe = q->cqe_base_addr;
+
+ return cqe + q->cq_head_slot;
+}
#endif /* _UFSHCD_PRIV_H_ */
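
A note on the helpers above: the CQ head and tail doorbell registers hold byte offsets into the CQE ring, so the slot conversions divide or multiply by sizeof(struct cq_entry), i.e. 32 bytes (8 dwords, per MCQ_ENTRY_SIZE_IN_DWORD in ufs-mcq.c). A standalone worked example, with an arbitrary doorbell value chosen purely for illustration:

/* Userspace illustration only; 32-byte entries match MCQ_ENTRY_SIZE_IN_DWORD. */
#include <stdio.h>

#define CQ_ENTRY_SIZE 32u

int main(void)
{
	unsigned int tail_db = 0x60;			/* example value read from mcq_cq_tail */
	unsigned int tail_slot = tail_db / CQ_ENTRY_SIZE;
	unsigned int head_db = 2 * CQ_ENTRY_SIZE;	/* head slot 2 written back as a byte offset */

	/* Prints "tail slot 3, head doorbell 0x40" */
	printf("tail slot %u, head doorbell 0x%x\n", tail_slot, head_db);
	return 0;
}
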
@@ -6639,6 +6639,40 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
}
/**
+ * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
+ * @hba: per adapter instance
+ *
+ * Returns IRQ_HANDLED if interrupt is handled
+ */
+static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ unsigned long outstanding_cqs;
+ unsigned int nr_queues;
+ int i, ret;
+ u32 events;
+
+ ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
+ if (ret)
+ outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
+
+ /* Exclude the poll queues */
+ nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ for_each_set_bit(i, &outstanding_cqs, nr_queues) {
+ hwq = &hba->uhq[i];
+
+ events = ufshcd_mcq_read_cqis(hba, i);
+ if (events)
+ ufshcd_mcq_write_cqis(hba, events, i);
+
+ if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
+ ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
@@ -6663,6 +6697,9 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
if (intr_status & UTP_TRANSFER_REQ_COMPL)
retval |= ufshcd_transfer_req_compl(hba);
+ if (intr_status & MCQ_CQ_EVENT_STATUS)
+ retval |= ufshcd_handle_mcq_cq_events(hba);
+
return retval;
}
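
MCQ_CQ_EVENT_STATUS used above is defined elsewhere in this series; it is assumed here to be the CQES (Completion Queue Event Status) bit of the host controller Interrupt Status register in UFSHCI 4.0, along the lines of:

/* Assumed definition, shown for reference only; the exact value comes from an earlier patch */
#define MCQ_CQ_EVENT_STATUS	0x100000	/* IS.CQES */
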
@@ -1454,6 +1454,21 @@ static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
return MAX_SUPP_MAC;
}
+static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
+ u32 cqis_vs;
+
+ if (!mcq_vs_res->base)
+ return -EINVAL;
+
+ cqis_vs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
+ *ocqs = cqis_vs;
+
+ return 0;
+}
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1479,6 +1494,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.program_key = ufs_qcom_ice_program_key,
.get_hba_mac = ufs_qcom_get_hba_mac,
.op_runtime_config = ufs_qcom_op_runtime_config,
+ .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
};
/**
@@ -72,6 +72,10 @@ enum {
UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
};
+enum {
+ UFS_MEM_CQIS_VS = 0x8,
+};
+
#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
@@ -296,6 +296,7 @@ struct ufs_pwr_mode_info {
* @event_notify: called to notify important events
* @get_hba_mac: called to get vendor specific mac value
* @op_runtime_config: called to config Operation and runtime regs Pointers
+ * @get_outstanding_cqs: called to get outstanding completion queues
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -336,6 +337,8 @@ struct ufs_hba_variant_ops {
enum ufs_event_type evt, void *data);
int (*get_hba_mac)(struct ufs_hba *hba);
int (*op_runtime_config)(struct ufs_hba *hba);
+ int (*get_outstanding_cqs)(struct ufs_hba *hba,
+ unsigned long *ocqs);
};
/* clock gating state */
@@ -1057,6 +1060,8 @@ struct ufs_hba {
* @id: hardware queue ID
* @sq_tail_slot: current slot to which SQ tail pointer is pointing
* @sq_lock: serialize submission queue access
+ * @cq_tail_slot: current slot to which CQ tail pointer is pointing
+ * @cq_head_slot: current slot to which CQ head pointer is pointing
*/
struct ufs_hw_queue {
void __iomem *mcq_sq_head;
@@ -1072,6 +1077,8 @@ struct ufs_hw_queue {
u32 id;
u32 sq_tail_slot;
spinlock_t sq_lock;
+ u32 cq_tail_slot;
+ u32 cq_head_slot;
};
static inline bool is_mcq_enabled(struct ufs_hba *hba)
@@ -266,6 +266,9 @@ enum {
/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
+/* CQISy - CQ y Interrupt Status Register */
+#define UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS 0x1
+
/* UICCMD - UIC Command */
#define COMMAND_OPCODE_MASK 0xFF
#define GEN_SELECTOR_INDEX_MASK 0xFFFF