@@ -1424,6 +1424,119 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
}
#endif
+static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
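+	/* Reporting outstanding CQ events is not implemented here yet */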
+ return -EINVAL;
+}
+
+static int ufs_qcom_config_mcq_rop(struct ufs_hba *hba)
+{
+ struct ufshcd_mcq_rop_info_t *rop;
+ struct ufshcd_res_info_t *mem_res, *sqdao_res;
+ int i;
+
+ mem_res = &hba->res[RES_MEM];
+ sqdao_res = &hba->res[RES_MCQ_SQD];
+
+ if (!mem_res->base || !sqdao_res->base)
+ return -EINVAL;
+
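+	/*
+	 * Each of the ROP_MAX register groups starts 0x40 beyond the
+	 * previous one within the SQDAO region, with a 0x100 stride
+	 * between queues. The offset is recorded relative to the MCQ
+	 * memory resource base.
+	 */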
+	for (i = 0; i < ROP_MAX; i++) {
+ rop = &hba->mcq_rop[i];
+ rop->offset = sqdao_res->resource->start -
+ mem_res->resource->start + 0x40 * i;
+ rop->stride = 0x100;
+ rop->base = sqdao_res->base + 0x40 * i;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
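+	/* Let the MCQ core program the ESI message into the controller */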
+ ufshcd_mcq_config_esi(hba, msg);
+}
+
+static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
+{
+ struct ufs_hba *hba = __hba;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 event_id = irq - host->esi_base;
+ struct ufs_hw_queue *hwq = &hba->uhq[event_id];
+
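+	/* ESI vectors map 1:1 to hardware queues; poll the matching CQ */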
+ ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+
+ return IRQ_HANDLED;
+}
+
+static int ufs_qcom_config_mcq_esi(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct msi_desc *desc;
+ struct msi_desc *failed_desc = NULL;
+ u32 reg;
+ int nr_irqs, ret;
+
+ /*
+ * 1. We only handle CQs as of now.
+ * 2. Poll queues do not need ESI.
+ */
+ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
+ ufs_qcom_write_msi_msg);
+ if (ret)
+ goto out;
+
+ msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
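+		/* The first vector's IRQ is the base used to map IRQs to queues */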
+ if (!desc->msi_index)
+ host->esi_base = desc->irq;
+
+ ret = devm_request_irq(hba->dev, desc->irq,
+ ufs_qcom_mcq_esi_handler,
+ 0, "qcom-mcq-esi", hba);
+ if (ret) {
+			dev_err(hba->dev, "%s: Failed to request IRQ %d, err = %d\n",
+				__func__, desc->irq, ret);
+ failed_desc = desc;
+ break;
+ }
+ }
+
+	if (ret) {
+		/* Rewind: free the vectors requested before the failure */
+		msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+			if (desc == failed_desc)
+				break;
+			devm_free_irq(hba->dev, desc->irq, hba);
+		}
+		platform_msi_domain_free_irqs(hba->dev);
+ } else {
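+		/*
+		 * HW version 6.0.0 needs bits 12:16 of UFS_CFG3 set before
+		 * ESI is enabled.
+		 */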
+ if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ reg = ufshcd_readl(hba, REG_UFS_CFG3);
+ reg |= 0x1F000;
+ ufshcd_writel(hba, reg, REG_UFS_CFG3);
+ }
+ ufshcd_mcq_enable_esi(hba);
+ }
+
+out:
+	if (ret)
+		dev_warn(hba->dev, "Failed to set up platform MSI, err = %d\n", ret);
+ return ret;
+}
+
+#else
+static int ufs_qcom_config_mcq_esi(struct ufs_hba *hba)
+{
+ return -EINVAL;
+}
+#endif
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1447,6 +1560,9 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.device_reset = ufs_qcom_device_reset,
.config_scaling_param = ufs_qcom_config_scaling_param,
.program_key = ufs_qcom_ice_program_key,
+ .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
+ .config_mcq_rop = ufs_qcom_config_mcq_rop,
+ .config_mcq_esi = ufs_qcom_config_mcq_esi,
};
/**
@@ -53,6 +53,7 @@ enum {
* added in HW Version 3.0.0
*/
UFS_AH8_CFG = 0xFC,
+ REG_UFS_CFG3 = 0x271C,
};
/* QCOM UFS host controller vendor specific debug registers */
@@ -221,6 +222,7 @@ struct ufs_qcom_host {
struct reset_controller_dev rcdev;
struct gpio_desc *device_reset;
+	int esi_base;	/* First IRQ of the allocated ESI vectors */
};
static inline u32