@@ -19,6 +19,9 @@
#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
#define UFS_MCQ_MIN_POLL_QUEUES 0
+#define MAX_DEV_CMD_ENTRIES 2
+#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
+
static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
@@ -67,6 +70,38 @@ module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
"Number of poll queues used for r/w. Default value is 1");
+/**
+ * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * @hba: per adapter instance
+ *
+ * MAC - Max. Active Commands of the Host Controller (HC).
+ * The HC will not send more commands than this to the device.
+ * It is mandatory to implement get_hba_mac() to enable MCQ mode.
+ * Calculates and adjusts the queue depth based on the depths
+ * supported by the HC and the UFS device.
+ *
+ * Return: queue depth on success, a negative error code otherwise.
+ */
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+{
+ int mac;
+
+ /* Mandatory to implement get_hba_mac() */
+ mac = ufshcd_mcq_vops_get_hba_mac(hba);
+ if (mac < 0) {
+ dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+ return mac;
+ }
+
+ WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
+ /*
+ * The max. value of bqueuedepth is 256; mac is host dependent.
+ * A UFS device must report bQueueDepth if the shared queueing
+ * architecture is enabled.
+ */
+ return min_t(int, mac, hba->dev_info.bqueuedepth);
+}
+
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
int i;
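For context on get_hba_mac(): the Qualcomm implementation further down simply returns a constant, but a controller that latches its limit in hardware could derive the value from the MaxActiveCmd field of REG_UFS_MCQ_CFG instead. The sketch below is illustrative only and not part of this patch; ufs_foo_get_hba_mac() is a hypothetical hook, it assumes the field's reset value reflects the controller's capability, and it assumes MCQ_CFG_MAC_MASK and REG_UFS_MCQ_CFG (both added by this patch) are visible to the variant driver.

	#include <linux/bitfield.h>	/* FIELD_GET() */

	/* Hypothetical variant hook, for illustration only */
	static int ufs_foo_get_hba_mac(struct ufs_hba *hba)
	{
		u32 mcq_cfg = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
		int mac = FIELD_GET(MCQ_CFG_MAC_MASK, mcq_cfg);

		/* A zero field means the HC never reported a MAC limit */
		return mac ?: -EOPNOTSUPP;
	}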
@@ -62,6 +62,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
int ufshcd_mcq_init(struct ufs_hba *hba);
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
#define SD_ASCII_STD true
#define SD_RAW false
@@ -232,6 +233,14 @@ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
+static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->get_hba_mac)
+ return hba->vops->get_hba_mac(hba);
+
+ return -EOPNOTSUPP;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
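Note that when a variant driver leaves get_hba_mac unset, the -EOPNOTSUPP returned by this wrapper propagates out of ufshcd_mcq_decide_queue_depth() and ufshcd_alloc_mcq(), so MCQ mode cannot be brought up on that platform.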
@@ -7887,6 +7887,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+ dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
@@ -8296,7 +8297,21 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
- return ufshcd_mcq_init(hba);
+ int ret;
+ int old_nutrs = hba->nutrs;
+
+ ret = ufshcd_mcq_decide_queue_depth(hba);
+ if (ret < 0)
+ return ret;
+
+ hba->nutrs = ret;
+ ret = ufshcd_mcq_init(hba);
+ if (ret) {
+ hba->nutrs = old_nutrs;
+ return ret;
+ }
+
+ return 0;
}
/**
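A worked example of the new sizing flow, using illustrative numbers: with the Qualcomm controller below, whose get_hba_mac() returns MAX_SUPP_MAC = 64, and a device whose descriptor reports bQueueDepth = 32, ufshcd_mcq_decide_queue_depth() returns min(64, 32) = 32, so hba->nutrs is lowered to 32 before ufshcd_mcq_init() sizes the queues; if init fails, the previous nutrs is restored.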
@@ -1485,6 +1485,12 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
return ret;
}
+static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
+{
+ /* Qualcomm HC supports up to 64 active commands */
+ return MAX_SUPP_MAC;
+}
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
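Returning a compile-time constant is the simplest possible implementation of the hook and fits a controller whose MAC limit is fixed by the IP configuration; a controller that advertises the limit in a capability register could read it back instead, as sketched earlier.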
@@ -1509,6 +1515,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.config_scaling_param = ufs_qcom_config_scaling_param,
.program_key = ufs_qcom_ice_program_key,
.mcq_config_resource = ufs_qcom_mcq_config_resource,
+ .get_hba_mac = ufs_qcom_get_hba_mac,
};
/**
@@ -16,6 +16,7 @@
#define HBRN8_POLL_TOUT_MS 100
#define DEFAULT_CLK_RATE_HZ 1000000
#define BUS_VECTOR_NAME_LEN 32
+#define MAX_SUPP_MAC 64
#define UFS_HW_VER_MAJOR_SHFT (28)
#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT)
@@ -617,6 +617,8 @@ struct ufs_dev_info {
u8 *model;
u16 wspecversion;
u32 clk_gating_wait_us;
+ /* Queue depth reported by the UFS device (bQueueDepth) */
+ u8 bqueuedepth;
/* UFS HPB related flag */
bool hpb_enabled;
@@ -299,6 +299,7 @@ struct ufs_pwr_mode_info {
* @program_key: program or evict an inline encryption key
* @event_notify: called to notify important events
* @mcq_config_resource: called to configure MCQ platform resources
+ * @get_hba_mac: called to get the vendor-specific MAC value; mandatory for MCQ mode
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -338,6 +339,7 @@ struct ufs_hba_variant_ops {
void (*event_notify)(struct ufs_hba *hba,
enum ufs_event_type evt, void *data);
int (*mcq_config_resource)(struct ufs_hba *hba);
+ int (*get_hba_mac)(struct ufs_hba *hba);
};
/* clock gating state */
@@ -57,6 +57,7 @@ enum {
REG_UFS_CCAP = 0x100,
REG_UFS_CRYPTOCAP = 0x104,
+ REG_UFS_MCQ_CFG = 0x380,
UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
};