@@ -6,7 +6,7 @@ obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFS_EXYNOS) += ufs-exynos.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o ufsfeature.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
new file mode 100644
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Universal Flash Storage Feature Support
+ *
+ * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Yongmyung Lee <ymhungry.lee@samsung.com>
+ * Jinyoung Choi <j-young.choi@samsung.com>
+ */
+
+#include "ufshcd.h"
+#include "ufsfeature.h"
+
+/**
+ * ufsf_slave_configure - note that a regular LU has been configured
+ * @hba: per-adapter instance
+ * @sdev: SCSI device of the newly configured LU
+ *
+ * Well-known LUs (lun >= UFS_UPIU_MAX_UNIT_NUM_ID) are ignored, as is
+ * everything when the device descriptor does not advertise HPB support.
+ * Otherwise bump slave_conf_cnt and wake waiters on sdev_wait so a
+ * feature driver can track LU scan progress.
+ *
+ * Note: not "inline" — this is an external function declared (without
+ * inline) in ufsfeature.h; kernel style keeps inline out of .c files.
+ */
+void ufsf_slave_configure(struct ufs_hba *hba,
+			  struct scsi_device *sdev)
+{
+	/* skip well-known LU */
+	if (sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID)
+		return;
+
+	if (!(hba->dev_info.b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT))
+		return;
+
+	atomic_inc(&hba->ufsf.slave_conf_cnt);
+
+	wake_up(&hba->ufsf.sdev_wait);
+}
+
+/**
+ * ufsf_ops_prep_fn - forward command preparation to the HPB driver
+ * @hba: per-adapter instance
+ * @lrbp: local reference block of the command being built
+ *
+ * No-op when no HPB driver is bound to hba->ufsf.hpb_dev or the driver
+ * does not implement ->prep_fn. "inline" dropped: external definition
+ * declared in ufsfeature.h must not be inline in a .c file.
+ */
+void ufsf_ops_prep_fn(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	struct ufshpb_driver *ufshpb_drv;
+
+	ufshpb_drv = dev_get_drvdata(&hba->ufsf.hpb_dev);
+
+	if (ufshpb_drv && ufshpb_drv->ufshpb_ops.prep_fn)
+		ufshpb_drv->ufshpb_ops.prep_fn(hba, lrbp);
+}
+
+/**
+ * ufsf_ops_rsp_upiu - forward a good response UPIU to the HPB driver
+ * @hba: per-adapter instance
+ * @lrbp: local reference block of the completed command
+ *
+ * Called from ufshcd_transfer_rsp_status() for SAM_STAT_GOOD responses.
+ * No-op when no HPB driver is bound or ->rsp_upiu is unset. "inline"
+ * dropped: external definition declared in ufsfeature.h.
+ */
+void ufsf_ops_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	struct ufshpb_driver *ufshpb_drv;
+
+	ufshpb_drv = dev_get_drvdata(&hba->ufsf.hpb_dev);
+
+	if (ufshpb_drv && ufshpb_drv->ufshpb_ops.rsp_upiu)
+		ufshpb_drv->ufshpb_ops.rsp_upiu(hba, lrbp);
+}
+
+/**
+ * ufsf_ops_reset_host - notify the HPB driver of an imminent host reset
+ * @hba: per-adapter instance
+ *
+ * Called before ufshcd_host_reset_and_restore(). No-op when no HPB
+ * driver is bound or ->reset_host is unset. "inline" dropped: external
+ * definition declared in ufsfeature.h.
+ */
+void ufsf_ops_reset_host(struct ufs_hba *hba)
+{
+	struct ufshpb_driver *ufshpb_drv;
+
+	ufshpb_drv = dev_get_drvdata(&hba->ufsf.hpb_dev);
+
+	if (ufshpb_drv && ufshpb_drv->ufshpb_ops.reset_host)
+		ufshpb_drv->ufshpb_ops.reset_host(hba);
+}
+
+/**
+ * ufsf_ops_reset - notify the HPB driver that the hba has been probed
+ * @hba: per-adapter instance
+ *
+ * Called at the end of ufshcd_probe_hba(). No-op when no HPB driver is
+ * bound or ->reset is unset. "inline" dropped: external definition
+ * declared in ufsfeature.h.
+ */
+void ufsf_ops_reset(struct ufs_hba *hba)
+{
+	struct ufshpb_driver *ufshpb_drv;
+
+	ufshpb_drv = dev_get_drvdata(&hba->ufsf.hpb_dev);
+
+	if (ufshpb_drv && ufshpb_drv->ufshpb_ops.reset)
+		ufshpb_drv->ufshpb_ops.reset(hba);
+}
+
+/**
+ * ufsf_ops_suspend - notify the HPB driver of an imminent suspend
+ * @hba: per-adapter instance
+ *
+ * Called from ufshcd_suspend() before the low-power transition. No-op
+ * when no HPB driver is bound or ->suspend is unset. "inline" dropped:
+ * external definition declared in ufsfeature.h.
+ */
+void ufsf_ops_suspend(struct ufs_hba *hba)
+{
+	struct ufshpb_driver *ufshpb_drv;
+
+	ufshpb_drv = dev_get_drvdata(&hba->ufsf.hpb_dev);
+
+	if (ufshpb_drv && ufshpb_drv->ufshpb_ops.suspend)
+		ufshpb_drv->ufshpb_ops.suspend(hba);
+}
+
+/**
+ * ufsf_ops_resume - notify the HPB driver that the hba has resumed
+ * @hba: per-adapter instance
+ *
+ * Called from ufshcd_resume() (and from the suspend error path). No-op
+ * when no HPB driver is bound or ->resume is unset. "inline" dropped:
+ * external definition declared in ufsfeature.h.
+ */
+void ufsf_ops_resume(struct ufs_hba *hba)
+{
+	struct ufshpb_driver *ufshpb_drv;
+
+	ufshpb_drv = dev_get_drvdata(&hba->ufsf.hpb_dev);
+
+	if (ufshpb_drv && ufshpb_drv->ufshpb_ops.resume)
+		ufshpb_drv->ufshpb_ops.resume(hba);
+}
+
+/* Device type identifying the HPB feature device on ufsf_bus. */
+struct device_type ufshpb_dev_type = {
+	.name = "ufshpb_device"
+};
+EXPORT_SYMBOL(ufshpb_dev_type);
+
+/*
+ * Bus match callback: drivers on ufsf_bus bind only to devices of the
+ * ufshpb device type (any driver, any such device).
+ */
+static int ufsf_bus_match(struct device *dev,
+			  struct device_driver *gendrv)
+{
+	return dev->type == &ufshpb_dev_type;
+}
+
+/* Virtual bus on which UFS feature devices (currently HPB) appear. */
+struct bus_type ufsf_bus_type = {
+	.name = "ufsf_bus",
+	.match = ufsf_bus_match,
+};
+EXPORT_SYMBOL(ufsf_bus_type);
+
+/*
+ * ->release() for the feature device: drop the parent (hba device)
+ * reference taken with get_device() in ufsf_scan_features().
+ */
+static void ufsf_dev_release(struct device *dev)
+{
+	put_device(dev->parent);
+}
+
+/**
+ * ufsf_scan_features - detect and register supported UFS features
+ * @hba: per-adapter instance
+ *
+ * Initializes the LU-configuration counter/waitqueue and, when the
+ * device spec version is at least HPB_SUPPORTED_VERSION and the device
+ * descriptor advertises HPB, registers the "ufshpb" child device on
+ * ufsf_bus so the HPB feature driver can bind to it.
+ */
+void ufsf_scan_features(struct ufs_hba *hba)
+{
+	int ret;
+
+	init_waitqueue_head(&hba->ufsf.sdev_wait);
+	atomic_set(&hba->ufsf.slave_conf_cnt, 0);
+
+	if (hba->dev_info.wspecversion < HPB_SUPPORTED_VERSION ||
+	    !(hba->dev_info.b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT))
+		return;
+
+	device_initialize(&hba->ufsf.hpb_dev);
+
+	hba->ufsf.hpb_dev.bus = &ufsf_bus_type;
+	hba->ufsf.hpb_dev.type = &ufshpb_dev_type;
+	hba->ufsf.hpb_dev.parent = get_device(hba->dev);
+	hba->ufsf.hpb_dev.release = ufsf_dev_release;
+
+	dev_set_name(&hba->ufsf.hpb_dev, "ufshpb");
+	ret = device_add(&hba->ufsf.hpb_dev);
+	if (ret) {
+		dev_warn(hba->dev, "ufshpb: failed to add device\n");
+		/*
+		 * device_add() failed: drop the initial reference taken
+		 * by device_initialize() so ->release() runs and the
+		 * parent reference from get_device() above is dropped.
+		 */
+		put_device(&hba->ufsf.hpb_dev);
+	}
+}
+
+/*
+ * Register ufsf_bus at boot so feature drivers can bind to the devices
+ * created by ufsf_scan_features(). device_initcall: built-in only, and
+ * there is intentionally no exit path unregistering the bus.
+ */
+static int __init ufsf_init(void)
+{
+	int ret;
+
+	ret = bus_register(&ufsf_bus_type);
+	if (ret)
+		pr_err("%s bus_register failed\n", __func__);
+
+	return ret;
+}
+device_initcall(ufsf_init);
new file mode 100644
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Universal Flash Storage Feature Support
+ *
+ * Copyright (C) 2017-2018 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Yongmyung Lee <ymhungry.lee@samsung.com>
+ * Jinyoung Choi <j-young.choi@samsung.com>
+ */
+
+#ifndef _UFSFEATURE_H_
+#define _UFSFEATURE_H_
+
+/*
+ * Make the header self-contained: struct device / mempool_t /
+ * wait_queue_head_t are used directly below.
+ */
+#include <linux/device.h>
+#include <linux/mempool.h>
+#include <linux/wait.h>
+
+/* HPB requires a UFS device spec version of at least 3.1 (0x0310). */
+#define HPB_SUPPORTED_VERSION			0x0310
+
+/* Forward declarations; full definitions come from ufshcd.h / SCSI core. */
+struct ufs_hba;
+struct ufshcd_lrb;
+struct scsi_device;
+
+/**
+ * struct ufsf_operation - UFS feature specific callbacks
+ * @prep_fn: called after the command UPIU has been constructed. The
+ *           prep_fn should work properly even if it processes the same
+ *           SCSI command multiple times due to requeuing.
+ * @reset: called after probing the hba
+ * @reset_host: called before ufshcd_host_reset_and_restore
+ * @suspend: called before ufshcd_suspend
+ * @resume: called after ufshcd_resume
+ * @rsp_upiu: called in ufshcd_transfer_rsp_status with SAM_STAT_GOOD state
+ */
+struct ufsf_operation {
+	void (*prep_fn)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+	void (*reset)(struct ufs_hba *hba);
+	void (*reset_host)(struct ufs_hba *hba);
+	void (*suspend)(struct ufs_hba *hba);
+	void (*resume)(struct ufs_hba *hba);
+	void (*rsp_upiu)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+};
+
+/*
+ * HPB feature driver state. The bound instance is looked up via
+ * dev_get_drvdata(&hba->ufsf.hpb_dev) by the ufsf_ops_* wrappers.
+ * The caches/pools/workqueue are owned and populated by the HPB
+ * driver itself (not visible in this file).
+ */
+struct ufshpb_driver {
+	struct device_driver drv;
+	struct list_head lh_hpb_lu;
+
+	/* callbacks invoked from the ufshcd core paths */
+	struct ufsf_operation ufshpb_ops;
+
+	/* memory management */
+	struct kmem_cache *ufshpb_mctx_cache;
+	mempool_t *ufshpb_mctx_pool;
+	mempool_t *ufshpb_page_pool;
+
+	struct workqueue_struct *ufshpb_wq;
+};
+
+/*
+ * Per-hba feature state, embedded in struct ufs_hba as ->ufsf.
+ * slave_conf_cnt counts configured non-well-known LUs; waiters on
+ * sdev_wait are woken from ufsf_slave_configure(). hpb_dev is the
+ * child device a ufshpb_driver binds to.
+ */
+struct ufsf_feature_info {
+	atomic_t slave_conf_cnt;
+	wait_queue_head_t sdev_wait;
+	struct device hpb_dev;
+};
+
+void ufsf_slave_configure(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufsf_scan_features(struct ufs_hba *hba);
+void ufsf_ops_prep_fn(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufsf_ops_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufsf_ops_reset_host(struct ufs_hba *hba);
+void ufsf_ops_reset(struct ufs_hba *hba);
+void ufsf_ops_suspend(struct ufs_hba *hba);
+void ufsf_ops_resume(struct ufs_hba *hba);
+
+#endif /* End of Header */
@@ -2556,6 +2556,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_comp_scsi_upiu(hba, lrbp);
+ ufsf_ops_prep_fn(hba, lrbp);
+
err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
@@ -4689,6 +4691,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
+ ufsf_slave_configure(hba, sdev);
+
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (ufshcd_is_rpm_autosuspend_allowed(hba))
@@ -4817,6 +4821,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
*/
pm_runtime_get_noresume(hba->dev);
}
+
+ if (scsi_status == SAM_STAT_GOOD)
+ ufsf_ops_rsp_upiu(hba, lrbp);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -6569,6 +6576,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Stop the host controller and complete the requests
* cleared by h/w
*/
+ ufsf_ops_reset_host(hba);
+
ufshcd_hba_stop(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7003,6 +7012,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+ dev_info->b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
@@ -7373,6 +7383,7 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
}
ufs_bsg_probe(hba);
+ ufsf_scan_features(hba);
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
@@ -7461,6 +7472,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ ufsf_ops_reset(hba);
out:
trace_ufshcd_init(dev_name(hba->dev), ret,
@@ -8218,6 +8230,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
req_link_state = UIC_LINK_OFF_STATE;
}
+ ufsf_ops_suspend(hba);
+
/*
* If we can't transition into any of the low power modes
* just gate the clocks.
@@ -8339,6 +8353,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
ufshcd_release(hba);
+ ufsf_ops_resume(hba);
out:
if (hba->dev_info.b_rpm_dev_flush_capable) {
schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
@@ -8435,6 +8450,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ ufsf_ops_resume(hba);
+
if (hba->dev_info.b_rpm_dev_flush_capable) {
hba->dev_info.b_rpm_dev_flush_capable = false;
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
@@ -47,6 +47,7 @@
#include "ufs.h"
#include "ufs_quirks.h"
#include "ufshci.h"
+#include "ufsfeature.h"
#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"
@@ -754,6 +755,7 @@ struct ufs_hba {
bool wb_enabled;
struct delayed_work rpm_dev_flush_recheck_work;
+ struct ufsf_feature_info ufsf;
#ifdef CONFIG_SCSI_UFS_CRYPTO
union ufs_crypto_capabilities crypto_capabilities;
union ufs_crypto_cap_entry *crypto_cap_array;