diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -378,6 +378,8 @@ static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl,
cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}
+ if (ctrl->ops->set_dbbuf && ctrl->shadow_db)
+ log->acs[nvme_admin_dbbuf] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
log->acs[nvme_admin_get_log_page] =
log->acs[nvme_admin_identify] =
log->acs[nvme_admin_abort_cmd] =
@@ -713,7 +715,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
ctratt |= NVME_CTRL_ATTR_RHII;
id->ctratt = cpu_to_le32(ctratt);
- id->oacs = 0;
+ if (ctrl->ops->set_dbbuf && ctrl->shadow_db)
+ id->oacs = cpu_to_le16(NVME_CTRL_OACS_DBBUF_SUPP);
/*
* We don't really have a practical limit on the number of abort
@@ -1640,6 +1643,23 @@ u32 nvmet_admin_cmd_data_len(struct nvmet_req *req)
}
}
+static void nvmet_execute_dbbuf(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_command *cmd = req->cmd;
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl) || !ctrl->ops->set_dbbuf || !ctrl->shadow_db) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ status = ctrl->ops->set_dbbuf(ctrl, le64_to_cpu(cmd->dbbuf.prp1),
+ le64_to_cpu(cmd->dbbuf.prp2));
+complete:
+ nvmet_req_complete(req, status);
+}
+
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -1696,6 +1716,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
return 0;
+ case nvme_admin_dbbuf:
+ req->execute = nvmet_execute_dbbuf;
+ return 0;
default:
return nvmet_report_invalid_opcode(req);
}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -2086,6 +2086,31 @@ static ssize_t nvmet_ctrl_enable_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_ctrl_, enable);
+static ssize_t nvmet_ctrl_shadow_doorbell_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_ctrl_conf *conf = to_nvmet_ctrl_conf(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", conf->args.shadow_db);
+}
+
+static ssize_t nvmet_ctrl_shadow_doorbell_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ctrl_conf *conf = to_nvmet_ctrl_conf(item);
+ int ret;
+
+ if (nvmet_is_ctrl_enabled(conf, __func__))
+ return -EACCES;
+
+ ret = kstrtobool(page, &conf->args.shadow_db);
+ if (ret)
+ return ret;
+
+ return count;
+}
+CONFIGFS_ATTR(nvmet_ctrl_, shadow_doorbell);
+
static ssize_t nvmet_ctrl_trtype_show(struct config_item *item, char *page)
{
struct nvmet_ctrl_conf *conf = to_nvmet_ctrl_conf(item);
@@ -2128,6 +2153,7 @@ static ssize_t nvmet_ctrl_trtype_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_ctrl_, trtype);
static struct configfs_attribute *nvmet_ctrl_attrs[] = {
+ &nvmet_ctrl_attr_shadow_doorbell,
&nvmet_ctrl_attr_trtype,
&nvmet_ctrl_attr_enable,
NULL,
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1587,6 +1587,7 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
goto out_put_subsystem;
mutex_init(&ctrl->lock);
+ ctrl->shadow_db = args->shadow_db;
ctrl->port = args->port;
ctrl->ops = args->ops;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -253,6 +253,7 @@ struct nvmet_ctrl {
u64 cap;
u32 cc;
u32 csts;
+ bool shadow_db;
uuid_t hostid;
u16 cntlid;
@@ -418,6 +419,8 @@ struct nvmet_fabrics_ops {
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
+ u16 (*set_dbbuf)(struct nvmet_ctrl *ctrl, u64 prp1, u64 prp2);
+
/* Operations mandatory for PCI target controllers */
u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 flags,
u16 qsize, u64 prp1);
@@ -593,6 +596,7 @@ struct nvmet_alloc_ctrl_args {
const struct nvmet_fabrics_ops *ops;
struct device *p2p_client;
u32 kato;
+ bool shadow_db;
u16 cntlid;
__le32 result;
u16 error_loc;
This patch allows a user to enable shadow doorbell support and to report
that it is supported if the driver also supports it.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/nvme/target/admin-cmd.c | 25 ++++++++++++++++++++++++-
 drivers/nvme/target/configfs.c  | 26 ++++++++++++++++++++++++++
 drivers/nvme/target/core.c      |  1 +
 drivers/nvme/target/nvmet.h     |  4 ++++
 4 files changed, 55 insertions(+), 1 deletion(-)
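
As additional context for reviewers (not part of the patch): below is a minimal
sketch of the EventIdx bookkeeping that the NVMe Doorbell Buffer Config feature
implies for a transport wiring up the new set_dbbuf() op. The struct and helper
names (nvmet_shadow_db, nvmet_dbbuf_need_event, nvmet_shadow_sq_tail) are
hypothetical illustrations; only the comparison itself is spec-defined, and it
mirrors nvme_dbbuf_need_event() in the host PCI driver.

#include <linux/types.h>	/* __le32, u16, bool */

/*
 * Hypothetical per-queue state a transport could keep once set_dbbuf()
 * has mapped the two PRPs passed by the host: one page of shadow
 * doorbell values written by the host, one page of EventIdx values
 * written by the target.
 */
struct nvmet_shadow_db {
	__le32 *shadow_db;	/* host-written copy of the SQ tail / CQ head */
	__le32 *event_idx;	/* target-written "ring the real doorbell after this" */
};

/*
 * Spec-defined check (same formula as the host driver): true if moving
 * the doorbell from @old to @new_idx crosses @event_idx, modulo 64K,
 * which is when the host is expected to also write the real register.
 */
static inline bool nvmet_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/*
 * Example of how an SQ polling loop could pick up new submissions from
 * the shadow doorbell instead of waiting for a doorbell register write.
 */
static u16 nvmet_shadow_sq_tail(struct nvmet_shadow_db *db)
{
	return (u16)le32_to_cpu(READ_ONCE(*db->shadow_db));
}

Whether a transport polls the shadow value or keeps relying on doorbell register
writes is its own choice; the patch above only adds the configfs knob, the OACS
and commands-supported-and-effects reporting, and the admin command dispatch to
ops->set_dbbuf().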