@@ -1282,6 +1282,27 @@ static u16 nvmet_set_feat_host_id(struct nvmet_req *req)
sizeof(req->sq->ctrl->hostid));
}
+static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_feat_irq_coalesce irqc = {
+ .time = (cdw11 >> 8) & 0xff,
+ .thr = cdw11 & 0xff,
+ };
+
+ /*
+ * This feature is not supported for fabrics controllers but is
+ * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
+}
+
void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
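
The decode in nvmet_set_feat_irq_coalesce() above follows the Interrupt Coalescing feature (Feature Identifier 08h) layout from the NVMe Base Specification: CDW11 bits 07:00 carry the Aggregation Threshold and bits 15:08 the Aggregation Time, the latter in 100 microsecond increments. As a minimal sketch of the inverse encoding (the helper name below is made up and not part of this patch):

/* Hypothetical helper: build the CDW11 value that
 * nvmet_set_feat_irq_coalesce() decodes.
 */
static inline u32 nvme_feat_irq_coalesce_cdw11(u8 time, u8 thr)
{
	return ((u32)time << 8) | thr;	/* bits 15:08 = TIME, bits 07:00 = THR */
}
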
@@ -1305,6 +1326,9 @@ void nvmet_execute_set_features(struct nvmet_req *req)
nvmet_set_result(req,
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
+ case NVME_FEAT_IRQ_COALESCE:
+ status = nvmet_set_feat_irq_coalesce(req);
+ break;
case NVME_FEAT_KATO:
status = nvmet_set_feat_kato(req);
break;
@@ -1349,6 +1373,30 @@ static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
return 0;
}
+static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_feat_irq_coalesce irqc = { };
+ u16 status;
+
+ /*
+ * This feature is not supported for fabrics controllers but is
+ * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr);
+
+ return NVME_SC_SUCCESS;
+}
+
void nvmet_get_feat_kato(struct nvmet_req *req)
{
nvmet_set_result(req, req->sq->ctrl->kato * 1000);
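
As a worked example of the get path added above: if the transport reports irqc.time == 0x0A (10 x 100 us, i.e. 1 ms) and irqc.thr == 0x07, nvmet_get_feat_irq_coalesce() places (0x0A << 8) | 0x07 == 0x00000A07 in Dword 0 of the Get Features completion, which is the encoding the host expects for this feature.
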
@@ -1383,13 +1431,14 @@ void nvmet_execute_get_features(struct nvmet_req *req)
break;
case NVME_FEAT_ERR_RECOVERY:
break;
- case NVME_FEAT_IRQ_COALESCE:
- break;
case NVME_FEAT_IRQ_CONFIG:
break;
case NVME_FEAT_WRITE_ATOMIC:
break;
#endif
+ case NVME_FEAT_IRQ_COALESCE:
+ status = nvmet_get_feat_irq_coalesce(req);
+ break;
case NVME_FEAT_ASYNC_EVENT:
nvmet_get_feat_async_event(req);
break;
@@ -906,4 +906,14 @@ static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
{
percpu_ref_put(&pc_ref->ref);
}
+
+/*
+ * Data for the get_feature() and set_feature() operations of PCI target
+ * controllers.
+ */
+struct nvmet_feat_irq_coalesce {
+ u8 thr;  /* aggregation threshold: completion entries per vector (0's based) */
+ u8 time; /* aggregation time, in 100 microsecond increments */
+};
+
#endif /* _NVMET_H */
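
The nvmet.h hunk only declares the data exchanged between the core and the transport; a PCI transport is expected to provide the get_feature()/set_feature() ops that the core calls above. The following is a rough, hypothetical sketch, not an actual driver: the my_pci_* names and the lookup helper are invented, and the ops signature is inferred from the call sites in this patch.

/*
 * Sketch only: cache the host-programmed coalescing parameters so the
 * transport's completion path can decide when to raise an interrupt.
 */
struct my_pci_ctrl {
	struct nvmet_feat_irq_coalesce irqc;	/* last values set by the host */
	/* ... interrupt vectors, completion queues, etc. ... */
};

/* Placeholder: map the core nvmet_ctrl to the transport's private structure. */
static struct my_pci_ctrl *to_my_pci_ctrl(struct nvmet_ctrl *ctrl);

static u16 my_pci_set_feature(struct nvmet_ctrl *ctrl, u8 feat, void *data)
{
	struct my_pci_ctrl *pctrl = to_my_pci_ctrl(ctrl);

	switch (feat) {
	case NVME_FEAT_IRQ_COALESCE:
		/* Remember the settings; the IRQ path applies them on completions. */
		pctrl->irqc = *(struct nvmet_feat_irq_coalesce *)data;
		return NVME_SC_SUCCESS;
	default:
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}
}

static u16 my_pci_get_feature(struct nvmet_ctrl *ctrl, u8 feat, void *data)
{
	struct my_pci_ctrl *pctrl = to_my_pci_ctrl(ctrl);

	switch (feat) {
	case NVME_FEAT_IRQ_COALESCE:
		*(struct nvmet_feat_irq_coalesce *)data = pctrl->irqc;
		return NVME_SC_SUCCESS;
	default:
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}
}
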