@@ -103,6 +103,135 @@ static int cxl_debugfs_poison_clear(void *data, u64 dpa)
DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
cxl_debugfs_poison_clear, "%llx\n");
+struct qos_class_ctx {
+ bool matched;
+ int dev_qos_class;
+};
+
+static int match_cxlrd_qos_class(struct device *dev, void *data)
+{
+	struct qos_class_ctx *ctx = data;
+	struct cxl_root_decoder *cxlrd;
+
+	if (!is_root_decoder(dev))
+		return 0;
+
+	cxlrd = to_cxl_root_decoder(dev);
+	if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID ||
+	    ctx->dev_qos_class == CXL_QOS_CLASS_INVALID)
+		return 0;
+
+	if (cxlrd->qos_class == ctx->dev_qos_class) {
+		ctx->matched = true;	/* bool field: use true, not 1 */
+		return 1;
+	}
+
+	return 0;
+}
+
+static int cxl_qos_match(struct cxl_port *root_port,
+			 struct list_head *work_list,
+			 struct list_head *discard_list)
+{
+	struct perf_prop_entry *perf, *n;
+	struct qos_class_ctx ctx;
+	int rc;
+
+	if (list_empty(work_list))
+		return 0;
+
+	list_for_each_entry_safe(perf, n, work_list, list) {
+		ctx = (struct qos_class_ctx) {
+			.matched = false,
+			.dev_qos_class = perf->qos_class,
+		};
+		rc = device_for_each_child(&root_port->dev, &ctx, match_cxlrd_qos_class);
+		if (rc < 0)
+			return rc;	/* propagate real error, don't mask as -ENOENT */
+
+		if (!ctx.matched)
+			list_move_tail(&perf->list, discard_list);
+	}
+
+	return 0;
+}
+
+struct qos_hb_ctx {
+ bool matched;
+ struct device *host_bridge;
+};
+
+static int match_cxlrd_hb(struct device *dev, void *data)
+{
+ struct cxl_switch_decoder *cxlsd;
+ struct qos_hb_ctx *ctx = data;
+ struct cxl_root_decoder *cxlrd;
+ unsigned int seq;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ cxlrd = to_cxl_root_decoder(dev);
+ cxlsd = &cxlrd->cxlsd;
+
+ do {
+ seq = read_seqbegin(&cxlsd->target_lock);
+ for (int i = 0; i < cxlsd->nr_targets; i++) {
+ if (ctx->host_bridge ==
+ cxlsd->target[i]->dport_dev) {
+ ctx->matched = true;
+ return 1;
+ }
+ }
+ } while (read_seqretry(&cxlsd->target_lock, seq));
+
+ return 0;
+}
+
+static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
+{
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+	struct cxl_port *root_port;
+	struct qos_hb_ctx hbctx;
+	int rc;
+
+	root_port = find_cxl_root(cxlmd->endpoint);
+	if (!root_port)
+		return -ENODEV;
+
+	/* Check that the QTG IDs are all sane between end device and root decoders */
+	rc = cxl_qos_match(root_port, &mds->ram_perf_list,
+			   &mds->unmatched_perf_list);
+	if (rc < 0)
+		goto out;
+
+	rc = cxl_qos_match(root_port, &mds->pmem_perf_list,
+			   &mds->unmatched_perf_list);
+	if (rc < 0)
+		goto out;
+
+	/* Check to make sure that the device's host bridge is under a root decoder */
+	hbctx = (struct qos_hb_ctx) {
+		.matched = false,
+		.host_bridge = cxlmd->endpoint->host_bridge,
+	};
+	rc = device_for_each_child(&root_port->dev, &hbctx, match_cxlrd_hb);
+	if (rc < 0)
+		goto out;
+
+	if (!hbctx.matched) {
+		list_splice_tail_init(&mds->ram_perf_list,
+				      &mds->unmatched_perf_list);
+		list_splice_tail_init(&mds->pmem_perf_list,
+				      &mds->unmatched_perf_list);
+	}
+
+out:
+	put_device(&root_port->dev);
+	return rc > 0 ? 0 : rc;	/* don't leak callback's positive return to caller */
+}
+
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
@@ -174,6 +303,10 @@ static int cxl_mem_probe(struct device *dev)
if (rc)
return rc;
+ rc = cxl_qos_class_verify(cxlmd);
+ if (rc < 0)
+ dev_dbg(dev, "QoS Class verify failed\n");
+
if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
rc = devm_cxl_add_nvdimm(cxlmd);
if (rc == -ENODEV)
Add a check to make sure the qos_class for the device will match one of
the root decoders' qos_class. If no match is found, then the qos_class
for the device is set to invalid. Also add a check to ensure that the
device's host bridge matches one of the root decoder's downstream
targets.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
v12:
- Add support to match all QTG IDs (Dan)
- Do not stop probe if match fails. (Jonathan)
---
 drivers/cxl/mem.c | 133 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 133 insertions(+)