@@ -1265,6 +1265,7 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
mutex_init(&cxlds->mbox_mutex);
mutex_init(&cxlds->event.log_lock);
cxlds->dev = dev;
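+ /* Perf entries are added by the endpoint port driver from CDAT DSMAS data */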
+ INIT_LIST_HEAD(&cxlds->perf_list);
return cxlds;
}
@@ -5,6 +5,7 @@
#include <uapi/linux/cxl_mem.h>
#include <linux/cdev.h>
#include <linux/uuid.h>
+#include <linux/node.h>
#include "cxl.h"
/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -254,6 +255,21 @@ struct cxl_poison_state {
struct mutex lock; /* Protect reads of poison list */
};
+/**
+ * struct perf_prop_entry - performance property entry
+ * @list: list entry
+ * @dpa_range: DPA (device physical address) range covered by this entry
+ * @coord: QoS performance data (e.g. latency, bandwidth)
+ * @qos_class: QoS class cookies
+ */
+struct perf_prop_entry {
+ struct list_head list;
+ struct range dpa_range;
+ struct access_coordinate coord;
+ /* Must remain the last member; struct qos_class contains a flexible array */
+ struct qos_class qos_class;
+};
+
/**
* struct cxl_dev_state - The driver device state
*
@@ -292,6 +308,9 @@ struct cxl_poison_state {
* @event: event log driver state
* @poison: poison driver state info
* @mbox_send: @dev specific transport for transmitting mailbox commands
+ * @ram_qos_class: QoS class cookies for the volatile (ram) partition
+ * @pmem_qos_class: QoS class cookies for the persistent (pmem) partition
+ * @perf_list: list of cached performance property entries
*
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -325,6 +344,10 @@ struct cxl_dev_state {
u64 next_volatile_bytes;
u64 next_persistent_bytes;
+ struct qos_class *ram_qos_class;
+ struct qos_class *pmem_qos_class;
+ struct list_head perf_list;
+
resource_size_t component_reg_phys;
u64 serial;
@@ -104,6 +104,42 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
return 0;
}
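+/*
+ * Cache the CDAT DSMAS performance data on the memdev and record the QoS
+ * class cookies that apply to the ram and pmem partitions.
+ */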
+static void cxl_memdev_set_qtg(struct cxl_dev_state *cxlds, struct list_head *dsmas_list)
+{
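+ /* DPA extents of the persistent (pmem) and volatile (ram) partitions */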
+ struct range pmem_range = {
+ .start = cxlds->pmem_res.start,
+ .end = cxlds->pmem_res.end,
+ };
+ struct range ram_range = {
+ .start = cxlds->ram_res.start,
+ .end = cxlds->ram_res.end,
+ };
+ struct perf_prop_entry *perf;
+ struct dsmas_entry *dent;
+
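+ /* Cache one perf entry per DSMAS range reported by the device */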
+ list_for_each_entry(dent, dsmas_list, list) {
+ perf = devm_kzalloc(cxlds->dev,
+ sizeof(*perf) + dent->qos_class->nr * sizeof(int),
+ GFP_KERNEL);
+ if (!perf)
+ return;
+
+ perf->dpa_range = dent->dpa_range;
+ perf->coord = dent->coord;
+ perf->qos_class = *dent->qos_class;
+ list_add_tail(&perf->list, &cxlds->perf_list);
+
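+ /*
+ * Note the first QoS class whose DPA range falls entirely within the
+ * ram or pmem partition.
+ */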
+ if (resource_size(&cxlds->ram_res) &&
+ range_contains(&ram_range, &dent->dpa_range) &&
+ !cxlds->ram_qos_class)
+ cxlds->ram_qos_class = &perf->qos_class;
+ else if (resource_size(&cxlds->pmem_res) &&
+ range_contains(&pmem_range, &dent->dpa_range) &&
+ !cxlds->pmem_qos_class)
+ cxlds->pmem_qos_class = &perf->qos_class;
+ }
+}
+
static int cxl_switch_port_probe(struct cxl_port *port)
{
struct cxl_hdm *cxlhdm;
@@ -201,6 +237,8 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
if (rc)
dev_dbg(&port->dev,
"Failed to do perf coord calculations.\n");
+ else
+ cxl_memdev_set_qtg(cxlds, &dsmas_list);
}
cxl_cdat_dsmas_list_destroy(&dsmas_list);