
[v5,12/14] cxl: Store QTG IDs and related info to the CXL memory device context

Message ID 168357888523.2756219.11912716889990252182.stgit@djiang5-mobl3
State Superseded
Series cxl: Add support for QTG ID retrieval for CXL subsystem

Commit Message

Dave Jiang May 8, 2023, 8:48 p.m. UTC
Once the QTG ID _DSM is executed successfully, the QTG ID is retrieved from
the return package. Create a list of entries in the cxl_memdev context and
store the QTG ID and the associated DPA range. This information can be
exposed to user space via sysfs in order to help region setup for
hot-plugged CXL memory devices.
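
As a rough sketch only (not part of this patch; the attribute name and its placement on the memdev are assumptions), a sysfs hook exposing the volatile-partition QTG ID could look something like:

/* Hypothetical example: expose the volatile-partition QTG ID via sysfs.
 * Attribute name and location are illustrative, not taken from this patch.
 */
static ssize_t ram_qos_class_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	if (cxlds->ram_qos_class == CXL_QOS_CLASS_INVALID)
		return -ENXIO;

	return sysfs_emit(buf, "%d\n", cxlds->ram_qos_class);
}
static DEVICE_ATTR_RO(ram_qos_class);

User space could then read the value (e.g. from a path such as /sys/bus/cxl/devices/memN/ram_qos_class, path assumed) and match a hot-plugged device's partition against a root decoder's QTG ID when setting up a region.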

Signed-off-by: Dave Jiang <dave.jiang@intel.com>

---
v4:
- Remove unused qos_list from cxl_md
v3:
- Move back to QTG ID per partition
---
 drivers/cxl/core/mbox.c |    3 +++
 drivers/cxl/cxlmem.h    |   21 +++++++++++++++++++++
 drivers/cxl/port.c      |   36 ++++++++++++++++++++++++++++++++++++
 3 files changed, 60 insertions(+)

Comments

Jonathan Cameron May 12, 2023, 3:30 p.m. UTC | #1
On Mon, 08 May 2023 13:48:05 -0700
Dave Jiang <dave.jiang@intel.com> wrote:

> Once the QTG ID _DSM is executed successfully, the QTG ID is retrieved from
> the return package. Create a list of entries in the cxl_memdev context and
> store the QTG ID and the associated DPA range. This information can be
> exposed to user space via sysfs in order to help region setup for
> hot-plugged CXL memory devices.
> 
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
A trivial comment inline, but my earlier suggestion about needing to carry the full list
will apply here too...

> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
> index 001dabf0231b..9d77b7e420ce 100644
> --- a/drivers/cxl/cxlmem.h
> +++ b/drivers/cxl/cxlmem.h
> @@ -5,6 +5,7 @@
>  #include <uapi/linux/cxl_mem.h>
>  #include <linux/cdev.h>
>  #include <linux/uuid.h>
> +#include <linux/node.h>
>  #include "cxl.h"
>  
>  /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
> @@ -215,6 +216,19 @@ struct cxl_event_state {
>  	struct mutex log_lock;
>  };
>  
> +/**
> + * struct perf_prop - performance property entry
> + * @list - list entry
> + * @dpa_range - range for DPA address
> + * @qos_class - QoS Class cookie

coord?

Run a W=1 build and it will moan about this sort of missing description.
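
For reference, the check in question is kernel-doc during a W=1 build of this directory, e.g.:

	make W=1 drivers/cxl/

which would flag the undescribed member (and the mismatch between the comment's "struct perf_prop" and the actual struct name) along these lines; exact wording and line numbers vary by kernel version:

	warning: Function parameter or member 'coord' not described in 'perf_prop_entry'
	warning: expecting prototype for struct perf_prop. Prototype was for struct perf_prop_entry instead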

> + */
> +struct perf_prop_entry {
> +	struct list_head list;
> +	struct range dpa_range;
> +	u16 qos_class;
> +	struct access_coordinate coord;
> +};
> +

Patch

diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index f2addb457172..9c363060e5c1 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1120,6 +1120,9 @@  struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
 	mutex_init(&cxlds->mbox_mutex);
 	mutex_init(&cxlds->event.log_lock);
 	cxlds->dev = dev;
+	INIT_LIST_HEAD(&cxlds->perf_list);
+	cxlds->ram_qos_class = CXL_QOS_CLASS_INVALID;
+	cxlds->pmem_qos_class = CXL_QOS_CLASS_INVALID;
 
 	return cxlds;
 }
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 001dabf0231b..9d77b7e420ce 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -5,6 +5,7 @@ 
 #include <uapi/linux/cxl_mem.h>
 #include <linux/cdev.h>
 #include <linux/uuid.h>
+#include <linux/node.h>
 #include "cxl.h"
 
 /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -215,6 +216,19 @@  struct cxl_event_state {
 	struct mutex log_lock;
 };
 
+/**
+ * struct perf_prop - performance property entry
+ * @list - list entry
+ * @dpa_range - range for DPA address
+ * @qos_class - QoS Class cookie
+ */
+struct perf_prop_entry {
+	struct list_head list;
+	struct range dpa_range;
+	u16 qos_class;
+	struct access_coordinate coord;
+};
+
 /**
  * struct cxl_dev_state - The driver device state
  *
@@ -251,6 +265,9 @@  struct cxl_event_state {
  * @serial: PCIe Device Serial Number
  * @event: event log driver state
  * @mbox_send: @dev specific transport for transmitting mailbox commands
+ * @ram_qos_class: QTG ID for volatile region
+ * @pmem_qos_class: QTG ID for persistent region
+ * @perf_list: performance data entries list
  *
  * See section 8.2.9.5.2 Capacity Configuration and Label Storage for
  * details on capacity parameters.
@@ -283,6 +300,10 @@  struct cxl_dev_state {
 	u64 next_volatile_bytes;
 	u64 next_persistent_bytes;
 
+	int ram_qos_class;
+	int pmem_qos_class;
+	struct list_head perf_list;
+
 	resource_size_t component_reg_phys;
 	u64 serial;
 
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 1d55c460e1ab..c8c37dd79ecc 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -113,6 +113,40 @@  static int cxl_port_perf_data_calculate(struct cxl_port *port,
 	return 0;
 }
 
+static void cxl_memdev_set_qtg(struct cxl_dev_state *cxlds, struct list_head *dsmas_list)
+{
+	struct range pmem_range = {
+		.start = cxlds->pmem_res.start,
+		.end = cxlds->pmem_res.end,
+	};
+	struct range ram_range = {
+		.start = cxlds->ram_res.start,
+		.end = cxlds->ram_res.end,
+	};
+	struct perf_prop_entry *perf;
+	struct dsmas_entry *dent;
+
+	list_for_each_entry(dent, dsmas_list, list) {
+		perf = devm_kzalloc(cxlds->dev, sizeof(*perf), GFP_KERNEL);
+		if (!perf)
+			return;
+
+		perf->dpa_range = dent->dpa_range;
+		perf->qos_class = dent->qos_class;
+		perf->coord = dent->coord;
+		list_add_tail(&perf->list, &cxlds->perf_list);
+
+		if (resource_size(&cxlds->ram_res) &&
+		    range_contains(&ram_range, &dent->dpa_range) &&
+		    cxlds->ram_qos_class == CXL_QOS_CLASS_INVALID)
+			cxlds->ram_qos_class = dent->qos_class;
+		else if (resource_size(&cxlds->pmem_res) &&
+			 range_contains(&pmem_range, &dent->dpa_range) &&
+			 cxlds->pmem_qos_class == CXL_QOS_CLASS_INVALID)
+			cxlds->pmem_qos_class = dent->qos_class;
+	}
+}
+
 static int cxl_switch_port_probe(struct cxl_port *port)
 {
 	struct cxl_hdm *cxlhdm;
@@ -216,6 +250,8 @@  static int cxl_endpoint_port_probe(struct cxl_port *port)
 			if (rc)
 				dev_dbg(&port->dev,
 					"Failed to do perf coord calculations.\n");
+			else
+				cxl_memdev_set_qtg(cxlds, &dsmas_list);
 		}
 
 		dsmas_list_destroy(&dsmas_list);