Message ID | 168451604884.3470703.10173844932484539394.stgit@djiang5-mobl3
---|---
State | Superseded
Series | cxl: Add support for QTG ID retrieval for CXL subsystem
On Fri, 19 May 2023 10:07:28 -0700
Dave Jiang <dave.jiang@intel.com> wrote:

> Once the QTG ID _DSM is executed successfully, the QTG ID is retrieved from
> the return package. Create a list of entries in the cxl_memdev context and
> store the QTG ID and the associated DPA range. This information can be
> exposed to user space via sysfs in order to help region setup for
> hot-plugged CXL memory devices.
>
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>

LGTM

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>

> ---
> v6:
> - Store entire QTG ID list
> v4:
> - Remove unused qos_list from cxl_md
> v3:
> - Move back to QTG ID per partition
> ---
>  drivers/cxl/core/mbox.c |    1 +
>  drivers/cxl/cxlmem.h    |   23 +++++++++++++++++++++++
>  drivers/cxl/port.c      |   38 ++++++++++++++++++++++++++++++++++++++
>  3 files changed, 62 insertions(+)
>
> diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
> index 2c8dc7e2b84d..35941a306ea8 100644
> --- a/drivers/cxl/core/mbox.c
> +++ b/drivers/cxl/core/mbox.c
> @@ -1260,6 +1260,7 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
>  	mutex_init(&cxlds->mbox_mutex);
>  	mutex_init(&cxlds->event.log_lock);
>  	cxlds->dev = dev;
> +	INIT_LIST_HEAD(&cxlds->perf_list);
>
>  	return cxlds;
>  }
> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
> index a2845a7a69d8..708d60c5ffe1 100644
> --- a/drivers/cxl/cxlmem.h
> +++ b/drivers/cxl/cxlmem.h
> @@ -5,6 +5,7 @@
>  #include <uapi/linux/cxl_mem.h>
>  #include <linux/cdev.h>
>  #include <linux/uuid.h>
> +#include <linux/node.h>
>  #include "cxl.h"
>
>  /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
> @@ -254,6 +255,21 @@ struct cxl_poison_state {
>  	struct mutex lock;  /* Protect reads of poison list */
>  };
>
> +/**
> + * struct perf_prop - performance property entry
> + * @list - list entry
> + * @dpa_range - range for DPA address
> + * @coord - QoS performance data (i.e. latency, bandwidth)
> + * @qos_class - QoS Class cookies
> + */
> +struct perf_prop_entry {
> +	struct list_head list;
> +	struct range dpa_range;
> +	struct access_coordinate coord;
> +	/* Do not add members below this, contains flex array */
> +	struct qos_class qos_class;
> +};
> +
>  /**
>   * struct cxl_dev_state - The driver device state
>   *
> @@ -292,6 +308,9 @@ struct cxl_poison_state {
>   * @event: event log driver state
>   * @poison: poison driver state info
>   * @mbox_send: @dev specific transport for transmitting mailbox commands
> + * @ram_qos_class: QoS class cookies for volatile region
> + * @pmem_qos_class: QoS class cookies for persistent region
> + * @perf_list: performance data entries list
>   *
>   * See section 8.2.9.5.2 Capacity Configuration and Label Storage for
>   * details on capacity parameters.
> @@ -325,6 +344,10 @@ struct cxl_dev_state {
>  	u64 next_volatile_bytes;
>  	u64 next_persistent_bytes;
>
> +	struct qos_class *ram_qos_class;
> +	struct qos_class *pmem_qos_class;
> +	struct list_head perf_list;
> +
>  	resource_size_t component_reg_phys;
>  	u64 serial;
>
> diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
> index 03af92217192..e5d7ad5b1e16 100644
> --- a/drivers/cxl/port.c
> +++ b/drivers/cxl/port.c
> @@ -104,6 +104,42 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
>  	return 0;
>  }
>
> +static void cxl_memdev_set_qtg(struct cxl_dev_state *cxlds, struct list_head *dsmas_list)
> +{
> +	struct range pmem_range = {
> +		.start = cxlds->pmem_res.start,
> +		.end = cxlds->pmem_res.end,
> +	};
> +	struct range ram_range = {
> +		.start = cxlds->ram_res.start,
> +		.end = cxlds->ram_res.end,
> +	};
> +	struct perf_prop_entry *perf;
> +	struct dsmas_entry *dent;
> +
> +	list_for_each_entry(dent, dsmas_list, list) {
> +		perf = devm_kzalloc(cxlds->dev,
> +				    sizeof(*perf) + dent->qos_class->nr * sizeof(int),
> +				    GFP_KERNEL);
> +		if (!perf)
> +			return;
> +
> +		perf->dpa_range = dent->dpa_range;
> +		perf->coord = dent->coord;
> +		perf->qos_class = *dent->qos_class;
> +		list_add_tail(&perf->list, &cxlds->perf_list);
> +
> +		if (resource_size(&cxlds->ram_res) &&
> +		    range_contains(&ram_range, &dent->dpa_range) &&
> +		    !cxlds->ram_qos_class)
> +			cxlds->ram_qos_class = &perf->qos_class;
> +		else if (resource_size(&cxlds->pmem_res) &&
> +			 range_contains(&pmem_range, &dent->dpa_range) &&
> +			 !cxlds->pmem_qos_class)
> +			cxlds->pmem_qos_class = &perf->qos_class;
> +	}
> +}
> +
>  static int cxl_switch_port_probe(struct cxl_port *port)
>  {
>  	struct cxl_hdm *cxlhdm;
> @@ -197,6 +233,8 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
>  		if (rc)
>  			dev_dbg(&port->dev,
>  				"Failed to do perf coord calculations.\n");
> +		else
> +			cxl_memdev_set_qtg(cxlds, &dsmas_list);
>  	}
>
>  	cxl_cdat_dsmas_list_destroy(&dsmas_list);
>
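A note for readers following the allocation math in cxl_memdev_set_qtg() above: the definition of struct qos_class is not part of this patch (it comes from elsewhere in the series), but the sizeof(*perf) + dent->qos_class->nr * sizeof(int) sizing and the "contains flex array" comment imply a shape roughly like the sketch below. The field names here are illustrative assumptions, not taken from the posted code.

/*
 * Illustrative only -- not the definition used by the series. A layout
 * consistent with the "contains flex array" note and with the
 * sizeof(*perf) + nr * sizeof(int) allocation in cxl_memdev_set_qtg():
 */
struct qos_class {
	int nr;		/* number of QTG ID cookies reported by the _DSM */
	int ids[];	/* the QTG IDs themselves (flexible array member) */
};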
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 2c8dc7e2b84d..35941a306ea8 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1260,6 +1260,7 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
 	mutex_init(&cxlds->mbox_mutex);
 	mutex_init(&cxlds->event.log_lock);
 	cxlds->dev = dev;
+	INIT_LIST_HEAD(&cxlds->perf_list);
 
 	return cxlds;
 }
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index a2845a7a69d8..708d60c5ffe1 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -5,6 +5,7 @@
 #include <uapi/linux/cxl_mem.h>
 #include <linux/cdev.h>
 #include <linux/uuid.h>
+#include <linux/node.h>
 #include "cxl.h"
 
 /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -254,6 +255,21 @@ struct cxl_poison_state {
 	struct mutex lock;  /* Protect reads of poison list */
 };
 
+/**
+ * struct perf_prop - performance property entry
+ * @list - list entry
+ * @dpa_range - range for DPA address
+ * @coord - QoS performance data (i.e. latency, bandwidth)
+ * @qos_class - QoS Class cookies
+ */
+struct perf_prop_entry {
+	struct list_head list;
+	struct range dpa_range;
+	struct access_coordinate coord;
+	/* Do not add members below this, contains flex array */
+	struct qos_class qos_class;
+};
+
 /**
  * struct cxl_dev_state - The driver device state
  *
@@ -292,6 +308,9 @@ struct cxl_poison_state {
  * @event: event log driver state
  * @poison: poison driver state info
  * @mbox_send: @dev specific transport for transmitting mailbox commands
+ * @ram_qos_class: QoS class cookies for volatile region
+ * @pmem_qos_class: QoS class cookies for persistent region
+ * @perf_list: performance data entries list
  *
  * See section 8.2.9.5.2 Capacity Configuration and Label Storage for
  * details on capacity parameters.
@@ -325,6 +344,10 @@ struct cxl_dev_state {
 	u64 next_volatile_bytes;
 	u64 next_persistent_bytes;
 
+	struct qos_class *ram_qos_class;
+	struct qos_class *pmem_qos_class;
+	struct list_head perf_list;
+
 	resource_size_t component_reg_phys;
 	u64 serial;
 
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 03af92217192..e5d7ad5b1e16 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -104,6 +104,42 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
 	return 0;
 }
 
+static void cxl_memdev_set_qtg(struct cxl_dev_state *cxlds, struct list_head *dsmas_list)
+{
+	struct range pmem_range = {
+		.start = cxlds->pmem_res.start,
+		.end = cxlds->pmem_res.end,
+	};
+	struct range ram_range = {
+		.start = cxlds->ram_res.start,
+		.end = cxlds->ram_res.end,
+	};
+	struct perf_prop_entry *perf;
+	struct dsmas_entry *dent;
+
+	list_for_each_entry(dent, dsmas_list, list) {
+		perf = devm_kzalloc(cxlds->dev,
+				    sizeof(*perf) + dent->qos_class->nr * sizeof(int),
+				    GFP_KERNEL);
+		if (!perf)
+			return;
+
+		perf->dpa_range = dent->dpa_range;
+		perf->coord = dent->coord;
+		perf->qos_class = *dent->qos_class;
+		list_add_tail(&perf->list, &cxlds->perf_list);
+
+		if (resource_size(&cxlds->ram_res) &&
+		    range_contains(&ram_range, &dent->dpa_range) &&
+		    !cxlds->ram_qos_class)
+			cxlds->ram_qos_class = &perf->qos_class;
+		else if (resource_size(&cxlds->pmem_res) &&
+			 range_contains(&pmem_range, &dent->dpa_range) &&
+			 !cxlds->pmem_qos_class)
+			cxlds->pmem_qos_class = &perf->qos_class;
+	}
+}
+
 static int cxl_switch_port_probe(struct cxl_port *port)
 {
 	struct cxl_hdm *cxlhdm;
@@ -197,6 +233,8 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
 		if (rc)
 			dev_dbg(&port->dev,
 				"Failed to do perf coord calculations.\n");
+		else
+			cxl_memdev_set_qtg(cxlds, &dsmas_list);
 	}
 
 	cxl_cdat_dsmas_list_destroy(&dsmas_list);
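As a usage sketch of the per-device perf_list built above: a consumer could resolve the QoS class for a given device physical address by walking the list and matching the stored DPA range. The helper below is hypothetical, not part of this patch; it only illustrates how the DPA-range keying of the entries is meant to be used.

/*
 * Hypothetical helper, not part of this patch: return the qos_class entry
 * whose DPA range covers @dpa, or NULL if none was recorded.
 */
static struct qos_class *cxl_dpa_to_qos_class(struct cxl_dev_state *cxlds,
					      u64 dpa)
{
	struct perf_prop_entry *perf;

	list_for_each_entry(perf, &cxlds->perf_list, list)
		if (dpa >= perf->dpa_range.start && dpa <= perf->dpa_range.end)
			return &perf->qos_class;

	return NULL;
}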
Once the QTG ID _DSM is executed successfully, the QTG ID is retrieved from
the return package. Create a list of entries in the cxl_memdev context and
store the QTG ID and the associated DPA range. This information can be
exposed to user space via sysfs in order to help region setup for
hot-plugged CXL memory devices.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
v6:
- Store entire QTG ID list
v4:
- Remove unused qos_list from cxl_md
v3:
- Move back to QTG ID per partition
---
 drivers/cxl/core/mbox.c |    1 +
 drivers/cxl/cxlmem.h    |   23 +++++++++++++++++++++++
 drivers/cxl/port.c      |   38 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+)
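The commit message mentions exposing this data to user space via sysfs, which is not wired up in this patch. A minimal sketch of what a read-only memdev attribute for the volatile partition could look like is below; it assumes struct qos_class carries an nr count plus an ids[] flexible array (an assumption, see the illustrative layout earlier), and the attribute name and placement are illustrative only, not the series' actual ABI.

/* Sketch only: a possible read-only sysfs attribute for the volatile partition */
static ssize_t ram_qos_class_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct qos_class *qos = cxlds->ram_qos_class;
	ssize_t len = 0;
	int i;

	if (!qos)
		return -ENOENT;

	/* ids[] is the assumed flex-array member holding the QTG IDs */
	for (i = 0; i < qos->nr; i++)
		len += sysfs_emit_at(buf, len, "%d\n", qos->ids[i]);

	return len;
}
static DEVICE_ATTR_RO(ram_qos_class);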