diff mbox series

[1/3] cxl/region: Calculate performance data for a region

Message ID 170199190986.3543815.7111880145751330916.stgit@djiang5-mobl3
State Superseded
Headers show
Series cxl: Add support to report region access coordinates to numa nodes | expand

Commit Message

Dave Jiang Dec. 7, 2023, 11:31 p.m. UTC
Calculate and store the performance data for a CXL region. Find the worst
read and write latency for all the included ranges from each of the devices
that contribute to the region and designate that as the latency data. Sum
all the read and write bandwidth data for each of the device regions and
that is the total bandwidth for the region.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/cxl/core/region.c |   94 +++++++++++++++++++++++++++++++++++++++++++++
 drivers/cxl/cxl.h         |    1 
 2 files changed, 95 insertions(+)

Comments

Fan Ni Dec. 11, 2023, 5:44 p.m. UTC | #1
On Thu, Dec 07, 2023 at 04:31:49PM -0700, Dave Jiang wrote:
> Calculate and store the performance data for a CXL region. Find the worst
> read and write latency for all the included ranges from each of the devices
> that contribute to the region and designate that as the latency data. Sum
> all the read and write bandwidth data for each of the device regions and
> that is the total bandwidth for the region.
> 
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> ---
>  drivers/cxl/core/region.c |   94 +++++++++++++++++++++++++++++++++++++++++++++
>  drivers/cxl/cxl.h         |    1 
>  2 files changed, 95 insertions(+)
> 
> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
> index 56e575c79bb4..d879f5702cf2 100644
> --- a/drivers/cxl/core/region.c
> +++ b/drivers/cxl/core/region.c
> @@ -2934,6 +2934,98 @@ static int is_system_ram(struct resource *res, void *arg)
>  	return 1;
>  }
>  
> +static int cxl_region_perf_data_calculate(struct cxl_region *cxlr)
> +{
> +	struct cxl_region_params *p = &cxlr->params;
> +	struct cxl_endpoint_decoder *cxled;
> +	unsigned int rd_bw = 0, rd_lat = 0;
> +	unsigned int wr_bw = 0, wr_lat = 0;
> +	struct access_coordinate *coord;
> +	struct list_head *perf_list;
> +	int rc = 0, i;
> +
> +	lockdep_assert_held(&cxl_region_rwsem);
> +
> +	/* No need to proceed if hmem attributes are already present */
> +	if (cxlr->coord)
> +		return 0;
> +
> +	coord = devm_kzalloc(&cxlr->dev, sizeof(*coord), GFP_KERNEL);
> +	if (!coord)
> +		return -ENOMEM;
> +
> +	cxled = p->targets[0];

cxled is only used in the for loop below, maybe we can move it into the loop.

Fan

> +
> +	for (i = 0; i < p->nr_targets; i++) {
> +		struct range dpa = {
> +			.start = cxled->dpa_res->start,
> +			.end = cxled->dpa_res->end,
> +		};
> +		struct cxl_memdev_state *mds;
> +		struct perf_prop_entry *perf;
> +		struct cxl_dev_state *cxlds;
> +		struct cxl_memdev *cxlmd;
> +		bool found = false;
> +
> +		cxled = p->targets[i];
> +		cxlmd = cxled_to_memdev(cxled);
> +		cxlds = cxlmd->cxlds;
> +		mds = to_cxl_memdev_state(cxlds);
> +
> +		switch (cxlr->mode) {
> +		case CXL_DECODER_RAM:
> +			perf_list = &mds->ram_perf_list;
> +			break;
> +		case CXL_DECODER_PMEM:
> +			perf_list = &mds->pmem_perf_list;
> +			break;
> +		default:
> +			rc = -EINVAL;
> +			goto err;
> +		}
> +
> +		if (list_empty(perf_list)) {
> +			rc = -ENOENT;
> +			goto err;
> +		}
> +
> +		list_for_each_entry(perf, perf_list, list) {
> +			if (range_contains(&perf->dpa_range, &dpa)) {
> +				found = true;
> +				break;
> +			}
> +		}
> +
> +		if (!found) {
> +			rc = -ENOENT;
> +			goto err;
> +		}
> +
> +		/* Get total bandwidth and the worst latency for the cxl region */
> +		rd_lat = max_t(unsigned int, rd_lat,
> +			       perf->coord.read_latency);
> +		rd_bw += perf->coord.read_bandwidth;
> +		wr_lat = max_t(unsigned int, wr_lat,
> +			       perf->coord.write_latency);
> +		wr_bw += perf->coord.write_bandwidth;
> +	}
> +
> +	*coord = (struct access_coordinate) {
> +		.read_latency = rd_lat,
> +		.read_bandwidth = rd_bw,
> +		.write_latency = wr_lat,
> +		.write_bandwidth = wr_bw,
> +	};
> +
> +	cxlr->coord = coord;
> +
> +	return 0;
> +
> +err:
> +	devm_kfree(&cxlr->dev, coord);
> +	return rc;
> +}
> +
>  static int cxl_region_probe(struct device *dev)
>  {
>  	struct cxl_region *cxlr = to_cxl_region(dev);
> @@ -2959,6 +3051,8 @@ static int cxl_region_probe(struct device *dev)
>  		goto out;
>  	}
>  
> +	cxl_region_perf_data_calculate(cxlr);
> +
>  	/*
>  	 * From this point on any path that changes the region's state away from
>  	 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index 004534cf0361..265da412c5bd 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -529,6 +529,7 @@ struct cxl_region {
>  	struct cxl_pmem_region *cxlr_pmem;
>  	unsigned long flags;
>  	struct cxl_region_params params;
> +	struct access_coordinate *coord;
>  };
>  
>  struct cxl_nvdimm_bridge {
> 
>
Dan Williams Dec. 12, 2023, 12:19 a.m. UTC | #2
Dave Jiang wrote:
> Calculate and store the performance data for a CXL region. Find the worst
> read and write latency for all the included ranges from each of the devices
> that contribute to the region and designate that as the latency data. Sum
> all the read and write bandwidth data for each of the device regions and
> that is the total bandwidth for the region.
> 
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> ---
>  drivers/cxl/core/region.c |   94 +++++++++++++++++++++++++++++++++++++++++++++
>  drivers/cxl/cxl.h         |    1 
>  2 files changed, 95 insertions(+)
> 
> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
> index 56e575c79bb4..d879f5702cf2 100644
> --- a/drivers/cxl/core/region.c
> +++ b/drivers/cxl/core/region.c
> @@ -2934,6 +2934,98 @@ static int is_system_ram(struct resource *res, void *arg)
>  	return 1;
>  }
>  
> +static int cxl_region_perf_data_calculate(struct cxl_region *cxlr)
> +{
> +	struct cxl_region_params *p = &cxlr->params;
> +	struct cxl_endpoint_decoder *cxled;
> +	unsigned int rd_bw = 0, rd_lat = 0;
> +	unsigned int wr_bw = 0, wr_lat = 0;
> +	struct access_coordinate *coord;
> +	struct list_head *perf_list;
> +	int rc = 0, i;
> +
> +	lockdep_assert_held(&cxl_region_rwsem);
> +
> +	/* No need to proceed if hmem attributes are already present */
> +	if (cxlr->coord)
> +		return 0;
> +
> +	coord = devm_kzalloc(&cxlr->dev, sizeof(*coord), GFP_KERNEL);
> +	if (!coord)
> +		return -ENOMEM;

Why does this need to be dynamically allocated? It's only a few fields
that all regions will likely have, just include a 'struct
access_coordinate' instance in 'struct cxl_region' and check if the
values are non-zero (memcmp()) to see if it is initialized.

Saves a devm_free() error case.

> +
> +	cxled = p->targets[0];
> +
> +	for (i = 0; i < p->nr_targets; i++) {
> +		struct range dpa = {
> +			.start = cxled->dpa_res->start,
> +			.end = cxled->dpa_res->end,
> +		};
> +		struct cxl_memdev_state *mds;
> +		struct perf_prop_entry *perf;
> +		struct cxl_dev_state *cxlds;
> +		struct cxl_memdev *cxlmd;
> +		bool found = false;
> +
> +		cxled = p->targets[i];
> +		cxlmd = cxled_to_memdev(cxled);
> +		cxlds = cxlmd->cxlds;
> +		mds = to_cxl_memdev_state(cxlds);
> +
> +		switch (cxlr->mode) {
> +		case CXL_DECODER_RAM:
> +			perf_list = &mds->ram_perf_list;
> +			break;
> +		case CXL_DECODER_PMEM:
> +			perf_list = &mds->pmem_perf_list;
> +			break;
> +		default:
> +			rc = -EINVAL;
> +			goto err;
> +		}
> +
> +		if (list_empty(perf_list)) {
> +			rc = -ENOENT;
> +			goto err;
> +		}

No need for this check since list_for_each_entry() will already be a nop
and not set @found.

> +
> +		list_for_each_entry(perf, perf_list, list) {

This looks like a potential race / problem as region-sysfs and
auto-discovered regions are allowed to start running before
cxl_qos_class_verify() runs. cxl_qos_class_verify() needs to be
done with its work before any region might see it. It needs to be the
case that these unlocked walks of memdev lists are ok because they are
known to be stable for the lifetime of any region that might read them.

Somewhat glad that we never went back and added support for "immediate"
(CXL_SET_PARTITION_IMMEDIATE_FLAG) DPA partition changes, that would
make all of this that much more complicated.

> +			if (range_contains(&perf->dpa_range, &dpa)) {
> +				found = true;
> +				break;
> +			}
> +		}
> +
> +		if (!found) {
> +			rc = -ENOENT;
> +			goto err;
> +		}
> +
> +		/* Get total bandwidth and the worst latency for the cxl region */
> +		rd_lat = max_t(unsigned int, rd_lat,
> +			       perf->coord.read_latency);
> +		rd_bw += perf->coord.read_bandwidth;
> +		wr_lat = max_t(unsigned int, wr_lat,
> +			       perf->coord.write_latency);
> +		wr_bw += perf->coord.write_bandwidth;
> +	}
> +
> +	*coord = (struct access_coordinate) {
> +		.read_latency = rd_lat,
> +		.read_bandwidth = rd_bw,
> +		.write_latency = wr_lat,
> +		.write_bandwidth = wr_bw,
> +	};
> +
> +	cxlr->coord = coord;
> +
> +	return 0;
> +
> +err:
> +	devm_kfree(&cxlr->dev, coord);

Another reason I do not like open-coded devm_kfree() is because it is
not supported by cleanup.h and still needs "goto".
diff mbox series

Patch

diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 56e575c79bb4..d879f5702cf2 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2934,6 +2934,98 @@  static int is_system_ram(struct resource *res, void *arg)
 	return 1;
 }
 
+static int cxl_region_perf_data_calculate(struct cxl_region *cxlr)
+{
+	struct cxl_region_params *p = &cxlr->params;
+	struct cxl_endpoint_decoder *cxled;
+	unsigned int rd_bw = 0, rd_lat = 0;
+	unsigned int wr_bw = 0, wr_lat = 0;
+	struct access_coordinate *coord;
+	struct list_head *perf_list;
+	int rc = 0, i;
+
+	lockdep_assert_held(&cxl_region_rwsem);
+
+	/* No need to proceed if hmem attributes are already present */
+	if (cxlr->coord)
+		return 0;
+
+	coord = devm_kzalloc(&cxlr->dev, sizeof(*coord), GFP_KERNEL);
+	if (!coord)
+		return -ENOMEM;
+
+	cxled = p->targets[0];
+
+	for (i = 0; i < p->nr_targets; i++) {
+		struct range dpa = {
+			.start = cxled->dpa_res->start,
+			.end = cxled->dpa_res->end,
+		};
+		struct cxl_memdev_state *mds;
+		struct perf_prop_entry *perf;
+		struct cxl_dev_state *cxlds;
+		struct cxl_memdev *cxlmd;
+		bool found = false;
+
+		cxled = p->targets[i];
+		cxlmd = cxled_to_memdev(cxled);
+		cxlds = cxlmd->cxlds;
+		mds = to_cxl_memdev_state(cxlds);
+
+		switch (cxlr->mode) {
+		case CXL_DECODER_RAM:
+			perf_list = &mds->ram_perf_list;
+			break;
+		case CXL_DECODER_PMEM:
+			perf_list = &mds->pmem_perf_list;
+			break;
+		default:
+			rc = -EINVAL;
+			goto err;
+		}
+
+		if (list_empty(perf_list)) {
+			rc = -ENOENT;
+			goto err;
+		}
+
+		list_for_each_entry(perf, perf_list, list) {
+			if (range_contains(&perf->dpa_range, &dpa)) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			rc = -ENOENT;
+			goto err;
+		}
+
+		/* Get total bandwidth and the worst latency for the cxl region */
+		rd_lat = max_t(unsigned int, rd_lat,
+			       perf->coord.read_latency);
+		rd_bw += perf->coord.read_bandwidth;
+		wr_lat = max_t(unsigned int, wr_lat,
+			       perf->coord.write_latency);
+		wr_bw += perf->coord.write_bandwidth;
+	}
+
+	*coord = (struct access_coordinate) {
+		.read_latency = rd_lat,
+		.read_bandwidth = rd_bw,
+		.write_latency = wr_lat,
+		.write_bandwidth = wr_bw,
+	};
+
+	cxlr->coord = coord;
+
+	return 0;
+
+err:
+	devm_kfree(&cxlr->dev, coord);
+	return rc;
+}
+
 static int cxl_region_probe(struct device *dev)
 {
 	struct cxl_region *cxlr = to_cxl_region(dev);
@@ -2959,6 +3051,8 @@  static int cxl_region_probe(struct device *dev)
 		goto out;
 	}
 
+	cxl_region_perf_data_calculate(cxlr);
+
 	/*
 	 * From this point on any path that changes the region's state away from
 	 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 004534cf0361..265da412c5bd 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -529,6 +529,7 @@  struct cxl_region {
 	struct cxl_pmem_region *cxlr_pmem;
 	unsigned long flags;
 	struct cxl_region_params params;
+	struct access_coordinate *coord;
 };
 
 struct cxl_nvdimm_bridge {