
[RFC,09/13] cxl: add cxl_request_dpa

Message ID 20240516081202.27023-10-alucerop@amd.com
State New
Series RFC: add Type2 device support

Commit Message

Alejandro Lucero Palau May 16, 2024, 8:11 a.m. UTC
From: Alejandro Lucero <alucerop@amd.com>

Add cxl_request_dpa() to search for and reserve device physical address (DPA) capacity that satisfies the caller's constraints: decoder mode plus a minimum and maximum size.

Signed-off-by: Alejandro Lucero <alucerop@amd.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/cxl/core/core.h             |   1 -
 drivers/cxl/core/hdm.c              | 153 +++++++++++++++++++++++-----
 include/linux/cxlmem.h              |   5 +
 tools/testing/cxl/type2/pci_type2.c |  12 ++-
 4 files changed, 145 insertions(+), 26 deletions(-)

Comments

Alejandro Lucero Palau June 12, 2024, 7:29 a.m. UTC | #1
From: Alejandro Lucero <alucerop@amd.com>

Add cxl_request_dpa() to search for and reserve device physical address (DPA) capacity that satisfies the caller's constraints: decoder mode plus a minimum and maximum size.


Based on: https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m4271ee49a91615c8af54e3ab20679f8be3099393

Signed-off-by: Alejandro Lucero <alucerop@amd.com>
Co-developed-by: Dan Williams <dan.j.williams@intel.com>
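
For reference, the intended call pattern is the one exercised by the pci_type2 test update in this patch: reserve volatile DPA at probe time with cxl_request_dpa() and return it with cxl_dpa_free() on teardown. A minimal sketch (reusing the endpoint and CXL_TYPE2_MEM_SIZE symbols from the test driver; error handling trimmed); a second sketch after the patch illustrates the @min/@max contract:

	struct cxl_endpoint_decoder *cxled;

	/* Reserve exactly CXL_TYPE2_MEM_SIZE of ram-mode DPA (min == max). */
	cxled = cxl_request_dpa(endpoint, CXL_DECODER_RAM, CXL_TYPE2_MEM_SIZE,
				CXL_TYPE2_MEM_SIZE);
	if (IS_ERR(cxled))
		return PTR_ERR(cxled);

	/* ... program decoders / create a region backed by the reserved DPA ... */

	/* On driver teardown, release the capacity. */
	cxl_dpa_free(cxled);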




Patch

diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index bc5a95665aa0..c0a2e2c1ccb3 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -61,7 +61,6 @@  struct dentry *cxl_debugfs_create_dir(const char *dir);
 int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
 		     enum cxl_decoder_mode mode);
 int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
-int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
 resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
 resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
 
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index c5f70741d70a..6459b6ecde88 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -404,6 +404,7 @@  int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
 	up_write(&cxl_dpa_rwsem);
 	return rc;
 }
+EXPORT_SYMBOL_NS_GPL(cxl_dpa_free, CXL);
 
 int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
 		     enum cxl_decoder_mode mode)
@@ -451,30 +452,17 @@  int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
 	return rc;
 }
 
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+static resource_size_t cxl_dpa_freespace(struct cxl_endpoint_decoder *cxled,
+					 resource_size_t *start_out,
+					 resource_size_t *skip_out)
 {
 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
 	resource_size_t free_ram_start, free_pmem_start;
-	struct cxl_port *port = cxled_to_port(cxled);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
-	struct device *dev = &cxled->cxld.dev;
 	resource_size_t start, avail, skip;
 	struct resource *p, *last;
-	int rc;
-
-	down_write(&cxl_dpa_rwsem);
-	if (cxled->cxld.region) {
-		dev_dbg(dev, "decoder attached to %s\n",
-			dev_name(&cxled->cxld.region->dev));
-		rc = -EBUSY;
-		goto out;
-	}
 
-	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
-		dev_dbg(dev, "decoder enabled\n");
-		rc = -EBUSY;
-		goto out;
-	}
+	lockdep_assert_held(&cxl_dpa_rwsem);
 
 	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
 		last = p;
@@ -496,7 +484,6 @@  int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
 		skip = 0;
 	} else if (cxled->mode == CXL_DECODER_PMEM) {
 		resource_size_t skip_start, skip_end;
-
 		start = free_pmem_start;
 		avail = cxlds->pmem_res.end - start + 1;
 		skip_start = free_ram_start;
@@ -506,21 +493,50 @@  int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
 		 * already handled the skip.
 		 */
 		if (cxlds->pmem_res.child &&
 		    skip_start == cxlds->pmem_res.child->start)
 			skip_end = skip_start - 1;
 		else
 			skip_end = start - 1;
 		skip = skip_end - skip_start + 1;
 	} else {
-		dev_dbg(dev, "mode not set\n");
-		rc = -EINVAL;
+		avail = 0;
+	}
+
+	if (!avail)
+		return 0;
+	if (start_out)
+		*start_out = start;
+	if (skip_out)
+		*skip_out = skip;
+	return avail;
+}
+
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+{
+	struct cxl_port *port = cxled_to_port(cxled);
+	struct device *dev = &cxled->cxld.dev;
+	resource_size_t start, avail, skip;
+	int rc;
+
+	down_write(&cxl_dpa_rwsem);
+	if (cxled->cxld.region) {
+		dev_dbg(dev, "EBUSY, decoder attached to %s\n",
+			     dev_name(&cxled->cxld.region->dev));
+		rc = -EBUSY;
 		goto out;
 	}
 
+	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+		dev_dbg(dev, "EBUSY, decoder enabled\n");
+		rc = -EBUSY;
+		goto out;
+	}
+
+	avail = cxl_dpa_freespace(cxled, &start, &skip);
 	if (size > avail) {
 		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
 			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
 			&avail);
 		rc = -ENOSPC;
 		goto out;
 	}
@@ -532,9 +548,96 @@  int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
 	if (rc)
 		return rc;
 
 	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
 }
 
+static int find_free_decoder(struct device *dev, void *data)
+{
+	struct cxl_endpoint_decoder *cxled;
+	struct cxl_port *port;
+
+	if (!is_endpoint_decoder(dev))
+		return 0;
+
+	cxled = to_cxl_endpoint_decoder(dev);
+	port = cxled_to_port(cxled);
+
+	if (cxled->cxld.id != port->hdm_end + 1)
+		return 0;
+
+	return 1;
+}
+
+/**
+ * cxl_request_dpa - search and reserve DPA given input constraints
+ * @endpoint: an endpoint port with available decoders
+ * @mode: DPA operation mode (ram vs pmem)
+ * @min: the minimum amount of capacity the call needs
+ * @max: the maximum amount of capacity to allocate (0 for no upper bound)
+ *
+ * Given that a region needs to allocate from limited HPA capacity it
+ * may be the case that a device has more mappable DPA capacity than
+ * available HPA. So, the expectation is that @min is a driver-known
+ * value for how much capacity is needed, and @max is based on the
+ * limit of how much HPA space is available for a new region.
+ *
+ * Returns a pinned cxl_endpoint_decoder with at least @min bytes of
+ * capacity reserved, or an error pointer. The caller is also expected
+ * to own a reference on the memdev registration associated with the
+ * endpoint so that the returned decoder stays pinned as well.
+ */
+struct cxl_endpoint_decoder *cxl_request_dpa(struct cxl_port *endpoint,
+					     enum cxl_decoder_mode mode,
+					     resource_size_t min,
+					     resource_size_t max)
+{
+	struct cxl_endpoint_decoder *cxled;
+	struct device *cxled_dev;
+	resource_size_t alloc;
+	int rc;
+
+	if (!IS_ALIGNED(min | max, SZ_256M))
+		return ERR_PTR(-EINVAL);
+
+	down_read(&cxl_dpa_rwsem);
+
+	cxled_dev = device_find_child(&endpoint->dev, NULL, find_free_decoder);
+	if (!cxled_dev)
+		cxled = ERR_PTR(-ENXIO);
+	else
+		cxled = to_cxl_endpoint_decoder(cxled_dev);
+
+	up_read(&cxl_dpa_rwsem);
+
+	if (IS_ERR(cxled))
+		return cxled;
+
+	rc = cxl_dpa_set_mode(cxled, mode);
+	if (rc)
+		goto err;
+
+	down_read(&cxl_dpa_rwsem);
+	alloc = cxl_dpa_freespace(cxled, NULL, NULL);
+	up_read(&cxl_dpa_rwsem);
+
+	if (max)
+		alloc = min(max, alloc);
+	if (alloc < min) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	rc = cxl_dpa_alloc(cxled, alloc);
+	if (rc)
+		goto err;
+
+	return cxled;
+err:
+	put_device(cxled_dev);
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_request_dpa, CXL);
+
 static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
 {
 	u16 eig;
diff --git a/include/linux/cxlmem.h b/include/linux/cxlmem.h
index 342ccd5486d3..caf1cd86421c 100644
--- a/include/linux/cxlmem.h
+++ b/include/linux/cxlmem.h
@@ -870,4 +870,9 @@  struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint,
 					   int interleave_ways,
 					   unsigned long flags,
 					   resource_size_t *max);
+struct cxl_endpoint_decoder *cxl_request_dpa(struct cxl_port *endpoint,
+					     enum cxl_decoder_mode mode,
+					     resource_size_t min,
+					     resource_size_t max);
+int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
 #endif /* __CXL_MEM_H__ */
diff --git a/tools/testing/cxl/type2/pci_type2.c b/tools/testing/cxl/type2/pci_type2.c
index deb5eeae501b..6499d709f54d 100644
--- a/tools/testing/cxl/type2/pci_type2.c
+++ b/tools/testing/cxl/type2/pci_type2.c
@@ -4,6 +4,7 @@ 
 #include <linux/cxlpci.h>
 #include <linux/cxlmem.h>
 
+struct cxl_endpoint_decoder *cxled;
 struct cxl_root_decoder *cxlrd;
 struct cxl_dev_state *cxlds;
 struct cxl_memdev *cxlmd;
@@ -99,6 +100,15 @@  static int type2_pci_probe(struct pci_dev *pci_dev,
 		goto out;
 	}
 
+	pci_info(pci_dev, "cxl request_dpa...\n");
+	cxled = cxl_request_dpa(endpoint, CXL_DECODER_RAM, CXL_TYPE2_MEM_SIZE,
+				CXL_TYPE2_MEM_SIZE);
+	if (IS_ERR(cxled)) {
+		dev_dbg(&pci_dev->dev, "cxl_request_dpa error\n");
+		rc = PTR_ERR(cxled);
+		goto out;
+	}
+
 out:
 	cxl_release_endpoint(cxlmd, endpoint);
 
@@ -107,7 +117,7 @@  static int type2_pci_probe(struct pci_dev *pci_dev,
 
 static void type2_pci_remove(struct pci_dev *pci_dev)
 {
-
+	cxl_dpa_free(cxled);
 }
 
 /* PCI device ID table */
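
To illustrate the @min/@max contract documented in the cxl_request_dpa() kernel-doc above: a driver that needs at least 1GB but can use more could do something like the sketch below. Here hpa_avail stands for whatever HPA budget the caller has already established (e.g. via cxl_get_hpa_freespace() from earlier in this series), endpoint is the endpoint port as in the pci_type2 test, and both bounds must be SZ_256M aligned or the call returns -EINVAL:

	resource_size_t want_min = SZ_1G;
	resource_size_t want_max = ALIGN_DOWN(min_t(resource_size_t, hpa_avail, SZ_4G),
					      SZ_256M);
	struct cxl_endpoint_decoder *cxled;

	cxled = cxl_request_dpa(endpoint, CXL_DECODER_RAM, want_min, want_max);
	if (IS_ERR(cxled))
		return PTR_ERR(cxled);
	/* Between want_min and want_max of ram-mode DPA is now reserved on cxled. */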