diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -39,7 +39,8 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
 int cxl_region_init(void);
 void cxl_region_exit(void);
 int cxl_get_poison_by_endpoint(struct cxl_port *port);
-struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa,
+				     struct cxl_endpoint_decoder **cxled);
 u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
 		   u64 dpa);
 
@@ -50,7 +51,8 @@ static inline u64 cxl_dpa_to_hpa(struct cxl_region *cxlr,
 	return ULLONG_MAX;
 }
 static inline
-struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa,
+				     struct cxl_endpoint_decoder **cxled)
 {
 	return NULL;
 }
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -909,7 +909,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
 		guard(rwsem_read)(&cxl_dpa_rwsem);
 
 		dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
-		cxlr = cxl_dpa_to_region(cxlmd, dpa);
+		cxlr = cxl_dpa_to_region(cxlmd, dpa, NULL);
 		if (cxlr)
 			hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
 
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -323,7 +323,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 	if (rc)
 		goto out;
 
-	cxlr = cxl_dpa_to_region(cxlmd, dpa);
+	cxlr = cxl_dpa_to_region(cxlmd, dpa, NULL);
 	if (cxlr)
 		dev_warn_once(mds->cxlds.dev,
 			      "poison inject dpa:%#llx region: %s\n", dpa,
@@ -387,7 +387,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 	if (rc)
 		goto out;
 
-	cxlr = cxl_dpa_to_region(cxlmd, dpa);
+	cxlr = cxl_dpa_to_region(cxlmd, dpa, NULL);
 	if (cxlr)
 		dev_warn_once(mds->cxlds.dev,
 			      "poison clear dpa:%#llx region: %s\n", dpa,
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2828,6 +2828,7 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
 struct cxl_dpa_to_region_context {
 	struct cxl_region *cxlr;
 	u64 dpa;
+	struct cxl_endpoint_decoder *cxled;
 };
 
 static int __cxl_dpa_to_region(struct device *dev, void *arg)
@@ -2861,11 +2862,13 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg)
 		dev_name(dev));
 	ctx->cxlr = cxlr;
+	ctx->cxled = cxled;
 
 	return 1;
 }
 
-struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
+struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa,
+				     struct cxl_endpoint_decoder **cxled)
 {
 	struct cxl_dpa_to_region_context ctx;
 	struct cxl_port *port;
 
@@ -2877,6 +2880,9 @@ struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
 	if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
 		device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
 
+	if (cxled)
+		*cxled = ctx.cxled;
+
 	return ctx.cxlr;
 }
 
cxl_dpa_to_region() finds the region from a <DPA, device> tuple. The
search involves finding the device's endpoint decoder as well. Dynamic
capacity extent processing uses the endpoint decoder HPA information to
calculate the HPA offset. In addition, well-behaved extents should be
contained within an endpoint decoder.

Return the endpoint decoder found so it can be used in subsequent DCD
code.

Signed-off-by: Ira Weiny <ira.weiny@intel.com>
---
 drivers/cxl/core/core.h   | 6 ++++--
 drivers/cxl/core/mbox.c   | 2 +-
 drivers/cxl/core/memdev.c | 4 ++--
 drivers/cxl/core/region.c | 8 +++++++-
 4 files changed, 14 insertions(+), 6 deletions(-)
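
For reviewers, a rough sketch of how a later DCD extent handler might
consume the new output parameter. This is only an illustration, not part
of the patch: cxl_dcd_check_extent() is a hypothetical helper, assuming
only the existing cxled->dpa_res resource and cxl_dpa_to_hpa().

	/* Hypothetical caller sketch, not part of this series */
	static int cxl_dcd_check_extent(struct cxl_memdev *cxlmd, u64 dpa,
					u64 len)
	{
		struct cxl_endpoint_decoder *cxled;
		struct cxl_region *cxlr;
		u64 hpa;

		cxlr = cxl_dpa_to_region(cxlmd, dpa, &cxled);
		if (!cxlr || !cxled)
			return -ENXIO;

		/* A well-behaved extent fits within the endpoint decoder */
		if (dpa < cxled->dpa_res->start ||
		    dpa + len - 1 > cxled->dpa_res->end)
			return -EINVAL;

		/* Decoder/region HPA information drives the offset math */
		hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
		if (hpa == ULLONG_MAX)
			return -ENXIO;

		return 0;
	}

Callers that only need the region, like the poison inject/clear and
event trace paths above, pass NULL and see no behavior change.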