@@ -17,6 +17,8 @@ struct dsmas_entry {
struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX];
int entries;
int qos_class;
+ bool shareable;
+ bool read_only;
};
static u32 cdat_normalize(u16 entry, u64 base, u8 type)
@@ -74,6 +76,8 @@ static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
return -ENOMEM;
dent->handle = dsmas->dsmad_handle;
+ dent->shareable = dsmas->flags & ACPI_CDAT_DSMAS_SHAREABLE;
+ dent->read_only = dsmas->flags & ACPI_CDAT_DSMAS_READ_ONLY;
dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;
@@ -255,6 +259,42 @@ static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
dent->coord[ACCESS_COORDINATE_CPU].write_latency);
}
+static void update_dcd_perf(struct cxl_dev_state *cxlds,
+ struct dsmas_entry *dent)
+{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct device *dev = cxlds->dev;
+
+ for (int i = 0; i < mds->nr_dc_region; i++) {
+ /* CXL defines a u32 handle while CDAT defines u8, ignore upper bits */
+ u8 dc_handle = mds->dc_region[i].dsmad_handle & 0xff;
+
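+		/* Skip DC regions without any DPA capacity */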
+ if (resource_size(&cxlds->dc_res[i])) {
+ struct range dc_range = {
+ .start = cxlds->dc_res[i].start,
+ .end = cxlds->dc_res[i].end,
+ };
+
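+			/* Only apply DSMAS data that covers the entire DC region */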
+ if (range_contains(&dent->dpa_range, &dc_range)) {
+ if (dent->handle != dc_handle)
+					dev_warn(dev, "DC Region/DSMAS mismatched handle/range; region [range 0x%016llx-0x%016llx] (%u); dsmas [range 0x%016llx-0x%016llx] (%u)\n"
+						      "setting DC region attributes regardless\n",
+						 dc_range.start, dc_range.end,
+						 dc_handle,
+						 dent->dpa_range.start, dent->dpa_range.end,
+						 dent->handle);
+
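+				/* Record DSMAS shareable/read-only attributes for this DC region */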
+ mds->dc_region[i].shareable = dent->shareable;
+ mds->dc_region[i].read_only = dent->read_only;
+ update_perf_entry(dev, dent, &mds->dc_perf[i]);
+ }
+ }
+ }
+}
+
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
struct xarray *dsmas_xa)
{
@@ -278,6 +318,8 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
else if (resource_size(&cxlds->pmem_res) &&
range_contains(&pmem_range, &dent->dpa_range))
update_perf_entry(dev, dent, &mds->pmem_perf);
+ else if (cxl_dcd_supported(mds))
+ update_dcd_perf(cxlds, dent);
else
dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
dent->dpa_range.start);
@@ -1649,6 +1649,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
+ for (int i = 0; i < CXL_MAX_DC_REGION; i++)
+ mds->dc_perf[i].qos_class = CXL_QOS_CLASS_INVALID;
return mds;
}
@@ -466,6 +466,8 @@ struct cxl_dc_region_info {
u64 blk_size;
u32 dsmad_handle;
u8 flags;
+ bool shareable;
+ bool read_only;
u8 name[CXL_DC_REGION_STRLEN];
};
@@ -533,6 +535,7 @@ struct cxl_memdev_state {
u8 nr_dc_region;
struct cxl_dc_region_info dc_region[CXL_MAX_DC_REGION];
+ struct cxl_dpa_perf dc_perf[CXL_MAX_DC_REGION];
struct cxl_event_state event;
struct cxl_poison_state poison;