@@ -144,7 +144,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;
- if (cxlr->mode != CXL_DECODER_PMEM)
+ if (cxlr->mode != CXL_REGION_PMEM)
rc = sysfs_emit(buf, "\n");
else
rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
@@ -457,7 +457,7 @@ static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
* Support tooling that expects to find a 'uuid' attribute for all
* regions regardless of mode.
*/
- if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
+ if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_REGION_PMEM)
return 0444;
return a->mode;
}
@@ -620,7 +620,7 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_region *cxlr = to_cxl_region(dev);
- return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
+ return sysfs_emit(buf, "%s\n", cxl_region_mode_name(cxlr->mode));
}
static DEVICE_ATTR_RO(mode);
@@ -646,7 +646,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
/* ways, granularity and uuid (if PMEM) need to be set before HPA */
if (!p->interleave_ways || !p->interleave_granularity ||
- (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ (cxlr->mode == CXL_REGION_PMEM && uuid_is_null(&p->uuid)))
return -ENXIO;
div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
@@ -1863,6 +1863,17 @@ static int cxl_region_sort_targets(struct cxl_region *cxlr)
return rc;
}
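+/*
+ * An endpoint decoder may only be attached to a region of a compatible mode:
+ * 'ram' decoders to 'ram' regions and 'pmem' decoders to 'pmem' regions.
+ */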
+static bool cxl_modes_compatible(enum cxl_region_mode rmode,
+ enum cxl_decoder_mode dmode)
+{
+ if (rmode == CXL_REGION_RAM && dmode == CXL_DECODER_RAM)
+ return true;
+ if (rmode == CXL_REGION_PMEM && dmode == CXL_DECODER_PMEM)
+ return true;
+
+ return false;
+}
+
static int cxl_region_attach(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled, int pos)
{
@@ -1882,9 +1893,11 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return rc;
}
- if (cxled->mode != cxlr->mode) {
- dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
- dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
+ if (!cxl_modes_compatible(cxlr->mode, cxled->mode)) {
+ dev_dbg(&cxlr->dev, "%s region mode: %s mismatch decoder: %s\n",
+ dev_name(&cxled->cxld.dev),
+ cxl_region_mode_name(cxlr->mode),
+ cxl_decoder_mode_name(cxled->mode));
return -EINVAL;
}
@@ -2447,7 +2460,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
* devm_cxl_add_region - Adds a region to a decoder
* @cxlrd: root decoder
* @id: memregion id to create, or memregion_free() on failure
- * @mode: mode for the endpoint decoders of this region
+ * @mode: mode of this region
* @type: select whether this is an expander or accelerator (type-2 or type-3)
*
* This is the second step of region initialization. Regions exist within an
@@ -2458,7 +2471,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
*/
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
int id,
- enum cxl_decoder_mode mode,
+ enum cxl_region_mode mode,
enum cxl_decoder_type type)
{
struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
@@ -2512,16 +2525,17 @@ static ssize_t create_ram_region_show(struct device *dev,
}
static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
- enum cxl_decoder_mode mode, int id)
+ enum cxl_region_mode mode, int id)
{
int rc;
switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
+ case CXL_REGION_RAM:
+ case CXL_REGION_PMEM:
break;
default:
- dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
+ dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %s\n",
+ cxl_region_mode_name(mode));
return ERR_PTR(-EINVAL);
}
@@ -2549,7 +2563,7 @@ static ssize_t create_pmem_region_store(struct device *dev,
if (rc != 1)
return -EINVAL;
- cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
+ cxlr = __create_region(cxlrd, CXL_REGION_PMEM, id);
if (IS_ERR(cxlr))
return PTR_ERR(cxlr);
@@ -2569,7 +2583,7 @@ static ssize_t create_ram_region_store(struct device *dev,
if (rc != 1)
return -EINVAL;
- cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
+ cxlr = __create_region(cxlrd, CXL_REGION_RAM, id);
if (IS_ERR(cxlr))
return PTR_ERR(cxlr);
@@ -3215,6 +3229,22 @@ static int match_region_by_range(struct device *dev, void *data)
return rc;
}
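+/*
+ * Map an endpoint decoder mode to the corresponding region mode; anything
+ * unrecognized falls back to 'mixed'.
+ */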
+static enum cxl_region_mode
+cxl_decoder_to_region_mode(enum cxl_decoder_mode mode)
+{
+ switch (mode) {
+ case CXL_DECODER_NONE:
+ return CXL_REGION_NONE;
+ case CXL_DECODER_RAM:
+ return CXL_REGION_RAM;
+ case CXL_DECODER_PMEM:
+ return CXL_REGION_PMEM;
+ case CXL_DECODER_MIXED:
+ default:
+ return CXL_REGION_MIXED;
+ }
+}
+
/* Establish an empty region covering the given HPA range */
static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
struct cxl_endpoint_decoder *cxled)
@@ -3223,12 +3253,17 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
struct cxl_port *port = cxlrd_to_port(cxlrd);
struct range *hpa = &cxled->cxld.hpa_range;
struct cxl_region_params *p;
+ enum cxl_region_mode mode;
struct cxl_region *cxlr;
struct resource *res;
int rc;
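+ /* A decoder marked CXL_DECODER_DEAD cannot be used to construct a region */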
+ if (cxled->mode == CXL_DECODER_DEAD)
+ return ERR_PTR(-EINVAL);
+
+ mode = cxl_decoder_to_region_mode(cxled->mode);
do {
- cxlr = __create_region(cxlrd, cxled->mode,
+ cxlr = __create_region(cxlrd, mode,
atomic_read(&cxlrd->region_id));
} while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
@@ -3431,9 +3466,9 @@ static int cxl_region_probe(struct device *dev)
return rc;
switch (cxlr->mode) {
- case CXL_DECODER_PMEM:
+ case CXL_REGION_PMEM:
return devm_cxl_add_pmem_region(cxlr);
- case CXL_DECODER_RAM:
+ case CXL_REGION_RAM:
/*
* The region cannot be managed by CXL if any portion of
* it is already online as 'System RAM'
@@ -3445,8 +3480,8 @@ static int cxl_region_probe(struct device *dev)
return 0;
return devm_cxl_add_dax_region(cxlr);
default:
- dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
- cxlr->mode);
+ dev_dbg(&cxlr->dev, "unsupported region mode: %s\n",
+ cxl_region_mode_name(cxlr->mode));
return -ENXIO;
}
}
@@ -388,6 +388,27 @@ static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode)
return "mixed";
}
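+/*
+ * A region's mode tracks whether it maps volatile (ram) or persistent (pmem)
+ * capacity and constrains which endpoint decoder modes may be attached to it.
+ */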
+enum cxl_region_mode {
+ CXL_REGION_NONE,
+ CXL_REGION_RAM,
+ CXL_REGION_PMEM,
+ CXL_REGION_MIXED,
+};
+
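+/* Human-readable region mode name, for the sysfs 'mode' attribute and log messages */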
+static inline const char *cxl_region_mode_name(enum cxl_region_mode mode)
+{
+ static const char * const names[] = {
+ [CXL_REGION_NONE] = "none",
+ [CXL_REGION_RAM] = "ram",
+ [CXL_REGION_PMEM] = "pmem",
+ [CXL_REGION_MIXED] = "mixed",
+ };
+
+ if (mode >= CXL_REGION_NONE && mode <= CXL_REGION_MIXED)
+ return names[mode];
+ return "mixed";
+}
+
/*
* Track whether this decoder is reserved for region autodiscovery, or
* free for userspace provisioning.
@@ -515,7 +536,8 @@ struct cxl_region_params {
* struct cxl_region - CXL region
* @dev: This region's device
* @id: This region's id. Id is globally unique across all regions
- * @mode: Endpoint decoder allocation / access mode
+ * @mode: Region mode, which defines the endpoint decoder modes the region is
+ * compatible with
* @type: Endpoint decoder target type
* @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown
* @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge
@@ -528,7 +550,7 @@ struct cxl_region_params {
struct cxl_region {
struct device dev;
int id;
- enum cxl_decoder_mode mode;
+ enum cxl_region_mode mode;
enum cxl_decoder_type type;
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_pmem_region *cxlr_pmem;