@@ -940,6 +940,42 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled,
return 0;
}
+static int cxl_interleave_capable(struct cxl_port *port, struct device *dev,
+				  int ways, int granularity)
+{
+	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+	unsigned int addr_mask;
+	u16 eig;
+	u8 eiw;
+	int rc;
+
+	rc = granularity_to_cxl(granularity, &eig);
+	if (rc)
+		return rc;
+
+	rc = ways_to_cxl(ways, &eiw);
+	if (rc)
+		return rc;
+
+	/* eiw 0 (x1) and 8 (x3, pure mod-3 decode) use no interleave bits */
+	if (eiw == 0 || eiw == 8)
+		return 0;
+
+	if (eiw < 8)
+		addr_mask = GENMASK(eig + 8 + eiw - 1, eig + 8);
+	else
+		addr_mask = GENMASK(eig + eiw - 1, eig + 8);
+
+	if (~cxlhdm->interleave_mask & addr_mask) {
+		dev_dbg(dev,
+			"%s:%s interleave (eig: %d eiw: %d mask: %#x) exceed cap (mask: %#x)\n",
+			dev_name(port->uport), dev_name(&port->dev), eig, eiw,
+			addr_mask, cxlhdm->interleave_mask);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
static int cxl_port_setup_targets(struct cxl_port *port,
struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled)
@@ -1047,6 +1083,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
return rc;
}
+ rc = cxl_interleave_capable(port, &cxlr->dev, iw, ig);
+ if (rc)
+ return rc;
+
cxld->interleave_ways = iw;
cxld->interleave_granularity = ig;
cxld->hpa_range = (struct range) {
@@ -1196,6 +1236,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -EBUSY;
}
+ ep_port = cxled_to_port(cxled);
+ rc = cxl_interleave_capable(ep_port, &cxlr->dev, p->interleave_ways,
+ p->interleave_granularity);
+ if (rc)
+ return rc;
+
for (i = 0; i < p->interleave_ways; i++) {
struct cxl_endpoint_decoder *cxled_target;
struct cxl_memdev *cxlmd_target;
@@ -1214,7 +1260,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
}
}
- ep_port = cxled_to_port(cxled);
root_port = cxlrd_to_port(cxlrd);
dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
if (!dport) {