Message ID | 166144366038.745916.13425367025352369885.stgit@djiang5-desk3.ch.intel.com |
---|---|
State | Under Review |
Delegated to | Dan Williams |
Series | Add sanity check for interleave setup |
On Thu, 25 Aug 2022 09:07:40 -0700 Dave Jiang <dave.jiang@intel.com> wrote:

> Add a helper function to check the combination of interleave ways and
> interleave granularity together is sane against the interleave mask from
> the HDM decoder. Add the check to cxl_region_attach() to make sure the
> region config is sane. Add the check to cxl_port_setup_targets() to make
> sure the port setup config is also sane.
>
> Calculation refers to CXL spec rev3.0 8.2.4.19.13 implementation note #3.
>
> Reviewed-by: Dan Williams <dan.j.williams@intel.com>
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>

LGTM

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>

> ---
>  drivers/cxl/core/region.c    | 47 +++++++++++++++++++++++++++++++++++++++++-
>  tools/testing/cxl/test/cxl.c |  1 +
>  2 files changed, 47 insertions(+), 1 deletion(-)
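The calculation the commit message refers to can be illustrated outside the kernel. CXL encodes the interleave granularity as eig, where granularity = 2^(eig + 8) bytes, and power-of-2 interleave ways as eiw = log2(ways); such an interleave consumes HPA bits [eig + 8 + eiw - 1 : eig + 8], and each of those bits must be advertised in the HDM decoder's interleave capability mask. The sketch below is a minimal userspace illustration of only the power-of-2 path of the proposed cxl_interleave_capable(); the 3/6/12-way encodings are omitted, and granularity_to_eig(), ways_to_eiw(), interleave_capable() and the hard-coded capability mask are illustrative stand-ins for the kernel's granularity_to_cxl(), ways_to_cxl() and the decoder's interleave_mask, not part of any kernel API.

```c
/*
 * Minimal userspace sketch of the address-mask check performed by the
 * proposed cxl_interleave_capable() helper.  Only power-of-2 interleave
 * ways are covered; the 3/6/12-way encodings are omitted.  All names and
 * the capability mask below are illustrative, not kernel API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

/* CXL encodes granularity as 2^(eig + 8) bytes: 256B -> eig 0, 512B -> eig 1, ... */
static int granularity_to_eig(unsigned int granularity, unsigned int *eig)
{
	unsigned int g, e;

	for (g = 256, e = 0; g <= 16384; g <<= 1, e++) {
		if (g == granularity) {
			*eig = e;
			return 0;
		}
	}
	return -1;
}

/* For power-of-2 ways, eiw is log2(ways): 1 -> 0, 2 -> 1, 4 -> 2, ... */
static int ways_to_eiw(unsigned int ways, unsigned int *eiw)
{
	unsigned int w, e;

	for (w = 1, e = 0; w <= 16; w <<= 1, e++) {
		if (w == ways) {
			*eiw = e;
			return 0;
		}
	}
	return -1;
}

/*
 * A power-of-2 interleave consumes HPA bits [eig + 8 + eiw - 1 : eig + 8];
 * every one of them must be set in the decoder's capability mask.
 */
static bool interleave_capable(uint32_t cap_mask, unsigned int ways,
			       unsigned int granularity)
{
	unsigned int eig, eiw;
	uint32_t addr_mask;

	if (granularity_to_eig(granularity, &eig) || ways_to_eiw(ways, &eiw))
		return false;
	if (eiw == 0)		/* x1: no address bits consumed */
		return true;

	addr_mask = GENMASK(eig + 8 + eiw - 1, eig + 8);
	return !(~cap_mask & addr_mask);
}

int main(void)
{
	/* example capability: decoder can only interleave on HPA bits [11:8] */
	uint32_t cap_mask = GENMASK(11, 8);

	printf("4-way @ 256B: %s\n", interleave_capable(cap_mask, 4, 256) ? "ok" : "rejected");
	printf("4-way @ 4KB:  %s\n", interleave_capable(cap_mask, 4, 4096) ? "ok" : "rejected");
	return 0;
}
```

With a capability mask covering HPA bits [11:8], a 4-way interleave at 256B granularity passes, while the same interleave at 4KiB granularity needs bits [13:12] and is rejected; that is the kind of misconfiguration the new check is meant to catch before any decoder is programmed.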
```diff
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 401148016978..28272b0196e6 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -940,6 +940,42 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled,
 	return 0;
 }
 
+static int cxl_interleave_capable(struct cxl_port *port, struct device *dev,
+				  int ways, int granularity)
+{
+	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+	unsigned int addr_mask;
+	u16 eig;
+	u8 eiw;
+	int rc;
+
+	rc = granularity_to_cxl(granularity, &eig);
+	if (rc)
+		return rc;
+
+	rc = ways_to_cxl(ways, &eiw);
+	if (rc)
+		return rc;
+
+	if (eiw == 0)
+		return 0;
+
+	if (is_power_of_2(eiw))
+		addr_mask = GENMASK(eig + 8 + eiw - 1, eig + 8);
+	else
+		addr_mask = GENMASK((eig + eiw) / 3 - 1, eig + 8);
+
+	if (~cxlhdm->interleave_mask & addr_mask) {
+		dev_dbg(dev,
+			"%s:%s interleave (eig: %d eiw: %d mask: %#x) exceed cap (mask: %#x)\n",
+			dev_name(port->uport), dev_name(&port->dev), eig, eiw,
+			cxlhdm->interleave_mask, addr_mask);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int cxl_port_setup_targets(struct cxl_port *port,
 				  struct cxl_region *cxlr,
 				  struct cxl_endpoint_decoder *cxled)
@@ -1047,6 +1083,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
 			return rc;
 	}
 
+	rc = cxl_interleave_capable(port, &cxlr->dev, iw, ig);
+	if (rc)
+		return rc;
+
 	cxld->interleave_ways = iw;
 	cxld->interleave_granularity = ig;
 	cxld->hpa_range = (struct range) {
@@ -1196,6 +1236,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
 		return -EBUSY;
 	}
 
+	ep_port = cxled_to_port(cxled);
+	rc = cxl_interleave_capable(ep_port, &cxlr->dev, p->interleave_ways,
+				    p->interleave_granularity);
+	if (rc)
+		return rc;
+
 	for (i = 0; i < p->interleave_ways; i++) {
 		struct cxl_endpoint_decoder *cxled_target;
 		struct cxl_memdev *cxlmd_target;
@@ -1214,7 +1260,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
 		}
 	}
 
-	ep_port = cxled_to_port(cxled);
 	root_port = cxlrd_to_port(cxlrd);
 	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
 	if (!dport) {
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index a072b2d3e726..4b361ed63333 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -398,6 +398,7 @@ static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port)
 		return ERR_PTR(-ENOMEM);
 
 	cxlhdm->port = port;
+	dev_set_drvdata(&port->dev, cxlhdm);
 	return cxlhdm;
 }
```
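On the cxl_test side, the one-line change to mock_cxl_setup_hdm() is what keeps the new helper working under the mock topology: cxl_interleave_capable() looks up the struct cxl_hdm via dev_get_drvdata(&port->dev), which the mock previously never set (presumably the real devm_cxl_setup_hdm() already publishes it as the port's drvdata), so without the added dev_set_drvdata() call the check would dereference a NULL cxlhdm when run against cxl_test.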