From patchwork Thu Dec 2 04:37:37 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651711 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 4721DC433EF for ; Thu, 2 Dec 2021 04:40:20 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355360AbhLBEng (ORCPT ); Wed, 1 Dec 2021 23:43:36 -0500 Received: from mga09.intel.com ([134.134.136.24]:61244 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355497AbhLBEn0 (ORCPT ); Wed, 1 Dec 2021 23:43:26 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438363" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438363" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:03 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717375" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:03 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 01/14] cxl/core: Add, document, and tighten up decoder APIs Date: Wed, 1 Dec 2021 20:37:37 -0800 Message-Id: <20211202043750.3501494-2-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org Since the code to add decoders for switches and endpoints is on the horizon 
it helps to have properly documented APIs. In addition, the decoder APIs will never need to support a negative count for downstream targets as the spec explicitly starts numbering them at 1, ie. even 0 is an "invalid" value which can be used as a sentinel. Signed-off-by: Ben Widawsky Reviewed-by: Jonathan Cameron --- Changes since v1: - Added decoder type specific APIs (Dan) --- drivers/cxl/acpi.c | 4 +-- drivers/cxl/core/bus.c | 82 +++++++++++++++++++++++++++++++++++++----- drivers/cxl/cxl.h | 5 ++- 3 files changed, 79 insertions(+), 12 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index da70f1836db6..9f88dec03b33 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -102,7 +102,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++) target_map[i] = cfmws->interleave_targets[i]; - cxld = cxl_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws)); + cxld = cxl_root_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws)); if (IS_ERR(cxld)) return 0; @@ -260,7 +260,7 @@ static int add_host_bridge_uport(struct device *match, void *arg) * dport. Disable the range until the first CXL region is enumerated / * activated. */ - cxld = cxl_decoder_alloc(port, 1); + cxld = cxl_root_decoder_alloc(port, 1); if (IS_ERR(cxld)) return PTR_ERR(cxld); diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index ab756a53a983..2f72087846e3 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -495,7 +495,20 @@ static int decoder_populate_targets(struct cxl_decoder *cxld, return rc; } -struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets) +/** + * cxl_decoder_alloc - Allocate a new CXL decoder + * @port: owning port of this decoder + * @nr_targets: downstream targets accessible by this decoder. All upstream + * ports and root ports must have at least 1 target. + * + * A port should contain one or more decoders. 
Each of those decoders enable + * some address space for CXL.mem utilization. A decoder is expected to be + * configured by the caller before registering. + * + * Return: A new cxl decoder to be registered by cxl_decoder_add() + */ +static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, + unsigned int nr_targets) { struct cxl_decoder *cxld, cxld_const_init = { .nr_targets = nr_targets, @@ -503,7 +516,7 @@ struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets) struct device *dev; int rc = 0; - if (nr_targets > CXL_DECODER_MAX_INTERLEAVE || nr_targets < 1) + if (nr_targets > CXL_DECODER_MAX_INTERLEAVE || nr_targets == 0) return ERR_PTR(-EINVAL); cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL); @@ -522,19 +535,70 @@ struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets) dev->parent = &port->dev; dev->bus = &cxl_bus_type; - /* root ports do not have a cxl_port_type parent */ - if (port->dev.parent->type == &cxl_port_type) - dev->type = &cxl_decoder_switch_type; - else - dev->type = &cxl_decoder_root_type; - return cxld; err: kfree(cxld); return ERR_PTR(rc); } -EXPORT_SYMBOL_NS_GPL(cxl_decoder_alloc, CXL); +/** + * cxl_root_decoder_alloc - Allocate a root level decoder + * @port: owning port of this decoder + * @nr_targets: number of downstream targets. The number of downstream targets + * is determined with a platform specific mechanism. + * + * Return: A new cxl decoder to be registered by cxl_decoder_add() + */ +struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port, + unsigned int nr_targets) +{ + struct cxl_decoder *cxld; + + cxld = cxl_decoder_alloc(port, nr_targets); + if (!IS_ERR(cxld)) + cxld->dev.type = &cxl_decoder_root_type; + + return cxld; +} +EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL); + +/** + * cxl_switch_decoder_alloc - Allocate a switch level decoder + * @port: owning port of this decoder + * @nr_targets: number of downstream targets. 
The number of downstream targets + * is determined via CXL capability registers. + * + * Return: A new cxl decoder to be registered by cxl_decoder_add() + */ +struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, + unsigned int nr_targets) +{ + struct cxl_decoder *cxld; + + cxld = cxl_decoder_alloc(port, nr_targets); + if (!IS_ERR(cxld)) + cxld->dev.type = &cxl_decoder_switch_type; + + return cxld; +} +EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL); + +/** + * cxl_decoder_add - Add a decoder with targets + * @cxld: The cxl decoder allocated by cxl_decoder_alloc() + * @target_map: A list of downstream ports that this decoder can direct memory + * traffic to. These numbers should correspond with the port number + * in the PCIe Link Capabilities structure. + * + * Certain types of decoders may not have any targets. The main example of this + * is an endpoint device. A more awkward example is a hostbridge whose root + * ports get hot added (technically possible, though unlikely). + * + * Context: Process context. Takes and releases the cxld's device lock. + * + * Return: Negative error code if the decoder wasn't properly configured; else + * returns 0. 
+ */ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) { struct cxl_port *port; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index ad816fb5bdcc..a036594ec5b3 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -288,7 +288,10 @@ int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id, struct cxl_decoder *to_cxl_decoder(struct device *dev); bool is_root_decoder(struct device *dev); -struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets); +struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port, + unsigned int nr_targets); +struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, + unsigned int nr_targets); int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); From patchwork Thu Dec 2 04:37:38 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651707 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 05C46C433F5 for ; Thu, 2 Dec 2021 04:40:14 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355525AbhLBEnc (ORCPT ); Wed, 1 Dec 2021 23:43:32 -0500 Received: from mga09.intel.com ([134.134.136.24]:61245 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355501AbhLBEn1 (ORCPT ); Wed, 1 Dec 2021 23:43:27 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438369" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438369" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:04 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; 
d="scan'208";a="745717387" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:03 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Jonathan Cameron , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 02/14] cxl: Introduce endpoint decoders Date: Wed, 1 Dec 2021 20:37:38 -0800 Message-Id: <20211202043750.3501494-3-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org Endpoints have decoders too. It is useful to share the same infrastructure from cxl_core. Endpoints do not have dports (downstream targets), only the underlying physical medium. As a result, some special casing is needed. There is no functional change introduced yet as endpoints don't actually enumerate decoders yet. Reviewed-by: Jonathan Cameron Signed-off-by: Ben Widawsky --- Dan brought up having an endpoint specific API for adding decoders. Ultimately I didn't see value in doing this, so it's not here. I'm open for discussion. Until the final patch in the series, there isn't much reason to have it. 
--- drivers/cxl/core/bus.c | 54 ++++++++++++++++++++++++++++++++++++------ drivers/cxl/cxl.h | 1 + 2 files changed, 48 insertions(+), 7 deletions(-) diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 2f72087846e3..2bf9c0704a70 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -187,6 +187,12 @@ static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = { NULL, }; +static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = { + &cxl_decoder_base_attribute_group, + &cxl_base_attribute_group, + NULL, +}; + static void cxl_decoder_release(struct device *dev) { struct cxl_decoder *cxld = to_cxl_decoder(dev); @@ -196,6 +202,12 @@ static void cxl_decoder_release(struct device *dev) kfree(cxld); } +static const struct device_type cxl_decoder_endpoint_type = { + .name = "cxl_decoder_endpoint", + .release = cxl_decoder_release, + .groups = cxl_decoder_endpoint_attribute_groups, +}; + static const struct device_type cxl_decoder_switch_type = { .name = "cxl_decoder_switch", .release = cxl_decoder_release, @@ -208,6 +220,11 @@ static const struct device_type cxl_decoder_root_type = { .groups = cxl_decoder_root_attribute_groups, }; +static bool is_endpoint_decoder(struct device *dev) +{ + return dev->type == &cxl_decoder_endpoint_type; +} + bool is_root_decoder(struct device *dev) { return dev->type == &cxl_decoder_root_type; @@ -499,7 +516,9 @@ static int decoder_populate_targets(struct cxl_decoder *cxld, * cxl_decoder_alloc - Allocate a new CXL decoder * @port: owning port of this decoder * @nr_targets: downstream targets accessible by this decoder. All upstream - * ports and root ports must have at least 1 target. + * ports and root ports must have at least 1 target. Endpoint + * devices will have 0 targets. Callers wishing to register an + * endpoint device should specify 0. * * A port should contain one or more decoders. Each of those decoders enable * some address space for CXL.mem utilization. 
A decoder is expected to be @@ -516,7 +535,7 @@ static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, struct device *dev; int rc = 0; - if (nr_targets > CXL_DECODER_MAX_INTERLEAVE || nr_targets == 0) + if (nr_targets > CXL_DECODER_MAX_INTERLEAVE) return ERR_PTR(-EINVAL); cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL); @@ -583,6 +602,24 @@ struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, } EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL); +/** + * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder + * @port: owning port of this decoder + * + * Return: A new cxl decoder to be registered by cxl_decoder_add() + */ +struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) +{ + struct cxl_decoder *cxld; + + cxld = cxl_decoder_alloc(port, 0); + if (!IS_ERR(cxld)) + cxld->dev.type = &cxl_decoder_endpoint_type; + + return cxld; +} +EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL); + /** * cxl_decoder_add - Add a decoder with targets * @cxld: The cxl decoder allocated by cxl_decoder_alloc() @@ -614,12 +651,15 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) if (cxld->interleave_ways < 1) return -EINVAL; - port = to_cxl_port(cxld->dev.parent); - rc = decoder_populate_targets(cxld, port, target_map); - if (rc) - return rc; - dev = &cxld->dev; + + port = to_cxl_port(cxld->dev.parent); + if (!is_endpoint_decoder(dev)) { + rc = decoder_populate_targets(cxld, port, target_map); + if (rc) + return rc; + } + rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id); if (rc) return rc; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index a036594ec5b3..9b3904788762 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -292,6 +292,7 @@ struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port, unsigned int nr_targets); struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, unsigned int nr_targets); +struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port 
*port); int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); From patchwork Thu Dec 2 04:37:39 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651713 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id D6DA2C433EF for ; Thu, 2 Dec 2021 04:40:23 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355497AbhLBEnj (ORCPT ); Wed, 1 Dec 2021 23:43:39 -0500 Received: from mga09.intel.com ([134.134.136.24]:61245 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355512AbhLBEnc (ORCPT ); Wed, 1 Dec 2021 23:43:32 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438370" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438370" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:04 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717395" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:04 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Dan Williams , Jonathan Cameron , Alison Schofield , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 03/14] cxl/core: Move target population locking to caller Date: Wed, 1 Dec 2021 20:37:39 -0800 Message-Id: <20211202043750.3501494-4-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: 
<20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org In preparation for a port driver that enumerates a descendant port + decoder hierarchy, arrange for an unlocked version of cxl_decoder_add(). Otherwise a port-driver that adds a child decoder will deadlock on the device_lock() in ->probe(). As a result of this change the device lock for the port is held for a longer amount of time. Reviewed-by: Dan Williams Reviewed-by: Jonathan Cameron Signed-off-by: Ben Widawsky --- Changes since v1: - Fix kdoc describing the lock held (Dan) --- drivers/cxl/core/bus.c | 61 +++++++++++++++++++++++++++++++----------- drivers/cxl/cxl.h | 1 + 2 files changed, 47 insertions(+), 15 deletions(-) diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 2bf9c0704a70..4bf355a3e396 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -487,28 +487,22 @@ static int decoder_populate_targets(struct cxl_decoder *cxld, { int rc = 0, i; + device_lock_assert(&port->dev); + if (!target_map) return 0; - device_lock(&port->dev); - if (list_empty(&port->dports)) { - rc = -EINVAL; - goto out_unlock; - } + if (list_empty(&port->dports)) + return -EINVAL; for (i = 0; i < cxld->nr_targets; i++) { struct cxl_dport *dport = find_dport(port, target_map[i]); - if (!dport) { - rc = -ENXIO; - goto out_unlock; - } + if (!dport) + return -ENXIO; cxld->target[i] = dport; } -out_unlock: - device_unlock(&port->dev); - return rc; } @@ -621,7 +615,7 @@ struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL); /** - * cxl_decoder_add - Add a decoder with targets + * cxl_decoder_add_locked - Add a decoder with targets * @cxld: The cxl decoder allocated by cxl_decoder_alloc() * @target_map: A list of downstream ports that this decoder can direct memory * traffic to. 
These numbers should correspond with the port number @@ -631,12 +625,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL); * is an endpoint device. A more awkward example is a hostbridge whose root * ports get hot added (technically possible, though unlikely). * - * Context: Process context. Takes and releases the cxld's device lock. + * This is the locked variant of cxl_decoder_add(). + * + * Context: Process context. Expects the device lock of the port that owns the + * @cxld to be held. * * Return: Negative error code if the decoder wasn't properly configured; else * returns 0. */ -int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) +int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map) { struct cxl_port *port; struct device *dev; @@ -673,6 +670,40 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) return device_add(dev); } +EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL); + +/** + * cxl_decoder_add - Add a decoder with targets + * @cxld: The cxl decoder allocated by cxl_decoder_alloc() + * @target_map: A list of downstream ports that this decoder can direct memory + * traffic to. These numbers should correspond with the port number + * in the PCIe Link Capabilities structure. + * + * This is the unlocked variant of cxl_decoder_add_locked(). + * See cxl_decoder_add_locked(). + * + * Context: Process context. Takes and releases the device lock of the port that + * owns the @cxld. 
+ */ +int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) +{ + struct cxl_port *port; + int rc; + + if (WARN_ON_ONCE(!cxld)) + return -EINVAL; + + if (WARN_ON_ONCE(IS_ERR(cxld))) + return PTR_ERR(cxld); + + port = to_cxl_port(cxld->dev.parent); + + device_lock(&port->dev); + rc = cxl_decoder_add_locked(cxld, target_map); + device_unlock(&port->dev); + + return rc; +} EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL); static void cxld_unregister(void *dev) diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 9b3904788762..762d8254c7c6 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -293,6 +293,7 @@ struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port, struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, unsigned int nr_targets); struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port); +int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map); int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); From patchwork Thu Dec 2 04:37:40 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651717 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id C39C4C433F5 for ; Thu, 2 Dec 2021 04:40:28 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355536AbhLBEnq (ORCPT ); Wed, 1 Dec 2021 23:43:46 -0500 Received: from mga09.intel.com ([134.134.136.24]:61242 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355506AbhLBEnc (ORCPT ); Wed, 1 Dec 2021 23:43:32 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438373" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; 
d="scan'208";a="236438373" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:05 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717403" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:04 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Jonathan Cameron , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 04/14] cxl: Introduce topology host registration Date: Wed, 1 Dec 2021 20:37:40 -0800 Message-Id: <20211202043750.3501494-5-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org The description of the CXL topology will be conveyed by a platform specific entity that is expected to be a singleton. For ACPI based systems, this is ACPI0017. This cxl_topology_host is needed as a constraint for when CXL.mem connectivity can be verified from root to endpoint. Given that endpoints can attach at any point in time relative to when the root arrives CXL.mem connectivity needs to be revalidated at every topology host arrival / depart event. cxl_test makes for an interesting case. cxl_test creates an alternate universe where there are possibly two root topology hosts (a real ACPI0017/CEDT, and a fake ACPI0017/CEDT). For this to work in the future, cxl_acpi (or some future platform host driver) will need to be unloaded first. 
Reviewed-by: Jonathan Cameron Signed-off-by: Ben Widawsky Reported-by: kernel test robot Reported-by: Dan Carpenter --- Changes since v1 - Commit message overhaul (Dan) --- drivers/cxl/acpi.c | 18 ++++++++++--- drivers/cxl/core/bus.c | 57 +++++++++++++++++++++++++++++++++++++++--- drivers/cxl/cxl.h | 5 +++- 3 files changed, 73 insertions(+), 7 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 9f88dec03b33..7bcb54e9fe00 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -225,8 +225,7 @@ static int add_host_bridge_uport(struct device *match, void *arg) return 0; } - port = devm_cxl_add_port(host, match, dport->component_reg_phys, - root_port); + port = devm_cxl_add_port(match, dport->component_reg_phys, root_port); if (IS_ERR(port)) return PTR_ERR(port); dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev)); @@ -377,6 +376,11 @@ static int add_root_nvdimm_bridge(struct device *match, void *data) return 1; } +static void clear_topology_host(void *data) +{ + cxl_unregister_topology_host(data); +} + static int cxl_acpi_probe(struct platform_device *pdev) { int rc; @@ -385,7 +389,15 @@ static int cxl_acpi_probe(struct platform_device *pdev) struct acpi_device *adev = ACPI_COMPANION(host); struct cxl_cfmws_context ctx; - root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL); + rc = cxl_register_topology_host(host); + if (rc) + return rc; + + rc = devm_add_action_or_reset(host, clear_topology_host, host); + if (rc) + return rc; + + root_port = devm_cxl_add_port(host, CXL_RESOURCE_NONE, root_port); if (IS_ERR(root_port)) return PTR_ERR(root_port); dev_dbg(host, "add: %s\n", dev_name(&root_port->dev)); diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 4bf355a3e396..97cbd7132b15 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -25,6 +25,53 @@ */ static DEFINE_IDA(cxl_port_ida); +static DECLARE_RWSEM(topology_host_sem); + +static struct device *cxl_topology_host; + +int 
cxl_register_topology_host(struct device *host) +{ + down_write(&topology_host_sem); + if (cxl_topology_host) { + up_write(&topology_host_sem); + pr_warn("%s host currently in use. Please try unloading %s", + dev_name(cxl_topology_host), host->driver->name); + return -EBUSY; + } + + cxl_topology_host = host; + up_write(&topology_host_sem); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_register_topology_host, CXL); + +void cxl_unregister_topology_host(struct device *host) +{ + down_write(&topology_host_sem); + if (cxl_topology_host == host) + cxl_topology_host = NULL; + else + pr_warn("topology host in use by %s\n", + cxl_topology_host->driver->name); + up_write(&topology_host_sem); +} +EXPORT_SYMBOL_NS_GPL(cxl_unregister_topology_host, CXL); + +static struct device *get_cxl_topology_host(void) +{ + down_read(&topology_host_sem); + if (cxl_topology_host) + return cxl_topology_host; + up_read(&topology_host_sem); + return NULL; +} + +static void put_cxl_topology_host(struct device *dev) +{ + WARN_ON(dev != cxl_topology_host); + up_read(&topology_host_sem); +} static ssize_t devtype_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -362,17 +409,16 @@ static struct cxl_port *cxl_port_alloc(struct device *uport, /** * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy - * @host: host device for devm operations * @uport: "physical" device implementing this upstream port * @component_reg_phys: (optional) for configurable cxl_port instances * @parent_port: next hop up in the CXL memory decode hierarchy */ -struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, +struct cxl_port *devm_cxl_add_port(struct device *uport, resource_size_t component_reg_phys, struct cxl_port *parent_port) { + struct device *dev, *host; struct cxl_port *port; - struct device *dev; int rc; port = cxl_port_alloc(uport, component_reg_phys, parent_port); @@ -391,7 +437,12 @@ struct cxl_port *devm_cxl_add_port(struct device *host, struct 
device *uport, if (rc) goto err; + host = get_cxl_topology_host(); + if (!host) + return ERR_PTR(-ENODEV); + rc = devm_add_action_or_reset(host, unregister_port, port); + put_cxl_topology_host(host); if (rc) return ERR_PTR(rc); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 762d8254c7c6..6bafc2cd8f7a 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -152,6 +152,9 @@ int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, #define CXL_RESOURCE_NONE ((resource_size_t) -1) #define CXL_TARGET_STRLEN 20 +int cxl_register_topology_host(struct device *host); +void cxl_unregister_topology_host(struct device *host); + /* * cxl_decoder flags that define the type of memory / devices this * decoder supports as well as configuration lock status See "CXL 2.0 @@ -279,7 +282,7 @@ struct cxl_dport { }; struct cxl_port *to_cxl_port(struct device *dev); -struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, +struct cxl_port *devm_cxl_add_port(struct device *uport, resource_size_t component_reg_phys, struct cxl_port *parent_port); From patchwork Thu Dec 2 04:37:41 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651715 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id EADB8C433FE for ; Thu, 2 Dec 2021 04:40:24 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355503AbhLBEnl (ORCPT ); Wed, 1 Dec 2021 23:43:41 -0500 Received: from mga09.intel.com ([134.134.136.24]:61244 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355520AbhLBEnc (ORCPT ); Wed, 1 Dec 2021 23:43:32 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438375" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; 
d="scan'208";a="236438375" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:06 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717420" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:05 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 05/14] cxl/core: Store global list of root ports Date: Wed, 1 Dec 2021 20:37:41 -0800 Message-Id: <20211202043750.3501494-6-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org CXL root ports (the downstream port to a host bridge) are to be enumerated by a platform specific driver. In the case of ACPI compliant systems, this is like the cxl_acpi driver. Root ports are the first CXL spec defined component that can be "found" by that platform specific driver. By storing a list of these root ports components in lower levels of the topology (switches and endpoints), have a mechanism to walk up their device hierarchy to find an enumerated root port. This will be necessary for region programming. 
Signed-off-by: Ben Widawsky --- drivers/cxl/acpi.c | 4 ++-- drivers/cxl/core/bus.c | 34 +++++++++++++++++++++++++++++++++- drivers/cxl/cxl.h | 5 ++++- tools/testing/cxl/mock_acpi.c | 4 ++-- 4 files changed, 41 insertions(+), 6 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 7bcb54e9fe00..8960ff1d5729 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -160,7 +160,7 @@ __mock int match_add_root_ports(struct pci_dev *pdev, void *data) creg = cxl_regmap_to_base(pdev, &map); port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap); - rc = cxl_add_dport(port, &pdev->dev, port_num, creg); + rc = cxl_add_dport(port, &pdev->dev, port_num, creg, true); if (rc) { ctx->error = rc; return rc; @@ -342,7 +342,7 @@ static int add_host_bridge_dport(struct device *match, void *arg) return 0; } - rc = cxl_add_dport(root_port, match, uid, ctx.chbcr); + rc = cxl_add_dport(root_port, match, uid, ctx.chbcr, false); if (rc) { dev_err(host, "failed to add downstream port: %s\n", dev_name(match)); diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 97cbd7132b15..98cd52c2a266 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -26,6 +26,8 @@ static DEFINE_IDA(cxl_port_ida); static DECLARE_RWSEM(topology_host_sem); +static LIST_HEAD(cxl_root_ports); +static DECLARE_RWSEM(root_port_sem); static struct device *cxl_topology_host; @@ -326,12 +328,31 @@ struct cxl_port *to_cxl_port(struct device *dev) return container_of(dev, struct cxl_port, dev); } +struct cxl_dport *cxl_get_root_dport(struct device *dev) +{ + struct cxl_dport *ret = NULL; + struct cxl_dport *dport; + + down_read(&root_port_sem); + list_for_each_entry(dport, &cxl_root_ports, root_port_link) { + if (dport->dport == dev) { + ret = dport; + break; + } + } + + up_read(&root_port_sem); + return ret; +} +EXPORT_SYMBOL_NS_GPL(cxl_get_root_dport, CXL); + static void unregister_port(void *_port) { struct cxl_port *port = _port; struct cxl_dport *dport; device_lock(&port->dev); + 
down_read(&root_port_sem); list_for_each_entry(dport, &port->dports, list) { char link_name[CXL_TARGET_STRLEN]; @@ -339,7 +360,10 @@ static void unregister_port(void *_port) dport->port_id) >= CXL_TARGET_STRLEN) continue; sysfs_remove_link(&port->dev.kobj, link_name); + + list_del_init(&dport->root_port_link); } + up_read(&root_port_sem); device_unlock(&port->dev); device_unregister(&port->dev); } @@ -493,12 +517,13 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new) * @dport_dev: firmware or PCI device representing the dport * @port_id: identifier for this dport in a decoder's target list * @component_reg_phys: optional location of CXL component registers + * @root_port: is this a root port (hostbridge downstream) * * Note that all allocations and links are undone by cxl_port deletion * and release. */ int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id, - resource_size_t component_reg_phys) + resource_size_t component_reg_phys, bool root_port) { char link_name[CXL_TARGET_STRLEN]; struct cxl_dport *dport; @@ -513,6 +538,7 @@ int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id, return -ENOMEM; INIT_LIST_HEAD(&dport->list); + INIT_LIST_HEAD(&dport->root_port_link); dport->dport = get_device(dport_dev); dport->port_id = port_id; dport->component_reg_phys = component_reg_phys; @@ -526,6 +552,12 @@ int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id, if (rc) goto err; + if (root_port) { + down_write(&root_port_sem); + list_add_tail(&dport->root_port_link, &cxl_root_ports); + up_write(&root_port_sem); + } + return 0; err: cxl_dport_release(dport); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 6bafc2cd8f7a..c744d2998628 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -272,6 +272,7 @@ struct cxl_port { * @component_reg_phys: downstream port component registers * @port: reference to cxl_port that contains this downstream port * @list: node for a 
cxl_port's list of cxl_dport instances + * @root_port_link: node for global list of root ports */ struct cxl_dport { struct device *dport; @@ -279,6 +280,7 @@ struct cxl_dport { resource_size_t component_reg_phys; struct cxl_port *port; struct list_head list; + struct list_head root_port_link; }; struct cxl_port *to_cxl_port(struct device *dev); @@ -287,7 +289,8 @@ struct cxl_port *devm_cxl_add_port(struct device *uport, struct cxl_port *parent_port); int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id, - resource_size_t component_reg_phys); + resource_size_t component_reg_phys, bool root_port); +struct cxl_dport *cxl_get_root_dport(struct device *dev); struct cxl_decoder *to_cxl_decoder(struct device *dev); bool is_root_decoder(struct device *dev); diff --git a/tools/testing/cxl/mock_acpi.c b/tools/testing/cxl/mock_acpi.c index 4c8a493ace56..ddefc4345f36 100644 --- a/tools/testing/cxl/mock_acpi.c +++ b/tools/testing/cxl/mock_acpi.c @@ -57,7 +57,7 @@ static int match_add_root_port(struct pci_dev *pdev, void *data) /* TODO walk DVSEC to find component register base */ port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap); - rc = cxl_add_dport(port, &pdev->dev, port_num, CXL_RESOURCE_NONE); + rc = cxl_add_dport(port, &pdev->dev, port_num, CXL_RESOURCE_NONE, true); if (rc) { dev_err(dev, "failed to add dport: %s (%d)\n", dev_name(&pdev->dev), rc); @@ -78,7 +78,7 @@ static int mock_add_root_port(struct platform_device *pdev, void *data) struct device *dev = ctx->dev; int rc; - rc = cxl_add_dport(port, &pdev->dev, pdev->id, CXL_RESOURCE_NONE); + rc = cxl_add_dport(port, &pdev->dev, pdev->id, CXL_RESOURCE_NONE, true); if (rc) { dev_err(dev, "failed to add dport: %s (%d)\n", dev_name(&pdev->dev), rc); From patchwork Thu Dec 2 04:37:42 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651719 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 
(2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 82D95C433FE for ; Thu, 2 Dec 2021 04:40:29 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355506AbhLBEnq (ORCPT ); Wed, 1 Dec 2021 23:43:46 -0500 Received: from mga09.intel.com ([134.134.136.24]:61245 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355501AbhLBEnc (ORCPT ); Wed, 1 Dec 2021 23:43:32 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438377" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438377" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:06 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717433" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:06 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 06/14] cxl/pci: Cache device DVSEC offset Date: Wed, 1 Dec 2021 20:37:42 -0800 Message-Id: <20211202043750.3501494-7-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org The PCIe device DVSEC, defined in the CXL 2.0 spec, 8.1.3 is required to be implemented by CXL 2.0 endpoint devices. Since the information contained within this DVSEC will be critically important, it makes sense to find the value early, and error out if it cannot be found. 
Signed-off-by: Ben Widawsky --- Changes since v1: - Error out if device dvsec isn't found (Jonathan) - Reword commit message --- drivers/cxl/cxlmem.h | 2 ++ drivers/cxl/pci.c | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 8d96d009ad90..3ef3c652599e 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -98,6 +98,7 @@ struct cxl_mbox_cmd { * * @dev: The device associated with this CXL state * @regs: Parsed register blocks + * @device_dvsec: Offset to the PCIe device DVSEC * @payload_size: Size of space for payload * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register) * @lsa_size: Size of Label Storage Area @@ -125,6 +126,7 @@ struct cxl_dev_state { struct device *dev; struct cxl_regs regs; + int device_dvsec; size_t payload_size; size_t lsa_size; diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 6aa3dd4b29a1..09ff82b4ea06 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -457,6 +457,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (IS_ERR(cxlds)) return PTR_ERR(cxlds); + cxlds->device_dvsec = pci_find_dvsec_capability(pdev, + PCI_DVSEC_VENDOR_ID_CXL, + CXL_DVSEC_PCIE_DEVICE); + if (!cxlds->device_dvsec) { + dev_err(&pdev->dev, + "Device DVSEC not present. 
Expect limited functionality.\n"); + return -ENXIO; + } + rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map); if (rc) return rc; From patchwork Thu Dec 2 04:37:43 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651723 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id B55ACC433EF for ; Thu, 2 Dec 2021 04:40:29 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355501AbhLBEnu (ORCPT ); Wed, 1 Dec 2021 23:43:50 -0500 Received: from mga09.intel.com ([134.134.136.24]:61244 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355526AbhLBEnc (ORCPT ); Wed, 1 Dec 2021 23:43:32 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438379" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438379" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:07 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717443" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:06 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , kernel test robot , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 07/14] cxl: Cache and pass DVSEC ranges Date: Wed, 1 Dec 2021 20:37:43 -0800 Message-Id: <20211202043750.3501494-8-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 
Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org CXL 1.1 specification provided a mechanism for mapping an address space of a CXL device. That functionality is known as a "range" and can be programmed through PCIe DVSEC. In addition to this, the specification defines an active bit which a device will expose through the same DVSEC to notify system software that memory is initialized and ready. While CXL 2.0 introduces a more powerful mechanism called HDM decoders that are controlled by MMIO behind a PCIe BAR, the spec does allow the 1.1 style mapping to still be present. In such a case, when the CXL driver takes over, if it were to enable HDM decoding and there was an actively used range, things would likely blow up, in particular if it wasn't an identical mapping. This patch caches the relevant information which the cxl_mem driver will need to make the proper decision and passes it along. Reported-by: kernel test robot Signed-off-by: Ben Widawsky --- Changes since v1: - Fix unused size (LKP) - Use struct range - Get rid of macros for pci config reads (Jonathan) --- drivers/cxl/cxlmem.h | 15 ++++++ drivers/cxl/pci.c | 121 +++++++++++++++++++++++++++++++++++++++++++ drivers/cxl/pci.h | 13 +++++ 3 files changed, 149 insertions(+) diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 3ef3c652599e..8d0a14c53518 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -89,6 +89,18 @@ struct cxl_mbox_cmd { */ #define CXL_CAPACITY_MULTIPLIER SZ_256M +/** + * struct cxl_endpoint_dvsec_info - Cached DVSEC info + * @mem_enabled: cached value of mem_enabled in the DVSEC, PCIE_DEVICE + * @ranges: Number of active HDM ranges this device uses. 
+ * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE + */ +struct cxl_endpoint_dvsec_info { + bool mem_enabled; + int ranges; + struct range dvsec_range[2]; +}; + /** * struct cxl_dev_state - The driver device state * @@ -117,6 +129,7 @@ struct cxl_mbox_cmd { * @active_persistent_bytes: sum of hard + soft persistent * @next_volatile_bytes: volatile capacity change pending device reset * @next_persistent_bytes: persistent capacity change pending device reset + * @info: Cached DVSEC information about the device. * @mbox_send: @dev specific transport for transmitting mailbox commands * * See section 8.2.9.5.2 Capacity Configuration and Label Storage for @@ -147,6 +160,8 @@ struct cxl_dev_state { u64 next_volatile_bytes; u64 next_persistent_bytes; + struct cxl_endpoint_dvsec_info *info; + int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd); }; diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 09ff82b4ea06..4e00abde5dbb 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -435,8 +435,121 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type, return rc; } +static int wait_for_valid(struct cxl_dev_state *cxlds) +{ + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + const unsigned long timeout = jiffies + HZ; + int d = cxlds->device_dvsec; + bool valid; + + do { + u32 temp; + int rc; + + rc = pci_read_config_dword(pdev, + d + CXL_DVSEC_PCIE_DEVICE_RANGE_SIZE_LOW_OFFSET(0), + &temp); + if (rc) + return -ENXIO; + + /* + * Memory_Info_Valid: When set, indicates that the CXL Range 1 + * Size high and Size Low registers are valid. Must be set + * within 1 second of deassertion of reset to CXL device. + */ + valid = FIELD_GET(CXL_DVSEC_PCIE_DEVICE_MEM_INFO_VALID, temp); + if (valid) + break; + cpu_relax(); + } while (!time_after(jiffies, timeout)); + + return valid ? 
0 : -ETIMEDOUT; +} + +static struct cxl_endpoint_dvsec_info *dvsec_ranges(struct cxl_dev_state *cxlds) +{ + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + struct cxl_endpoint_dvsec_info *info; + int d = cxlds->device_dvsec; + int hdm_count, rc, i; + u16 cap, ctrl; + + rc = pci_read_config_word(pdev, d + CXL_DVSEC_PCIE_DEVICE_CAP_OFFSET, &cap); + if (rc) + return ERR_PTR(-ENXIO); + rc = pci_read_config_word(pdev, d + CXL_DVSEC_PCIE_DEVICE_CTRL_OFFSET, &ctrl); + if (rc) + return ERR_PTR(-ENXIO); + + if (!(cap & CXL_DVSEC_PCIE_DEVICE_MEM_CAPABLE)) + return ERR_PTR(-ENODEV); + + /* + * It is not allowed by spec for MEM.capable to be set and have 0 HDM + * decoders. As this driver is for a spec defined class code which must + * be CXL.mem capable, there is no point in continuing. + */ + hdm_count = FIELD_GET(CXL_DVSEC_PCIE_DEVICE_HDM_COUNT_MASK, cap); + if (!hdm_count || hdm_count > 2) + return ERR_PTR(-EINVAL); + + rc = wait_for_valid(cxlds); + if (rc) + return ERR_PTR(rc); + + info = devm_kzalloc(cxlds->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + info->mem_enabled = FIELD_GET(CXL_DVSEC_PCIE_DEVICE_MEM_ENABLE, ctrl); + + for (i = 0; i < hdm_count; i++) { + u64 base, size; + u32 temp; + + rc = pci_read_config_dword(pdev, + d + CXL_DVSEC_PCIE_DEVICE_RANGE_SIZE_HIGH_OFFSET(i), + &temp); + if (rc) + continue; + size = (u64)temp << 32; + + rc = pci_read_config_dword(pdev, + d + CXL_DVSEC_PCIE_DEVICE_RANGE_SIZE_LOW_OFFSET(i), + &temp); + if (rc) + continue; + size |= temp & CXL_DVSEC_PCIE_DEVICE_MEM_SIZE_LOW_MASK; + + rc = pci_read_config_dword(pdev, + d + CXL_DVSEC_PCIE_DEVICE_RANGE_BASE_HIGH_OFFSET(i), + &temp); + if (rc) + continue; + base = (u64)temp << 32; + + rc = pci_read_config_dword(pdev, + d + CXL_DVSEC_PCIE_DEVICE_RANGE_BASE_LOW_OFFSET(i), + &temp); + if (rc) + continue; + base |= temp & CXL_DVSEC_PCIE_DEVICE_MEM_BASE_LOW_MASK; + + info->dvsec_range[i] = (struct range) { + .start = base, + .end = base + size - 1 + }; + + if 
(size) + info->ranges++; + } + + return info; +} + static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct cxl_endpoint_dvsec_info *info; struct cxl_register_map map; struct cxl_memdev *cxlmd; struct cxl_dev_state *cxlds; @@ -490,6 +603,14 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; + info = dvsec_ranges(cxlds); + if (IS_ERR(info)) + dev_err(&pdev->dev, + "Failed to get DVSEC range information (%ld)\n", + PTR_ERR(info)); + else + cxlds->info = info; + cxlmd = devm_cxl_add_memdev(cxlds); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h index f418272dbe38..7eb38030e376 100644 --- a/drivers/cxl/pci.h +++ b/drivers/cxl/pci.h @@ -15,6 +15,19 @@ /* CXL 2.0 8.1.3: PCIe DVSEC for CXL Device */ #define CXL_DVSEC_PCIE_DEVICE 0 +#define CXL_DVSEC_PCIE_DEVICE_CAP_OFFSET 0xA +#define CXL_DVSEC_PCIE_DEVICE_MEM_CAPABLE BIT(2) +#define CXL_DVSEC_PCIE_DEVICE_HDM_COUNT_MASK GENMASK(5, 4) +#define CXL_DVSEC_PCIE_DEVICE_CTRL_OFFSET 0xC +#define CXL_DVSEC_PCIE_DEVICE_MEM_ENABLE BIT(2) +#define CXL_DVSEC_PCIE_DEVICE_RANGE_SIZE_HIGH_OFFSET(i) (0x18 + (i * 0x10)) +#define CXL_DVSEC_PCIE_DEVICE_RANGE_SIZE_LOW_OFFSET(i) (0x1C + (i * 0x10)) +#define CXL_DVSEC_PCIE_DEVICE_MEM_INFO_VALID BIT(0) +#define CXL_DVSEC_PCIE_DEVICE_MEM_ACTIVE BIT(1) +#define CXL_DVSEC_PCIE_DEVICE_MEM_SIZE_LOW_MASK GENMASK(31, 28) +#define CXL_DVSEC_PCIE_DEVICE_RANGE_BASE_HIGH_OFFSET(i) (0x20 + (i * 0x10)) +#define CXL_DVSEC_PCIE_DEVICE_RANGE_BASE_LOW_OFFSET(i) (0x24 + (i * 0x10)) +#define CXL_DVSEC_PCIE_DEVICE_MEM_BASE_LOW_MASK GENMASK(31, 28) /* CXL 2.0 8.1.4: Non-CXL Function Map DVSEC */ #define CXL_DVSEC_FUNCTION_MAP 2 From patchwork Thu Dec 2 04:37:44 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651721 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on 
aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 706F9C433F5 for ; Thu, 2 Dec 2021 04:40:31 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355512AbhLBEnv (ORCPT ); Wed, 1 Dec 2021 23:43:51 -0500 Received: from mga09.intel.com ([134.134.136.24]:61242 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355530AbhLBEng (ORCPT ); Wed, 1 Dec 2021 23:43:36 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438380" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438380" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:08 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717453" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:07 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 08/14] cxl/pci: Implement wait for media active Date: Wed, 1 Dec 2021 20:37:44 -0800 Message-Id: <20211202043750.3501494-9-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org The CXL Type 3 Memory Device Software Guide (Revision 1.0) describes the need to check media active before using HDM. CXL 2.0 8.1.3.8.2 states: Memory_Active: When set, indicates that the CXL Range 1 memory is fully initialized and available for software use. Must be set within Range 1. 
Memory_Active_Timeout of deassertion of reset to CXL device if CXL.mem HwInit Mode=1 Unfortunately, Memory_Active can take quite a long time depending on media size (up to 256s per 2.0 spec). Since the cxl_pci driver doesn't care about this, a callback is exported as part of driver state for use by drivers that do care. The implementation waits for 60s as that is considered more than enough and falls within typical Linux timeout lengths. Signed-off-by: Ben Widawsky --- drivers/cxl/cxlmem.h | 1 + drivers/cxl/pci.c | 59 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 8d0a14c53518..47651432e2ae 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -163,6 +163,7 @@ struct cxl_dev_state { struct cxl_endpoint_dvsec_info *info; int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd); + int (*wait_media_ready)(struct cxl_dev_state *cxlds); }; enum cxl_opcode { diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 4e00abde5dbb..e7523a7614a4 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -466,6 +466,63 @@ static int wait_for_valid(struct cxl_dev_state *cxlds) return valid ? 0 : -ETIMEDOUT; } +/* + * Implements Figure 43 of the CXL Type 3 Memory Device Software Guide. Waits a + * full 60s no matter what the device reports. 
+ */ +static int wait_for_media_ready(struct cxl_dev_state *cxlds) +{ + const unsigned long timeout = jiffies + (60 * HZ); + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + int d = cxlds->device_dvsec; + u64 md_status; + bool active; + int rc; + + rc = wait_for_valid(cxlds); + if (rc) + return rc; + + do { + u64 size; + u32 temp; + int rc; + + rc = pci_read_config_dword(pdev, + d + CXL_DVSEC_PCIE_DEVICE_RANGE_SIZE_HIGH_OFFSET(0), + &temp); + if (rc) + return -ENXIO; + size = (u64)temp << 32; + + rc = pci_read_config_dword(pdev, + d + CXL_DVSEC_PCIE_DEVICE_RANGE_SIZE_LOW_OFFSET(0), + &temp); + if (rc) + return -ENXIO; + size |= temp & CXL_DVSEC_PCIE_DEVICE_MEM_SIZE_LOW_MASK; + + active = FIELD_GET(CXL_DVSEC_PCIE_DEVICE_MEM_ACTIVE, temp); + if (active) + break; + cpu_relax(); + mdelay(100); + } while (!time_after(jiffies, timeout)); + + if (!active) + return -ETIMEDOUT; + + rc = check_device_status(cxlds); + if (rc) + return rc; + + md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); + if (!CXLMDEV_READY(md_status)) + return -EIO; + + return 0; +} + static struct cxl_endpoint_dvsec_info *dvsec_ranges(struct cxl_dev_state *cxlds) { struct pci_dev *pdev = to_pci_dev(cxlds->dev); @@ -579,6 +636,8 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENXIO; } + cxlds->wait_media_ready = wait_for_media_ready; + rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map); if (rc) return rc; From patchwork Thu Dec 2 04:37:45 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651725 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 9FFEBC4332F for ; Thu, 2 Dec 2021 04:40:31 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id 
S1355530AbhLBEnw (ORCPT ); Wed, 1 Dec 2021 23:43:52 -0500 Received: from mga09.intel.com ([134.134.136.24]:61245 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355533AbhLBEnl (ORCPT ); Wed, 1 Dec 2021 23:43:41 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438383" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438383" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:08 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717459" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:08 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , kernel test robot , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 09/14] cxl/pci: Store component register base in cxlds Date: Wed, 1 Dec 2021 20:37:45 -0800 Message-Id: <20211202043750.3501494-10-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org The component register base address is enumerated and stored in the cxl_device_state structure for use by the endpoint driver. Component register base addresses are obtained through PCI mechanisms. As such it makes most sense for the cxl_pci driver to obtain that address. In order to reuse the port driver for enumerating decoder resources for an endpoint, it is desirable to be able to add the endpoint as a port. The endpoint does know of the cxlds and can pull the component register base from there and pass it along to port creation. 
Reported-by: kernel test robot Signed-off-by: Ben Widawsky --- Changes since v1: Use the right dev for dev_warn (LKP) --- drivers/cxl/cxlmem.h | 2 ++ drivers/cxl/pci.c | 11 +++++++++++ 2 files changed, 13 insertions(+) diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 47651432e2ae..ebb4d1cdded2 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -130,6 +130,7 @@ struct cxl_endpoint_dvsec_info { * @next_volatile_bytes: volatile capacity change pending device reset * @next_persistent_bytes: persistent capacity change pending device reset * @info: Cached DVSEC information about the device. + * @component_reg_phys: register base of component registers * @mbox_send: @dev specific transport for transmitting mailbox commands * * See section 8.2.9.5.2 Capacity Configuration and Label Storage for @@ -161,6 +162,7 @@ struct cxl_dev_state { u64 next_persistent_bytes; struct cxl_endpoint_dvsec_info *info; + resource_size_t component_reg_phys; int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd); int (*wait_media_ready)(struct cxl_dev_state *cxlds); diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index e7523a7614a4..cc11f7cf9a53 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -646,6 +646,17 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; + /* + * If the component registers can't be found, the cxl_pci driver may + * still be useful for management functions so don't return an error. 
+ */ + cxlds->component_reg_phys = CXL_RESOURCE_NONE; + rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map); + if (rc) + dev_warn(&pdev->dev, "No component registers (%d)\n", rc); + + cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map); + rc = cxl_pci_setup_mailbox(cxlds); if (rc) return rc; From patchwork Thu Dec 2 04:37:46 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651729 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id D2831C433FE for ; Thu, 2 Dec 2021 04:40:31 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355531AbhLBEnx (ORCPT ); Wed, 1 Dec 2021 23:43:53 -0500 Received: from mga09.intel.com ([134.134.136.24]:61244 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355520AbhLBEnq (ORCPT ); Wed, 1 Dec 2021 23:43:46 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438384" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438384" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:09 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717465" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:08 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 10/14] cxl: Make passthrough decoder init implicit Date: Wed, 1 Dec 2021 20:37:46 -0800 Message-Id: <20211202043750.3501494-11-ben.widawsky@intel.com> X-Mailer: 
git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org Unused CXL decoders, or ports which use a passthrough decoder (no HDM decoder registers) are expected to be initialized in a specific way. Since upcoming drivers will want the same initialization, and it was already a requirement to have consumers of the API configure the decoder specific to their needs, initialize to this passthrough state by default. Signed-off-by: Ben Widawsky --- This came up during review of v1: https://lore.kernel.org/linux-cxl/20211120000250.1663391-1-ben.widawsky@intel.com/T/#me8f127f3a7474396318418d748cefc29ed97cfa5 --- drivers/cxl/acpi.c | 5 ----- drivers/cxl/core/bus.c | 9 ++++++++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 8960ff1d5729..8dee8ebdec9d 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -263,11 +263,6 @@ static int add_host_bridge_uport(struct device *match, void *arg) if (IS_ERR(cxld)) return PTR_ERR(cxld); - cxld->interleave_ways = 1; - cxld->interleave_granularity = PAGE_SIZE; - cxld->target_type = CXL_DECODER_EXPANDER; - cxld->platform_res = (struct resource)DEFINE_RES_MEM(0, 0); - device_lock(&port->dev); dport = list_first_entry(&port->dports, typeof(*dport), list); device_unlock(&port->dev); diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 98cd52c2a266..7da5fbe7a1af 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -601,7 +601,8 @@ static int decoder_populate_targets(struct cxl_decoder *cxld, * some address space for CXL.mem utilization. A decoder is expected to be * configured by the caller before registering. * - * Return: A new cxl decoder to be registered by cxl_decoder_add() + * Return: A new cxl decoder to be registered by cxl_decoder_add(). 
The decoder + * is initialized to be a "passthrough" decoder. */ static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, unsigned int nr_targets) @@ -631,6 +632,12 @@ static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, dev->parent = &port->dev; dev->bus = &cxl_bus_type; + /* Pre initialize an "empty" decoder */ + cxld->interleave_ways = 1; + cxld->interleave_granularity = PAGE_SIZE; + cxld->target_type = CXL_DECODER_EXPANDER; + cxld->platform_res = (struct resource)DEFINE_RES_MEM(0, 0); + return cxld; err: kfree(cxld); From patchwork Thu Dec 2 04:37:47 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651733 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 10816C43219 for ; Thu, 2 Dec 2021 04:40:32 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355520AbhLBEnx (ORCPT ); Wed, 1 Dec 2021 23:43:53 -0500 Received: from mga09.intel.com ([134.134.136.24]:61242 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355537AbhLBEnq (ORCPT ); Wed, 1 Dec 2021 23:43:46 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438385" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438385" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:09 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717481" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:09 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , kernel test robot , 
Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 11/14] cxl/port: Introduce a port driver Date: Wed, 1 Dec 2021 20:37:47 -0800 Message-Id: <20211202043750.3501494-12-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org The CXL port driver is responsible for managing the decoder resources contained within the port. It will also provide APIs that other drivers will consume for managing these resources. There are 4 types of ports in a system: 1. Platform port. This is a non-programmable entity. Such a port is named rootX. It is enumerated with an ACPI0017 object, by cxl_acpi in an ACPI based system. 2. Host Bridge port. This port's register emumeration is defined in a platform specific way (CHBS for ACPI platforms). It has n downstream ports, each of which are known as CXL 2.0 root ports. Once the platform specific mechanism to get the offset to the registers is obtained it operates just like other CXL components. The enumeration of this component is started by cxl_acpi and completed by cxl_port. 3. Switch port. A switch port is similar to a Host Bridge port except register enumeration is defined in the CXL specification in a platform agnostic way. The enumeration of these are entirely contained within cxl_port. 4. Endpoint port. Endpoint ports are similar to switch ports with the exception that they have no downstream ports, only the underlying media on the device. The enumeration of these are started by cxl_pci, and completed by cxl_port. 
Reported-by: kernel test robot Signed-off-by: Ben Widawsky --- Changes since v1: - Include header for ioread64_hi_lo (LKP) - Fix variable out of scope usage (Jonathan) - Cleanup error cases when adding decoder (Jonathan) - Handle decoder initialization in separate function (Jonathan) - Commit message rewording (Bjorn) - Grammar, whitespace, and typo fixes (Bjorn) - Skip DVSEC mess; mem driver won't add (Dan) - Skip checking global enable; move to mem driver - Use better names for get_caps and map_regs - Rework helper functions to be more idiomatic --- .../driver-api/cxl/memory-devices.rst | 5 + drivers/cxl/Kconfig | 21 ++ drivers/cxl/Makefile | 2 + drivers/cxl/core/bus.c | 67 ++++ drivers/cxl/core/regs.c | 6 +- drivers/cxl/cxl.h | 32 +- drivers/cxl/port.c | 318 ++++++++++++++++++ 7 files changed, 442 insertions(+), 9 deletions(-) create mode 100644 drivers/cxl/port.c diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst index 3b8f41395f6b..fbf0393cdddc 100644 --- a/Documentation/driver-api/cxl/memory-devices.rst +++ b/Documentation/driver-api/cxl/memory-devices.rst @@ -28,6 +28,11 @@ CXL Memory Device .. kernel-doc:: drivers/cxl/pci.c :internal: +CXL Port +-------- +.. kernel-doc:: drivers/cxl/port.c + :doc: cxl port + CXL Core -------- .. kernel-doc:: drivers/cxl/cxl.h diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig index ef05e96f8f97..d224bddf238a 100644 --- a/drivers/cxl/Kconfig +++ b/drivers/cxl/Kconfig @@ -77,4 +77,25 @@ config CXL_PMEM provisioning the persistent memory capacity of CXL memory expanders. If unsure say 'm'. + +config CXL_MEM + tristate "CXL.mem: Memory Devices" + select CXL_PORT + depends on CXL_PCI + default CXL_BUS + help + The CXL.mem protocol allows a device to act as a provider of "System + RAM" and/or "Persistent Memory" that is fully coherent as if the + memory were attached to the typical CPU memory controller. This is + known as HDM "Host-managed Device Memory". 
+ + Say 'y/m' to enable a driver that will attach to CXL.mem devices for + memory expansion and control of HDM. See Chapter 9.13 in the CXL 2.0 + specification for a detailed description of HDM. + + If unsure say 'm'. + +config CXL_PORT + tristate + endif diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile index cf07ae6cea17..56fcac2323cb 100644 --- a/drivers/cxl/Makefile +++ b/drivers/cxl/Makefile @@ -3,7 +3,9 @@ obj-$(CONFIG_CXL_BUS) += core/ obj-$(CONFIG_CXL_PCI) += cxl_pci.o obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o +obj-$(CONFIG_CXL_PORT) += cxl_port.o cxl_pci-y := pci.o cxl_acpi-y := acpi.o cxl_pmem-y := pmem.o +cxl_port-y := port.o diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 7da5fbe7a1af..e2cdea990b81 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -31,6 +31,8 @@ static DECLARE_RWSEM(root_port_sem); static struct device *cxl_topology_host; +static bool is_cxl_decoder(struct device *dev); + int cxl_register_topology_host(struct device *host) { down_write(&topology_host_sem); @@ -75,6 +77,45 @@ static void put_cxl_topology_host(struct device *dev) up_read(&topology_host_sem); } +static int decoder_match(struct device *dev, void *data) +{ + struct resource *theirs = (struct resource *)data; + struct cxl_decoder *cxld; + + if (!is_cxl_decoder(dev)) + return 0; + + cxld = to_cxl_decoder(dev); + if (theirs->start <= cxld->decoder_range.start && + theirs->end >= cxld->decoder_range.end) + return 1; + + return 0; +} + +static struct cxl_decoder *cxl_find_root_decoder(resource_size_t base, + resource_size_t size) +{ + struct cxl_decoder *cxld = NULL; + struct device *cxldd; + struct device *host; + struct resource res = (struct resource){ + .start = base, + .end = base + size - 1, + }; + + host = get_cxl_topology_host(); + if (!host) + return NULL; + + cxldd = device_find_child(host, &res, decoder_match); + if (cxldd) + cxld = to_cxl_decoder(cxldd); + + put_cxl_topology_host(host); + 
return cxld; +} + static ssize_t devtype_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -280,6 +321,11 @@ bool is_root_decoder(struct device *dev) } EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL); +static bool is_cxl_decoder(struct device *dev) +{ + return dev->type->release == cxl_decoder_release; +} + struct cxl_decoder *to_cxl_decoder(struct device *dev) { if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release, @@ -327,6 +373,7 @@ struct cxl_port *to_cxl_port(struct device *dev) return NULL; return container_of(dev, struct cxl_port, dev); } +EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL); struct cxl_dport *cxl_get_root_dport(struct device *dev) { @@ -798,6 +845,24 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL); static void cxld_unregister(void *dev) { + struct cxl_decoder *plat_decoder, *cxld = to_cxl_decoder(dev); + resource_size_t base, size; + + if (is_root_decoder(dev)) { + device_unregister(dev); + return; + } + + base = cxld->decoder_range.start; + size = range_len(&cxld->decoder_range); + + if (size) { + plat_decoder = cxl_find_root_decoder(base, size); + if (plat_decoder) + __release_region(&plat_decoder->platform_res, base, + size); + } + device_unregister(dev); } @@ -852,6 +917,8 @@ static int cxl_device_id(struct device *dev) return CXL_DEVICE_NVDIMM_BRIDGE; if (dev->type == &cxl_nvdimm_type) return CXL_DEVICE_NVDIMM; + if (dev->type == &cxl_port_type) + return CXL_DEVICE_PORT; return 0; } diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 7a1c4290f0a3..1fb931e6c83e 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -159,9 +159,8 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base, } EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, CXL); -static void __iomem *devm_cxl_iomap_block(struct device *dev, - resource_size_t addr, - resource_size_t length) +void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr, + resource_size_t length) { void __iomem *ret_val; struct 
resource *res; @@ -180,6 +179,7 @@ static void __iomem *devm_cxl_iomap_block(struct device *dev, return ret_val; } +EXPORT_SYMBOL_NS_GPL(devm_cxl_iomap_block, CXL); int cxl_map_component_regs(struct pci_dev *pdev, struct cxl_component_regs *regs, diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index c744d2998628..dd6c76ac89df 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -17,6 +17,9 @@ * (port-driver, region-driver, nvdimm object-drivers... etc). */ +/* CXL 2.0 8.2.4 CXL Component Register Layout and Definition */ +#define CXL_COMPONENT_REG_BLOCK_SIZE SZ_64K + /* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers*/ #define CXL_CM_OFFSET 0x1000 #define CXL_CM_CAP_HDR_OFFSET 0x0 @@ -36,11 +39,22 @@ #define CXL_HDM_DECODER_CAP_OFFSET 0x0 #define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0) #define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4) -#define CXL_HDM_DECODER0_BASE_LOW_OFFSET 0x10 -#define CXL_HDM_DECODER0_BASE_HIGH_OFFSET 0x14 -#define CXL_HDM_DECODER0_SIZE_LOW_OFFSET 0x18 -#define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET 0x1c -#define CXL_HDM_DECODER0_CTRL_OFFSET 0x20 +#define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8) +#define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9) +#define CXL_HDM_DECODER_CTRL_OFFSET 0x4 +#define CXL_HDM_DECODER_ENABLE BIT(1) +#define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10) +#define CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i) (0x20 * (i) + 0x14) +#define CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i) (0x20 * (i) + 0x18) +#define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i) (0x20 * (i) + 0x1c) +#define CXL_HDM_DECODER0_CTRL_OFFSET(i) (0x20 * (i) + 0x20) +#define CXL_HDM_DECODER0_CTRL_IG_MASK GENMASK(3, 0) +#define CXL_HDM_DECODER0_CTRL_IW_MASK GENMASK(7, 4) +#define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9) +#define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10) +#define CXL_HDM_DECODER0_CTRL_TYPE BIT(12) +#define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24) +#define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28) static inline int cxl_hdm_decoder_count(u32 
cap_hdr) { @@ -148,6 +162,8 @@ int cxl_map_device_regs(struct pci_dev *pdev, enum cxl_regloc_type; int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, struct cxl_register_map *map); +void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr, + resource_size_t length); #define CXL_RESOURCE_NONE ((resource_size_t) -1) #define CXL_TARGET_STRLEN 20 @@ -165,7 +181,8 @@ void cxl_unregister_topology_host(struct device *host); #define CXL_DECODER_F_TYPE2 BIT(2) #define CXL_DECODER_F_TYPE3 BIT(3) #define CXL_DECODER_F_LOCK BIT(4) -#define CXL_DECODER_F_MASK GENMASK(4, 0) +#define CXL_DECODER_F_ENABLE BIT(5) +#define CXL_DECODER_F_MASK GENMASK(5, 0) enum cxl_decoder_type { CXL_DECODER_ACCELERATOR = 2, @@ -255,6 +272,7 @@ struct cxl_walk_context { * @dports: cxl_dport instances referenced by decoders * @decoder_ida: allocator for decoder ids * @component_reg_phys: component register capability base address (optional) + * @rescan_work: worker object for bus rescans after port additions */ struct cxl_port { struct device dev; @@ -263,6 +281,7 @@ struct cxl_port { struct list_head dports; struct ida decoder_ida; resource_size_t component_reg_phys; + struct work_struct rescan_work; }; /** @@ -328,6 +347,7 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv); #define CXL_DEVICE_NVDIMM_BRIDGE 1 #define CXL_DEVICE_NVDIMM 2 +#define CXL_DEVICE_PORT 3 #define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*") #define CXL_MODALIAS_FMT "cxl:t%d" diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c new file mode 100644 index 000000000000..242e6374f4a1 --- /dev/null +++ b/drivers/cxl/port.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2021 Intel Corporation. All rights reserved. */ +#include +#include +#include +#include + +#include "cxlmem.h" + +/** + * DOC: cxl port + * + * The port driver implements the set of functionality needed to allow full + * decoder enumeration and routing. 
A CXL port is an abstraction of a CXL + * component that implements some amount of CXL decoding of CXL.mem traffic. + * As of the CXL 2.0 spec, this includes: + * + * .. list-table:: CXL Components w/ Ports + * :widths: 25 25 50 + * :header-rows: 1 + * + * * - component + * - upstream + * - downstream + * * - Host Bridge + * - ACPI0016 + * - Root Port + * * - Switch + * - Switch Upstream Port + * - Switch Downstream Port + * * - Endpoint + * - Endpoint Port + * - N/A + * + * The primary service this driver provides is enumerating HDM decoders and + * presenting APIs to other drivers to utilize the decoders. + */ + +static struct workqueue_struct *cxl_port_wq; + +struct cxl_port_data { + struct cxl_component_regs regs; + + struct port_caps { + unsigned int count; + unsigned int tc; + unsigned int interleave11_8; + unsigned int interleave14_12; + } caps; +}; + +static inline int to_interleave_granularity(u32 ctrl) +{ + int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl); + + return 256 << val; +} + +static inline int to_interleave_ways(u32 ctrl) +{ + int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl); + + return 1 << val; +} + +static struct port_caps parse_hdm_decoder_caps(void __iomem *hdm_decoder) +{ + u32 hdm_cap = readl(hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET); + struct port_caps caps; + + caps.count = cxl_hdm_decoder_count(hdm_cap); + caps.tc = FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap); + caps.interleave11_8 = + FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap); + caps.interleave14_12 = + FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap); + + return caps; +} + +static void __iomem *map_hdm_decoder_regs(struct cxl_port *port, + void __iomem *crb) +{ + struct cxl_register_map map; + struct cxl_component_reg_map *comp_map = &map.component_map; + + cxl_probe_component_regs(&port->dev, crb, comp_map); + if (!comp_map->hdm_decoder.valid) { + dev_err(&port->dev, "HDM decoder registers invalid\n"); + return IOMEM_ERR_PTR(-ENXIO); + } + + 
return crb + comp_map->hdm_decoder.offset; +} + +static u64 get_decoder_size(void __iomem *hdm_decoder, int n) +{ + u32 ctrl = readl(hdm_decoder + CXL_HDM_DECODER0_CTRL_OFFSET(n)); + + if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) + return 0; + + return ioread64_hi_lo(hdm_decoder + + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(n)); +} + +static bool is_endpoint_port(struct cxl_port *port) +{ + /* Endpoints can't be ports... yet! */ + return false; +} + +static void rescan_ports(struct work_struct *work) +{ + if (bus_rescan_devices(&cxl_bus_type)) + pr_warn("%s: Failed to rescan. Some CXL devices may be missing\n", + KBUILD_MODNAME); +} + +static void init_hdm_decoder(struct cxl_decoder *cxld, int *target_map, + void __iomem *hdm_decoder, int which) +{ + u64 size, base; + u32 ctrl; + int i; + union { + u64 value; + char target_id[8]; + } target_list; + + size = get_decoder_size(hdm_decoder, which); + if (!size) + return; + + ctrl = readl(hdm_decoder + CXL_HDM_DECODER0_CTRL_OFFSET(which)); + base = ioread64_hi_lo(hdm_decoder + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which)); + + cxld->decoder_range = (struct range) { + .start = base, + .end = base + size - 1 + }; + + cxld->flags = CXL_DECODER_F_ENABLE; + cxld->interleave_ways = to_interleave_ways(ctrl); + cxld->interleave_granularity = to_interleave_granularity(ctrl); + + if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) + cxld->target_type = CXL_DECODER_ACCELERATOR; + + target_list.value = ioread64_hi_lo(hdm_decoder + CXL_HDM_DECODER0_TL_LOW(which)); + for (i = 0; i < cxld->interleave_ways; i++) + target_map[i] = target_list.target_id[i]; +} + +static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, + int *target_map) +{ + int rc; + + rc = cxl_decoder_add_locked(cxld, target_map); + if (rc) { + put_device(&cxld->dev); + dev_err(&port->dev, "Failed to add decoder\n"); + return rc; + } + + rc = cxl_decoder_autoremove(&port->dev, cxld); + if (!rc) + dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev)); + 
+ return rc; +} + +static int enumerate_endpoint_decoder(struct cxl_port *port, + struct cxl_port_data *portdata) +{ + void __iomem *hdm_decoder = portdata->regs.hdm_decoder; + struct cxl_decoder *cxld; + int rc; + + cxld = cxl_endpoint_decoder_alloc(port); + if (IS_ERR(cxld)) { + dev_warn(&port->dev, "Failed to allocate endpoint decoder\n"); + return PTR_ERR(cxld); + } + + init_hdm_decoder(cxld, NULL, hdm_decoder, 0); + rc = add_hdm_decoder(port, cxld, NULL); + + return rc; +} + +/* Enumerates CXL switches and Host Bridges */ +static int enumerate_switch_decoders(struct cxl_port *port, + struct cxl_port_data *portdata) +{ + void __iomem *hdm_decoder = portdata->regs.hdm_decoder; + int i = 0; + + for (i = 0; i < portdata->caps.count; i++) { + int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 }; + int rc, target_count = portdata->caps.tc; + struct cxl_decoder *cxld; + + cxld = cxl_switch_decoder_alloc(port, target_count); + if (IS_ERR(cxld)) { + dev_warn(&port->dev, + "Failed to allocate the decoder\n"); + return PTR_ERR(cxld); + } + + init_hdm_decoder(cxld, target_map, hdm_decoder, i); + rc = add_hdm_decoder(port, cxld, target_map); + if (rc) { + dev_warn(&port->dev, + "Failed to add decoder to switch port\n"); + return rc; + } + } + + return 0; +} + +static int cxl_port_probe(struct device *dev) +{ + struct cxl_port *port = to_cxl_port(dev); + struct cxl_port_data *portdata; + void __iomem *crb; + int rc; + + /* + * All ports should have component registers except for the platform + * specific root port. There's no point in binding that device. 
+ */ + if (port->component_reg_phys == CXL_RESOURCE_NONE) + return -ENODEV; + + portdata = devm_kzalloc(dev, sizeof(*portdata), GFP_KERNEL); + if (!portdata) + return -ENOMEM; + + crb = devm_cxl_iomap_block(&port->dev, port->component_reg_phys, + CXL_COMPONENT_REG_BLOCK_SIZE); + if (!crb) { + dev_err(&port->dev, "No component registers mapped\n"); + return -ENXIO; + } + + portdata->regs.hdm_decoder = map_hdm_decoder_regs(port, crb); + if (IS_ERR(portdata->regs.hdm_decoder)) + return PTR_ERR(portdata->regs.hdm_decoder); + + portdata->caps = parse_hdm_decoder_caps(portdata->regs.hdm_decoder); + if (portdata->caps.count == 0) { + dev_err(&port->dev, "Spec violation. Caps invalid\n"); + return -ENXIO; + } + + if (is_endpoint_port(port)) + rc = enumerate_endpoint_decoder(port, portdata); + else + rc = enumerate_switch_decoders(port, portdata); + if (rc) { + dev_err(&port->dev, "Couldn't enumerate decoders (%d)\n", rc); + return rc; + } + + /* + * Bus rescan is done in a workqueue so that we can do so with the + * device lock dropped. + * + * Why do we need to rescan? There is a race between cxl_acpi and + * cxl_mem (which depends on cxl_pci). cxl_mem will only create a port + * if it can establish a path up to a root port, which is enumerated by + * a platform specific driver (ie. cxl_acpi) and bound by this driver. + * While cxl_acpi could do the rescan, it makes sense to be here as + * other platform drivers might require the same functionality. 
+ */ + INIT_WORK(&port->rescan_work, rescan_ports); + queue_work(cxl_port_wq, &port->rescan_work); + + return 0; +} + +static struct cxl_driver cxl_port_driver = { + .name = "cxl_port", + .probe = cxl_port_probe, + .id = CXL_DEVICE_PORT, +}; + +static __init int cxl_port_init(void) +{ + int rc; + + cxl_port_wq = alloc_ordered_workqueue("cxl_port", 0); + if (!cxl_port_wq) + return -ENOMEM; + + rc = cxl_driver_register(&cxl_port_driver); + if (rc) { + destroy_workqueue(cxl_port_wq); + return rc; + } + + return 0; +} + +static __exit void cxl_port_exit(void) +{ + destroy_workqueue(cxl_port_wq); + cxl_driver_unregister(&cxl_port_driver); +} + +module_init(cxl_port_init); +module_exit(cxl_port_exit); +MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(CXL); +MODULE_ALIAS_CXL(CXL_DEVICE_PORT); From patchwork Thu Dec 2 04:37:48 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651727 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 8C2D6C43217 for ; Thu, 2 Dec 2021 04:40:32 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355532AbhLBEnx (ORCPT ); Wed, 1 Dec 2021 23:43:53 -0500 Received: from mga09.intel.com ([134.134.136.24]:61245 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355538AbhLBEnq (ORCPT ); Wed, 1 Dec 2021 23:43:46 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438387" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438387" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:10 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717491" Received: from 
liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:09 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Jonathan Cameron , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 12/14] cxl: Unify port enumeration for decoders Date: Wed, 1 Dec 2021 20:37:48 -0800 Message-Id: <20211202043750.3501494-13-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org The port driver exists to do proper enumeration of the component registers for ports, including HDM decoder resources. Any port which follows the CXL specification to implement HDM decoder registers should be handled by the port driver. This includes host bridge registers that are currently handled within the cxl_acpi driver. In moving the responsibility from cxl_acpi to cxl_port, three primary things are accomplished here: 1. Multi-port host bridges are now handled by the port driver 2. Single port host bridges are handled by the port driver 3. Single port switches without component registers will be handled by the port driver. While it's tempting to remove decoder APIs from cxl_core entirely, it is still required that platform specific drivers are able to add decoders which aren't specified in CXL 2.0+. An example of this is the CFMWS parsing which is implemented in cxl_acpi. 
Reviewed-by: Jonathan Cameron Signed-off-by: Ben Widawsky --- drivers/cxl/acpi.c | 36 +++---------------------------- drivers/cxl/core/bus.c | 6 ++++-- drivers/cxl/cxl.h | 2 ++ drivers/cxl/port.c | 49 +++++++++++++++++++++++++++++++++++++++++- 4 files changed, 57 insertions(+), 36 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 8dee8ebdec9d..7bb5699fc1ce 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -211,8 +211,6 @@ static int add_host_bridge_uport(struct device *match, void *arg) struct acpi_device *bridge = to_cxl_host_bridge(host, match); struct acpi_pci_root *pci_root; struct cxl_walk_context ctx; - int single_port_map[1], rc; - struct cxl_decoder *cxld; struct cxl_dport *dport; struct cxl_port *port; @@ -246,38 +244,9 @@ static int add_host_bridge_uport(struct device *match, void *arg) return -ENODEV; if (ctx.error) return ctx.error; - if (ctx.count > 1) - return 0; - /* TODO: Scan CHBCR for HDM Decoder resources */ - - /* - * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability - * Structure) single ported host-bridges need not publish a decoder - * capability when a passthrough decode can be assumed, i.e. all - * transactions that the uport sees are claimed and passed to the single - * dport. Disable the range until the first CXL region is enumerated / - * activated. - */ - cxld = cxl_root_decoder_alloc(port, 1); - if (IS_ERR(cxld)) - return PTR_ERR(cxld); - - device_lock(&port->dev); - dport = list_first_entry(&port->dports, typeof(*dport), list); - device_unlock(&port->dev); - - single_port_map[0] = dport->port_id; - - rc = cxl_decoder_add(cxld, single_port_map); - if (rc) - put_device(&cxld->dev); - else - rc = cxl_decoder_autoremove(host, cxld); - - if (rc == 0) - dev_dbg(host, "add: %s\n", dev_name(&cxld->dev)); - return rc; + /* Host bridge ports are enumerated by the port driver. 
*/ + return 0; } struct cxl_chbs_context { @@ -444,3 +413,4 @@ module_platform_driver(cxl_acpi_driver); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(CXL); MODULE_IMPORT_NS(ACPI); +MODULE_SOFTDEP("pre: cxl_port"); diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index e2cdea990b81..34a308708a99 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -62,7 +62,7 @@ void cxl_unregister_topology_host(struct device *host) } EXPORT_SYMBOL_NS_GPL(cxl_unregister_topology_host, CXL); -static struct device *get_cxl_topology_host(void) +struct device *get_cxl_topology_host(void) { down_read(&topology_host_sem); if (cxl_topology_host) @@ -70,12 +70,14 @@ static struct device *get_cxl_topology_host(void) up_read(&topology_host_sem); return NULL; } +EXPORT_SYMBOL_NS_GPL(get_cxl_topology_host, CXL); -static void put_cxl_topology_host(struct device *dev) +void put_cxl_topology_host(struct device *dev) { WARN_ON(dev != cxl_topology_host); up_read(&topology_host_sem); } +EXPORT_SYMBOL_NS_GPL(put_cxl_topology_host, CXL); static int decoder_match(struct device *dev, void *data) { diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index dd6c76ac89df..df25dd20ff95 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -170,6 +170,8 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr, int cxl_register_topology_host(struct device *host); void cxl_unregister_topology_host(struct device *host); +struct device *get_cxl_topology_host(void); +void put_cxl_topology_host(struct device *dev); /* * cxl_decoder flags that define the type of memory / devices this diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index 242e6374f4a1..527b027dcf24 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -221,12 +221,59 @@ static int enumerate_switch_decoders(struct cxl_port *port, return 0; } +/* + * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure) + * single ported host-bridges need not publish a decoder capability when 
a + * passthrough decode can be assumed, i.e. all transactions that the uport sees + * are claimed and passed to the single dport. Disable the range until the first + * CXL region is enumerated / activated. + */ +static int add_passthrough_decoder(struct cxl_port *port) +{ + int single_port_map[1], rc; + struct cxl_decoder *cxld; + struct cxl_dport *dport; + + device_lock_assert(&port->dev); + + cxld = cxl_switch_decoder_alloc(port, 1); + if (IS_ERR(cxld)) + return PTR_ERR(cxld); + + dport = list_first_entry(&port->dports, typeof(*dport), list); + single_port_map[0] = dport->port_id; + + rc = cxl_decoder_add_locked(cxld, single_port_map); + if (rc) + put_device(&cxld->dev); + else + rc = cxl_decoder_autoremove(&port->dev, cxld); + + if (rc == 0) + dev_dbg(&port->dev, "add: %s\n", dev_name(&cxld->dev)); + + return rc; +} + static int cxl_port_probe(struct device *dev) { struct cxl_port *port = to_cxl_port(dev); struct cxl_port_data *portdata; void __iomem *crb; - int rc; + int rc = 0; + + if (list_is_singular(&port->dports)) { + struct device *host_dev = get_cxl_topology_host(); + + /* + * Root ports (single host bridge downstream) are handled by + * platform driver + */ + if (port->uport != host_dev) + rc = add_passthrough_decoder(port); + put_cxl_topology_host(host_dev); + return rc; + } /* * All ports should have component registers except for the platform From patchwork Thu Dec 2 04:37:49 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651731 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 3970AC4167B for ; Thu, 2 Dec 2021 04:40:33 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355533AbhLBEny (ORCPT ); Wed, 1 Dec 2021 23:43:54 -0500 Received: 
from mga09.intel.com ([134.134.136.24]:61244 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355539AbhLBEnq (ORCPT ); Wed, 1 Dec 2021 23:43:46 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438388" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438388" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:11 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717501" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:10 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 13/14] cxl/port: Cleanup adding passthrough decoders Date: Wed, 1 Dec 2021 20:37:49 -0800 Message-Id: <20211202043750.3501494-14-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org This makes the resulting code easier to read and matches similar code already existing in the port driver. 
Signed-off-by: Ben Widawsky --- drivers/cxl/port.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index 527b027dcf24..4100cf395ec3 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -243,13 +243,8 @@ static int add_passthrough_decoder(struct cxl_port *port) dport = list_first_entry(&port->dports, typeof(*dport), list); single_port_map[0] = dport->port_id; - rc = cxl_decoder_add_locked(cxld, single_port_map); - if (rc) - put_device(&cxld->dev); - else - rc = cxl_decoder_autoremove(&port->dev, cxld); - - if (rc == 0) + rc = add_hdm_decoder(port, cxld, single_port_map); + if (!rc) dev_dbg(&port->dev, "add: %s\n", dev_name(&cxld->dev)); return rc; From patchwork Thu Dec 2 04:37:50 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ben Widawsky X-Patchwork-Id: 12651735 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 092B2C4321E for ; Thu, 2 Dec 2021 04:40:33 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1355526AbhLBEnx (ORCPT ); Wed, 1 Dec 2021 23:43:53 -0500 Received: from mga09.intel.com ([134.134.136.24]:61242 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1355541AbhLBEnr (ORCPT ); Wed, 1 Dec 2021 23:43:47 -0500 X-IronPort-AV: E=McAfee;i="6200,9189,10185"; a="236438390" X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="236438390" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:11 -0800 X-IronPort-AV: E=Sophos;i="5.87,281,1631602800"; d="scan'208";a="745717507" Received: from liudanie-mobl1.amr.corp.intel.com (HELO bad-guy.kumite) ([10.252.143.85]) by 
fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 01 Dec 2021 20:40:11 -0800 From: Ben Widawsky To: linux-cxl@vger.kernel.org Cc: Ben Widawsky , Randy Dunlap , Alison Schofield , Dan Williams , Ira Weiny , Jonathan Cameron , Vishal Verma Subject: [PATCH v2 14/14] cxl/mem: Introduce cxl_mem driver Date: Wed, 1 Dec 2021 20:37:50 -0800 Message-Id: <20211202043750.3501494-15-ben.widawsky@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20211202043750.3501494-1-ben.widawsky@intel.com> References: <20211202043750.3501494-1-ben.widawsky@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-cxl@vger.kernel.org Add a driver that is capable of determining whether a device is in a CXL.mem routed part of the topology. This driver allows a higher level driver - such as one controlling CXL regions, which is itself a set of CXL devices - to easily determine if the CXL devices are CXL.mem capable by checking if the driver has bound. CXL memory device services may also be provided by this driver though none are needed as of yet. cxl_mem also plays the part of registering itself as an endpoint port, which is a required step to enumerate the device's HDM decoder resources. Even though cxl_mem driver is the only consumer of the new cxl_scan_ports() introduced in cxl_core, because that functionality has PCIe specificity it is kept out of this driver. As part of this patch, find_dport_by_dev() is promoted to the cxl_core's set of APIs for use by the new driver. 
Reported-by: Randy Dunlap Signed-off-by: Ben Widawsky --- Changes since v1: - Remove duplicative CXL_MEM config option (Randy Dunlap) - Remove stray newline (Jonathan) - Cleanup port addition (Jonathan) - Move consumer of opaque data to this patch (Jonathan) - Add documentation for root_port link (Jonathan) - Handle DVSEC checking in this driver (Dan) - Mark DVSEC ranges as in use in case BIOS didn't (Dan) --- Documentation/ABI/testing/sysfs-bus-cxl | 9 + .../driver-api/cxl/memory-devices.rst | 9 + drivers/cxl/Makefile | 2 + drivers/cxl/acpi.c | 17 +- drivers/cxl/core/Makefile | 1 + drivers/cxl/core/bus.c | 125 ++++++++ drivers/cxl/core/core.h | 3 + drivers/cxl/core/memdev.c | 2 +- drivers/cxl/core/pci.c | 117 +++++++ drivers/cxl/cxl.h | 8 + drivers/cxl/cxlmem.h | 3 + drivers/cxl/mem.c | 285 ++++++++++++++++++ drivers/cxl/pci.h | 3 + drivers/cxl/port.c | 12 +- tools/testing/cxl/Kbuild | 1 + 15 files changed, 578 insertions(+), 19 deletions(-) create mode 100644 drivers/cxl/core/pci.c create mode 100644 drivers/cxl/mem.c diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl index 0b6a2e6e8fbb..9b7f3c272138 100644 --- a/Documentation/ABI/testing/sysfs-bus-cxl +++ b/Documentation/ABI/testing/sysfs-bus-cxl @@ -7,6 +7,15 @@ Description: Memory Device Output Payload in the CXL-2.0 specification. +What: /sys/bus/cxl/devices/memX/root_port +Date: November, 2021 +KernelVersion: v5.17 +Contact: linux-cxl@vger.kernel.org +Description: + (RO) Link to the upstream CXL-2.0 root port. This link may be + used by userspace to help build a representation of the CXL + topology. 
+ What: /sys/bus/cxl/devices/memX/ram/size Date: December, 2020 KernelVersion: v5.12 diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst index fbf0393cdddc..b4ff5f209c34 100644 --- a/Documentation/driver-api/cxl/memory-devices.rst +++ b/Documentation/driver-api/cxl/memory-devices.rst @@ -28,6 +28,9 @@ CXL Memory Device .. kernel-doc:: drivers/cxl/pci.c :internal: +.. kernel-doc:: drivers/cxl/mem.c + :doc: cxl mem + CXL Port -------- .. kernel-doc:: drivers/cxl/port.c @@ -47,6 +50,12 @@ CXL Core .. kernel-doc:: drivers/cxl/core/bus.c :identifiers: +.. kernel-doc:: drivers/cxl/core/pci.c + :doc: cxl core pci + +.. kernel-doc:: drivers/cxl/core/pci.c + :identifiers: + .. kernel-doc:: drivers/cxl/core/pmem.c :doc: cxl pmem diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile index 56fcac2323cb..ce267ef11d93 100644 --- a/drivers/cxl/Makefile +++ b/drivers/cxl/Makefile @@ -1,10 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CXL_BUS) += core/ obj-$(CONFIG_CXL_PCI) += cxl_pci.o +obj-$(CONFIG_CXL_MEM) += cxl_mem.o obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o obj-$(CONFIG_CXL_PORT) += cxl_port.o +cxl_mem-y := mem.o cxl_pci-y := pci.o cxl_acpi-y := acpi.o cxl_pmem-y := pmem.o diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 7bb5699fc1ce..be4f4b767d37 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -172,21 +172,6 @@ __mock int match_add_root_ports(struct pci_dev *pdev, void *data) return 0; } -static struct cxl_dport *find_dport_by_dev(struct cxl_port *port, struct device *dev) -{ - struct cxl_dport *dport; - - device_lock(&port->dev); - list_for_each_entry(dport, &port->dports, list) - if (dport->dport == dev) { - device_unlock(&port->dev); - return dport; - } - - device_unlock(&port->dev); - return NULL; -} - __mock struct acpi_device *to_cxl_host_bridge(struct device *host, struct device *dev) { @@ -217,7 +202,7 @@ static int 
add_host_bridge_uport(struct device *match, void *arg) if (!bridge) return 0; - dport = find_dport_by_dev(root_port, match); + dport = cxl_find_dport_by_dev(root_port, match); if (!dport) { dev_dbg(host, "host bridge expected and not found\n"); return 0; diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile index 40ab50318daf..5b8ec478fb0b 100644 --- a/drivers/cxl/core/Makefile +++ b/drivers/cxl/core/Makefile @@ -7,3 +7,4 @@ cxl_core-y += pmem.o cxl_core-y += regs.o cxl_core-y += memdev.o cxl_core-y += mbox.o +cxl_core-y += pci.o diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 34a308708a99..e8063cb7c5c8 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "core.h" /** @@ -531,6 +532,111 @@ struct cxl_port *devm_cxl_add_port(struct device *uport, } EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL); +static int add_upstream_port(struct device *host, struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct cxl_port *parent_port; + struct cxl_register_map map; + struct cxl_port *port; + int rc; + + /* A port is useless if there are no component registers */ + rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map); + if (rc) + return rc; + + parent_port = find_parent_cxl_port(pdev); + if (!parent_port) + return -ENODEV; + + if (!parent_port->dev.driver) { + dev_dbg(dev, "Upstream port has no driver\n"); + put_device(&parent_port->dev); + return -ENODEV; + } + + port = devm_cxl_add_port(dev, cxl_regmap_to_base(pdev, &map), + parent_port); + put_device(&parent_port->dev); + if (IS_ERR(port)) + dev_err(dev, "Failed to add upstream port %ld\n", + PTR_ERR(port)); + else + dev_dbg(dev, "Added CXL port\n"); + + return rc; +} + +static int add_downstream_port(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct cxl_port *parent_port; + struct cxl_register_map map; + u32 lnkcap, port_num; + int rc; + + /* + * Ports are to be scanned from top 
down. Therefore, the upstream port + * must already exist. + */ + parent_port = find_parent_cxl_port(pdev); + if (!parent_port) + return -ENODEV; + + if (!parent_port->dev.driver) { + dev_dbg(dev, "Host port to dport has no driver\n"); + put_device(&parent_port->dev); + return -ENODEV; + } + + if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP, + &lnkcap) != PCIBIOS_SUCCESSFUL) + return 1; + port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap); + + rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map); + if (rc) + dev_dbg(dev, "Failed to obtain component registers\n"); + + rc = cxl_add_dport(parent_port, dev, port_num, + cxl_regmap_to_base(pdev, &map), false); + put_device(&parent_port->dev); + if (rc) + dev_err(dev, "Failed to add downstream port to %s\n", + dev_name(&parent_port->dev)); + else + dev_dbg(dev, "Added downstream port to %s\n", + dev_name(&parent_port->dev)); + + return rc; +} + +static int match_add_ports(struct pci_dev *pdev, void *data) +{ + struct device *dev = &pdev->dev; + struct device *host = data; + + if (is_cxl_switch_usp((dev))) + return add_upstream_port(host, pdev); + else if (is_cxl_switch_dsp((dev))) + return add_downstream_port(pdev); + else + return 0; +} + +/** + * cxl_scan_ports() - Adds all ports for the subtree beginning with @dport + * @dport: Beginning node of the CXL topology + */ +void cxl_scan_ports(struct cxl_dport *dport) +{ + struct device *d = dport->dport; + struct pci_dev *pdev = to_pci_dev(d); + + pci_walk_bus(pdev->bus, match_add_ports, &dport->port->dev); +} +EXPORT_SYMBOL_NS_GPL(cxl_scan_ports, CXL); + static struct cxl_dport *find_dport(struct cxl_port *port, int id) { struct cxl_dport *dport; @@ -614,6 +720,23 @@ int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id, } EXPORT_SYMBOL_NS_GPL(cxl_add_dport, CXL); +struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port, + struct device *dev) +{ + struct cxl_dport *dport; + + device_lock(&port->dev); + 
list_for_each_entry(dport, &port->dports, list) + if (dport->dport == dev) { + device_unlock(&port->dev); + return dport; + } + + device_unlock(&port->dev); + return NULL; +} +EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL); + static int decoder_populate_targets(struct cxl_decoder *cxld, struct cxl_port *port, int *target_map) { @@ -921,6 +1044,8 @@ static int cxl_device_id(struct device *dev) return CXL_DEVICE_NVDIMM; if (dev->type == &cxl_port_type) return CXL_DEVICE_PORT; + if (dev->type == &cxl_memdev_type) + return CXL_DEVICE_MEMORY_EXPANDER; return 0; } diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index e0c9aacc4e9c..c5836f071eaa 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -6,6 +6,7 @@ extern const struct device_type cxl_nvdimm_bridge_type; extern const struct device_type cxl_nvdimm_type; +extern const struct device_type cxl_memdev_type; extern struct attribute_group cxl_base_attribute_group; @@ -20,4 +21,6 @@ void cxl_memdev_exit(void); void cxl_mbox_init(void); void cxl_mbox_exit(void); +struct cxl_port *find_parent_cxl_port(struct pci_dev *pdev); + #endif /* __CXL_CORE_H__ */ diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index 61029cb7ac62..149665fd2d3f 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -127,7 +127,7 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = { NULL, }; -static const struct device_type cxl_memdev_type = { +const struct device_type cxl_memdev_type = { .name = "cxl_memdev", .release = cxl_memdev_release, .devnode = cxl_memdev_devnode, diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c new file mode 100644 index 000000000000..6821f31a4e52 --- /dev/null +++ b/drivers/cxl/core/pci.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2021 Intel Corporation. All rights reserved. 
*/ +#include +#include +#include +#include +#include "core.h" + +/** + * DOC: cxl core pci + * + * Compute Express Link protocols are layered on top of PCIe. CXL core provides + * a set of helpers for CXL interactions which occur via PCIe. + */ + +/** + * find_parent_cxl_port() - Finds parent port through PCIe mechanisms + * @pdev: PCIe USP or DSP to find an upstream port for + * + * Once all CXL ports are enumerated, there is no need to reference the PCIe + * parallel universe as all downstream ports are contained in a linked list, and + * all upstream ports are accessible via pointer. During the enumeration, it is + * very convenient to be able to peak up one level in the hierarchy without + * needing the established relationship between data structures so that the + * parenting can be done as the ports/dports are created. + * + * A reference is kept to the found port. + */ +struct cxl_port *find_parent_cxl_port(struct pci_dev *pdev) +{ + struct device *parent_dev, *gparent_dev; + const int type = pci_pcie_type(pdev); + + /* Parent is either a downstream port, or root port */ + parent_dev = get_device(pdev->dev.parent); + + if (is_cxl_switch_usp(&pdev->dev)) { + if (dev_WARN_ONCE(&pdev->dev, + type != PCI_EXP_TYPE_DOWNSTREAM && + type != PCI_EXP_TYPE_ROOT_PORT, + "Parent not downstream\n")) + goto err; + + /* + * Grandparent is either an upstream port or a platform device that has + * been added as a cxl_port already. + */ + gparent_dev = get_device(parent_dev->parent); + put_device(parent_dev); + + return to_cxl_port(gparent_dev); + } else if (is_cxl_switch_dsp(&pdev->dev)) { + if (dev_WARN_ONCE(&pdev->dev, type != PCI_EXP_TYPE_UPSTREAM, + "Parent not upstream")) + goto err; + return to_cxl_port(parent_dev); + } + +err: + dev_WARN(&pdev->dev, "Invalid topology\n"); + put_device(parent_dev); + return NULL; +} + +/* + * Unlike endpoints, switches don't discern CXL.mem capability. Simply finding + * the DVSEC is sufficient. 
+ */ +static bool is_cxl_switch(struct pci_dev *pdev) +{ + return pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL, + CXL_DVSEC_PORT_EXTENSIONS); +} + +/** + * is_cxl_switch_usp() - Is the device a CXL.mem enabled switch + * @dev: Device to query for switch type + * + * If the device is a CXL.mem capable upstream switch port return true; + * otherwise return false. + */ +bool is_cxl_switch_usp(struct device *dev) +{ + struct pci_dev *pdev; + + if (!dev_is_pci(dev)) + return false; + + pdev = to_pci_dev(dev); + + return pci_is_pcie(pdev) && + pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && + is_cxl_switch(pdev); +} +EXPORT_SYMBOL_NS_GPL(is_cxl_switch_usp, CXL); + +/** + * is_cxl_switch_dsp() - Is the device a CXL.mem enabled switch + * @dev: Device to query for switch type + * + * If the device is a CXL.mem capable downstream switch port return true; + * otherwise return false. + */ +bool is_cxl_switch_dsp(struct device *dev) +{ + struct pci_dev *pdev; + + if (!dev_is_pci(dev)) + return false; + + pdev = to_pci_dev(dev); + + return pci_is_pcie(pdev) && + pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM && + is_cxl_switch(pdev); +} +EXPORT_SYMBOL_NS_GPL(is_cxl_switch_dsp, CXL); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index df25dd20ff95..9e3091857906 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -275,6 +275,7 @@ struct cxl_walk_context { * @decoder_ida: allocator for decoder ids * @component_reg_phys: component register capability base address (optional) * @rescan_work: worker object for bus rescans after port additions + * @data: opaque data with driver specific usage */ struct cxl_port { struct device dev; @@ -284,6 +285,7 @@ struct cxl_port { struct ida decoder_ida; resource_size_t component_reg_phys; struct work_struct rescan_work; + void *data; }; /** @@ -294,6 +296,7 @@ struct cxl_port { * @port: reference to cxl_port that contains this downstream port * @list: node for a cxl_port's list of cxl_dport instances * @root_port_link: node 
for global list of root ports + * @data: Opaque data passed by other drivers, used by port driver */ struct cxl_dport { struct device *dport; @@ -302,16 +305,20 @@ struct cxl_dport { struct cxl_port *port; struct list_head list; struct list_head root_port_link; + void *data; }; struct cxl_port *to_cxl_port(struct device *dev); struct cxl_port *devm_cxl_add_port(struct device *uport, resource_size_t component_reg_phys, struct cxl_port *parent_port); +void cxl_scan_ports(struct cxl_dport *root_port); int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id, resource_size_t component_reg_phys, bool root_port); struct cxl_dport *cxl_get_root_dport(struct device *dev); +struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port, + struct device *dev); struct cxl_decoder *to_cxl_decoder(struct device *dev); bool is_root_decoder(struct device *dev); @@ -350,6 +357,7 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv); #define CXL_DEVICE_NVDIMM_BRIDGE 1 #define CXL_DEVICE_NVDIMM 2 #define CXL_DEVICE_PORT 3 +#define CXL_DEVICE_MEMORY_EXPANDER 4 #define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*") #define CXL_MODALIAS_FMT "cxl:t%d" diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index ebb4d1cdded2..68cc143d2273 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -35,12 +35,15 @@ * @cdev: char dev core object for ioctl operations * @cxlds: The device state backing this device * @id: id number of this memdev instance. 
+ * @component_reg_phys: register base of component registers + * @root_port: Hostbridge's root port connected to this endpoint */ struct cxl_memdev { struct device dev; struct cdev cdev; struct cxl_dev_state *cxlds; int id; + struct cxl_dport *root_port; }; static inline struct cxl_memdev *to_cxl_memdev(struct device *dev) diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c new file mode 100644 index 000000000000..aaaabaeef24f --- /dev/null +++ b/drivers/cxl/mem.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2021 Intel Corporation. All rights reserved. */ +#include +#include +#include + +#include "cxlmem.h" +#include "pci.h" + +/** + * DOC: cxl mem + * + * CXL memory endpoint devices and switches are CXL capable devices that are + * participating in CXL.mem protocol. Their functionality builds on top of the + * CXL.io protocol that allows enumerating and configuring components via + * standard PCI mechanisms. + * + * The cxl_mem driver owns kicking off the enumeration of this CXL.mem + * capability. With the detection of a CXL capable endpoint, the driver will + * walk up to find the platform specific port it is connected to, and determine + * if there are intervening switches in the path. If there are switches, a + * secondary action to enumerate those (implemented in cxl_core). Finally the + * cxl_mem driver will add the device it is bound to as a CXL port for use in + * higher level operations. + */ + +struct walk_ctx { + struct cxl_dport *root_port; + bool has_switch; +}; + +/** + * walk_to_root_port() - Walk up to root port + * @dev: Device to walk up from + * @ctx: Information to populate while walking + * + * A platform specific driver such as cxl_acpi is responsible for scanning CXL + * topologies in a top-down fashion. If the CXL memory device is directly + * connected to the top level hostbridge, nothing else needs to be done. If + * however there are CXL components (ie. 
a CXL switch) in between an endpoint + * and a hostbridge the platform specific driver must be notified after all the + * components are enumerated. + */ +static void walk_to_root_port(struct device *dev, struct walk_ctx *ctx) +{ + struct cxl_dport *root_port; + + if (!dev->parent) + return; + + root_port = cxl_get_root_dport(dev); + if (root_port) + ctx->root_port = root_port; + + if (is_cxl_switch_usp(dev)) + ctx->has_switch = true; + + walk_to_root_port(dev->parent, ctx); +} + +static void remove_endpoint(void *_cxlmd) +{ + struct cxl_memdev *cxlmd = _cxlmd; + + if (cxlmd->root_port) + sysfs_remove_link(&cxlmd->dev.kobj, "root_port"); +} + +static int wait_for_media(struct cxl_memdev *cxlmd) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_endpoint_dvsec_info *info = cxlds->info; + int rc; + + if (!info) + return -ENXIO; + + if (!info->mem_enabled) + return -EBUSY; + + rc = cxlds->wait_media_ready(cxlds); + if (rc) + return rc; + + /* + * We know the device is active, and enabled, if any ranges are non-zero + * we'll need to check later before adding the port since that owns the + * HDM decoder registers. + */ + return 0; +} + +static int create_endpoint(struct device *dev, struct cxl_port *parent, + struct cxl_dport *dport) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_port *endpoint; + int rc; + + endpoint = devm_cxl_add_port(dev, cxlds->component_reg_phys, parent); + if (IS_ERR(endpoint)) + return PTR_ERR(endpoint); + + rc = sysfs_create_link(&cxlmd->dev.kobj, &dport->dport->kobj, + "root_port"); + if (rc) { + device_del(&endpoint->dev); + return rc; + } + dev_dbg(dev, "add: %s\n", dev_name(&endpoint->dev)); + + return devm_add_action_or_reset(dev, remove_endpoint, cxlmd); +} + +/** + * hdm_decode_init() - Setup HDM decoding for the endpoint + * @cxlds: Device state + * + * Additionally, enables global HDM decoding. Warning: don't call this outside + * of probe. 
Once probe is complete, the port driver owns all access to the HDM + * decoder registers. + * + * Returns: false if DVSEC Ranges are being used instead of HDM decoders; + * otherwise returns true. + */ +static bool hdm_decode_init(struct cxl_dev_state *cxlds) +{ + struct cxl_endpoint_dvsec_info *info = cxlds->info; + struct cxl_register_map map; + struct cxl_component_reg_map *cmap = &map.component_map; + + bool global_enable; + void __iomem *crb; + u32 global_ctrl; + + /* map hdm decoder */ + crb = ioremap(cxlds->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE); + if (!crb) { + dev_dbg(cxlds->dev, "Failed to map component registers\n"); + return info->ranges; + } + + cxl_probe_component_regs(cxlds->dev, crb, cmap); + if (!cmap->hdm_decoder.valid) { + iounmap(crb); + dev_dbg(cxlds->dev, "Invalid HDM decoder registers\n"); + return info->ranges; + } + + global_ctrl = readl(crb + cmap->hdm_decoder.offset + + CXL_HDM_DECODER_CTRL_OFFSET); + global_enable = global_ctrl & CXL_HDM_DECODER_ENABLE; + if (!global_enable && info->ranges) { + iounmap(crb); + dev_dbg(cxlds->dev, "DVSEC regions\n"); + return false; + } + + /* + * Turn on global enable now since DVSEC ranges aren't being used and + * we'll eventually want the decoder enabled. This also prevents special + * casing in the port driver since this only applies to endpoints. 
+ */ + if (!global_enable) { + dev_dbg(cxlds->dev, "Enabling HDM decode\n"); + writel(global_ctrl | CXL_HDM_DECODER_ENABLE, + crb + cmap->hdm_decoder.offset + + CXL_HDM_DECODER_CTRL_OFFSET); + } + + iounmap(crb); + return true; +} + +static int cxl_mem_probe(struct device *dev) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_port *hostbridge, *parent_port; + struct walk_ctx ctx = { NULL, false }; + struct cxl_dport *dport; + int rc; + + rc = wait_for_media(cxlmd); + if (rc) { + dev_err(dev, "Media not active (%d)\n", rc); + return rc; + } + + /* + * If DVSEC ranges are being used instead of HDM decoder registers there + * is no use in trying to manage those. + */ + if (!hdm_decode_init(cxlds)) { + struct cxl_endpoint_dvsec_info *info = cxlds->info; + int i; + + /* */ + for (i = 0; i < 2; i++) { + u64 base, size; + + /* + * Give a nice warning to the user that BIOS has really + * botched things for them if it didn't place DVSEC + * ranges in the memory map. + */ + base = info->dvsec_range[i].start; + size = range_len(&info->dvsec_range[i]); + if (size && !region_intersects(base, size, + IORESOURCE_SYSTEM_RAM, + IORES_DESC_NONE)) { + dev_err(dev, + "DVSEC range %#llx-%#llx must be reserved by BIOS, but isn't\n", + base, base + size - 1); + } + } + dev_err(dev, + "Active DVSEC range registers in use. Will not bind.\n"); + return -EBUSY; + } + + walk_to_root_port(dev, &ctx); + + /* + * Couldn't find a CXL capable root port. This may happen even with a + * CXL capable topology if cxl_acpi hasn't completed yet. A rescan will + * occur. 
+ */ + if (!ctx.root_port) + return -ENODEV; + + hostbridge = ctx.root_port->port; + device_lock(&hostbridge->dev); + + /* hostbridge has no port driver, the topology isn't enabled yet */ + if (!hostbridge->dev.driver) { + device_unlock(&hostbridge->dev); + return -ENODEV; + } + + /* No switch + found root port means we're done */ + if (!ctx.has_switch) { + parent_port = to_cxl_port(&hostbridge->dev); + dport = ctx.root_port; + goto out; + } + + /* Walk down from the root port and add all switches */ + cxl_scan_ports(ctx.root_port); + +#if 0 + /* FIXME: Find the parent_port without PCI domain */ + parent_port = find_parent_cxl_port(to_pci_dev(dev)); + dport = cxl_find_dport_by_dev(parent_port, dev->parent); + if (!dport) { + rc = -ENODEV; + goto err_out; + } +#endif + +out: + rc = create_endpoint(dev, parent_port, dport); + if (rc) + goto err_out; + + cxlmd->root_port = ctx.root_port; + +err_out: + device_unlock(&hostbridge->dev); + return rc; +} + +static struct cxl_driver cxl_mem_driver = { + .name = "cxl_mem", + .probe = cxl_mem_probe, + .id = CXL_DEVICE_MEMORY_EXPANDER, +}; + +module_cxl_driver(cxl_mem_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(CXL); +MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER); +MODULE_SOFTDEP("pre: cxl_port"); diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h index 7eb38030e376..bf527399a5de 100644 --- a/drivers/cxl/pci.h +++ b/drivers/cxl/pci.h @@ -69,4 +69,7 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev, return pci_resource_start(pdev, map->barno) + map->block_offset; } +bool is_cxl_switch_usp(struct device *dev); +bool is_cxl_switch_dsp(struct device *dev); + #endif /* __CXL_PCI_H__ */ diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index 4100cf395ec3..21cc5484877f 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -106,8 +106,16 @@ static u64 get_decoder_size(void __iomem *hdm_decoder, int n) static bool is_endpoint_port(struct cxl_port *port) { - /* Endpoints can't be ports... yet! 
*/ - return false; + /* + * It's tempting to just check list_empty(port->dports) here, but this + * might get called before dports are setup for a port. + */ + + if (!port->uport->driver) + return false; + + return to_cxl_drv(port->uport->driver)->id == + CXL_DEVICE_MEMORY_EXPANDER; } static void rescan_ports(struct work_struct *work) diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild index 1acdf2fc31c5..4c2359772f3c 100644 --- a/tools/testing/cxl/Kbuild +++ b/tools/testing/cxl/Kbuild @@ -30,6 +30,7 @@ cxl_core-y += $(CXL_CORE_SRC)/pmem.o cxl_core-y += $(CXL_CORE_SRC)/regs.o cxl_core-y += $(CXL_CORE_SRC)/memdev.o cxl_core-y += $(CXL_CORE_SRC)/mbox.o +cxl_core-y += $(CXL_CORE_SRC)/pci.o cxl_core-y += config_check.o cxl_core-y += mock_pmem.o