diff mbox series

[v5,03/12] ACPI: HMAT: Introduce 2 levels of generic port access class

Message ID 20240206222951.1833098-4-dave.jiang@intel.com (mailing list archive)
State Handled Elsewhere, archived
Headers show
Series cxl: Add support to report region access coordinates to numa nodes | expand

Commit Message

Dave Jiang Feb. 6, 2024, 10:28 p.m. UTC
In order to compute access0 and access1 classes for CXL memory, 2 levels
of generic port information must be stored. Access0 will indicate the
generic port access coordinates to the closest initiator and access1
will indicate the generic port access coordinates to the closest CPU.

Cc: Rafael J. Wysocki <rafael@kernel.org>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/acpi/numa/hmat.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

Comments

Jonathan Cameron Feb. 15, 2024, 4:44 p.m. UTC | #1
On Tue, 6 Feb 2024 15:28:31 -0700
Dave Jiang <dave.jiang@intel.com> wrote:

> In order to compute access0 and access1 classes for CXL memory, 2 levels
> of generic port information must be stored. Access0 will indicate the
> generic port access coordinates to the closest initiator and access1
> will indicate the generic port access coordinates to the closest CPU.
> 
> Cc: Rafael J. Wysocki <rafael@kernel.org>
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>

Grumble.  I never liked Memory Proximity Domain Attributes Structure.
Adds little value and for ports I don't think you should consider it
(because it's about GI or Processor to Memory connections and
 Generic Ports aren't allowed in either field).

Other than dropping that short cut, LGTM.

> ---
>  drivers/acpi/numa/hmat.c | 18 ++++++++++++------
>  1 file changed, 12 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
> index e0144cfbf1f3..8dbb0e366059 100644
> --- a/drivers/acpi/numa/hmat.c
> +++ b/drivers/acpi/numa/hmat.c
> @@ -59,7 +59,8 @@ struct target_cache {
>  };
>  
>  enum {
> -	NODE_ACCESS_CLASS_GENPORT_SINK = ACCESS_COORDINATE_MAX,
> +	NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL = ACCESS_COORDINATE_MAX,
> +	NODE_ACCESS_CLASS_GENPORT_SINK_CPU,
>  	NODE_ACCESS_CLASS_MAX,
>  };
>  
> @@ -141,7 +142,7 @@ int acpi_get_genport_coordinates(u32 uid,
>  	if (!target)
>  		return -ENOENT;
>  
> -	*coord = target->coord[NODE_ACCESS_CLASS_GENPORT_SINK];
> +	*coord = target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
>  
>  	return 0;
>  }
> @@ -695,7 +696,8 @@ static void hmat_update_target_attrs(struct memory_target *target,
>  	int i;
>  
>  	/* Don't update for generic port if there's no device handle */
> -	if (access == NODE_ACCESS_CLASS_GENPORT_SINK &&
> +	if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
> +	     access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
>  	    !(*(u16 *)target->gen_port_device_handle))
>  		return;
>  
> @@ -707,7 +709,8 @@ static void hmat_update_target_attrs(struct memory_target *target,
>  	 */
>  	if (target->processor_pxm != PXM_INVAL) {
>  		cpu_nid = pxm_to_node(target->processor_pxm);
> -		if (access == ACCESS_COORDINATE_LOCAL ||
> +		if ((access == ACCESS_COORDINATE_LOCAL ||
> +		     access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL) &&
The comment above this says:
	/*
	 * If the Address Range Structure provides a local processor pxm, set
	 * only that one. Otherwise, find the best performance attributes and
	 * collect all initiators that match.
	 */

Assuming that is correct, under what circumstances is it relevant to a
generic port?  I'm hoping no one builds systems with RAM that is local to
a port?

Note that the comment requires some archaeology - the Address Range structure
has been renamed as Memory Proximity Domain Attributes Structure. (see ACPI 6.2)
And the 'local processor PXM' was probably originally Processor Proximity Domain.
That has now become "Proximity Domain for the Attached Initiator." It's
basically used only to override the HMAT distances and say:
Memory domain X is directly attached to Processor Y
Here we don't have a memory domain (generic port) and so I don't think that
case is relevant.

So leave this block alone.

>  		    node_state(cpu_nid, N_CPU)) {
>  			set_bit(target->processor_pxm, p_nodes);
>  			return;
> @@ -736,7 +739,8 @@ static void hmat_update_target_attrs(struct memory_target *target,
>  		list_for_each_entry(initiator, &initiators, node) {
>  			u32 value;
>  
> -			if (access == ACCESS_COORDINATE_CPU &&
> +			if ((access == ACCESS_COORDINATE_CPU &&
> +			     access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&

This one looks to be correct.

>  			    !initiator->has_cpu) {
>  				clear_bit(initiator->processor_pxm, p_nodes);
>  				continue;
> @@ -775,7 +779,9 @@ static void hmat_update_generic_target(struct memory_target *target)
>  	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
>  
>  	hmat_update_target_attrs(target, p_nodes,
> -				 NODE_ACCESS_CLASS_GENPORT_SINK);
> +				 NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL);
> +	hmat_update_target_attrs(target, p_nodes,
> +				 NODE_ACCESS_CLASS_GENPORT_SINK_CPU);
>  }
>  
>  static void hmat_register_target_initiators(struct memory_target *target)
diff mbox series

Patch

diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index e0144cfbf1f3..8dbb0e366059 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -59,7 +59,8 @@  struct target_cache {
 };
 
 enum {
-	NODE_ACCESS_CLASS_GENPORT_SINK = ACCESS_COORDINATE_MAX,
+	NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL = ACCESS_COORDINATE_MAX,
+	NODE_ACCESS_CLASS_GENPORT_SINK_CPU,
 	NODE_ACCESS_CLASS_MAX,
 };
 
@@ -141,7 +142,7 @@  int acpi_get_genport_coordinates(u32 uid,
 	if (!target)
 		return -ENOENT;
 
-	*coord = target->coord[NODE_ACCESS_CLASS_GENPORT_SINK];
+	*coord = target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
 
 	return 0;
 }
@@ -695,7 +696,8 @@  static void hmat_update_target_attrs(struct memory_target *target,
 	int i;
 
 	/* Don't update for generic port if there's no device handle */
-	if (access == NODE_ACCESS_CLASS_GENPORT_SINK &&
+	if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
+	     access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
 	    !(*(u16 *)target->gen_port_device_handle))
 		return;
 
@@ -707,7 +709,8 @@  static void hmat_update_target_attrs(struct memory_target *target,
 	 */
 	if (target->processor_pxm != PXM_INVAL) {
 		cpu_nid = pxm_to_node(target->processor_pxm);
-		if (access == ACCESS_COORDINATE_LOCAL ||
+		if ((access == ACCESS_COORDINATE_LOCAL ||
+		     access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL) &&
 		    node_state(cpu_nid, N_CPU)) {
 			set_bit(target->processor_pxm, p_nodes);
 			return;
@@ -736,7 +739,8 @@  static void hmat_update_target_attrs(struct memory_target *target,
 		list_for_each_entry(initiator, &initiators, node) {
 			u32 value;
 
-			if (access == ACCESS_COORDINATE_CPU &&
+			if ((access == ACCESS_COORDINATE_CPU &&
+			     access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
 			    !initiator->has_cpu) {
 				clear_bit(initiator->processor_pxm, p_nodes);
 				continue;
@@ -775,7 +779,9 @@  static void hmat_update_generic_target(struct memory_target *target)
 	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
 
 	hmat_update_target_attrs(target, p_nodes,
-				 NODE_ACCESS_CLASS_GENPORT_SINK);
+				 NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL);
+	hmat_update_target_attrs(target, p_nodes,
+				 NODE_ACCESS_CLASS_GENPORT_SINK_CPU);
 }
 
 static void hmat_register_target_initiators(struct memory_target *target)