Message ID | 168333151653.2290593.15815859836381468990.stgit@djiang5-mobl3 (mailing list archive)
---|---
State | Handled Elsewhere, archived
Series | acpi: numa: add target support for generic port to HMAT parsing
On Fri, 05 May 2023 17:05:16 -0700 Dave Jiang <dave.jiang@intel.com> wrote:

> Create enums to provide named indexing for the access coordinate array.
> This is in preparation for adding generic port support which will add a
> third index in the array to keep the generic port attributes separate from
> the memory attributes.
>
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>

> ---
>  drivers/acpi/numa/hmat.c | 35 ++++++++++++++++++++++++-----------
>  1 file changed, 24 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
> index f9ff992038fa..abed728bf09d 100644
> --- a/drivers/acpi/numa/hmat.c
> +++ b/drivers/acpi/numa/hmat.c
> @@ -57,12 +57,18 @@ struct target_cache {
>  	struct node_cache_attrs cache_attrs;
>  };
>
> +enum {
> +	NODE_ACCESS_CLASS_0 = 0,
> +	NODE_ACCESS_CLASS_1,
> +	NODE_ACCESS_CLASS_MAX,
> +};
> +
>  struct memory_target {
>  	struct list_head node;
>  	unsigned int memory_pxm;
>  	unsigned int processor_pxm;
>  	struct resource memregions;
> -	struct access_coordinate coord[2];
> +	struct access_coordinate coord[NODE_ACCESS_CLASS_MAX];
>  	struct list_head caches;
>  	struct node_cache_attrs cache_attrs;
>  	bool registered;
> @@ -338,10 +344,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
>  		if (mem_hier == ACPI_HMAT_MEMORY) {
>  			target = find_mem_target(targs[targ]);
>  			if (target && target->processor_pxm == inits[init]) {
> -				hmat_update_target_access(target, type, value, 0);
> +				hmat_update_target_access(target, type, value,
> +							  NODE_ACCESS_CLASS_0);
>  				/* If the node has a CPU, update access 1 */
>  				if (node_state(pxm_to_node(inits[init]), N_CPU))
> -					hmat_update_target_access(target, type, value, 1);
> +					hmat_update_target_access(target, type, value,
> +								  NODE_ACCESS_CLASS_1);
>  			}
>  		}
>  	}
> @@ -600,10 +608,12 @@ static void hmat_register_target_initiators(struct memory_target *target)
>  	 */
>  	if (target->processor_pxm != PXM_INVAL) {
>  		cpu_nid = pxm_to_node(target->processor_pxm);
> -		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
> +		register_memory_node_under_compute_node(mem_nid, cpu_nid,
> +							NODE_ACCESS_CLASS_0);
>  		access0done = true;
>  		if (node_state(cpu_nid, N_CPU)) {
> -			register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
> +			register_memory_node_under_compute_node(mem_nid, cpu_nid,
> +								NODE_ACCESS_CLASS_1);
>  			return;
>  		}
>  	}
> @@ -644,12 +654,13 @@ static void hmat_register_target_initiators(struct memory_target *target)
>  		}
>  		if (best)
>  			hmat_update_target_access(target, loc->hmat_loc->data_type,
> -						  best, 0);
> +						  best, NODE_ACCESS_CLASS_0);
>  	}
>
>  	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
>  		cpu_nid = pxm_to_node(i);
> -		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
> +		register_memory_node_under_compute_node(mem_nid, cpu_nid,
> +							NODE_ACCESS_CLASS_0);
>  	}
>  }
>
> @@ -681,11 +692,13 @@ static void hmat_register_target_initiators(struct memory_target *target)
>  			clear_bit(initiator->processor_pxm, p_nodes);
>  		}
>  		if (best)
> -			hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
> +			hmat_update_target_access(target, loc->hmat_loc->data_type, best,
> +						  NODE_ACCESS_CLASS_1);
>  	}
>  	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
>  		cpu_nid = pxm_to_node(i);
> -		register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
> +		register_memory_node_under_compute_node(mem_nid, cpu_nid,
> +							NODE_ACCESS_CLASS_1);
>  	}
>  }
>
> @@ -746,8 +759,8 @@ static void hmat_register_target(struct memory_target *target)
>  	if (!target->registered) {
>  		hmat_register_target_initiators(target);
>  		hmat_register_target_cache(target);
> -		hmat_register_target_perf(target, 0);
> -		hmat_register_target_perf(target, 1);
> +		hmat_register_target_perf(target, NODE_ACCESS_CLASS_0);
> +		hmat_register_target_perf(target, NODE_ACCESS_CLASS_1);
>  		target->registered = true;
>  	}
>  	mutex_unlock(&target_lock);
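The core of the patch is a common C idiom: replace magic array indices (0, 1) with an anonymous enum whose trailing *_MAX member sizes the array, so a third access class can later be appended without touching any existing call site. Below is a minimal standalone sketch of that idiom; the struct fields, the update_target_access() helper, and the latency values are simplified stand-ins for illustration, not the actual hmat.c internals.

```c
#include <stdio.h>

/*
 * Named indices for the access coordinate array, mirroring the patch.
 * A new class appended before NODE_ACCESS_CLASS_MAX automatically
 * grows every array sized with it.
 */
enum {
	NODE_ACCESS_CLASS_0 = 0,	/* access from any initiator */
	NODE_ACCESS_CLASS_1,		/* access from CPU initiators */
	NODE_ACCESS_CLASS_MAX,		/* array size, keep last */
};

/* Simplified stand-in for the kernel's struct access_coordinate. */
struct access_coordinate {
	unsigned int read_latency;
};

/* Simplified stand-in for struct memory_target. */
struct memory_target {
	struct access_coordinate coord[NODE_ACCESS_CLASS_MAX];
};

/* Hypothetical helper: record a latency value for one access class. */
static void update_target_access(struct memory_target *t,
				 unsigned int value, int access)
{
	t->coord[access].read_latency = value;
}

int main(void)
{
	struct memory_target t = { 0 };

	/* Named indices document intent at every call site. */
	update_target_access(&t, 100, NODE_ACCESS_CLASS_0);
	update_target_access(&t, 150, NODE_ACCESS_CLASS_1);

	for (int i = 0; i < NODE_ACCESS_CLASS_MAX; i++)
		printf("class %d: read latency %u\n",
		       i, t.coord[i].read_latency);
	return 0;
}
```

Appending a hypothetical NODE_ACCESS_CLASS_GENPORT entry just before NODE_ACCESS_CLASS_MAX would grow coord[] with no other changes, which is exactly the extensibility the changelog cites as preparation for generic port support.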