[v5,7/9] arm64: Topology, rename cluster_id

Message ID 20171201222330.18863-8-jeremy.linton@arm.com (mailing list archive)
State Deferred

Commit Message

Jeremy Linton Dec. 1, 2017, 10:23 p.m. UTC
Let's match the name of the arm64 topology field
to the kernel macro that uses it.

Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
---
 arch/arm64/include/asm/topology.h |  4 ++--
 arch/arm64/kernel/topology.c      | 27 ++++++++++++++-------------
 2 files changed, 16 insertions(+), 15 deletions(-)

Comments

Lorenzo Pieralisi Dec. 13, 2017, 6:02 p.m. UTC | #1
[+Morten, Dietmar]

$SUBJECT should be:

arm64: topology: rename cluster_id

On Fri, Dec 01, 2017 at 04:23:28PM -0600, Jeremy Linton wrote:
> Let's match the name of the arm64 topology field
> to the kernel macro that uses it.
> 
> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
> ---
>  arch/arm64/include/asm/topology.h |  4 ++--
>  arch/arm64/kernel/topology.c      | 27 ++++++++++++++-------------
>  2 files changed, 16 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
> index c4f2d50491eb..118136268f66 100644
> --- a/arch/arm64/include/asm/topology.h
> +++ b/arch/arm64/include/asm/topology.h
> @@ -7,14 +7,14 @@
>  struct cpu_topology {
>  	int thread_id;
>  	int core_id;
> -	int cluster_id;
> +	int physical_id;

package_id ?

It has been debated before, I know. Should we keep the cluster_id too
(even if it would be 1:1 mapped to package_id - for now) ?

There is also arch/arm to take into account; again, this patch is
just renaming (as it should have been named since the beginning) a
topology level, but we should consider everything from a legacy
perspective.

Lorenzo

Jeremy Linton Dec. 15, 2017, 4:36 p.m. UTC | #2
Hi,

On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
> [+Morten, Dietmar]
> 
> $SUBJECT should be:
> 
> arm64: topology: rename cluster_id

Sure..

> 
> On Fri, Dec 01, 2017 at 04:23:28PM -0600, Jeremy Linton wrote:
>> Let's match the name of the arm64 topology field
>> to the kernel macro that uses it.
>>
>> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
>> ---
>>   arch/arm64/include/asm/topology.h |  4 ++--
>>   arch/arm64/kernel/topology.c      | 27 ++++++++++++++-------------
>>   2 files changed, 16 insertions(+), 15 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
>> index c4f2d50491eb..118136268f66 100644
>> --- a/arch/arm64/include/asm/topology.h
>> +++ b/arch/arm64/include/asm/topology.h
>> @@ -7,14 +7,14 @@
>>   struct cpu_topology {
>>   	int thread_id;
>>   	int core_id;
>> -	int cluster_id;
>> +	int physical_id;
> 
> package_id ?

Given the macro is topology_physical_package_id, either makes sense to 
me. <shrug> I will change it in the next set.


> 
> It has been debated before, I know. Should we keep the cluster_id too
> (even if it would be 1:1 mapped to package_id - for now) ?

Well given that this patch replaces the patch that did that at your 
request..

I was hoping someone else would comment here, but my take at this point 
is that it doesn't really matter in a functional sense at the moment.
Like the chiplet discussion it can be the subject of a future patch 
along with the patches which tweak the scheduler to understand the split.

BTW, given that I'm OoO next week, and that the weeks following are the
holidays, I don't intend to repost this for a couple of weeks. I don't
think there are any issues with this set.

> 
> There is also arch/arm to take into account; again, this patch is
> just renaming (as it should have been named since the beginning) a
> topology level, but we should consider everything from a legacy
> perspective.
> 
> Lorenzo
Morten Rasmussen Dec. 18, 2017, 12:42 p.m. UTC | #3
On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
> Hi,
> 
> On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
> >[+Morten, Dietmar]
> >
> >$SUBJECT should be:
> >
> >arm64: topology: rename cluster_id
> 
> Sure..
> 
> >
> >On Fri, Dec 01, 2017 at 04:23:28PM -0600, Jeremy Linton wrote:
> >>[cut]
> >>  struct cpu_topology {
> >>  	int thread_id;
> >>  	int core_id;
> >>-	int cluster_id;
> >>+	int physical_id;
> >
> >package_id ?
> 
> Given the macro is topology_physical_package_id, either makes sense to me.
> <shrug> I will change it in the next set.

I would vote for package_id too. arch/arm has 'socket_id' though.

> >
> >It has been debated before, I know. Should we keep the cluster_id too
> >(even if it would be 1:1 mapped to package_id - for now) ?
> 
> Well given that this patch replaces the patch that did that at your
> request..
> 
> I was hoping someone else would comment here, but my take at this point is
> that it doesn't really matter in a functional sense at the moment.
> Like the chiplet discussion it can be the subject of a future patch along
> with the patches which tweak the scheduler to understand the split.
> 
> > BTW, given that I'm OoO next week, and that the weeks following are the holidays,
> > I don't intend to repost this for a couple of weeks. I don't think there are
> any issues with this set.
> 
> >
> >There is also arch/arm to take into account; again, this patch is
> >just renaming (as it should have been named since the beginning) a
> >topology level, but we should consider everything from a legacy
> >perspective.

arch/arm has gone for thread/core/socket for the three topology levels
it supports.

I'm not sure what short term value keeping cluster_id has? Isn't it just
about where we make the package = cluster assignment? Currently it is in
the definition of topology_physical_package_id. If we keep cluster_id
and add package_id, it gets moved into the MPIDR/DT parsing code. 

Keeping cluster_id and introducing a topology_cluster_id function could
help clean up some of the users of topology_physical_package_id that
currently assume package_id == cluster_id, though.
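
For illustration, a minimal sketch (hypothetical, not part of this patch)
of what keeping both fields could look like:

struct cpu_topology {
	int thread_id;
	int core_id;
	int cluster_id;		/* 1:1 with package_id for now */
	int package_id;
	cpumask_t thread_sibling;
	cpumask_t core_sibling;
};

#define topology_physical_package_id(cpu)	(cpu_topology[cpu].package_id)
#define topology_cluster_id(cpu)		(cpu_topology[cpu].cluster_id)

Callers that really mean "cluster" could then switch to
topology_cluster_id() instead of abusing topology_physical_package_id().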

Morten
Lorenzo Pieralisi Dec. 18, 2017, 3:47 p.m. UTC | #4
On Mon, Dec 18, 2017 at 12:42:29PM +0000, Morten Rasmussen wrote:
> On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
> > Hi,
> > 
> > On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
> > >[+Morten, Dietmar]
> > >
> > >$SUBJECT should be:
> > >
> > >arm64: topology: rename cluster_id
> > 
> > Sure..
> > 
> > >
> > >On Fri, Dec 01, 2017 at 04:23:28PM -0600, Jeremy Linton wrote:
> > >>[cut]
> > >>  struct cpu_topology {
> > >>  	int thread_id;
> > >>  	int core_id;
> > >>-	int cluster_id;
> > >>+	int physical_id;
> > >
> > >package_id ?
> > 
> > Given the macro is topology_physical_package_id, either makes sense to me.
> > <shrug> I will change it in the next set.
> 
> I would vote for package_id too. arch/arm has 'socket_id' though.
> 
> > >
> > >It has been debated before, I know. Should we keep the cluster_id too
> > >(even if it would be 1:1 mapped to package_id - for now) ?
> > 
> > Well given that this patch replaces the patch that did that at your
> > request..
> > 
> > I was hoping someone else would comment here, but my take at this point is
> > that it doesn't really matter in a functional sense at the moment.
> > Like the chiplet discussion it can be the subject of a future patch along
> > with the patches which tweak the scheduler to understand the split.
> > 
> > > BTW, given that I'm OoO next week, and that the weeks following are the holidays,
> > > I don't intend to repost this for a couple of weeks. I don't think there are
> > any issues with this set.
> > 
> > >
> > >There is also arch/arm to take into account; again, this patch is
> > >just renaming (as it should have been named since the beginning) a
> > >topology level, but we should consider everything from a legacy
> > >perspective.
> 
> arch/arm has gone for thread/core/socket for the three topology levels
> it supports.
> 
> I'm not sure what short term value keeping cluster_id has? Isn't it just
> about where we make the package = cluster assignment? Currently it is in
> the definition of topology_physical_package_id. If we keep cluster_id
> and add package_id, it gets moved into the MPIDR/DT parsing code. 
> 
> Keeping cluster_id and introducing a topology_cluster_id function could
> > help clean up some of the users of topology_physical_package_id that
> > currently assume package_id == cluster_id, though.

I think we should settle for a name (e.g. package_id), remove cluster_id
and convert the arch/arm socket_id to the same naming used on arm64 (for
consistency - it is just a variable rename).

This leaves us with the naming "cluster" only in the DT topology bindings,
which should be fine, given that "cluster" in that context is just a
"processor-container" - yes, we could have chosen a better name in
the first place, but that's what it is.

We should nuke the existing users of topology_physical_package_id()
that use it to identify clusters; I would not add another function for that
purpose - let's nip it in the bud.
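
Something like this, as a sketch of the end state I mean (naming still to
be agreed):

struct cpu_topology {
	int thread_id;
	int core_id;
	int package_id;		/* was cluster_id on arm64, socket_id on arm */
	cpumask_t thread_sibling;
	cpumask_t core_sibling;
};

#define topology_physical_package_id(cpu)	(cpu_topology[cpu].package_id)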

Lorenzo
Morten Rasmussen Dec. 19, 2017, 9:38 a.m. UTC | #5
On Mon, Dec 18, 2017 at 03:47:03PM +0000, Lorenzo Pieralisi wrote:
> On Mon, Dec 18, 2017 at 12:42:29PM +0000, Morten Rasmussen wrote:
> > On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
> > > Hi,
> > > 
> > > On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
> > > >[+Morten, Dietmar]
> > > >
> > > >$SUBJECT should be:
> > > >
> > > >arm64: topology: rename cluster_id
> > > 
> > > Sure..
> > > 
> > > >
> > > >On Fri, Dec 01, 2017 at 04:23:28PM -0600, Jeremy Linton wrote:
> > > >>[cut]
> > > >>  struct cpu_topology {
> > > >>  	int thread_id;
> > > >>  	int core_id;
> > > >>-	int cluster_id;
> > > >>+	int physical_id;
> > > >
> > > >package_id ?
> > > 
> > > Given the macro is topology_physical_package_id, either makes sense to me.
> > > <shrug> I will change it in the next set.
> > 
> > I would vote for package_id too. arch/arm has 'socket_id' though.
> > 
> > > >
> > > >It has been debated before, I know. Should we keep the cluster_id too
> > > >(even if it would be 1:1 mapped to package_id - for now) ?
> > > 
> > > Well given that this patch replaces the patch that did that at your
> > > request..
> > > 
> > > I was hoping someone else would comment here, but my take at this point is
> > > that it doesn't really matter in a functional sense at the moment.
> > > Like the chiplet discussion it can be the subject of a future patch along
> > > with the patches which tweak the scheduler to understand the split.
> > > 
> > > > BTW, given that I'm OoO next week, and that the weeks following are the holidays,
> > > > I don't intend to repost this for a couple of weeks. I don't think there are
> > > any issues with this set.
> > > 
> > > >
> > > >There is also arch/arm to take into account; again, this patch is
> > > >just renaming (as it should have been named since the beginning) a
> > > >topology level, but we should consider everything from a legacy
> > > >perspective.
> > 
> > arch/arm has gone for thread/core/socket for the three topology levels
> > it supports.
> > 
> > I'm not sure what short term value keeping cluster_id has? Isn't it just
> > about where we make the package = cluster assignment? Currently it is in
> > the definition of topology_physical_package_id. If we keep cluster_id
> > and add package_id, it gets moved into the MPIDR/DT parsing code. 
> > 
> > Keeping cluster_id and introducing a topology_cluster_id function could
> > > help clean up some of the users of topology_physical_package_id that
> > > currently assume package_id == cluster_id, though.
> 
> I think we should settle for a name (e.g. package_id), remove cluster_id
> and convert the arch/arm socket_id to the same naming used on arm64 (for
> consistency - it is just a variable rename).

Agreed. 

> This leaves us with the naming "cluster" only in the DT topology bindings,
> which should be fine, given that "cluster" in that context is just a
> "processor-container" - yes, we could have chosen a better name in
> the first place, but that's what it is.

I think having "clusters" in DT is fine as it represents the actual
hardware topology and uses the actual "Arm" term to describe it. The
default topology in Linux just doesn't have an equivalent topology
level, but that can be fixed. DT is, however, missing a notion of package.

> We should nuke the existing users of topology_physical_package_id()
> that use it to identify clusters; I would not add another function for that
> purpose - let's nip it in the bud.

Even better if that can be pulled off.

Morten
Xiongfeng Wang Jan. 2, 2018, 2:29 a.m. UTC | #6
Hi,

On 2017/12/18 20:42, Morten Rasmussen wrote:
> On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
>> Hi,
>>
>> On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
>>> [+Morten, Dietmar]
>>>
>>> $SUBJECT should be:
>>>
>>> arm64: topology: rename cluster_id
>>
[cut]
>>
>> I was hoping someone else would comment here, but my take at this point is
>> that it doesn't really matter in a functional sense at the moment.
>> Like the chiplet discussion it can be the subject of a future patch along
>> with the patches which tweak the scheduler to understand the split.
>>
>> BTW, given that I'm OoO next week, and that the weeks following are the holidays,
>> I don't intend to repost this for a couple of weeks. I don't think there are
>> any issues with this set.
>>
>>>
>>> There is also arch/arm to take into account; again, this patch is
>>> just renaming (as it should have been named since the beginning) a
>>> topology level, but we should consider everything from a legacy
>>> perspective.
> 
> arch/arm has gone for thread/core/socket for the three topology levels
> it supports.
> 
> I'm not sure what short term value keeping cluster_id has? Isn't it just
> about where we make the package = cluster assignment? Currently it is in
> the definition of topology_physical_package_id. If we keep cluster_id
> and add package_id, it gets moved into the MPIDR/DT parsing code. 
> 
> Keeping cluster_id and introducing a topology_cluster_id function could
> help clean up some of the users of topology_physical_package_id that
> currently assume package_id == cluster_id, though.

I think we still need the information describing which cores are in one cluster.
Many arm64 chips have the core/cluster/socket architecture. Cores in one cluster may
share the same L2 cache. That information can be used to build the sched_domain. If we put
cores in one cluster in one sched_domain, the performance will be better (please see
kernel/sched/topology.c:1197, where cpu_coregroup_mask() uses 'core_sibling' to build a multi-core
sched_domain).
So I think we still need a variable to record which cores are in one sched_domain for future use.
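
For reference, the arm64 side of that hook is roughly the following
(simplified from arch/arm64/kernel/topology.c):

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	/* today this spans the cluster, and it feeds the MC sched_domain */
	return &cpu_topology[cpu].core_sibling;
}

so core_sibling is what actually determines the MC sched_domain span,
whatever the cluster field ends up being called.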

Thanks,
Xiongfeng

> 
> Morten
> 
> .
>
Morten Rasmussen Jan. 2, 2018, 11:30 a.m. UTC | #7
On Tue, Jan 02, 2018 at 10:29:35AM +0800, Xiongfeng Wang wrote:
> Hi,
> 
> On 2017/12/18 20:42, Morten Rasmussen wrote:
> > On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
> >> Hi,
> >>
> >> On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
> >>> [+Morten, Dietmar]
> >>>
> >>> $SUBJECT should be:
> >>>
> >>> arm64: topology: rename cluster_id
> >>
> [cut]
> >>
> >> I was hoping someone else would comment here, but my take at this point is
> >> that it doesn't really matter in a functional sense at the moment.
> >> Like the chiplet discussion it can be the subject of a future patch along
> >> with the patches which tweak the scheduler to understand the split.
> >>
> >> BTW, given that I'm OoO next week, and that the weeks following are the holidays,
> >> I don't intend to repost this for a couple of weeks. I don't think there are
> >> any issues with this set.
> >>
> >>>
> >>> There is also arch/arm to take into account; again, this patch is
> >>> just renaming (as it should have been named since the beginning) a
> >>> topology level, but we should consider everything from a legacy
> >>> perspective.
> > 
> > arch/arm has gone for thread/core/socket for the three topology levels
> > it supports.
> > 
> > I'm not sure what short term value keeping cluster_id has? Isn't it just
> > about where we make the package = cluster assignment? Currently it is in
> > the definition of topology_physical_package_id. If we keep cluster_id
> > and add package_id, it gets moved into the MPIDR/DT parsing code. 
> > 
> > Keeping cluster_id and introducing a topology_cluster_id function could
> > help clean up some of the users of topology_physical_package_id that
> > currently assume package_id == cluster_id, though.
> 
> I think we still need the information describing which cores are in one cluster.
> Many arm64 chips have the core/cluster/socket architecture. Cores in one cluster may
> share the same L2 cache. That information can be used to build the sched_domain. If we put
> cores in one cluster in one sched_domain, the performance will be better (please see
> kernel/sched/topology.c:1197, where cpu_coregroup_mask() uses 'core_sibling' to build a multi-core
> sched_domain).
> So I think we still need a variable to record which cores are in one sched_domain for future use.

I agree that information about clusters is useful. The problem is that
we currently treat clusters as sockets (also known as dies or
physical_package_id depending on which bit of code you are looking at). We
don't handle the core/cluster/socket topology you mention as three
'levels' of grouping of cores. We currently create one sched_domain
level for cores (in a cluster) and a parent sched_domain level of all
clusters in the system, ignoring whether those are in different sockets,
and tell the scheduler that those clusters are all separate sockets. This
doesn't work very well for systems with many clusters and/or multiple
sockets.
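
(For context, the default levels come from default_topology[] in
kernel/sched/topology.c, roughly:

static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

so the MC span comes straight from cpu_coregroup_mask(), i.e. core_sibling
on arm64, and everything above that is presented to the scheduler as
separate packages.)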

How to solve that problem is a longer discussion which I'm happy to
have. This patch is just preparing to get rid of the assumption that
clusters are sockets, as this is not in line with what other
architectures do or with the assumptions made by the scheduler.

Morten
Sudeep Holla Jan. 3, 2018, 2:29 p.m. UTC | #8
On 02/01/18 02:29, Xiongfeng Wang wrote:
> Hi,
> 
> On 2017/12/18 20:42, Morten Rasmussen wrote:
>> On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
>>> Hi,
>>>
>>> On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
>>>> [+Morten, Dietmar]
>>>>
>>>> $SUBJECT should be:
>>>>
>>>> arm64: topology: rename cluster_id
>>>
> [cut]
>>>
> I think we still need the information describing which cores are in one
> cluster. Many arm64 chips have the core/cluster/socket architecture. Cores
> in one cluster may share the same L2 cache. That information can be used to
> build the sched_domain. If we put cores in one cluster in one sched_domain,
> the performance will be better (please see kernel/sched/topology.c:1197, where
> cpu_coregroup_mask() uses 'core_sibling' to build a multi-core
> sched_domain).

We get all the cache information from DT/ACPI PPTT (mainly topology) and now
even the geometry. So ideally, the sharing information must come from that.
Any other solution might end up in conflict if DT/PPTT and that solution mismatch.

> So I think we still need a variable to record which cores are in one
> sched_domain for future use.

I tend to say no, at least not as is.
Jeremy Linton Jan. 3, 2018, 5:32 p.m. UTC | #9
Hi,

On 01/03/2018 08:29 AM, Sudeep Holla wrote:
> 
> On 02/01/18 02:29, Xiongfeng Wang wrote:
>> Hi,
>>
>> On 2017/12/18 20:42, Morten Rasmussen wrote:
>>> On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
>>>> Hi,
>>>>
>>>> On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
>>>>> [+Morten, Dietmar]
>>>>>
>>>>> $SUBJECT should be:
>>>>>
>>>>> arm64: topology: rename cluster_id
>>>>
>> [cut]
>>>>
>> I think we still need the information describing which cores are in one
> >>cluster. Many arm64 chips have the core/cluster/socket architecture. Cores
> >>in one cluster may share the same L2 cache. That information can be used to
> >>build the sched_domain. If we put cores in one cluster in one sched_domain,
> >>the performance will be better (please see kernel/sched/topology.c:1197, where
>> cpu_coregroup_mask() uses 'core_sibling' to build a multi-core
>> sched_domain).
> 
> >We get all the cache information from DT/ACPI PPTT (mainly topology) and now
> >even the geometry. So ideally, the sharing information must come from that.
> >Any other solution might end up in conflict if DT/PPTT and that solution mismatch.
> 
> >>So I think we still need a variable to record which cores are in one
>> sched_domain for future use.
> 
> >I tend to say no, at least not as is.
> 

Well, either way, with DynamiQ (and a55/a75) the cores have private 
L2s, which means that the cluster sharing is happening at what is then 
the L3 level. So, the code I had in earlier versions would have needed 
tweaks to deal with that anyway.

IMHO, if we want to detect this kind of sharing for future scheduling 
domains, it should probably be done independently of PPTT/DT/MPIDR by 
picking out shared cache levels from struct cacheinfo *, which makes 
that change unrelated to the basic population of cacheinfo and 
cpu_topology in this patchset.
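
Something like the following untested sketch (name hypothetical) is the
kind of thing I mean, using only the existing struct cacheinfo fields:

static const struct cpumask *llc_sibling_mask(unsigned int cpu)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	const struct cpumask *mask = cpumask_of(cpu);
	unsigned int i;

	/* pick the widest sharing mask among this CPU's cache leaves */
	for (i = 0; i < ci->num_leaves; i++) {
		struct cacheinfo *leaf = ci->info_list + i;

		if (cpumask_weight(&leaf->shared_cpu_map) > cpumask_weight(mask))
			mask = &leaf->shared_cpu_map;
	}

	return mask;
}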
Sudeep Holla Jan. 3, 2018, 5:43 p.m. UTC | #10
On Wed, Jan 03, 2018 at 11:32:00AM -0600, Jeremy Linton wrote:
> Hi,
> 
> On 01/03/2018 08:29 AM, Sudeep Holla wrote:
> >
> >On 02/01/18 02:29, Xiongfeng Wang wrote:
> >>Hi,
> >>
> >>On 2017/12/18 20:42, Morten Rasmussen wrote:
> >>>On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
> >>>>Hi,
> >>>>
> >>>>On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
> >>>>>[+Morten, Dietmar]
> >>>>>
> >>>>>$SUBJECT should be:
> >>>>>
> >>>>>arm64: topology: rename cluster_id
> >>>>
> >>[cut]
> >>>>
> >>I think we still need the information describing which cores are in one
> >>cluster. Many arm64 chips have the core/cluster/socket architecture. Cores
> >>in one cluster may share the same L2 cache. That information can be used to
> >>build the sched_domain. If we put cores in one cluster in one sched_domain,
> >>the performance will be better (please see kernel/sched/topology.c:1197, where
> >>cpu_coregroup_mask() uses 'core_sibling' to build a multi-core
> >>sched_domain).
> >
> >We get all the cache information from DT/ACPI PPTT (mainly topology) and now
> >even the geometry. So ideally, the sharing information must come from that.
> >Any other solution might end up in conflict if DT/PPTT and that solution mismatch.
> >
> >>So I think we still need a variable to record which cores are in one
> >>sched_domain for future use.
> >
> >I tend to say no, at least not as is.
> >
> 
> Well, either way, with DynamiQ (and a55/a75) the cores have private L2s,
> which means that the cluster sharing is happening at what is then the L3
> level. So, the code I had in earlier versions would have needed tweaks to
> deal with that anyway.
>

Indeed.

> IMHO, if we want to detect this kind of sharing for future scheduling
> domains, it should probably be done independently of PPTT/DT/MPIDR by picking
> out shared cache levels from struct cacheinfo *, which makes that change
> unrelated to the basic population of cacheinfo and cpu_topology in this
> patchset.
>

Sure, that's what I meant above, i.e. we need to depend on firmware (DT/ACPI)
rather than an architected way (which doesn't exist anyway). Since cacheinfo
abstracts DT/ACPI, it sounds right to use it to fetch any information
on cache topology.

--
Regards,
Sudeep
Xiongfeng Wang Jan. 4, 2018, 3:59 a.m. UTC | #11
On 2018/1/4 1:32, Jeremy Linton wrote:
> Hi,
> 
> On 01/03/2018 08:29 AM, Sudeep Holla wrote:
>>
>> On 02/01/18 02:29, Xiongfeng Wang wrote:
>>> Hi,
>>>
>>> On 2017/12/18 20:42, Morten Rasmussen wrote:
>>>> On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
>>>>> Hi,
>>>>>
>>>>> On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
>>>>>> [+Morten, Dietmar]
>>>>>>
>>>>>> $SUBJECT should be:
>>>>>>
>>>>>> arm64: topology: rename cluster_id
>>>>>
>>> [cut]
>>>>>
>>> I think we still need the information describing which cores are in one
>>> cluster. Many arm64 chips have the core/cluster/socket architecture. Cores
>>> in one cluster may share the same L2 cache. That information can be used to
>>> build the sched_domain. If we put cores in one cluster in one sched_domain,
>>> the performance will be better (please see kernel/sched/topology.c:1197, where
>>> cpu_coregroup_mask() uses 'core_sibling' to build a multi-core
>>> sched_domain).
>>
>> We get all the cache information from DT/ACPI PPTT (mainly topology) and now
>> even the geometry. So ideally, the sharing information must come from that.
>> Any other solution might end up in conflict if DT/PPTT and that solution mismatch.
>>
>>> So I think we still need a variable to record which cores are in one
>>> sched_domain for future use.
>>
>> I tend to say no, at least not as is.
>>
> 
> Well, either way, with DynamiQ (and a55/a75) the cores have private L2s, which means that the cluster sharing is happening at what is then the L3 level. So, the code I had in earlier versions would have needed tweaks to deal with that anyway.
> 
> IMHO, if we want to detect this kind of sharing for future scheduling domains, it should probably be done independently of PPTT/DT/MPIDR by picking out shared cache levels from struct cacheinfo *, which makes that change unrelated to the basic population of cacheinfo and cpu_topology in this patchset.
> 
I think we need to build scheduling domains not only on the cache-sharing information,
but also on other information, such as which cores use the same cache-coherent interconnect
(I don't know the details, I am just guessing).

I think PPTT is used to report the core topology, i.e. which cores are more related to each other.
They may share the same cache, use the same CCI, or be physically near to each other.
I think we should use this information to build MC (multi-core) scheduling domains.

Or maybe we can just discard the MC scheduling domain and hand the scheduling-domain-building
task over to the NUMA subsystem entirely; I don't know if that is proper.

Thanks,
Xiongfeng

> 
> .
>
Xiongfeng Wang Jan. 4, 2018, 4:14 a.m. UTC | #12
On 2018/1/4 1:32, Jeremy Linton wrote:
> Hi,
> 
> On 01/03/2018 08:29 AM, Sudeep Holla wrote:
>>
>> On 02/01/18 02:29, Xiongfeng Wang wrote:
>>> Hi,
>>>
>>> On 2017/12/18 20:42, Morten Rasmussen wrote:
>>>> On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
>>>>> Hi,
>>>>>
>>>>> On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
>>>>>> [+Morten, Dietmar]
>>>>>>
>>>>>> $SUBJECT should be:
>>>>>>
>>>>>> arm64: topology: rename cluster_id
>>>>>
>>> [cut]
>>>>>
>>> I think we still need the information describing which cores are in one
>>> cluster. Many arm64 chips have the core/cluster/socket architecture. Cores
>>> in one cluster may share the same L2 cache. That information can be used to
>>> build the sched_domain. If we put cores in one cluster in one sched_domain,
>>> the performance will be better (please see kernel/sched/topology.c:1197, where
>>> cpu_coregroup_mask() uses 'core_sibling' to build a multi-core
>>> sched_domain).
>>
>> We get all the cache information from DT/ACPI PPTT (mainly topology) and now
>> even the geometry. So ideally, the sharing information must come from that.
>> Any other solution might end up in conflict if DT/PPTT and that solution mismatch.

Sorry, I didn't express myself clearly; there may be some misunderstanding above.
I mean that PPTT reports the core topology: one level of the topology tree may be the cores in one cluster,
another level the cores in one package.
We not only need a variable in 'struct cpu_topology' to record which cores are in one package,
but also a variable to record which cores are in one cluster.
>>
>>> So I think we still need a variable to record which cores are in one
>>> sched_domain for future use.
>>
>> I tend to say no, at least not as is.
>>
> 
> Well, either way, with DynamiQ (and a55/a75) the cores have private L2s, which means that the cluster sharing is happening at what is then the L3 level. So, the code I had in earlier versions would have needed tweaks to deal with that anyway.
> 
> IMHO, if we want to detect this kind of sharing for future scheduling domains, it should probably be done independently of PPTT/DT/MPIDR by picking out shared cache levels from struct cacheinfo *, which makes that change unrelated to the basic population of cacheinfo and cpu_topology in this patchset.
> 
> 
> .
>
Jeremy Linton Jan. 4, 2018, 6 p.m. UTC | #13
Hi,

On 01/03/2018 09:59 PM, Xiongfeng Wang wrote:
> 
> 
> On 2018/1/4 1:32, Jeremy Linton wrote:
>> Hi,
>>
>> On 01/03/2018 08:29 AM, Sudeep Holla wrote:
>>>
>>> On 02/01/18 02:29, Xiongfeng Wang wrote:
>>>> Hi,
>>>>
>>>> On 2017/12/18 20:42, Morten Rasmussen wrote:
>>>>> On Fri, Dec 15, 2017 at 10:36:35AM -0600, Jeremy Linton wrote:
>>>>>> Hi,
>>>>>>
>>>>>> On 12/13/2017 12:02 PM, Lorenzo Pieralisi wrote:
>>>>>>> [+Morten, Dietmar]
>>>>>>>
>>>>>>> $SUBJECT should be:
>>>>>>>
>>>>>>> arm64: topology: rename cluster_id
>>>>>>
>>>> [cut]
>>>>>>
>>>> I think we still need the information describing which cores are in one
>>>> cluster. Many arm64 chips have the core/cluster/socket architecture. Cores
>>>> in one cluster may share the same L2 cache. That information can be used to
>>>> build the sched_domain. If we put cores in one cluster in one sched_domain,
>>>> the performance will be better (please see kernel/sched/topology.c:1197, where
>>>> cpu_coregroup_mask() uses 'core_sibling' to build a multi-core
>>>> sched_domain).
>>>
>>> We get all the cache information from DT/ACPI PPTT (mainly topology) and now
>>> even the geometry. So ideally, the sharing information must come from that.
>>> Any other solution might end up in conflict if DT/PPTT and that solution mismatch.
>>>
>>>> So I think we still need a variable to record which cores are in one
>>>> sched_domain for future use.
>>>
>>> I tend to say no, at least not as is.
>>>
>>
>> Well, either way, with DynamiQ (and a55/a75) the cores have private L2s, which means that the cluster sharing is happening at what is then the L3 level. So, the code I had in earlier versions would have needed tweaks to deal with that anyway.
>>
>> IMHO, if we want to detect this kind of sharing for future scheduling domains, it should probably be done independently of PPTT/DT/MPIDR by picking out shared cache levels from struct cacheinfo *, which makes that change unrelated to the basic population of cacheinfo and cpu_topology in this patchset.
>>
> I think we need to build scheduling domains not only on the cache-sharing information,
> but also on other information, such as which cores use the same cache-coherent interconnect
> (I don't know the details, I am just guessing).
> 
> I think PPTT is used to report the core topology, i.e. which cores are more related to each other.
> They may share the same cache, use the same CCI, or be physically near to each other.
> I think we should use this information to build MC (multi-core) scheduling domains.
> 
> Or maybe we can just discard the MC scheduling domain and hand the scheduling-domain-building
> task over to the NUMA subsystem entirely; I don't know if that is proper.


For the immediate future, what I would like is a way to identify where in 
the PPTT topology the NUMA domains begin (rather than assuming socket, 
which is the current plan). That allows the manufacturers of systems 
(with, say, MCM-based topologies) to dictate at which level in the 
cpu/cache topology they want to start describing the topology with the 
SLIT/SRAT tables. I think that moves us in the direction you are 
indicating while still leaving the door open for something like a 
cluster-level scheduling domain (based on cores sharing caches) or a 
split-LLC domain (also based on cores sharing caches) that happens to be 
on die...

Patch

diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index c4f2d50491eb..118136268f66 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -7,14 +7,14 @@ 
 struct cpu_topology {
 	int thread_id;
 	int core_id;
-	int cluster_id;
+	int physical_id;
 	cpumask_t thread_sibling;
 	cpumask_t core_sibling;
 };
 
 extern struct cpu_topology cpu_topology[NR_CPUS];
 
-#define topology_physical_package_id(cpu)	(cpu_topology[cpu].cluster_id)
+#define topology_physical_package_id(cpu)	(cpu_topology[cpu].physical_id)
 #define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
 #define topology_sibling_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 8d48b233e6ce..74a8a5173a35 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -51,7 +51,7 @@  static int __init get_cpu_for_node(struct device_node *node)
 	return -1;
 }
 
-static int __init parse_core(struct device_node *core, int cluster_id,
+static int __init parse_core(struct device_node *core, int physical_id,
 			     int core_id)
 {
 	char name[10];
@@ -67,7 +67,7 @@  static int __init parse_core(struct device_node *core, int cluster_id,
 			leaf = false;
 			cpu = get_cpu_for_node(t);
 			if (cpu >= 0) {
-				cpu_topology[cpu].cluster_id = cluster_id;
+				cpu_topology[cpu].physical_id = physical_id;
 				cpu_topology[cpu].core_id = core_id;
 				cpu_topology[cpu].thread_id = i;
 			} else {
@@ -89,7 +89,7 @@  static int __init parse_core(struct device_node *core, int cluster_id,
 			return -EINVAL;
 		}
 
-		cpu_topology[cpu].cluster_id = cluster_id;
+		cpu_topology[cpu].physical_id = physical_id;
 		cpu_topology[cpu].core_id = core_id;
 	} else if (leaf) {
 		pr_err("%pOF: Can't get CPU for leaf core\n", core);
@@ -105,7 +105,7 @@  static int __init parse_cluster(struct device_node *cluster, int depth)
 	bool leaf = true;
 	bool has_cores = false;
 	struct device_node *c;
-	static int cluster_id __initdata;
+	static int physical_id __initdata;
 	int core_id = 0;
 	int i, ret;
 
@@ -144,7 +144,7 @@  static int __init parse_cluster(struct device_node *cluster, int depth)
 			}
 
 			if (leaf) {
-				ret = parse_core(c, cluster_id, core_id++);
+				ret = parse_core(c, physical_id, core_id++);
 			} else {
 				pr_err("%pOF: Non-leaf cluster with core %s\n",
 				       cluster, name);
@@ -162,7 +162,7 @@  static int __init parse_cluster(struct device_node *cluster, int depth)
 		pr_warn("%pOF: empty cluster\n", cluster);
 
 	if (leaf)
-		cluster_id++;
+		physical_id++;
 
 	return 0;
 }
@@ -198,7 +198,7 @@  static int __init parse_dt_topology(void)
 	 * only mark cores described in the DT as possible.
 	 */
 	for_each_possible_cpu(cpu)
-		if (cpu_topology[cpu].cluster_id == -1)
+		if (cpu_topology[cpu].physical_id == -1)
 			ret = -EINVAL;
 
 out_map:
@@ -228,7 +228,7 @@  static void update_siblings_masks(unsigned int cpuid)
 	for_each_possible_cpu(cpu) {
 		cpu_topo = &cpu_topology[cpu];
 
-		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+		if (cpuid_topo->physical_id != cpu_topo->physical_id)
 			continue;
 
 		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
@@ -249,7 +249,7 @@  void store_cpu_topology(unsigned int cpuid)
 	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
 	u64 mpidr;
 
-	if (cpuid_topo->cluster_id != -1)
+	if (cpuid_topo->physical_id != -1)
 		goto topology_populated;
 
 	mpidr = read_cpuid_mpidr();
@@ -263,19 +263,19 @@  void store_cpu_topology(unsigned int cpuid)
 		/* Multiprocessor system : Multi-threads per core */
 		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
+		cpuid_topo->physical_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
 					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
 	} else {
 		/* Multiprocessor system : Single-thread per core */
 		cpuid_topo->thread_id  = -1;
 		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+		cpuid_topo->physical_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
 					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
 					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
 	}
 
 	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
-		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
+		 cpuid, cpuid_topo->physical_id, cpuid_topo->core_id,
 		 cpuid_topo->thread_id, mpidr);
 
 topology_populated:
@@ -291,7 +291,7 @@  static void __init reset_cpu_topology(void)
 
 		cpu_topo->thread_id = -1;
 		cpu_topo->core_id = 0;
-		cpu_topo->cluster_id = -1;
+		cpu_topo->physical_id = -1;
 
 		cpumask_clear(&cpu_topo->core_sibling);
 		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
@@ -300,6 +300,7 @@  static void __init reset_cpu_topology(void)
 	}
 }
 
+
 void __init init_cpu_topology(void)
 {
 	reset_cpu_topology();