
[v3,02/16] cacheinfo: Add helper to access any cache index for a given CPU

Message ID 20220525081416.3306043-3-sudeep.holla@arm.com (mailing list archive)
State New, archived
Series arch_topology: Updates to add socket support and fix cluster ids

Commit Message

Sudeep Holla May 25, 2022, 8:14 a.m. UTC
The cacheinfo for a given CPU at a given index is used at quite a few
places by fetching the base point for index 0 using the helper
per_cpu_cacheinfo(cpu) and offseting it by the required index.

Instead, add another helper to fetch the required pointer directly and
use it to simplify and improve readability.

Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
---
 drivers/base/cacheinfo.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
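
For illustration (not a hunk from this patch), a call site that previously fetched the base pointer for index 0 and offset it by hand:

	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list + index;

can fetch the required leaf directly with the new helper:

	struct cacheinfo *this_leaf = per_cpu_cacheinfo_idx(cpu, index);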

Comments

Gavin Shan June 1, 2022, 2:44 a.m. UTC | #1
Hi Sudeep,

On 5/25/22 4:14 PM, Sudeep Holla wrote:
> The cacheinfo for a given CPU at a given index is used at quite a few
> places by fetching the base point for index 0 using the helper
> per_cpu_cacheinfo(cpu) and offseting it by the required index.
> 
> Instead, add another helper to fetch the required pointer directly and
> use it to simplify and improve readability.
> 
> Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
> ---
>   drivers/base/cacheinfo.c | 14 +++++++-------
>   1 file changed, 7 insertions(+), 7 deletions(-)
> 

s/offseting/offsetting

It looks good to me with below nits fixed:

Reviewed-by: Gavin Shan <gshan@redhat.com>


> diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
> index b0bde272e2ae..c4547d8ac6f3 100644
> --- a/drivers/base/cacheinfo.c
> +++ b/drivers/base/cacheinfo.c
> @@ -25,6 +25,8 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
>   #define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
>   #define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
>   #define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
> +#define per_cpu_cacheinfo_idx(cpu, idx)		\
> +				(per_cpu_cacheinfo(cpu) + (idx))
>   
>   struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
>   {
> @@ -172,7 +174,7 @@ static int cache_setup_of_node(unsigned int cpu)
>   	}
>   
>   	while (index < cache_leaves(cpu)) {
> -		this_leaf = this_cpu_ci->info_list + index;
> +		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
>   		if (this_leaf->level != 1)
>   			np = of_find_next_cache_node(np);
>   		else
> @@ -231,7 +233,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
>   	for (index = 0; index < cache_leaves(cpu); index++) {
>   		unsigned int i;
>   
> -		this_leaf = this_cpu_ci->info_list + index;
> +		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
>   		/* skip if shared_cpu_map is already populated */
>   		if (!cpumask_empty(&this_leaf->shared_cpu_map))
>   			continue;
> @@ -242,7 +244,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
>   
>   			if (i == cpu || !sib_cpu_ci->info_list)
>   				continue;/* skip if itself or no cacheinfo */
> -			sib_leaf = sib_cpu_ci->info_list + index;
> +			sib_leaf = per_cpu_cacheinfo_idx(i, index);
>   			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
>   				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
>   				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
> @@ -258,12 +260,11 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
>   
>   static void cache_shared_cpu_map_remove(unsigned int cpu)
>   {
> -	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
>   	struct cacheinfo *this_leaf, *sib_leaf;
>   	unsigned int sibling, index;
>   
>   	for (index = 0; index < cache_leaves(cpu); index++) {
> -		this_leaf = this_cpu_ci->info_list + index;
> +		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
>   		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
>   			struct cpu_cacheinfo *sib_cpu_ci;
> 

In cache_shared_cpu_map_remove(), the newly introduced macro
can be applied when the sibling CPU's cache info is fetched.

     sib_leaf = sib_cpu_ci->info_list + index;

     to

     sib_leaf = per_cpu_cacheinfo_idx(sibling, index);

   
> @@ -609,7 +610,6 @@ static int cache_add_dev(unsigned int cpu)
>   	int rc;
>   	struct device *ci_dev, *parent;
>   	struct cacheinfo *this_leaf;
> -	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
>   	const struct attribute_group **cache_groups;
>   
>   	rc = cpu_cache_sysfs_init(cpu);
> @@ -618,7 +618,7 @@ static int cache_add_dev(unsigned int cpu)
>   
>   	parent = per_cpu_cache_dev(cpu);
>   	for (i = 0; i < cache_leaves(cpu); i++) {
> -		this_leaf = this_cpu_ci->info_list + i;
> +		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
>   		if (this_leaf->disable_sysfs)
>   			continue;
>   		if (this_leaf->type == CACHE_TYPE_NOCACHE)
> 

Thanks,
Gavin
Sudeep Holla June 1, 2022, 12:45 p.m. UTC | #2
On Wed, Jun 01, 2022 at 10:44:20AM +0800, Gavin Shan wrote:
> Hi Sudeep,
> 
> On 5/25/22 4:14 PM, Sudeep Holla wrote:
> > The cacheinfo for a given CPU at a given index is used at quite a few
> > places by fetching the base point for index 0 using the helper
> > per_cpu_cacheinfo(cpu) and offseting it by the required index.
> > 
> > Instead, add another helper to fetch the required pointer directly and
> > use it to simplify and improve readability.
> > 
> > Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
> > ---
> >   drivers/base/cacheinfo.c | 14 +++++++-------
> >   1 file changed, 7 insertions(+), 7 deletions(-)
> > 
> 
> s/offseting/offsetting
> 
> It looks good to me with below nits fixed:
> 
> Reviewed-by: Gavin Shan <gshan@redhat.com>
> 
> 
> > [...]
> > @@ -258,12 +260,11 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
> >   static void cache_shared_cpu_map_remove(unsigned int cpu)
> >   {
> > -	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
> >   	struct cacheinfo *this_leaf, *sib_leaf;
> >   	unsigned int sibling, index;
> >   	for (index = 0; index < cache_leaves(cpu); index++) {
> > -		this_leaf = this_cpu_ci->info_list + index;
> > +		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
> >   		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
> >   			struct cpu_cacheinfo *sib_cpu_ci;
> > 
> 
> In cache_shared_cpu_map_remove(), the newly introduced macro
> can be applied when the sibling CPU's cache info is fetched.
> 
>     sib_leaf = sib_cpu_ci->info_list + index;
> 
>     to
> 
>     sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
> 

Right, I clearly missed this one. Thanks again for the review; all other
comments are now fixed locally and pushed at [1], and I will post them as
part of the next version.

--
Regards,
Sudeep

[1] https://git.kernel.org/sudeep.holla/h/arch_topology
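
For clarity, the additional conversion suggested above amounts to the following change in cache_shared_cpu_map_remove() (a sketch of what was fixed locally; the exact hunk in the next version may differ):

-			sib_leaf = sib_cpu_ci->info_list + index;
+			sib_leaf = per_cpu_cacheinfo_idx(sibling, index);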

Patch

diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index b0bde272e2ae..c4547d8ac6f3 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -25,6 +25,8 @@  static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
 #define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
 #define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
 #define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
+#define per_cpu_cacheinfo_idx(cpu, idx)		\
+				(per_cpu_cacheinfo(cpu) + (idx))
 
 struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
 {
@@ -172,7 +174,7 @@  static int cache_setup_of_node(unsigned int cpu)
 	}
 
 	while (index < cache_leaves(cpu)) {
-		this_leaf = this_cpu_ci->info_list + index;
+		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
 		if (this_leaf->level != 1)
 			np = of_find_next_cache_node(np);
 		else
@@ -231,7 +233,7 @@  static int cache_shared_cpu_map_setup(unsigned int cpu)
 	for (index = 0; index < cache_leaves(cpu); index++) {
 		unsigned int i;
 
-		this_leaf = this_cpu_ci->info_list + index;
+		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
 		/* skip if shared_cpu_map is already populated */
 		if (!cpumask_empty(&this_leaf->shared_cpu_map))
 			continue;
@@ -242,7 +244,7 @@  static int cache_shared_cpu_map_setup(unsigned int cpu)
 
 			if (i == cpu || !sib_cpu_ci->info_list)
 				continue;/* skip if itself or no cacheinfo */
-			sib_leaf = sib_cpu_ci->info_list + index;
+			sib_leaf = per_cpu_cacheinfo_idx(i, index);
 			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
 				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
 				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
@@ -258,12 +260,11 @@  static int cache_shared_cpu_map_setup(unsigned int cpu)
 
 static void cache_shared_cpu_map_remove(unsigned int cpu)
 {
-	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct cacheinfo *this_leaf, *sib_leaf;
 	unsigned int sibling, index;
 
 	for (index = 0; index < cache_leaves(cpu); index++) {
-		this_leaf = this_cpu_ci->info_list + index;
+		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
 		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
 			struct cpu_cacheinfo *sib_cpu_ci;
 
@@ -609,7 +610,6 @@  static int cache_add_dev(unsigned int cpu)
 	int rc;
 	struct device *ci_dev, *parent;
 	struct cacheinfo *this_leaf;
-	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	const struct attribute_group **cache_groups;
 
 	rc = cpu_cache_sysfs_init(cpu);
@@ -618,7 +618,7 @@  static int cache_add_dev(unsigned int cpu)
 
 	parent = per_cpu_cache_dev(cpu);
 	for (i = 0; i < cache_leaves(cpu); i++) {
-		this_leaf = this_cpu_ci->info_list + i;
+		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
 		if (this_leaf->disable_sysfs)
 			continue;
 		if (this_leaf->type == CACHE_TYPE_NOCACHE)
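
As a usage sketch (hypothetical, not part of the patch; these macros are local to drivers/base/cacheinfo.c, so such code would live in that file), walking every cache leaf of a CPU with the new helper looks like:

	/* Hypothetical example: print the level of every cache leaf of a CPU. */
	static void dump_cache_levels(unsigned int cpu)
	{
		unsigned int index;

		for (index = 0; index < cache_leaves(cpu); index++) {
			struct cacheinfo *leaf = per_cpu_cacheinfo_idx(cpu, index);

			pr_info("cpu%u: leaf %u is a level %u cache\n",
				cpu, index, leaf->level);
		}
	}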