
[RFCv5,16/46] sched: Allocate and initialize energy data structures

Message ID 1436293469-25707-17-git-send-email-morten.rasmussen@arm.com (mailing list archive)
State RFC

Commit Message

Morten Rasmussen July 7, 2015, 6:23 p.m. UTC
From: Dietmar Eggemann <dietmar.eggemann@arm.com>

The per-sched-group sched_group_energy structure plus the related
idle_state and capacity_state arrays are allocated like the other sched
domain (sd) hierarchy data structures. This includes freeing the
sched_group_energy structures that end up unused.
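
For reference, a minimal sketch of the data structures involved, with
field names inferred from the code in this patch (the actual definitions
are introduced earlier in this series and may differ in detail):

	struct idle_state {
		unsigned long power;	/* power drawn in this idle state */
	};

	struct capacity_state {
		unsigned long cap;	/* compute capacity at this OPP */
		unsigned long power;	/* power drawn at this OPP */
	};

	struct sched_group_energy {
		atomic_t ref;			/* for claim_allocations() */
		unsigned int nr_idle_states;
		struct idle_state *idle_states;	/* array in the same chunk */
		unsigned int nr_cap_states;
		struct capacity_state *cap_states; /* array in the same chunk */
	};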

Energy-aware scheduling allows a system to provide energy model data
only up to a certain sd level (the so-called highest energy-aware
balancing sd level). A check in init_sched_energy() enforces that all
sd's below this level contain energy model data.

One complication is that the number of elements in the idle_state and
capacity_state arrays is not fixed and has to be retrieved in
__sdt_alloc() so that the sched_group_energy structure and the two
arrays can be allocated in one chunk. The array pointers (idle_states
and cap_states) are then initialized to point to the correct places
inside that memory chunk.
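
Condensed, the allocation in __sdt_alloc() and the pointer fixup in
init_sched_energy() described above boil down to the following (taken
from the hunks below, error handling omitted; this relies on cap_states
being the last member of struct sched_group_energy, so that
&sge->cap_states + sizeof(sge->cap_states) is the end of the struct):

	/* __sdt_alloc(): struct plus both arrays in one chunk */
	sge = kzalloc_node(sizeof(struct sched_group_energy) +
			nr_idle_states*sizeof(struct idle_state) +
			nr_cap_states*sizeof(struct capacity_state),
			GFP_KERNEL, cpu_to_node(j));

	/* init_sched_energy(): idle_states[] starts right after the
	 * struct, cap_states[] follows the idle_states[] array */
	sge->idle_states = (struct idle_state *)
			   ((void *)&sge->cap_states +
			    sizeof(sge->cap_states));
	sge->cap_states = (struct capacity_state *)
			  ((void *)&sge->cap_states +
			   sizeof(sge->cap_states) +
			   sge->nr_idle_states *
			   sizeof(struct idle_state));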

The new function init_sched_energy() initializes the sched_group_energy
structure and the two arrays when the sd topology level contains energy
information.

This patch has also been tested with the scheduler feature flag
FORCE_SD_OVERLAP enabled.

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
 kernel/sched/core.c  | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 kernel/sched/sched.h | 33 +++++++++++++++++++
 2 files changed, 121 insertions(+), 1 deletion(-)

Comments

Peter Zijlstra Aug. 12, 2015, 10:04 a.m. UTC | #1
On Tue, Jul 07, 2015 at 07:23:59PM +0100, Morten Rasmussen wrote:
> +
> +	sge->nr_idle_states = fn(cpu)->nr_idle_states;
> +	sge->nr_cap_states = fn(cpu)->nr_cap_states;
> +	memcpy(sge->idle_states, fn(cpu)->idle_states,
> +	       sge->nr_idle_states*sizeof(struct idle_state));
> +	memcpy(sge->cap_states, fn(cpu)->cap_states,
> +	       sge->nr_cap_states*sizeof(struct capacity_state));

> +			if (fn && fn(j)) {
> +				nr_idle_states = fn(j)->nr_idle_states;
> +				nr_cap_states = fn(j)->nr_cap_states;
> +				BUG_ON(!nr_idle_states || !nr_cap_states);
> +			}

> +	for_each_cpu(i, &mask) {
> +		int y;
> +
> +		BUG_ON(fn(i)->nr_idle_states != fn(cpu)->nr_idle_states);
> +
> +		for (y = 0; y < (fn(i)->nr_idle_states); y++) {
> +			BUG_ON(fn(i)->idle_states[y].power !=
> +					fn(cpu)->idle_states[y].power);
> +		}
> +
> +		BUG_ON(fn(i)->nr_cap_states != fn(cpu)->nr_cap_states);
> +
> +		for (y = 0; y < (fn(i)->nr_cap_states); y++) {
> +			BUG_ON(fn(i)->cap_states[y].cap !=
> +					fn(cpu)->cap_states[y].cap);
> +			BUG_ON(fn(i)->cap_states[y].power !=
> +					fn(cpu)->cap_states[y].power);
> +		}
> +	}
> +}

Might it not make more sense to have:

	const struct blah *const blah = fn();

and use blah afterwards, instead of the repeated invocation of fn()?
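
Applied to check_sched_energy_data(), that would look roughly like the
sketch below (untested; it assumes sched_domain_energy_f returns a
pointer to const struct sched_group_energy, and the local names
sge_cpu/sge_i are made up for illustration):

	const struct sched_group_energy *sge_cpu = fn(cpu);

	for_each_cpu(i, &mask) {
		const struct sched_group_energy *sge_i = fn(i);
		int y;

		BUG_ON(sge_i->nr_idle_states != sge_cpu->nr_idle_states);

		for (y = 0; y < sge_i->nr_idle_states; y++)
			BUG_ON(sge_i->idle_states[y].power !=
			       sge_cpu->idle_states[y].power);

		BUG_ON(sge_i->nr_cap_states != sge_cpu->nr_cap_states);

		for (y = 0; y < sge_i->nr_cap_states; y++) {
			BUG_ON(sge_i->cap_states[y].cap !=
			       sge_cpu->cap_states[y].cap);
			BUG_ON(sge_i->cap_states[y].power !=
			       sge_cpu->cap_states[y].power);
		}
	}
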
Peter Zijlstra Aug. 12, 2015, 10:17 a.m. UTC | #2
On Tue, Jul 07, 2015 at 07:23:59PM +0100, Morten Rasmussen wrote:
> @@ -6647,10 +6703,24 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
>  		if (!sdd->sgc)
>  			return -ENOMEM;
>  
> +		sdd->sge = alloc_percpu(struct sched_group_energy *);
> +		if (!sdd->sge)
> +			return -ENOMEM;
> +
>  		for_each_cpu(j, cpu_map) {
>  			struct sched_domain *sd;
>  			struct sched_group *sg;
>  			struct sched_group_capacity *sgc;
> +			struct sched_group_energy *sge;
> +			sched_domain_energy_f fn = tl->energy;
> +			unsigned int nr_idle_states = 0;
> +			unsigned int nr_cap_states = 0;
> +
> +			if (fn && fn(j)) {
> +				nr_idle_states = fn(j)->nr_idle_states;
> +				nr_cap_states = fn(j)->nr_cap_states;
> +				BUG_ON(!nr_idle_states || !nr_cap_states);
> +			}
>  
>  		       	sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
>  					GFP_KERNEL, cpu_to_node(j));
> @@ -6674,6 +6744,16 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
>  				return -ENOMEM;
>  
>  			*per_cpu_ptr(sdd->sgc, j) = sgc;
> +
> +			sge = kzalloc_node(sizeof(struct sched_group_energy) +
> +				nr_idle_states*sizeof(struct idle_state) +
> +				nr_cap_states*sizeof(struct capacity_state),
> +				GFP_KERNEL, cpu_to_node(j));
> +
> +			if (!sge)
> +				return -ENOMEM;
> +
> +			*per_cpu_ptr(sdd->sge, j) = sge;
>  		}
>  	}
>  

One more question, if fn() returns a full structure, why are we
allocating and copying the thing? It's all const read only data, right?

Dietmar Eggemann Aug. 12, 2015, 5:08 p.m. UTC | #3
On 12/08/15 11:04, Peter Zijlstra wrote:
> On Tue, Jul 07, 2015 at 07:23:59PM +0100, Morten Rasmussen wrote:
>> +
>> +	sge->nr_idle_states = fn(cpu)->nr_idle_states;
>> +	sge->nr_cap_states = fn(cpu)->nr_cap_states;
>> +	memcpy(sge->idle_states, fn(cpu)->idle_states,
>> +	       sge->nr_idle_states*sizeof(struct idle_state));
>> +	memcpy(sge->cap_states, fn(cpu)->cap_states,
>> +	       sge->nr_cap_states*sizeof(struct capacity_state));
> 
>> +			if (fn && fn(j)) {
>> +				nr_idle_states = fn(j)->nr_idle_states;
>> +				nr_cap_states = fn(j)->nr_cap_states;
>> +				BUG_ON(!nr_idle_states || !nr_cap_states);
>> +			}
> 
>> +	for_each_cpu(i, &mask) {
>> +		int y;
>> +
>> +		BUG_ON(fn(i)->nr_idle_states != fn(cpu)->nr_idle_states);
>> +
>> +		for (y = 0; y < (fn(i)->nr_idle_states); y++) {
>> +			BUG_ON(fn(i)->idle_states[y].power !=
>> +					fn(cpu)->idle_states[y].power);
>> +		}
>> +
>> +		BUG_ON(fn(i)->nr_cap_states != fn(cpu)->nr_cap_states);
>> +
>> +		for (y = 0; y < (fn(i)->nr_cap_states); y++) {
>> +			BUG_ON(fn(i)->cap_states[y].cap !=
>> +					fn(cpu)->cap_states[y].cap);
>> +			BUG_ON(fn(i)->cap_states[y].power !=
>> +					fn(cpu)->cap_states[y].power);
>> +		}
>> +	}
>> +}
> 
> Might it not make more sense to have:
> 
> 	const struct blah *const blah = fn();
> 
> and use blah afterwards, instead of the repeated invocation of fn()?

Absolutely! I can change this in the next release.

Dietmar Eggemann Aug. 12, 2015, 5:09 p.m. UTC | #4
On 12/08/15 11:17, Peter Zijlstra wrote:
> On Tue, Jul 07, 2015 at 07:23:59PM +0100, Morten Rasmussen wrote:
>> @@ -6647,10 +6703,24 @@ static int __sdt_alloc(const struct cpumask *cpu_map)

[...]

>> @@ -6674,6 +6744,16 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
>>  				return -ENOMEM;
>>  
>>  			*per_cpu_ptr(sdd->sgc, j) = sgc;
>> +
>> +			sge = kzalloc_node(sizeof(struct sched_group_energy) +
>> +				nr_idle_states*sizeof(struct idle_state) +
>> +				nr_cap_states*sizeof(struct capacity_state),
>> +				GFP_KERNEL, cpu_to_node(j));
>> +
>> +			if (!sge)
>> +				return -ENOMEM;
>> +
>> +			*per_cpu_ptr(sdd->sge, j) = sge;
>>  		}
>>  	}
>>  
> 
> One more question, if fn() returns a full structure, why are we
> allocating and copying the thing? It's all const read only data, right?
> 

Yeah, that's not strictly necessary. I could get rid of all the
allocation/copying/freeing code and just simply set sd->groups->sge
= fn(cpu) in init_sched_energy(). Plus delete the atomic_t ref in struct
sched_group_energy.

In this case, should I still keep the check_sched_energy_data() function
to verify that the scheduler got valid data via the struct
sched_domain_topology_level table from the arch, i.e. to make sure that
the per-cpu provided sd energy data is consistent for all cpus within
the sched group cpumask?
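
For what it's worth, that simplification might look roughly like the
sketch below (untested: it keeps the sd->child consistency check and
check_sched_energy_data(), and assumes sg->sge can be declared as a
pointer to const so it can alias the arch-provided read-only data
directly):

	static void init_sched_energy(int cpu, struct sched_domain *sd,
				      struct sched_domain_topology_level *tl)
	{
		struct sched_group *sg = sd->groups;
		sched_domain_energy_f fn = tl->energy;
		struct cpumask *mask = sched_group_cpus(sg);

		if (fn && sd->child && !sd->child->groups->sge) {
			pr_err("BUG: EAS setup broken for CPU%d\n", cpu);
			return;
		}

		if (cpu != group_balance_cpu(sg))
			return;

		if (!fn || !fn(cpu)) {
			sg->sge = NULL;
			return;
		}

		if (cpumask_weight(mask) > 1)
			check_sched_energy_data(cpu, fn, mask);

		/* No allocation, no memcpy, no refcounting: just point
		 * at the (const) data provided by the arch. */
		sg->sge = fn(cpu);
	}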

Peter Zijlstra Aug. 12, 2015, 5:23 p.m. UTC | #5
On Wed, Aug 12, 2015 at 06:09:59PM +0100, Dietmar Eggemann wrote:
> > One more question, if fn() returns a full structure, why are we
> > allocating and copying the thing? It's all const read only data, right?
> > 
> 
> Yeah, that's not strictly necessary. I could get rid of all the
> allocation/copying/freeing code and just simply set sd->groups->sge
> = fn(cpu) in init_sched_energy(). Plus delete the atomic_t ref in struct
> sched_group_energy.
> 
> In this case, should I still keep the check_sched_energy_data() function
> to verify that the scheduler got valid data via the struct
> sched_domain_topology_level table from the arch, i.e. to make sure that
> the per-cpu provided sd energy data is consistent for all cpus within
> the sched group cpumask?

Oh yes very much. We want sanity checking of the data handed in.

Patch

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6a06fe5..e09ded5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5731,6 +5731,9 @@  static void free_sched_groups(struct sched_group *sg, int free_sgc)
 		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
 			kfree(sg->sgc);
 
+		if (free_sgc && atomic_dec_and_test(&sg->sge->ref))
+			kfree(sg->sge);
+
 		kfree(sg);
 		sg = tmp;
 	} while (sg != first);
@@ -5748,6 +5751,7 @@  static void free_sched_domain(struct rcu_head *rcu)
 		free_sched_groups(sd->groups, 1);
 	} else if (atomic_dec_and_test(&sd->groups->ref)) {
 		kfree(sd->groups->sgc);
+		kfree(sd->groups->sge);
 		kfree(sd->groups);
 	}
 	kfree(sd);
@@ -5965,6 +5969,8 @@  build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		 */
 		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
 
+		sg->sge = *per_cpu_ptr(sdd->sge, i);
+
 		/*
 		 * Make sure the first group of this domain contains the
 		 * canonical balance cpu. Otherwise the sched_domain iteration
@@ -6003,6 +6009,7 @@  static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 		*sg = *per_cpu_ptr(sdd->sg, cpu);
 		(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
 		atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
+		(*sg)->sge = *per_cpu_ptr(sdd->sge, cpu);
 	}
 
 	return cpu;
@@ -6092,6 +6099,52 @@  static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 	atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 
+static void init_sched_energy(int cpu, struct sched_domain *sd,
+			      struct sched_domain_topology_level *tl)
+{
+	struct sched_group *sg = sd->groups;
+	struct sched_group_energy *sge = sg->sge;
+	sched_domain_energy_f fn = tl->energy;
+	struct cpumask *mask = sched_group_cpus(sg);
+
+	if (fn && sd->child && !sd->child->groups->sge) {
+		pr_err("BUG: EAS setup broken for CPU%d\n", cpu);
+#ifdef CONFIG_SCHED_DEBUG
+		pr_err("     energy data on %s but not on %s domain\n",
+			sd->name, sd->child->name);
+#endif
+		return;
+	}
+
+	if (cpu != group_balance_cpu(sg))
+		return;
+
+	if (!fn || !fn(cpu)) {
+		sg->sge = NULL;
+		return;
+	}
+
+	atomic_set(&sg->sge->ref, 1); /* for claim_allocations */
+
+	if (cpumask_weight(mask) > 1)
+		check_sched_energy_data(cpu, fn, mask);
+
+	sge->nr_idle_states = fn(cpu)->nr_idle_states;
+	sge->nr_cap_states = fn(cpu)->nr_cap_states;
+	sge->idle_states = (struct idle_state *)
+			   ((void *)&sge->cap_states +
+			    sizeof(sge->cap_states));
+	sge->cap_states = (struct capacity_state *)
+			  ((void *)&sge->cap_states +
+			   sizeof(sge->cap_states) +
+			   sge->nr_idle_states *
+			   sizeof(struct idle_state));
+	memcpy(sge->idle_states, fn(cpu)->idle_states,
+	       sge->nr_idle_states*sizeof(struct idle_state));
+	memcpy(sge->cap_states, fn(cpu)->cap_states,
+	       sge->nr_cap_states*sizeof(struct capacity_state));
+}
+
 /*
  * Initializers for schedule domains
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@ -6182,6 +6235,9 @@  static void claim_allocations(int cpu, struct sched_domain *sd)
 
 	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
 		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
+
+	if (atomic_read(&(*per_cpu_ptr(sdd->sge, cpu))->ref))
+		*per_cpu_ptr(sdd->sge, cpu) = NULL;
 }
 
 #ifdef CONFIG_NUMA
@@ -6647,10 +6703,24 @@  static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sgc)
 			return -ENOMEM;
 
+		sdd->sge = alloc_percpu(struct sched_group_energy *);
+		if (!sdd->sge)
+			return -ENOMEM;
+
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
 			struct sched_group *sg;
 			struct sched_group_capacity *sgc;
+			struct sched_group_energy *sge;
+			sched_domain_energy_f fn = tl->energy;
+			unsigned int nr_idle_states = 0;
+			unsigned int nr_cap_states = 0;
+
+			if (fn && fn(j)) {
+				nr_idle_states = fn(j)->nr_idle_states;
+				nr_cap_states = fn(j)->nr_cap_states;
+				BUG_ON(!nr_idle_states || !nr_cap_states);
+			}
 
 		       	sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
@@ -6674,6 +6744,16 @@  static int __sdt_alloc(const struct cpumask *cpu_map)
 				return -ENOMEM;
 
 			*per_cpu_ptr(sdd->sgc, j) = sgc;
+
+			sge = kzalloc_node(sizeof(struct sched_group_energy) +
+				nr_idle_states*sizeof(struct idle_state) +
+				nr_cap_states*sizeof(struct capacity_state),
+				GFP_KERNEL, cpu_to_node(j));
+
+			if (!sge)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sge, j) = sge;
 		}
 	}
 
@@ -6702,6 +6782,8 @@  static void __sdt_free(const struct cpumask *cpu_map)
 				kfree(*per_cpu_ptr(sdd->sg, j));
 			if (sdd->sgc)
 				kfree(*per_cpu_ptr(sdd->sgc, j));
+			if (sdd->sge)
+				kfree(*per_cpu_ptr(sdd->sge, j));
 		}
 		free_percpu(sdd->sd);
 		sdd->sd = NULL;
@@ -6709,6 +6791,8 @@  static void __sdt_free(const struct cpumask *cpu_map)
 		sdd->sg = NULL;
 		free_percpu(sdd->sgc);
 		sdd->sgc = NULL;
+		free_percpu(sdd->sge);
+		sdd->sge = NULL;
 	}
 }
 
@@ -6794,10 +6878,13 @@  static int build_sched_domains(const struct cpumask *cpu_map,
 
 	/* Calculate CPU capacity for physical packages and nodes */
 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
+		struct sched_domain_topology_level *tl = sched_domain_topology;
+
 		if (!cpumask_test_cpu(i, cpu_map))
 			continue;
 
-		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent, tl++) {
+			init_sched_energy(i, sd, tl);
 			claim_allocations(i, sd);
 			init_sched_groups_capacity(i, sd);
 		}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7b687c6..b9d7057 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -890,6 +890,39 @@  static inline unsigned int group_first_cpu(struct sched_group *group)
 
 extern int group_balance_cpu(struct sched_group *sg);
 
+/*
+ * Check that the per-cpu provided sd energy data is consistent for all cpus
+ * within the mask.
+ */
+static inline void check_sched_energy_data(int cpu, sched_domain_energy_f fn,
+					   const struct cpumask *cpumask)
+{
+	struct cpumask mask;
+	int i;
+
+	cpumask_xor(&mask, cpumask, get_cpu_mask(cpu));
+
+	for_each_cpu(i, &mask) {
+		int y;
+
+		BUG_ON(fn(i)->nr_idle_states != fn(cpu)->nr_idle_states);
+
+		for (y = 0; y < (fn(i)->nr_idle_states); y++) {
+			BUG_ON(fn(i)->idle_states[y].power !=
+					fn(cpu)->idle_states[y].power);
+		}
+
+		BUG_ON(fn(i)->nr_cap_states != fn(cpu)->nr_cap_states);
+
+		for (y = 0; y < (fn(i)->nr_cap_states); y++) {
+			BUG_ON(fn(i)->cap_states[y].cap !=
+					fn(cpu)->cap_states[y].cap);
+			BUG_ON(fn(i)->cap_states[y].power !=
+					fn(cpu)->cap_states[y].power);
+		}
+	}
+}
+
 #else
 
 static inline void sched_ttwu_pending(void) { }