
[v2,2/8] sched/topology: introduce sched_numa_find_next_cpu()

Message ID: 20230420051946.7463-3-yury.norov@gmail.com (mailing list archive)
State: Superseded
Series: sched/topology: add for_each_numa_cpu() macro

Checks

Context: netdev/tree_selection
Check: success
Description: Guessing tree name failed - patch did not apply

Commit Message

Yury Norov April 20, 2023, 5:19 a.m. UTC
The function searches for the next CPU in a given cpumask according to
the NUMA topology, so that CPUs are traversed hop by hop, in order of
increasing NUMA distance from the local node.

If the CPU is the last one in the current hop, sched_numa_find_next_cpu()
switches to the next hop and picks the first CPU there, excluding those
already traversed.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
---
 include/linux/topology.h |  7 +++++++
 kernel/sched/topology.c  | 39 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 46 insertions(+)
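
For context, a usage sketch (illustration only, not part of this patch):
walking a cpumask in order of increasing NUMA distance from a node. The
for_each_numa_cpu() macro added later in this series wraps a loop of
roughly this shape; the exact macro definition may differ, and the helper
name below is a placeholder.

#include <linux/topology.h>	/* sched_numa_find_next_cpu() */

static void walk_cpus_by_distance(const struct cpumask *mask, int node)
{
	unsigned int hop = 0;
	unsigned int cpu;

	/*
	 * sched_numa_find_next_cpu() rcu_dereference()s the per-node hop
	 * masks, so hold the RCU read lock across the walk.  @hop must be
	 * preserved between calls so the search resumes at the current
	 * distance; it is bumped internally when a hop is exhausted.
	 */
	rcu_read_lock();
	for (cpu = sched_numa_find_next_cpu(mask, 0, node, &hop);
	     cpu < nr_cpu_ids;
	     cpu = sched_numa_find_next_cpu(mask, cpu + 1, node, &hop))
		pr_info("cpu %u at hop %u from node %d\n", cpu, hop, node);
	rcu_read_unlock();
}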

Comments

Valentin Schneider April 25, 2023, 9:54 a.m. UTC | #1
On 19/04/23 22:19, Yury Norov wrote:
> +/*
> + * sched_numa_find_next_cpu() - given the NUMA topology, find the next cpu
> + * cpumask: cpumask to find a cpu from
> + * cpu: current cpu
> + * node: local node
> + * hop: (in/out) indicates distance order of current CPU to a local node
> + *
> + * The function searches for next cpu at a given NUMA distance, indicated
> + * by hop, and if nothing found, tries to find CPUs at a greater distance,
> + * starting from the beginning.
> + *
> + * Return: cpu, or >= nr_cpu_ids when nothing found.
> + */
> +int sched_numa_find_next_cpu(const struct cpumask *cpus, int cpu, int node, unsigned int *hop)
> +{
> +	unsigned long *cur, *prev;
> +	struct cpumask ***masks;
> +	unsigned int ret;
> +
> +	if (*hop >= sched_domains_numa_levels)
> +		return nr_cpu_ids;
> +
> +	masks = rcu_dereference(sched_domains_numa_masks);
> +	cur = cpumask_bits(masks[*hop][node]);
> +	if (*hop == 0)
> +		ret = find_next_and_bit(cpumask_bits(cpus), cur, nr_cpu_ids, cpu);
> +	else {
> +		prev = cpumask_bits(masks[*hop - 1][node]);
> +		ret = find_next_and_andnot_bit(cpumask_bits(cpus), cur, prev, nr_cpu_ids, cpu);
> +	}
> +
> +	if (ret < nr_cpu_ids)
> +		return ret;
> +
> +	*hop += 1;
> +	return sched_numa_find_next_cpu(cpus, 0, node, hop);

sched_domains_numa_levels is a fairly small number, so the recursion depth
isn't something we really need to worry about - still, the iterative
variant of this is fairly straightforward to get to:

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index e850f16c003ae..4c9a9e48fef6d 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2151,23 +2151,27 @@ int sched_numa_find_next_cpu(const struct cpumask *cpus, int cpu, int node, unsi
 	struct cpumask ***masks;
 	unsigned int ret;
 
-	if (*hop >= sched_domains_numa_levels)
-		return nr_cpu_ids;
+	/*
+	 * Reset @cpu to 0 when increasing @hop, since CPU numbering has no
+	 * relationship with NUMA distance: a search at @hop+1 may yield CPUs
+	 * of lower ID than previously seen!
+	 */
+	for (; *hop < sched_domains_numa_levels; *hop += 1, cpu = 0) {
+		masks = rcu_dereference(sched_domains_numa_masks);
+		cur = cpumask_bits(masks[*hop][node]);
+
+		if (*hop == 0) {
+			ret = find_next_and_bit(cpumask_bits(cpus), cur, nr_cpu_ids, cpu);
+		} else {
+			prev = cpumask_bits(masks[*hop - 1][node]);
+			ret = find_next_and_andnot_bit(cpumask_bits(cpus), cur, prev, nr_cpu_ids, cpu);
+		}
 
-	masks = rcu_dereference(sched_domains_numa_masks);
-	cur = cpumask_bits(masks[*hop][node]);
-	if (*hop == 0)
-		ret = find_next_and_bit(cpumask_bits(cpus), cur, nr_cpu_ids, cpu);
-	else {
-		prev = cpumask_bits(masks[*hop - 1][node]);
-		ret = find_next_and_andnot_bit(cpumask_bits(cpus), cur, prev, nr_cpu_ids, cpu);
+		if (ret < nr_cpu_ids)
+			return ret;
 	}
 
-	if (ret < nr_cpu_ids)
-		return ret;
-
-	*hop += 1;
-	return sched_numa_find_next_cpu(cpus, 0, node, hop);
+	return nr_cpu_ids;
 }
 EXPORT_SYMBOL_GPL(sched_numa_find_next_cpu);
Yury Norov April 26, 2023, 5:26 a.m. UTC | #2
On Tue, Apr 25, 2023 at 10:54:56AM +0100, Valentin Schneider wrote:
> On 19/04/23 22:19, Yury Norov wrote:
> > +/*
> > + * sched_numa_find_next_cpu() - given the NUMA topology, find the next cpu
> > + * cpumask: cpumask to find a cpu from
> > + * cpu: current cpu
> > + * node: local node
> > + * hop: (in/out) indicates distance order of current CPU to a local node
> > + *
> > + * The function searches for next cpu at a given NUMA distance, indicated
> > + * by hop, and if nothing found, tries to find CPUs at a greater distance,
> > + * starting from the beginning.
> > + *
> > + * Return: cpu, or >= nr_cpu_ids when nothing found.
> > + */
> > +int sched_numa_find_next_cpu(const struct cpumask *cpus, int cpu, int node, unsigned int *hop)
> > +{
> > +	unsigned long *cur, *prev;
> > +	struct cpumask ***masks;
> > +	unsigned int ret;
> > +
> > +	if (*hop >= sched_domains_numa_levels)
> > +		return nr_cpu_ids;
> > +
> > +	masks = rcu_dereference(sched_domains_numa_masks);
> > +	cur = cpumask_bits(masks[*hop][node]);
> > +	if (*hop == 0)
> > +		ret = find_next_and_bit(cpumask_bits(cpus), cur, nr_cpu_ids, cpu);
> > +	else {
> > +		prev = cpumask_bits(masks[*hop - 1][node]);
> > +		ret = find_next_and_andnot_bit(cpumask_bits(cpus), cur, prev, nr_cpu_ids, cpu);
> > +	}
> > +
> > +	if (ret < nr_cpu_ids)
> > +		return ret;
> > +
> > +	*hop += 1;
> > +	return sched_numa_find_next_cpu(cpus, 0, node, hop);
> 
> sched_domains_numa_levels is a fairly small number, so the recursion depth
> isn't something we really need to worry about - still, the iterative
> variant of this is fairly straightforward to get to:

This is tail recursion; the compiler normally converts it into a loop just
as well. At least, my GCC does.
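
(The recursive call above is in tail position; GCC's -foptimize-sibling-calls,
enabled at -O2 and above, compiles such calls as a jump rather than a real
call. A minimal illustration of the same shape, not kernel code:)

/*
 * Self-call in tail position, analogous to the return above: with tail-call
 * optimization the compiler emits a loop, so the stack does not grow with
 * the recursion depth.
 */
static unsigned int bump_until(unsigned int level, unsigned int max)
{
	if (level >= max)
		return max;
	return bump_until(level + 1, max);	/* tail call, compiled as a loop */
}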
Valentin Schneider April 26, 2023, 9:17 a.m. UTC | #3
On 25/04/23 22:26, Yury Norov wrote:
> On Tue, Apr 25, 2023 at 10:54:56AM +0100, Valentin Schneider wrote:
>> On 19/04/23 22:19, Yury Norov wrote:
>> > +/*
>> > + * sched_numa_find_next_cpu() - given the NUMA topology, find the next cpu
>> > + * cpumask: cpumask to find a cpu from
>> > + * cpu: current cpu
>> > + * node: local node
>> > + * hop: (in/out) indicates distance order of current CPU to a local node
>> > + *
>> > + * The function searches for next cpu at a given NUMA distance, indicated
>> > + * by hop, and if nothing found, tries to find CPUs at a greater distance,
>> > + * starting from the beginning.
>> > + *
>> > + * Return: cpu, or >= nr_cpu_ids when nothing found.
>> > + */
>> > +int sched_numa_find_next_cpu(const struct cpumask *cpus, int cpu, int node, unsigned int *hop)
>> > +{
>> > +	unsigned long *cur, *prev;
>> > +	struct cpumask ***masks;
>> > +	unsigned int ret;
>> > +
>> > +	if (*hop >= sched_domains_numa_levels)
>> > +		return nr_cpu_ids;
>> > +
>> > +	masks = rcu_dereference(sched_domains_numa_masks);
>> > +	cur = cpumask_bits(masks[*hop][node]);
>> > +	if (*hop == 0)
>> > +		ret = find_next_and_bit(cpumask_bits(cpus), cur, nr_cpu_ids, cpu);
>> > +	else {
>> > +		prev = cpumask_bits(masks[*hop - 1][node]);
>> > +		ret = find_next_and_andnot_bit(cpumask_bits(cpus), cur, prev, nr_cpu_ids, cpu);
>> > +	}
>> > +
>> > +	if (ret < nr_cpu_ids)
>> > +		return ret;
>> > +
>> > +	*hop += 1;
>> > +	return sched_numa_find_next_cpu(cpus, 0, node, hop);
>> 
>> sched_domains_numa_levels is a fairly small number, so the recursion depth
>> isn't something we really need to worry about - still, the iterative
>> variant of this is fairly straightforward to get to:
>
> This is tail recursion; the compiler normally converts it into a loop just
> as well. At least, my GCC does.

I'd hope so in 2023! I still prefer the iterative approach as I find it
more readable, but I'm not /too/ strongly attached to it.

Patch

diff --git a/include/linux/topology.h b/include/linux/topology.h
index fea32377f7c7..13209095d6e2 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -247,6 +247,7 @@  static inline const struct cpumask *cpu_cpu_mask(int cpu)
 
 #ifdef CONFIG_NUMA
 int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
+int sched_numa_find_next_cpu(const struct cpumask *cpus, int cpu, int node, unsigned int *hop);
 extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
 #else
 static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
@@ -254,6 +255,12 @@  static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, i
 	return cpumask_nth(cpu, cpus);
 }
 
+static __always_inline
+int sched_numa_find_next_cpu(const struct cpumask *cpus, int cpu, int node, unsigned int *hop)
+{
+	return find_next_bit(cpumask_bits(cpus), small_cpumask_bits, cpu);
+}
+
 static inline const struct cpumask *
 sched_numa_hop_mask(unsigned int node, unsigned int hops)
 {
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 051aaf65c749..fc163e4181e6 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2130,6 +2130,45 @@  int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
 }
 EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
 
+/*
+ * sched_numa_find_next_cpu() - given the NUMA topology, find the next cpu
+ * cpumask: cpumask to find a cpu from
+ * cpu: current cpu
+ * node: local node
+ * hop: (in/out) indicates distance order of current CPU to a local node
+ *
+ * The function searches for next cpu at a given NUMA distance, indicated
+ * by hop, and if nothing found, tries to find CPUs at a greater distance,
+ * starting from the beginning.
+ *
+ * Return: cpu, or >= nr_cpu_ids when nothing found.
+ */
+int sched_numa_find_next_cpu(const struct cpumask *cpus, int cpu, int node, unsigned int *hop)
+{
+	unsigned long *cur, *prev;
+	struct cpumask ***masks;
+	unsigned int ret;
+
+	if (*hop >= sched_domains_numa_levels)
+		return nr_cpu_ids;
+
+	masks = rcu_dereference(sched_domains_numa_masks);
+	cur = cpumask_bits(masks[*hop][node]);
+	if (*hop == 0)
+		ret = find_next_and_bit(cpumask_bits(cpus), cur, nr_cpu_ids, cpu);
+	else {
+		prev = cpumask_bits(masks[*hop - 1][node]);
+		ret = find_next_and_andnot_bit(cpumask_bits(cpus), cur, prev, nr_cpu_ids, cpu);
+	}
+
+	if (ret < nr_cpu_ids)
+		return ret;
+
+	*hop += 1;
+	return sched_numa_find_next_cpu(cpus, 0, node, hop);
+}
+EXPORT_SYMBOL_GPL(sched_numa_find_next_cpu);
+
 /**
  * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
  *                         @node