
[3/4] Revert "sched/topology: Introduce sched_numa_hop_mask()"

Message ID: 20230925020528.777578-4-yury.norov@gmail.com
State: Not Applicable
Series: sched: drop for_each_numa_hop_mask()

Commit Message

Yury Norov Sept. 25, 2023, 2:05 a.m. UTC
This reverts commit 9feae65845f7b16376716fe70b7d4b9bf8721848.

Now that for_each_numa_hop_mask() is reverted, revert the underlying
machinery as well.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
Signed-off-by: Yury Norov <ynorov@nvidia.com>
---
 include/linux/topology.h |  7 -------
 kernel/sched/topology.c  | 33 ---------------------------------
 2 files changed, 40 deletions(-)
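
For context, here is a minimal, illustrative sketch (not part of this patch) of
how a caller might have used sched_numa_hop_mask() under RCU before this
revert. pick_cpus_near_node() is a hypothetical name; the loop over hop levels
is roughly the pattern that the now-reverted for_each_numa_hop_mask() wrapped.

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/topology.h>

static void pick_cpus_near_node(unsigned int node)
{
	const struct cpumask *mask;
	unsigned int hops, cpu;

	/* The returned mask is only valid inside the RCU read-side section. */
	rcu_read_lock();
	for (hops = 0; ; hops++) {
		/*
		 * CPUs at most @hops hops away from @node. Each successive
		 * hop level is a superset of the previous one. An ERR_PTR
		 * (-EINVAL once hops exceeds the number of NUMA levels, or
		 * -EOPNOTSUPP on !CONFIG_NUMA) terminates the walk.
		 */
		mask = sched_numa_hop_mask(node, hops);
		if (IS_ERR(mask))
			break;

		for_each_cpu(cpu, mask)
			pr_info("node %u, <=%u hops: cpu %u\n", node, hops, cpu);
	}
	rcu_read_unlock();
}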

Comments

Keller, Jacob E Sept. 25, 2023, 10:46 p.m. UTC | #1
On 9/24/2023 7:05 PM, Yury Norov wrote:
> This reverts commit 9feae65845f7b16376716fe70b7d4b9bf8721848.
> 
> Now that for_each_numa_hop_mask() is reverted, revert underlying
> machinery.
> 
> Signed-off-by: Yury Norov <yury.norov@gmail.com>
> Signed-off-by: Yury Norov <ynorov@nvidia.com>
> ---
>  include/linux/topology.h |  7 -------
>  kernel/sched/topology.c  | 33 ---------------------------------
>  2 files changed, 40 deletions(-)
> 


Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>


Patch

diff --git a/include/linux/topology.h b/include/linux/topology.h
index 344c2362755a..72f264575698 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -247,18 +247,11 @@  static inline const struct cpumask *cpu_cpu_mask(int cpu)
 
 #ifdef CONFIG_NUMA
 int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
-extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
 #else
 static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
 {
 	return cpumask_nth(cpu, cpus);
 }
-
-static inline const struct cpumask *
-sched_numa_hop_mask(unsigned int node, unsigned int hops)
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
 #endif	/* CONFIG_NUMA */
 
 #endif /* _LINUX_TOPOLOGY_H */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 05a5bc678c08..3f1c09a9ef6d 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2143,39 +2143,6 @@  int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
-
-/**
- * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
- *                         @node
- * @node: The node to count hops from.
- * @hops: Include CPUs up to that many hops away. 0 means local node.
- *
- * Return: On success, a pointer to a cpumask of CPUs at most @hops away from
- * @node, an error value otherwise.
- *
- * Requires rcu_lock to be held. Returned cpumask is only valid within that
- * read-side section, copy it if required beyond that.
- *
- * Note that not all hops are equal in distance; see sched_init_numa() for how
- * distances and masks are handled.
- * Also note that this is a reflection of sched_domains_numa_masks, which may change
- * during the lifetime of the system (offline nodes are taken out of the masks).
- */
-const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
-{
-	struct cpumask ***masks;
-
-	if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
-		return ERR_PTR(-EINVAL);
-
-	masks = rcu_dereference(sched_domains_numa_masks);
-	if (!masks)
-		return ERR_PTR(-EBUSY);
-
-	return masks[hops][node];
-}
-EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
-
 #endif /* CONFIG_NUMA */
 
 static int __sdt_alloc(const struct cpumask *cpu_map)