| Message ID | 20220816180727.387807-3-vschneid@redhat.com (mailing list archive) |
|---|---|
| State | Not Applicable |
| Series | cpumask, sched/topology: NUMA-aware CPU spreading interface |
```
On Tue, Aug 16, 2022 at 07:07:24PM +0100, Valentin Schneider wrote:
> for_each_cpu_and() is very convenient as it saves having to allocate a
> temporary cpumask to store the result of cpumask_and(). The same issue
> applies to cpumask_andnot() which doesn't actually need temporary storage
> for iteration purposes.
>
> Following what has been done for for_each_cpu_and(), introduce
> for_each_cpu_andnot().
>
> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
> ---
>  include/linux/cpumask.h | 32 ++++++++++++++++++++++++++++++++
>  lib/cpumask.c           | 19 +++++++++++++++++++
>  2 files changed, 51 insertions(+)
>
> diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
> index fe29ac7cc469..a8b2ca160e57 100644
> --- a/include/linux/cpumask.h
> +++ b/include/linux/cpumask.h
> @@ -157,6 +157,13 @@ static inline unsigned int cpumask_next_and(int n,
>  	return n+1;
>  }
>
> +static inline unsigned int cpumask_next_andnot(int n,
> +					       const struct cpumask *srcp,
> +					       const struct cpumask *andp)
> +{
> +	return n+1;
> +}
> +

It looks like the patch is not based on top of 6.0, where UP cpumask
operations were fixed. Can you please rebase?

Thanks,
Yury

> static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
> 					     int start, bool wrap)
> {
> @@ -194,6 +201,8 @@ static inline int cpumask_any_distribute(const struct cpumask *srcp)
> 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
> #define for_each_cpu_and(cpu, mask1, mask2)	\
> 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
> +#define for_each_cpu_andnot(cpu, mask1, mask2)	\
> +	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
> #else
> /**
>  * cpumask_first - get the first cpu in a cpumask
> @@ -259,6 +268,9 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
> }
>
> int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
> +int __pure cpumask_next_andnot(int n,
> +			       const struct cpumask *src1p,
> +			       const struct cpumask *src2p);
> int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
> unsigned int cpumask_local_spread(unsigned int i, int node);
> int cpumask_any_and_distribute(const struct cpumask *src1p,
> @@ -324,6 +336,26 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
> 	for ((cpu) = -1;						\
> 		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
> 		(cpu) < nr_cpu_ids;)
> +
> +/**
> + * for_each_cpu_andnot - iterate over every cpu in one mask but not in another
> + * @cpu: the (optionally unsigned) integer iterator
> + * @mask1: the first cpumask pointer
> + * @mask2: the second cpumask pointer
> + *
> + * This saves a temporary CPU mask in many places. It is equivalent to:
> + *	struct cpumask tmp;
> + *	cpumask_andnot(&tmp, &mask1, &mask2);
> + *	for_each_cpu(cpu, &tmp)
> + *		...
> + *
> + * After the loop, cpu is >= nr_cpu_ids.
> + */
> +#define for_each_cpu_andnot(cpu, mask1, mask2)				\
> +	for ((cpu) = -1;						\
> +		(cpu) = cpumask_next_andnot((cpu), (mask1), (mask2)),	\
> +		(cpu) < nr_cpu_ids;)
> +
> #endif /* SMP */
>
> #define CPU_BITS_NONE						\
> diff --git a/lib/cpumask.c b/lib/cpumask.c
> index a971a82d2f43..6896ff4a08fd 100644
> --- a/lib/cpumask.c
> +++ b/lib/cpumask.c
> @@ -42,6 +42,25 @@ int cpumask_next_and(int n, const struct cpumask *src1p,
> }
> EXPORT_SYMBOL(cpumask_next_and);
>
> +/**
> + * cpumask_next_andnot - get the next cpu in *src1p & ~*src2p
> + * @n: the cpu prior to the place to search (ie. return will be > @n)
> + * @src1p: the first cpumask pointer
> + * @src2p: the second cpumask pointer
> + *
> + * Returns >= nr_cpu_ids if no further cpus set in *src1p & ~*src2p.
> + */
> +int cpumask_next_andnot(int n, const struct cpumask *src1p,
> +			const struct cpumask *src2p)
> +{
> +	/* -1 is a legal arg here. */
> +	if (n != -1)
> +		cpumask_check(n);
> +	return find_next_andnot_bit(cpumask_bits(src1p), cpumask_bits(src2p),
> +				    nr_cpumask_bits, n + 1);
> +}
> +EXPORT_SYMBOL(cpumask_next_andnot);
> +
> /**
>  * cpumask_any_but - return a "random" in a cpumask, but not this one.
>  * @mask: the cpumask to search
> --
> 2.31.1
```
```
On 16/08/22 15:24, Yury Norov wrote:
> On Tue, Aug 16, 2022 at 07:07:24PM +0100, Valentin Schneider wrote:
>> for_each_cpu_and() is very convenient as it saves having to allocate a
>> temporary cpumask to store the result of cpumask_and(). The same issue
>> applies to cpumask_andnot() which doesn't actually need temporary storage
>> for iteration purposes.
>>
>> Following what has been done for for_each_cpu_and(), introduce
>> for_each_cpu_andnot().
>>
>> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
>> ---
>>  include/linux/cpumask.h | 32 ++++++++++++++++++++++++++++++++
>>  lib/cpumask.c           | 19 +++++++++++++++++++
>>  2 files changed, 51 insertions(+)
>>
>> diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
>> index fe29ac7cc469..a8b2ca160e57 100644
>> --- a/include/linux/cpumask.h
>> +++ b/include/linux/cpumask.h
>> @@ -157,6 +157,13 @@ static inline unsigned int cpumask_next_and(int n,
>>  	return n+1;
>>  }
>>
>> +static inline unsigned int cpumask_next_andnot(int n,
>> +					       const struct cpumask *srcp,
>> +					       const struct cpumask *andp)
>> +{
>> +	return n+1;
>> +}
>> +
>
> It looks like the patch is not based on top of 6.0, where UP cpumask
> operations were fixed. Can you please rebase?

Right, this is based on tip/sched/core, I'll rebase it. Sorry about that!
```
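To make the motivation discussed above concrete, here is a minimal sketch, not part of the patch, contrasting the temporary-mask pattern with the proposed iterator. The function names, mask names, and `pr_info()` output are hypothetical, chosen only for illustration; the cpumask APIs used are existing kernel interfaces.

```c
/* Illustrative only -- visits every CPU set in @avail but not in @busy. */

/* Without the new macro: needs temporary storage for the intermediate result. */
static void visit_cpus_with_tmp(const struct cpumask *avail,
				const struct cpumask *busy)
{
	cpumask_var_t tmp;
	unsigned int cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	cpumask_andnot(tmp, avail, busy);
	for_each_cpu(cpu, tmp)
		pr_info("CPU%u is available and not busy\n", cpu);

	free_cpumask_var(tmp);
}

/* With for_each_cpu_andnot(): same iteration, no temporary mask. */
static void visit_cpus_andnot(const struct cpumask *avail,
			      const struct cpumask *busy)
{
	unsigned int cpu;

	for_each_cpu_andnot(cpu, avail, busy)
		pr_info("CPU%u is available and not busy\n", cpu);
}
```

The `cpumask_var_t` alloc/free dance in the first variant is precisely the overhead the macro removes; on `CONFIG_CPUMASK_OFFSTACK=y` kernels it is a real heap allocation that can fail.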
for_each_cpu_and() is very convenient as it saves having to allocate a
temporary cpumask to store the result of cpumask_and(). The same issue
applies to cpumask_andnot() which doesn't actually need temporary storage
for iteration purposes.

Following what has been done for for_each_cpu_and(), introduce
for_each_cpu_andnot().

Signed-off-by: Valentin Schneider <vschneid@redhat.com>

```diff
---
 include/linux/cpumask.h | 32 ++++++++++++++++++++++++++++++++
 lib/cpumask.c           | 19 +++++++++++++++++++
 2 files changed, 51 insertions(+)

diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index fe29ac7cc469..a8b2ca160e57 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -157,6 +157,13 @@ static inline unsigned int cpumask_next_and(int n,
 	return n+1;
 }
 
+static inline unsigned int cpumask_next_andnot(int n,
+					       const struct cpumask *srcp,
+					       const struct cpumask *andp)
+{
+	return n+1;
+}
+
 static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
 					     int start, bool wrap)
 {
@@ -194,6 +201,8 @@ static inline int cpumask_any_distribute(const struct cpumask *srcp)
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
 #define for_each_cpu_and(cpu, mask1, mask2)	\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
+#define for_each_cpu_andnot(cpu, mask1, mask2)	\
+	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
 #else
 /**
  * cpumask_first - get the first cpu in a cpumask
@@ -259,6 +268,9 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 }
 
 int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
+int __pure cpumask_next_andnot(int n,
+			       const struct cpumask *src1p,
+			       const struct cpumask *src2p);
 int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 unsigned int cpumask_local_spread(unsigned int i, int node);
 int cpumask_any_and_distribute(const struct cpumask *src1p,
@@ -324,6 +336,26 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
 	for ((cpu) = -1;						\
 		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
 		(cpu) < nr_cpu_ids;)
+
+/**
+ * for_each_cpu_andnot - iterate over every cpu in one mask but not in another
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ *	struct cpumask tmp;
+ *	cpumask_andnot(&tmp, &mask1, &mask2);
+ *	for_each_cpu(cpu, &tmp)
+ *		...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_andnot(cpu, mask1, mask2)				\
+	for ((cpu) = -1;						\
+		(cpu) = cpumask_next_andnot((cpu), (mask1), (mask2)),	\
+		(cpu) < nr_cpu_ids;)
+
 #endif /* SMP */
 
 #define CPU_BITS_NONE						\
diff --git a/lib/cpumask.c b/lib/cpumask.c
index a971a82d2f43..6896ff4a08fd 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -42,6 +42,25 @@ int cpumask_next_and(int n, const struct cpumask *src1p,
 }
 EXPORT_SYMBOL(cpumask_next_and);
 
+/**
+ * cpumask_next_andnot - get the next cpu in *src1p & ~*src2p
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set in *src1p & ~*src2p.
+ */
+int cpumask_next_andnot(int n, const struct cpumask *src1p,
+			const struct cpumask *src2p)
+{
+	/* -1 is a legal arg here. */
+	if (n != -1)
+		cpumask_check(n);
+	return find_next_andnot_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+				    nr_cpumask_bits, n + 1);
+}
+EXPORT_SYMBOL(cpumask_next_andnot);
+
 /**
  * cpumask_any_but - return a "random" in a cpumask, but not this one.
  * @mask: the cpumask to search
```
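For intuition about the lookup underneath the macro, here is a self-contained userspace approximation of the search that cpumask_next_andnot() performs. It is a sketch under simplifying assumptions: the kernel's find_next_andnot_bit() walks multi-word bitmaps up to nr_cpumask_bits, while this version handles a single word with a hard-coded size, and `next_andnot_bit` is a made-up name.

```c
#include <stdio.h>

#define NBITS 16	/* stand-in for nr_cpu_ids / nr_cpumask_bits */

/*
 * Find the next set bit in (src1 & ~src2) at or above @start.
 * Single-word approximation of the kernel's find_next_andnot_bit().
 */
static unsigned int next_andnot_bit(unsigned long src1, unsigned long src2,
				    unsigned int start)
{
	unsigned int bit;

	for (bit = start; bit < NBITS; bit++)
		if ((src1 & ~src2) & (1UL << bit))
			return bit;
	return NBITS;	/* like returning >= nr_cpu_ids */
}

int main(void)
{
	unsigned long online = 0x0f;	/* CPUs 0-3 set */
	unsigned long busy   = 0x0a;	/* CPUs 1 and 3 set */
	int cpu;

	/* Mirrors the for_each_cpu_andnot() expansion: start at -1. */
	for (cpu = -1; (cpu = next_andnot_bit(online, busy, cpu + 1)) < NBITS;)
		printf("CPU%d\n", cpu);	/* prints CPU0 and CPU2 */

	return 0;
}
```

With these example masks the loop visits exactly the members of `online & ~busy`, i.e. CPUs 0 and 2, and the starting value of -1 plus the `n + 1` offset is why -1 must be a legal argument to cpumask_next_andnot().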