diff mbox series

[RFC,bitmap-for-next,3/4] lib/cpumask: Introduce cpumask_next_and_wrap()

Message ID 20221006122112.663119-4-vschneid@redhat.com (mailing list archive)
State New, archived
Headers show
Series lib/cpumask, blk_mq: Fix blk_mq_hctx_next_cpu() vs cpumask_check() | expand

Commit Message

Valentin Schneider Oct. 6, 2022, 12:21 p.m. UTC
This leverages the newly-introduced CPUMASK_NEXT_WRAP() macro.

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 include/linux/cpumask.h | 22 ++++++++++++++++++++++
 lib/cpumask.c           | 23 +++++++++++++++++++++++
 2 files changed, 45 insertions(+)
diff mbox series

Patch

diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 286804bfe3b7..e0b674263e57 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -272,8 +272,30 @@  unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, boo
 
 	return cpumask_first(mask);
 }
+static inline unsigned int cpumask_next_and_wrap(int n,
+						 const struct cpumask *mask1,
+						 const struct cpumask *mask2,
+						 int start, bool wrap)
+{
+	cpumask_check(start);
+	/* n is a prior cpu */
+	cpumask_check(n + 1);
+
+	/*
+	 * Return the first available CPU when wrapping, or when starting before cpu0,
+	 * since there is only one valid option.
+	 */
+	if (wrap && n >= 0)
+		return nr_cpumask_bits;
+
+	return cpumask_first_and(mask1, mask2);
+}
 #else
 unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+unsigned int __pure cpumask_next_and_wrap(int n,
+					  const struct cpumask *mask1,
+					  const struct cpumask *mask2,
+					  int start, bool wrap);
 #endif
 
 /**
diff --git a/lib/cpumask.c b/lib/cpumask.c
index f8174fa3d752..c689348df0bf 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -44,6 +44,29 @@  unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, boo
 }
 EXPORT_SYMBOL(cpumask_next_wrap);
 
+/**
+ * cpumask_next_and_wrap - Get the next CPU in both masks (their AND), starting
+ *                         from a given position and wrapping to visit all CPUs.
+ * @n: the cpu prior to the place to search
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask1 & @mask2.
+ */
+unsigned int cpumask_next_and_wrap(int n,
+				   const struct cpumask *mask1,
+				   const struct cpumask *mask2,
+				   int start, bool wrap)
+{
+	return CPUMASK_NEXT_WRAP(cpumask_next_and(n, mask1, mask2), n, start, wrap);
+}
+EXPORT_SYMBOL(cpumask_next_and_wrap);
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**