
[16/50] sched.h: Move (spin|rwlock)_needbreak() to spinlock.h

Message ID 20231216032651.3553101-6-kent.overstreet@linux.dev (mailing list archive)
State: New
Series: big header dependency cleanup targeting sched.h

Commit Message

Kent Overstreet Dec. 16, 2023, 3:26 a.m. UTC
This lets us kill the dependency on spinlock.h.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
---
 include/linux/sched.h    | 31 -------------------------------
 include/linux/spinlock.h | 31 +++++++++++++++++++++++++++++++
 2 files changed, 31 insertions(+), 31 deletions(-)

Comments

Leonardo Bras Jan. 15, 2024, 8:31 p.m. UTC | #1
On Fri, Dec 15, 2023 at 10:26:15PM -0500, Kent Overstreet wrote:
> This lets us kill the dependency on spinlock.h.
> 
> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
> ---
>  include/linux/sched.h    | 31 -------------------------------
>  include/linux/spinlock.h | 31 +++++++++++++++++++++++++++++++
>  2 files changed, 31 insertions(+), 31 deletions(-)
> 
> [ full diff trimmed; the same diff is reproduced under "Patch" below ]

Hello Kent,

This patch is breaking PREEMPT_RT builds, but it can be easily fixed.

I sent a patch with the fix; please take a look:
https://lore.kernel.org/all/20240115201935.2326400-1-leobras@redhat.com/

Thanks!
Leo
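
Context on the breakage: in the spinlock.h hunk below, both helpers are added just before the "#else  /* !CONFIG_PREEMPT_RT */" line, i.e. inside the non-RT branch of the header, so a CONFIG_PREEMPT_RT=y build is left with no definition of spin_needbreak() or rwlock_needbreak(). The linked patch is the authoritative fix; purely as a sketch of the idea, and assuming spin_is_contended()/rwlock_is_contended() are also provided by the RT headers, the helpers could be defined after the RT/non-RT split at the bottom of spinlock.h instead of inside it:

#else  /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Defined below the CONFIG_PREEMPT_RT split so that both lock
 * implementations see the helpers.
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

/* rwlock_needbreak() would move the same way. */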

Patch

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5a5b7b122682..7501a3451a20 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2227,37 +2227,6 @@ static inline bool preempt_model_preemptible(void)
 	return preempt_model_full() || preempt_model_rt();
 }
 
-/*
- * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
- * but a general need for low latency)
- */
-static inline int spin_needbreak(spinlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
-	return spin_is_contended(lock);
-#else
-	return 0;
-#endif
-}
-
-/*
- * Check if a rwlock is contended.
- * Returns non-zero if there is another task waiting on the rwlock.
- * Returns zero if the lock is not contended or the system / underlying
- * rwlock implementation does not support contention detection.
- * Technically does not depend on CONFIG_PREEMPTION, but a general need
- * for low latency.
- */
-static inline int rwlock_needbreak(rwlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
-	return rwlock_is_contended(lock);
-#else
-	return 0;
-#endif
-}
-
 static __always_inline bool need_resched(void)
 {
 	return unlikely(tif_need_resched());
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 31d3d747a9db..0c71f06454d9 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -449,6 +449,37 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
 	return raw_spin_is_contended(&lock->rlock);
 }
 
+/*
+ * Does a critical section need to be broken due to another
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
+ * but a general need for low latency)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+	return spin_is_contended(lock);
+#else
+	return 0;
+#endif
+}
+
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically does not depend on CONFIG_PREEMPTION, but a general need
+ * for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+	return rwlock_is_contended(lock);
+#else
+	return 0;
+#endif
+}
+
 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
 
 #else  /* !CONFIG_PREEMPT_RT */
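
For readers new to the helpers being moved: the typical caller pattern is to poll spin_needbreak() from a long-running critical section and briefly drop the lock when another task is waiting; cond_resched_lock() wraps the same check. A minimal sketch of that pattern, where struct my_ctx and process_one_item() are illustrative names rather than anything from this series:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_ctx {				/* illustrative, not from this series */
	spinlock_t lock;
	struct list_head items;
};

static void process_one_item(struct my_ctx *ctx)
{
	/* hypothetical per-item work: detach and handle one entry */
	struct list_head *item = ctx->items.next;

	list_del(item);
}

/* Drain ctx->items under ctx->lock, yielding whenever the lock is contended. */
static void process_all(struct my_ctx *ctx)
{
	spin_lock(&ctx->lock);
	while (!list_empty(&ctx->items)) {
		process_one_item(ctx);

		/*
		 * Another task is spinning on ctx->lock, or we are due
		 * for a reschedule: break the critical section so it
		 * can make progress, then retake the lock.
		 */
		if (spin_needbreak(&ctx->lock) || need_resched()) {
			spin_unlock(&ctx->lock);
			cond_resched();
			spin_lock(&ctx->lock);
		}
	}
	spin_unlock(&ctx->lock);
}

After this series, <linux/spinlock.h> alone provides spin_needbreak(); callers no longer pull it in indirectly via <linux/sched.h>.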