
[RFC,53/86] sched: fixup __cond_resched_*()

Message ID 20231107215742.363031-54-ankur.a.arora@oracle.com (mailing list archive)
State New
Series Make the kernel preemptible

Commit Message

Ankur Arora Nov. 7, 2023, 9:57 p.m. UTC
Remove the call to _cond_resched(). The rescheduling happens
implicitly when we give up the lock.
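
For context (not part of the patch text), here is a minimal sketch of
__cond_resched_lock() as it would read with this change applied. The body
follows the hunks below; how 'resched' is computed
(should_resched(PREEMPT_LOCK_OFFSET)) and the lockdep_assert_held() call are
assumed from current mainline and may differ at this point in the series.

int __cond_resched_lock(spinlock_t *lock)
{
	/* Assumption: 'resched' is still derived as in mainline. */
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held(lock);	/* assumed unchanged by this series */

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);	/* a pending reschedule is taken here */
		if (!resched)
			cpu_relax();	/* pure lock contention: just let the waiter in */
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}

The old comment's concern about calling schedule() twice goes away: with the
series making the kernel preemptible, any pending reschedule is taken when the
lock is dropped, so the explicit _cond_resched() becomes redundant and
cpu_relax() is only needed in the pure lock-contention (spin_needbreak()) case.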

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 kernel/sched/core.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

Patch

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 15db5fb7acc7..e1b0759ed3ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8595,12 +8595,8 @@  EXPORT_SYMBOL(_cond_resched);
 #endif
 
 /*
- * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
- * call schedule, and on return reacquire the lock.
- *
- * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
- * operations here to prevent schedule() from being called twice (once via
- * spin_unlock(), once by hand).
+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock
+ * (implicitly calling schedule), and reacquire the lock.
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
@@ -8611,7 +8607,7 @@  int __cond_resched_lock(spinlock_t *lock)
 
 	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
-		if (!_cond_resched())
+		if (!resched)
 			cpu_relax();
 		ret = 1;
 		spin_lock(lock);
@@ -8629,7 +8625,7 @@  int __cond_resched_rwlock_read(rwlock_t *lock)
 
 	if (rwlock_needbreak(lock) || resched) {
 		read_unlock(lock);
-		if (!_cond_resched())
+		if (!resched)
 			cpu_relax();
 		ret = 1;
 		read_lock(lock);
@@ -8647,7 +8643,7 @@  int __cond_resched_rwlock_write(rwlock_t *lock)
 
 	if (rwlock_needbreak(lock) || resched) {
 		write_unlock(lock);
-		if (!_cond_resched())
+		if (!resched)
 			cpu_relax();
 		ret = 1;
 		write_lock(lock);
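
Callers are unaffected: they keep using the cond_resched_lock() /
cond_resched_rwlock_read() / cond_resched_rwlock_write() wrappers and still get
a nonzero return when the lock was dropped and reacquired. A typical caller
pattern is sketched below; 'mylist', 'mylist_lock', 'struct item', 'pos' and
'process()' are illustrative names, not from this patch.

/* assumes <linux/list.h>, <linux/spinlock.h>, <linux/sched.h> */

struct item {
	struct list_head node;
	/* ... */
};

static LIST_HEAD(mylist);		/* hypothetical list */
static DEFINE_SPINLOCK(mylist_lock);	/* hypothetical lock */

static void process(struct item *pos);	/* hypothetical per-entry work */

static void walk_items(void)
{
	struct item *pos;

	spin_lock(&mylist_lock);
	list_for_each_entry(pos, &mylist, node) {
		process(pos);
		/*
		 * Drop and retake the lock if a waiter or a reschedule is
		 * pending.  A nonzero return means the lock was released,
		 * so stop here rather than trust the iterator; the caller
		 * can revalidate and restart the walk if needed.
		 */
		if (cond_resched_lock(&mylist_lock))
			break;
	}
	spin_unlock(&mylist_lock);
}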