@@ -2341,15 +2341,6 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 	return 0;
 }
 
-static bool current_pending_io(void)
-{
-	struct io_uring_task *tctx = current->io_uring;
-
-	if (!tctx)
-		return false;
-	return percpu_counter_read_positive(&tctx->inflight);
-}
-
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq)
@@ -2367,19 +2358,11 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	if (unlikely(io_should_wake(iowq)))
 		return 0;
 
-	/*
-	 * Mark us as being in io_wait if we have pending requests, so cpufreq
-	 * can take into account that the task is waiting for IO - turns out
-	 * to be important for low QD IO.
-	 */
-	if (current_pending_io())
-		current->in_iowait = 1;
 	ret = 0;
 	if (iowq->timeout == KTIME_MAX)
 		schedule();
 	else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
 		ret = -ETIME;
-	current->in_iowait = 0;
 	return ret;
 }
 
Setting in_iowait was introduced in commit 8a796565cec3 ("io_uring: Use
io_schedule* in cqring wait") to tackle a perf regression caused by the
menu cpuidle governor taking iowait into account for synchronous IO and
thus not selecting deeper idle states there, unlike in the io_uring
counterpart. That behaviour is gone, so the workaround can be removed.

Signed-off-by: Christian Loehle <christian.loehle@arm.com>
---
 io_uring/io_uring.c | 17 -----------------
 1 file changed, 17 deletions(-)
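
For reviewers: the removed hunk open-coded essentially the same iowait
accounting pattern that io_schedule() in kernel/sched/core.c provides,
just made conditional on having inflight requests. A minimal sketch of
that pattern, simplified from the io_schedule_prepare() /
io_schedule_finish() helpers (the function name below is illustrative,
not a kernel API, and the plug flushing that io_schedule_prepare() also
does is omitted):

	static void iowait_schedule_sketch(void)
	{
		int token = current->in_iowait;	/* save previous iowait state */

		/* flag this wait as iowait so cpufreq/cpuidle account for it */
		current->in_iowait = 1;
		schedule();
		current->in_iowait = token;	/* restore on wakeup */
	}

Saving and restoring the flag (rather than unconditionally clearing it,
as the removed hunk did) is what lets such a helper nest safely inside
callers that may already be in iowait.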