@@ -2468,21 +2468,13 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 					  ktime_t *timeout)
 {
 	int ret;
-	unsigned long check_cq;
 
+	if (unlikely(READ_ONCE(ctx->check_cq)))
+		return 1;
 	/* make sure we run task_work before checking for signals */
 	ret = io_run_task_work_sig(ctx);
 	if (ret || io_should_wake(iowq))
 		return ret;
-
-	check_cq = READ_ONCE(ctx->check_cq);
-	if (unlikely(check_cq)) {
-		/* let the caller flush overflows, retry */
-		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-			return 1;
-		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
-			return -EBADR;
-	}
 	if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
 		return -ETIME;
 	return 0;
@@ -2548,13 +2540,25 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
-		if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
-			finish_wait(&ctx->cq_wait, &iowq.wq);
-			io_cqring_do_overflow_flush(ctx);
-		}
+		unsigned long check_cq;
+
 		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
 		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+
+		check_cq = READ_ONCE(ctx->check_cq);
+		if (unlikely(check_cq)) {
+			/* let the caller flush overflows, retry */
+			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) {
+				finish_wait(&ctx->cq_wait, &iowq.wq);
+				io_cqring_do_overflow_flush(ctx);
+			}
+			if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
+				ret = -EBADR;
+				break;
+			}
+		}
+
 		if (__io_cqring_events_user(ctx) >= min_events)
 			break;
 		cond_resched();