diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -860,7 +860,7 @@ bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32
io_cq_lock(ctx);
__io_flush_post_cqes(ctx);
/* no need to flush - flush is deferred */
- spin_unlock(&ctx->completion_lock);
+ io_cq_unlock(ctx);
}
/* For deferred completions this is not as strict as it is otherwise,
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -93,6 +93,11 @@ static inline void io_cq_lock(struct io_ring_ctx *ctx)
spin_lock(&ctx->completion_lock);
}
+static inline void io_cq_unlock(struct io_ring_ctx *ctx)
+{
+ spin_unlock(&ctx->completion_lock);
+}
+
void io_cq_unlock_post(struct io_ring_ctx *ctx);
static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,

There is one newly added place where we lock the ring with io_cq_lock()
but unlocking is hand coded, calling spin_unlock() directly. It's ugly
and troublesome in the long run.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c | 2 +-
 io_uring/io_uring.h | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)
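
For illustration, call sites now keep the lock/unlock pair symmetric.
A minimal sketch of the deferred branch patched above (condensed, not
a verbatim excerpt of io_aux_cqe()):

	io_cq_lock(ctx);		/* takes ctx->completion_lock */
	__io_flush_post_cqes(ctx);	/* flush previously deferred aux CQEs */
	io_cq_unlock(ctx);		/* plain unlock: this path needs no
					 * extra CQ commit or waiter wakeup */

The eager completion path is unchanged and keeps using
io_cq_unlock_post(), which, in this tree, also commits the CQ ring and
wakes any waiters after dropping the lock.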