--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1698,7 +1698,8 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 		list_del(&iocb->ki_list);
 		iocb->ki_res.res = mangle_poll(mask);
 		req->done = true;
-		if (iocb->ki_eventfd && eventfd_signal_count()) {
+		if (iocb->ki_eventfd &&
+		    eventfd_signal_count(iocb->ki_eventfd)) {
 			iocb = NULL;
 			INIT_WORK(&req->work, aio_poll_put_work);
 			schedule_work(&req->work);
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -25,6 +25,8 @@
 #include <linux/idr.h>
 #include <linux/uio.h>
 
+#define EVENTFD_WAKE_DEPTH 0
+
 DEFINE_PER_CPU(int, eventfd_wake_count);
 
 static DEFINE_IDA(eventfd_ida);
@@ -42,9 +44,17 @@ struct eventfd_ctx {
 	 */
 	__u64 count;
 	unsigned int flags;
+	int __percpu *wake_count;
 	int id;
 };
 
+bool eventfd_signal_count(struct eventfd_ctx *ctx)
+{
+	return (this_cpu_read(*ctx->wake_count) ||
+		this_cpu_read(eventfd_wake_count) > EVENTFD_WAKE_DEPTH);
+}
+EXPORT_SYMBOL_GPL(eventfd_signal_count);
+
 /**
  * eventfd_signal - Adds @n to the eventfd counter.
  * @ctx: [in] Pointer to the eventfd context.
@@ -71,17 +81,19 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 	 * it returns true, the eventfd_signal() call should be deferred to a
 	 * safe context.
 	 */
-	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+	if (WARN_ON_ONCE(eventfd_signal_count(ctx)))
 		return 0;
 
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
 	this_cpu_inc(eventfd_wake_count);
+	this_cpu_inc(*ctx->wake_count);
 	if (ULLONG_MAX - ctx->count < n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
 	this_cpu_dec(eventfd_wake_count);
+	this_cpu_dec(*ctx->wake_count);
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
@@ -92,6 +104,7 @@ static void eventfd_free_ctx(struct eventfd_ctx *ctx)
 {
 	if (ctx->id >= 0)
 		ida_simple_remove(&eventfd_ida, ctx->id);
+	free_percpu(ctx->wake_count);
 	kfree(ctx);
 }
 
@@ -423,6 +436,11 @@ static int do_eventfd(unsigned int count, int flags)
 
 	kref_init(&ctx->kref);
 	init_waitqueue_head(&ctx->wqh);
+	ctx->wake_count = alloc_percpu(int);
+	if (!ctx->wake_count) {
+		kfree(ctx);
+		return -ENOMEM;
+	}
 	ctx->count = count;
 	ctx->flags = flags;
 	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -45,10 +45,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
 
 DECLARE_PER_CPU(int, eventfd_wake_count);
 
-static inline bool eventfd_signal_count(void)
-{
-	return this_cpu_read(eventfd_wake_count);
-}
+bool eventfd_signal_count(struct eventfd_ctx *ctx);
 
 #else /* CONFIG_EVENTFD */
Now we have a global per-CPU counter that limits the recursion depth of
eventfd_signal(). This avoids both deadlock and stack overflow. In the
stack-overflow case, though, it would be acceptable to allow deeper
recursion if needed. So add a per-CPU counter to eventfd_ctx that limits
the recursion depth for the deadlock case (recursion on the same
eventfd); the global per-CPU limit can then safely be raised later.

Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
---
 fs/aio.c                |  3 ++-
 fs/eventfd.c            | 20 +++++++++++++++++++-
 include/linux/eventfd.h |  5 +----
 3 files changed, 22 insertions(+), 6 deletions(-)
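
For readers outside the aio context: the deferral pattern the fs/aio.c hunk
relies on applies to any eventfd_signal() caller that may run inside a wakeup
path. The sketch below is only an illustration of that pattern, not code from
this series; struct my_dev, my_dev_notify() and my_dev_signal_work() are
made-up names, and it assumes the eventfd_signal_count(ctx) signature
introduced above.

#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct eventfd_ctx *efd_ctx;	/* eventfd this device notifies */
	struct work_struct work;	/* deferred-signal work item */
};

static void my_dev_signal_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	/* Process context: no nested wakeup here, signalling is safe. */
	eventfd_signal(dev->efd_ctx, 1);
}

static void my_dev_notify(struct my_dev *dev)
{
	/*
	 * If we are already inside a wakeup on this eventfd, or the global
	 * recursion budget is used up, calling eventfd_signal() directly
	 * could deadlock on ctx->wqh.lock or overflow the stack, so defer
	 * the signal to a workqueue instead.
	 */
	if (eventfd_signal_count(dev->efd_ctx)) {
		INIT_WORK(&dev->work, my_dev_signal_work);
		schedule_work(&dev->work);
	} else {
		eventfd_signal(dev->efd_ctx, 1);
	}
}

This mirrors what aio_poll_wake() does with aio_poll_put_work(). Since the
check is now per-eventfd as well as global, a caller that signals a different
eventfd from inside a wakeup would no longer need to defer once
EVENTFD_WAKE_DEPTH is raised in a later change.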