Message ID | 20231122-vfs-eventfd-signal-v2-4-bd549b14ce0c@kernel.org |
---|---|
State | Accepted |
Commit | b7638ad0c7802ea854599ce753d0e6d20690f7e2 |
Series | eventfd: simplify signal helpers |
On Wed 22-11-23 13:48:25, Christian Brauner wrote:
> No caller cares about the return value.
>
> Signed-off-by: Christian Brauner <brauner@kernel.org>

Yup. Feel free to add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

> ---
>  fs/eventfd.c            | 40 +++++++++++++++-------------------------
>  include/linux/eventfd.h | 16 +++++++---------
>  2 files changed, 22 insertions(+), 34 deletions(-)
>
> diff --git a/fs/eventfd.c b/fs/eventfd.c
> index a9a6de920fb4..13be2fb7fc96 100644
> --- a/fs/eventfd.c
> +++ b/fs/eventfd.c
> @@ -43,10 +43,19 @@ struct eventfd_ctx {
>  	int id;
>  };
>  
> -__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
> +/**
> + * eventfd_signal - Adds @n to the eventfd counter.
> + * @ctx: [in] Pointer to the eventfd context.
> + * @mask: [in] poll mask
> + *
> + * This function is supposed to be called by the kernel in paths that do not
> + * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
> + * value, and we signal this as overflow condition by returning a EPOLLERR
> + * to poll(2).
> + */
> +void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
>  {
>  	unsigned long flags;
> -	__u64 n = 1;
>  
>  	/*
>  	 * Deadlock or stack overflow issues can happen if we recurse here
> @@ -57,37 +66,18 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
>  	 * safe context.
>  	 */
>  	if (WARN_ON_ONCE(current->in_eventfd))
> -		return 0;
> +		return;
>  
>  	spin_lock_irqsave(&ctx->wqh.lock, flags);
>  	current->in_eventfd = 1;
> -	if (ULLONG_MAX - ctx->count < n)
> -		n = ULLONG_MAX - ctx->count;
> -	ctx->count += n;
> +	if (ctx->count < ULLONG_MAX)
> +		ctx->count++;
>  	if (waitqueue_active(&ctx->wqh))
>  		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
>  	current->in_eventfd = 0;
>  	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
> -
> -	return n == 1;
> -}
> -
> -/**
> - * eventfd_signal - Adds @n to the eventfd counter.
> - * @ctx: [in] Pointer to the eventfd context.
> - *
> - * This function is supposed to be called by the kernel in paths that do not
> - * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
> - * value, and we signal this as overflow condition by returning a EPOLLERR
> - * to poll(2).
> - *
> - * Returns the amount by which the counter was incremented.
> - */
> -__u64 eventfd_signal(struct eventfd_ctx *ctx)
> -{
> -	return eventfd_signal_mask(ctx, 0);
>  }
> -EXPORT_SYMBOL_GPL(eventfd_signal);
> +EXPORT_SYMBOL_GPL(eventfd_signal_mask);
>  
>  static void eventfd_free_ctx(struct eventfd_ctx *ctx)
>  {
> diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
> index 4f8aac7eb62a..fea7c4eb01d6 100644
> --- a/include/linux/eventfd.h
> +++ b/include/linux/eventfd.h
> @@ -35,8 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
>  struct file *eventfd_fget(int fd);
>  struct eventfd_ctx *eventfd_ctx_fdget(int fd);
>  struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
> -__u64 eventfd_signal(struct eventfd_ctx *ctx);
> -__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
> +void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
>  int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
>  				  __u64 *cnt);
>  void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
> @@ -58,14 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
>  	return ERR_PTR(-ENOSYS);
>  }
>  
> -static inline int eventfd_signal(struct eventfd_ctx *ctx)
> +static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, unsigned mask)
>  {
> -	return -ENOSYS;
> -}
> -
> -static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, unsigned mask)
> -{
> -	return -ENOSYS;
>  }
>  
>  static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
> @@ -91,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
>  
>  #endif
>  
> +static inline void eventfd_signal(struct eventfd_ctx *ctx)
> +{
> +	eventfd_signal_mask(ctx, 0);
> +}
> +
>  #endif /* _LINUX_EVENTFD_H */
>
>
> -- 
> 2.42.0
>
diff --git a/fs/eventfd.c b/fs/eventfd.c
index a9a6de920fb4..13be2fb7fc96 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -43,10 +43,19 @@ struct eventfd_ctx {
 	int id;
 };
 
-__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
+/**
+ * eventfd_signal - Adds @n to the eventfd counter.
+ * @ctx: [in] Pointer to the eventfd context.
+ * @mask: [in] poll mask
+ *
+ * This function is supposed to be called by the kernel in paths that do not
+ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+ * value, and we signal this as overflow condition by returning a EPOLLERR
+ * to poll(2).
+ */
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
 {
 	unsigned long flags;
-	__u64 n = 1;
 
 	/*
 	 * Deadlock or stack overflow issues can happen if we recurse here
@@ -57,37 +66,18 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
 	 * safe context.
 	 */
 	if (WARN_ON_ONCE(current->in_eventfd))
-		return 0;
+		return;
 
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
 	current->in_eventfd = 1;
-	if (ULLONG_MAX - ctx->count < n)
-		n = ULLONG_MAX - ctx->count;
-	ctx->count += n;
+	if (ctx->count < ULLONG_MAX)
+		ctx->count++;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
 	current->in_eventfd = 0;
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
-
-	return n == 1;
-}
-
-/**
- * eventfd_signal - Adds @n to the eventfd counter.
- * @ctx: [in] Pointer to the eventfd context.
- *
- * This function is supposed to be called by the kernel in paths that do not
- * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
- * value, and we signal this as overflow condition by returning a EPOLLERR
- * to poll(2).
- *
- * Returns the amount by which the counter was incremented.
- */
-__u64 eventfd_signal(struct eventfd_ctx *ctx)
-{
-	return eventfd_signal_mask(ctx, 0);
 }
-EXPORT_SYMBOL_GPL(eventfd_signal);
+EXPORT_SYMBOL_GPL(eventfd_signal_mask);
 
 static void eventfd_free_ctx(struct eventfd_ctx *ctx)
 {
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 4f8aac7eb62a..fea7c4eb01d6 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -35,8 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
 struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-__u64 eventfd_signal(struct eventfd_ctx *ctx);
-__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
@@ -58,14 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
 	return ERR_PTR(-ENOSYS);
 }
 
-static inline int eventfd_signal(struct eventfd_ctx *ctx)
+static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, unsigned mask)
 {
-	return -ENOSYS;
-}
-
-static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, unsigned mask)
-{
-	return -ENOSYS;
 }
 
 static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
@@ -91,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
 
 #endif
 
+static inline void eventfd_signal(struct eventfd_ctx *ctx)
+{
+	eventfd_signal_mask(ctx, 0);
+}
+
 #endif /* _LINUX_EVENTFD_H */
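The rewritten helper keeps the familiar eventfd counter semantics: each signal increments the counter (saturating at ULLONG_MAX) and wakes pollers with EPOLLIN | mask. As a rough illustration only, not part of this patch, here is a minimal userspace sketch of the observer side, using eventfd_write(3) as a stand-in for the in-kernel signal:

```c
#include <sys/eventfd.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Counter starts at 0; the kernel-side eventfd_signal() would bump it. */
	int efd = eventfd(0, EFD_NONBLOCK);
	struct pollfd pfd = { .fd = efd, .events = POLLIN };
	uint64_t cnt;

	if (efd < 0)
		return 1;

	eventfd_write(efd, 1);	/* stand-in for eventfd_signal(ctx) */

	/* A signaled eventfd becomes readable; read() returns and resets the counter. */
	if (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLIN) &&
	    read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))
		printf("counter was %llu\n", (unsigned long long)cnt);

	close(efd);
	return 0;
}
```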
No caller cares about the return value.

Signed-off-by: Christian Brauner <brauner@kernel.org>
---
 fs/eventfd.c            | 40 +++++++++++++++-------------------------
 include/linux/eventfd.h | 16 +++++++---------
 2 files changed, 22 insertions(+), 34 deletions(-)
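To illustrate the point of the change at a call site: after this patch eventfd_signal() is a void, fire-and-forget helper, so a caller simply signals and moves on. The sketch below is hypothetical (foo_dev, foo_set_eventfd() and foo_complete() are made-up names, not taken from the series) and only shows the intended usage pattern:

```c
#include <linux/eventfd.h>
#include <linux/err.h>

/* Hypothetical device that signals completions through a userspace-supplied eventfd. */
struct foo_dev {
	struct eventfd_ctx *done_evt;
};

/* Hook up an eventfd passed in from userspace (e.g. via an ioctl). */
static int foo_set_eventfd(struct foo_dev *dev, int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	dev->done_evt = ctx;
	return 0;
}

/* Completion path: with this series, signalling is fire-and-forget. */
static void foo_complete(struct foo_dev *dev)
{
	if (dev->done_evt)
		eventfd_signal(dev->done_evt);
}
```

A real driver would also drop its reference with eventfd_ctx_put() on teardown.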