[2/8] epoll: simplify signal handling

Message ID 20201106231635.3528496-3-soheil.kdev@gmail.com (mailing list archive)
State New, archived
Series: simplify ep_poll

Commit Message

Soheil Hassas Yeganeh Nov. 6, 2020, 11:16 p.m. UTC
From: Soheil Hassas Yeganeh <soheil@google.com>

Check for pending signals before taking ep->lock, and return -EINTR
immediately if a signal is pending.

This saves a few loads, stores, and branches from the hot path
and simplifies the loop structure for follow-up patches.
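
In condensed form, the change moves the check from under ep->lock,
where its result had to be threaded through res, to the top of the
wait loop, where it can return directly (illustrative sketch, not
the verbatim kernel code):

	/* Before: checked under ep->lock, result carried in res. */
	write_lock_irq(&ep->lock);
	eavail = ep_events_available(ep);
	if (!eavail) {
		if (signal_pending(current))
			res = -EINTR;
		else
			__add_wait_queue_exclusive(&ep->wq, &wait);
	}
	write_unlock_irq(&ep->lock);

	/* After: checked first, before taking the lock at all. */
	do {
		if (signal_pending(current))
			return -EINTR;
		/* ... arm wait entry, take ep->lock, sleep ... */
	} while (...);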

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Khazhismel Kumykov <khazhy@google.com>
---
 fs/eventpoll.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

Patch

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 117b1c395ae4..80c560dad6a3 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1818,7 +1818,7 @@  static inline struct timespec64 ep_set_mstimeout(long ms)
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		   int maxevents, long timeout)
 {
-	int res = 0, eavail, timed_out = 0;
+	int res, eavail, timed_out = 0;
 	u64 slack = 0;
 	wait_queue_entry_t wait;
 	ktime_t expires, *to = NULL;
@@ -1865,6 +1865,9 @@  static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	ep_reset_busy_poll_napi_id(ep);
 
 	do {
+		if (signal_pending(current))
+			return -EINTR;
+
 		/*
 		 * Internally init_wait() uses autoremove_wake_function(),
 		 * thus wait entry is removed from the wait queue on each
@@ -1894,15 +1897,12 @@  static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		 * important.
 		 */
 		eavail = ep_events_available(ep);
-		if (!eavail) {
-			if (signal_pending(current))
-				res = -EINTR;
-			else
-				__add_wait_queue_exclusive(&ep->wq, &wait);
-		}
+		if (!eavail)
+			__add_wait_queue_exclusive(&ep->wq, &wait);
+
 		write_unlock_irq(&ep->lock);
 
-		if (!eavail && !res)
+		if (!eavail)
 			timed_out = !schedule_hrtimeout_range(to, slack,
 							      HRTIMER_MODE_ABS);
 
@@ -1938,14 +1938,14 @@  static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		 * finding more events available and fetching
 		 * repeatedly.
 		 */
-		res = -EINTR;
+		return -EINTR;
 	}
 	/*
 	 * Try to transfer events to user space. In case we get 0 events and
 	 * there's still timeout left over, we go trying again in search of
 	 * more luck.
 	 */
-	if (!res && eavail &&
+	if (eavail &&
 	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
 		goto fetch_events;
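
From userspace, the path this patch streamlines is visible as
epoll_wait(2) failing with errno == EINTR when a signal is delivered
while the call is blocked. A minimal self-contained demo (my own
illustration, not part of the series):

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/epoll.h>

	static void on_alarm(int sig)
	{
		(void)sig;	/* no-op; delivery alone interrupts the wait */
	}

	int main(void)
	{
		struct sigaction sa = { .sa_handler = on_alarm };
		struct epoll_event ev;
		int epfd = epoll_create1(0);

		if (epfd < 0) {
			perror("epoll_create1");
			return 1;
		}
		sigaction(SIGALRM, &sa, NULL);
		alarm(1);	/* deliver SIGALRM in ~1 second */

		/* No fds registered, infinite timeout: only a signal can
		 * wake us, so this reliably exercises the -EINTR path. */
		if (epoll_wait(epfd, &ev, 1, -1) < 0 && errno == EINTR)
			printf("epoll_wait interrupted: EINTR\n");
		close(epfd);
		return 0;
	}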