
[2/2] io_uring/napi: pass ktime to io_napi_adjust_timeout

Message ID 4f5b8e8eed4f53a1879e031a6712b25381adc23d.1722003776.git.asml.silence@gmail.com (mailing list archive)
State New
Series improve net busy polling time conversion

Commit Message

Pavel Begunkov July 26, 2024, 2:24 p.m. UTC
Pass the waiting time for __io_napi_adjust_timeout as ktime and get rid
of all timespec64 conversions. It's especially simple since the caller
already has a ktime.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c |  6 ++++--
 io_uring/napi.c     | 14 +++-----------
 io_uring/napi.h     |  8 ++++----
 3 files changed, 11 insertions(+), 17 deletions(-)
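
As a rough userspace model of why the conversion goes away (not the kernel
code itself; the types and helpers below are simplified stand-ins): ktime_t
is a signed 64-bit nanosecond count, so once both the busy-poll interval and
the caller's wait time are ktime values, the clamp in
__io_napi_adjust_timeout() reduces to a plain min() instead of the old
timespec64_compare()/ns_to_ktime() round-trip.

/*
 * Illustrative userspace sketch only; ktime_t, struct timespec64 and the
 * helpers are simplified stand-ins for the kernel definitions.
 */
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;                 /* nanoseconds, as in the kernel */

struct timespec64 { int64_t tv_sec; long tv_nsec; };

static ktime_t timespec64_to_ktime(struct timespec64 ts)
{
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* old path: convert, compare as timespec64, convert back */
static ktime_t adjust_old(ktime_t poll_dt, struct timespec64 *ts)
{
	if (ts && timespec64_to_ktime(*ts) < poll_dt)
		poll_dt = timespec64_to_ktime(*ts);
	return poll_dt;
}

/* new path: the caller already built a ktime, so just take the minimum */
static ktime_t adjust_new(ktime_t poll_dt, ktime_t to_wait)
{
	return to_wait ? (poll_dt < to_wait ? poll_dt : to_wait) : poll_dt;
}

int main(void)
{
	struct timespec64 ts = { .tv_sec = 0, .tv_nsec = 50000 };
	ktime_t poll_dt = 100000;        /* 100us busy-poll budget */

	printf("old: %lld new: %lld\n",
	       (long long)adjust_old(poll_dt, &ts),
	       (long long)adjust_new(poll_dt, timespec64_to_ktime(ts)));
	return 0;
}

Both paths print the same clamped value (50000 ns here), which is the
behavioural equivalence the patch relies on.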

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 2626424f5d73..3942db160f18 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2416,12 +2416,14 @@  static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
 	if (uts) {
 		struct timespec64 ts;
+		ktime_t dt;
 
 		if (get_timespec64(&ts, uts))
 			return -EFAULT;
 
-		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
-		io_napi_adjust_timeout(ctx, &iowq, &ts);
+		dt = timespec64_to_ktime(ts);
+		iowq.timeout = ktime_add(dt, ktime_get());
+		io_napi_adjust_timeout(ctx, &iowq, dt);
 	}
 
 	if (sig) {
diff --git a/io_uring/napi.c b/io_uring/napi.c
index 6bdb267e9c33..4fd6bb331e1e 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -282,20 +282,12 @@  int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
  * the NAPI timeout accordingly.
  */
 void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
-			      struct timespec64 *ts)
+			      ktime_t to_wait)
 {
 	ktime_t poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
 
-	if (ts) {
-		struct timespec64 poll_to_ts;
-
-		poll_to_ts = ns_to_timespec64(ktime_to_ns(poll_dt));
-		if (timespec64_compare(ts, &poll_to_ts) < 0) {
-			s64 poll_to_ns = timespec64_to_ns(ts);
-			if (poll_to_ns > 0)
-				poll_dt = ns_to_ktime(poll_to_ns);
-		}
-	}
+	if (to_wait)
+		poll_dt = min(poll_dt, to_wait);
 
 	iowq->napi_busy_poll_dt = poll_dt;
 }
diff --git a/io_uring/napi.h b/io_uring/napi.h
index babbee36cd3e..88f1c21d5548 100644
--- a/io_uring/napi.h
+++ b/io_uring/napi.h
@@ -18,7 +18,7 @@  int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
 void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock);
 
 void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-		struct io_wait_queue *iowq, struct timespec64 *ts);
+		struct io_wait_queue *iowq, ktime_t to_wait);
 void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
 int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
 
@@ -29,11 +29,11 @@  static inline bool io_napi(struct io_ring_ctx *ctx)
 
 static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq,
-					  struct timespec64 *ts)
+					  ktime_t to_wait)
 {
 	if (!io_napi(ctx))
 		return;
-	__io_napi_adjust_timeout(ctx, iowq, ts);
+	__io_napi_adjust_timeout(ctx, iowq, to_wait);
 }
 
 static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
@@ -88,7 +88,7 @@  static inline void io_napi_add(struct io_kiocb *req)
 }
 static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq,
-					  struct timespec64 *ts)
+					  ktime_t to_wait)
 {
 }
 static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,