
[v2,1/2] timer: introduce upper bound timers

Message ID 20220324091500.2638745-2-asavkov@redhat.com (mailing list archive)
State Deferred
Series Upper bound mode for kernel timers

Checks

Context Check Description
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 17747 this patch: 17747
netdev/cc_maintainers fail 2 maintainers not CCed: sboyd@kernel.org john.stultz@linaro.org
netdev/build_clang success Errors and warnings before: 3889 this patch: 3889
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 17362 this patch: 17362
netdev/checkpatch warning WARNING: line length of 101 exceeds 80 columns; WARNING: line length of 81 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 2 this patch: 2
netdev/source_inline success Was 0 now: 0
netdev/tree_selection success Guessing tree name failed - patch did not apply

Commit Message

Artem Savkov March 24, 2022, 9:14 a.m. UTC
The current timer wheel implementation is optimized for performance and
energy usage but lacks precision. Normally this is not a problem, as
most timers that use the timer wheel are used for timeouts and thus
rarely expire; instead they often get canceled or modified before
expiration. Even when they do expire, firing a bit late is not an issue
for timeout timers.

The TCP keepalive timer is a special case: its aim is to prevent
timeouts, so triggering earlier rather than later is the desired
behavior. In a reported case the user had a 3600s keepalive timer to
prevent firewall disconnects (on a 3650s interval). They observed
keepalive timers firing up to four minutes late, causing unexpected
disconnects.
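
For reference, the delay follows from the timer wheel's level
granularity: assuming HZ=1000, a 3600s timeout is 3,600,000 jiffies,
which falls into wheel level 6 where the granularity is 8^6 = 262,144
jiffies (~262s, about 4.4 minutes). The round-up performed in
calc_index() can therefore push the expiry out by roughly that much,
which matches the observed behavior.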

This commit adds a TIMER_UPPER_BOUND flag which allows creating timers
that are guaranteed to expire at the specified time or earlier, never
later.
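
As an illustrative sketch (not part of this patch), a caller would
request upper bound semantics via the timer flags at setup time and
then use the timer as usual:

	timer_setup(&t->timer, my_timeout_fn, TIMER_UPPER_BOUND);
	mod_timer(&t->timer, jiffies + 3600 * HZ);

Here my_timeout_fn and the 3600s period are placeholders for whatever
user adopts the flag; the flag itself is all this patch adds.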

This was previously discussed here:
https://lore.kernel.org/all/20210302001054.4qgrvnkltvkgikzr@treble/T/#u

Suggested-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Artem Savkov <asavkov@redhat.com>
---
 include/linux/timer.h |  6 +++++-
 kernel/time/timer.c   | 36 ++++++++++++++++++++++--------------
 2 files changed, 27 insertions(+), 15 deletions(-)

Patch

diff --git a/include/linux/timer.h b/include/linux/timer.h
index fda13c9d1256..4b2456501be6 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -60,6 +60,9 @@  struct timer_list {
  * function is invoked via mod_timer() or add_timer().  If the timer
  * should be placed on a particular CPU, then add_timer_on() has to be
  * used.
+ *
+ * @TIMER_UPPER_BOUND: Unlike normal timers, which fire at the specified time
+ * or later, an upper bound timer expires at the specified time or earlier.
  */
 #define TIMER_CPUMASK		0x0003FFFF
 #define TIMER_MIGRATING		0x00040000
@@ -67,7 +70,8 @@  struct timer_list {
 #define TIMER_DEFERRABLE	0x00080000
 #define TIMER_PINNED		0x00100000
 #define TIMER_IRQSAFE		0x00200000
-#define TIMER_INIT_FLAGS	(TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
+#define TIMER_UPPER_BOUND	0x00400000
+#define TIMER_INIT_FLAGS	(TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE | TIMER_UPPER_BOUND)
 #define TIMER_ARRAYSHIFT	22
 #define TIMER_ARRAYMASK		0xFFC00000
 
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 85f1021ad459..f4965644d728 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -491,7 +491,7 @@  static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
  * time.
  */
 static inline unsigned calc_index(unsigned long expires, unsigned lvl,
-				  unsigned long *bucket_expiry)
+				  unsigned long *bucket_expiry, bool upper_bound)
 {
 
 	/*
@@ -501,34 +501,39 @@  static inline unsigned calc_index(unsigned long expires, unsigned lvl,
 	 * - Truncation of the expiry time in the outer wheel levels
 	 *
 	 * Round up with level granularity to prevent this.
+	 * Do not round up in the case of an upper bound timer.
 	 */
-	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+	if (upper_bound)
+		expires = expires >> LVL_SHIFT(lvl);
+	else
+		expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+
 	*bucket_expiry = expires << LVL_SHIFT(lvl);
 	return LVL_OFFS(lvl) + (expires & LVL_MASK);
 }
 
 static int calc_wheel_index(unsigned long expires, unsigned long clk,
-			    unsigned long *bucket_expiry)
+			    unsigned long *bucket_expiry, bool upper_bound)
 {
 	unsigned long delta = expires - clk;
 	unsigned int idx;
 
 	if (delta < LVL_START(1)) {
-		idx = calc_index(expires, 0, bucket_expiry);
+		idx = calc_index(expires, 0, bucket_expiry, upper_bound);
 	} else if (delta < LVL_START(2)) {
-		idx = calc_index(expires, 1, bucket_expiry);
+		idx = calc_index(expires, 1, bucket_expiry, upper_bound);
 	} else if (delta < LVL_START(3)) {
-		idx = calc_index(expires, 2, bucket_expiry);
+		idx = calc_index(expires, 2, bucket_expiry, upper_bound);
 	} else if (delta < LVL_START(4)) {
-		idx = calc_index(expires, 3, bucket_expiry);
+		idx = calc_index(expires, 3, bucket_expiry, upper_bound);
 	} else if (delta < LVL_START(5)) {
-		idx = calc_index(expires, 4, bucket_expiry);
+		idx = calc_index(expires, 4, bucket_expiry, upper_bound);
 	} else if (delta < LVL_START(6)) {
-		idx = calc_index(expires, 5, bucket_expiry);
+		idx = calc_index(expires, 5, bucket_expiry, upper_bound);
 	} else if (delta < LVL_START(7)) {
-		idx = calc_index(expires, 6, bucket_expiry);
+		idx = calc_index(expires, 6, bucket_expiry, upper_bound);
 	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
-		idx = calc_index(expires, 7, bucket_expiry);
+		idx = calc_index(expires, 7, bucket_expiry, upper_bound);
 	} else if ((long) delta < 0) {
 		idx = clk & LVL_MASK;
 		*bucket_expiry = clk;
@@ -540,7 +545,8 @@  static int calc_wheel_index(unsigned long expires, unsigned long clk,
 		if (delta >= WHEEL_TIMEOUT_CUTOFF)
 			expires = clk + WHEEL_TIMEOUT_MAX;
 
-		idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
+		idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry,
+				 upper_bound);
 	}
 	return idx;
 }
@@ -607,7 +613,8 @@  static void internal_add_timer(struct timer_base *base, struct timer_list *timer
 	unsigned long bucket_expiry;
 	unsigned int idx;
 
-	idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
+	idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry,
+			       timer->flags & TIMER_UPPER_BOUND);
 	enqueue_timer(base, timer, idx, bucket_expiry);
 }
 
@@ -1000,7 +1007,8 @@  __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 		}
 
 		clk = base->clk;
-		idx = calc_wheel_index(expires, clk, &bucket_expiry);
+		idx = calc_wheel_index(expires, clk, &bucket_expiry,
+				       timer->flags & TIMER_UPPER_BOUND);
 
 		/*
 		 * Retrieve and compare the array index of the pending