[v2,02/12] genirq: Use unsigned long for IRQ counters

Message ID 20210915175848.162260-3-me@ilammy.net (mailing list archive)
State New, archived
Series proc/stat: Maintain monotonicity of "intr" and "softirq"

Commit Message

Alexei Lozovsky Sept. 15, 2021, 5:58 p.m. UTC
Widen the counters to unsigned long. In fact, some counters already
use unsigned long; this change merely makes the remaining ones agree.
That is one of the reasons for the change: using the same type
everywhere means that the counter sum wraps around in a consistent
manner, allowing accurate accounting of the total number of interrupts.
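
To illustrate the wraparound issue (a minimal userspace sketch with
made-up values, not kernel code): when 32-bit counters feed a wider
accumulator, a total recomputed on a later pass can go backwards once
one counter wraps, even though interrupts only ever increase.

	#include <stdio.h>

	/* Hypothetical per-CPU interrupt counters, one about to wrap. */
	static unsigned int cpu_irqs[2] = { 0xfffffff0u, 100 };

	static unsigned long total(void)
	{
		unsigned long sum = 0;

		for (int i = 0; i < 2; i++)
			sum += cpu_irqs[i];
		return sum;
	}

	int main(void)
	{
		unsigned long before = total();

		cpu_irqs[0] += 0x20;	/* 32-bit counter wraps to 0x10 */

		unsigned long after = total();

		/* after < before: the reported total is not monotonic. */
		printf("before=%#lx after=%#lx\n", before, after);
		return 0;
	}

With every counter and the sum using unsigned long, everything wraps
modulo the same power of two, so the reported total stays monotonic
modulo that width.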

Another aspect is simply widening the type on architectures where
this is possible (i.e., 64-bit ones). 32-bit architectures keep
32-bit counters, while 64-bit architectures get 64-bit counters.
Since 64-bit counters have such a huge range, it is unlikely that
they will ever wrap around in the first place.

Signed-off-by: Alexei Lozovsky <me@ilammy.net>
---
 fs/proc/softirqs.c          | 2 +-
 fs/proc/stat.c              | 2 +-
 include/linux/kernel_stat.h | 6 +++---
 kernel/rcu/tree.h           | 2 +-
 kernel/rcu/tree_stall.h     | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)
Patch

diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c
index 12901dcf57e2..2bc05c23d22e 100644
--- a/fs/proc/softirqs.c
+++ b/fs/proc/softirqs.c
@@ -19,7 +19,7 @@  static int show_softirqs(struct seq_file *p, void *v)
 	for (i = 0; i < NR_SOFTIRQS; i++) {
 		seq_printf(p, "%12s:", softirq_to_name[i]);
 		for_each_possible_cpu(j)
-			seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
+			seq_printf(p, " %10lu", kstat_softirqs_cpu(i, j));
 		seq_putc(p, '\n');
 	}
 	return 0;
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 6561a06ef905..d9d89d7a959c 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -142,7 +142,7 @@  static int show_stat(struct seq_file *p, void *v)
 		sum		+= arch_irq_stat_cpu(i);
 
 		for (j = 0; j < NR_SOFTIRQS; j++) {
-			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
+			unsigned long softirq_stat = kstat_softirqs_cpu(j, i);
 
 			per_softirq_sums[j] += softirq_stat;
 			sum_softirq += softirq_stat;
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 90f2e2faf999..41541ce67dfa 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -37,7 +37,7 @@  struct kernel_cpustat {
 
 struct kernel_stat {
 	unsigned long irqs_sum;
-	unsigned int softirqs[NR_SOFTIRQS];
+	unsigned long softirqs[NR_SOFTIRQS];
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -59,7 +59,7 @@  static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
 	__this_cpu_inc(kstat.softirqs[irq]);
 }
 
-static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
+static inline unsigned long kstat_softirqs_cpu(unsigned int irq, int cpu)
 {
 	return READ_ONCE(kstat_cpu(cpu).softirqs[irq]);
 }
@@ -72,7 +72,7 @@  extern unsigned int kstat_irqs_usr(unsigned int irq);
 /*
  * Number of interrupts per cpu, since bootup
  */
-static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
 {
 	return READ_ONCE(kstat_cpu(cpu).irqs_sum);
 }
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 305cf6aeb408..94e4f022f995 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -242,7 +242,7 @@  struct rcu_data {
 	char rcu_cpu_has_work;
 
 	/* 7) Diagnostic data, including RCU CPU stall warnings. */
-	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
+	unsigned long softirq_snap;	/* Snapshot of softirq activity. */
 	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
 	struct irq_work rcu_iw;		/* Check for non-irq activity. */
 	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 6c76988cc019..35e67275b5b4 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -435,7 +435,7 @@  static void print_cpu_stall_info(int cpu)
 	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
 	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
 			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
-	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
+	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%lu/%lu fqs=%ld %s%s\n",
 	       cpu,
 	       "O."[!!cpu_online(cpu)],
 	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
@@ -510,7 +510,7 @@  static void rcu_check_gp_kthread_expired_fqs_timer(void)
 		       data_race(rcu_state.gp_flags),
 		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
 		       gpk->__state);
-		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
+		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%lu\n",
 		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
 	}
 }