--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -9,30 +9,30 @@ typedef struct {
#if IS_ENABLED(CONFIG_KVM_INTEL)
u8 kvm_cpu_l1tf_flush_l1d;
#endif
- unsigned int __nmi_count; /* arch dependent */
+ unsigned long __nmi_count; /* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
- unsigned int apic_timer_irqs; /* arch dependent */
- unsigned int irq_spurious_count;
- unsigned int icr_read_retry_count;
+ unsigned long apic_timer_irqs; /* arch dependent */
+ unsigned long irq_spurious_count;
+ unsigned long icr_read_retry_count;
#endif
#ifdef CONFIG_HAVE_KVM
unsigned int kvm_posted_intr_ipis;
unsigned int kvm_posted_intr_wakeup_ipis;
unsigned int kvm_posted_intr_nested_ipis;
#endif
- unsigned int x86_platform_ipis; /* arch dependent */
- unsigned int apic_perf_irqs;
- unsigned int apic_irq_work_irqs;
+ unsigned long x86_platform_ipis; /* arch dependent */
+ unsigned long apic_perf_irqs;
+ unsigned long apic_irq_work_irqs;
#ifdef CONFIG_SMP
- unsigned int irq_resched_count;
- unsigned int irq_call_count;
+ unsigned long irq_resched_count;
+ unsigned long irq_call_count;
#endif
unsigned int irq_tlb_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
- unsigned int irq_thermal_count;
+ unsigned long irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
- unsigned int irq_threshold_count;
+ unsigned long irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE_AMD
unsigned int irq_deferred_error_count;
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -255,8 +255,8 @@ bool mce_is_memory_error(struct mce *m);
bool mce_is_correctable(struct mce *m);
int mce_usable_address(struct mce *m);
-DECLARE_PER_CPU(unsigned, mce_exception_count);
-DECLARE_PER_CPU(unsigned, mce_poll_count);
+DECLARE_PER_CPU(unsigned long, mce_exception_count);
+DECLARE_PER_CPU(unsigned long, mce_poll_count);
typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -63,7 +63,7 @@ static DEFINE_MUTEX(mce_sysfs_mutex);
#define SPINUNIT 100 /* 100ns */
-DEFINE_PER_CPU(unsigned, mce_exception_count);
+DEFINE_PER_CPU(unsigned long, mce_exception_count);
DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
@@ -718,7 +718,7 @@ static void mce_read_aux(struct mce *m, int i)
}
}
-DEFINE_PER_CPU(unsigned, mce_poll_count);
+DEFINE_PER_CPU(unsigned long, mce_poll_count);
/*
* Poll for corrected events or events that happened before reset.
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -62,45 +62,45 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%*s: ", prec, "NMI");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->__nmi_count));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->__nmi_count));
seq_puts(p, " Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(p, "%*s: ", prec, "LOC");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->apic_timer_irqs));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->apic_timer_irqs));
seq_puts(p, " Local timer interrupts\n");
seq_printf(p, "%*s: ", prec, "SPU");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_spurious_count));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->irq_spurious_count));
seq_puts(p, " Spurious interrupts\n");
seq_printf(p, "%*s: ", prec, "PMI");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->apic_perf_irqs));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->apic_perf_irqs));
seq_puts(p, " Performance monitoring interrupts\n");
seq_printf(p, "%*s: ", prec, "IWI");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->apic_irq_work_irqs));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->apic_irq_work_irqs));
seq_puts(p, " IRQ work interrupts\n");
seq_printf(p, "%*s: ", prec, "RTR");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->icr_read_retry_count));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->icr_read_retry_count));
seq_puts(p, " APIC ICR read retries\n");
if (x86_platform_ipi_callback) {
seq_printf(p, "%*s: ", prec, "PLT");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->x86_platform_ipis));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->x86_platform_ipis));
seq_puts(p, " Platform interrupts\n");
}
#endif
#ifdef CONFIG_SMP
seq_printf(p, "%*s: ", prec, "RES");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_resched_count));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->irq_resched_count));
seq_puts(p, " Rescheduling interrupts\n");
seq_printf(p, "%*s: ", prec, "CAL");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_call_count));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->irq_call_count));
seq_puts(p, " Function call interrupts\n");
seq_printf(p, "%*s: ", prec, "TLB");
for_each_online_cpu(j)
@@ -110,13 +110,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
#ifdef CONFIG_X86_THERMAL_VECTOR
seq_printf(p, "%*s: ", prec, "TRM");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_thermal_count));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->irq_thermal_count));
seq_puts(p, " Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
seq_printf(p, "%*s: ", prec, "THR");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_threshold_count));
+ seq_printf(p, "%10lu ", READ_ONCE(irq_stats(j)->irq_threshold_count));
seq_puts(p, " Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
@@ -128,11 +128,11 @@ int arch_show_interrupts(struct seq_file *p, int prec)
#ifdef CONFIG_X86_MCE
seq_printf(p, "%*s: ", prec, "MCE");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(per_cpu(mce_exception_count, j)));
+ seq_printf(p, "%10lu ", READ_ONCE(per_cpu(mce_exception_count, j)));
seq_puts(p, " Machine check exceptions\n");
seq_printf(p, "%*s: ", prec, "MCP");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", READ_ONCE(per_cpu(mce_poll_count, j)));
+ seq_printf(p, "%10lu ", READ_ONCE(per_cpu(mce_poll_count, j)));
seq_puts(p, " Machine check polls\n");
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
As with PowerPC in the previous patches, bump the counters in
irq_cpustat_t to a wider type for better wrap-around handling on
x86_64 (at a sustained 1 kHz interrupt rate, a 32-bit counter wraps
in about 50 days). Not all of them, just the ones reported via
procfs. Also grab mce_exception_count and mce_poll_count, which are
reported alongside the counters from irq_cpustat_t.

Signed-off-by: Alexei Lozovsky <me@ilammy.net>
---
 arch/x86/include/asm/hardirq.h | 22 +++++++++++-----------
 arch/x86/include/asm/mce.h     |  4 ++--
 arch/x86/kernel/cpu/mce/core.c |  4 ++--
 arch/x86/kernel/irq.c          | 26 +++++++++++++-------------
 4 files changed, 28 insertions(+), 28 deletions(-)

So, about the "not all of them" part. What about all the other counters
that are present in irq_cpustat_t and printed by arch_show_interrupts()
for /proc/interrupts, but are not included in arch_irq_stat_cpu() and
therefore don't show up in the total interrupt counter of /proc/stat?
These ones:

  kvm_posted_intr_ipis
  kvm_posted_intr_wakeup_ipis
  kvm_posted_intr_nested_ipis
  irq_tlb_count
  irq_deferred_error_count
  irq_hv_callback_count
  irq_hv_reenlightenment_count
  hyperv_stimer0_count

I have a feeling they should be included in the total interrupt counter
sum (and be widened to unsigned long as well). Should they?
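If they should, the mechanical part looks small. Here is a rough sketch
of what folding them into arch_irq_stat_cpu() in arch/x86/kernel/irq.c
might look like -- illustrative only, not part of this patch; the
#ifdef guards mirror the ones around these fields in hardirq.h, and the
Hyper-V counters are elided for brevity:

u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

        /* ... existing additions, unchanged ... */

        /* Counters shown in /proc/interrupts but not summed today: */
        sum += irq_stats(cpu)->irq_tlb_count;
#ifdef CONFIG_HAVE_KVM
        sum += irq_stats(cpu)->kvm_posted_intr_ipis;
        sum += irq_stats(cpu)->kvm_posted_intr_wakeup_ipis;
        sum += irq_stats(cpu)->kvm_posted_intr_nested_ipis;
#endif
#ifdef CONFIG_X86_MCE_AMD
        sum += irq_stats(cpu)->irq_deferred_error_count;
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
        sum += irq_stats(cpu)->irq_hv_callback_count;
#endif
        /*
         * ... likewise irq_hv_reenlightenment_count and
         * hyperv_stimer0_count, under their respective guards ...
         */

        return sum;
}

One thing to note: this would visibly bump the "intr" total in
/proc/stat, which may well be why these counters were never summed in
the first place.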