@@ -22,6 +22,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
@@ -97,6 +97,7 @@ SECTIONS
_text = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
@@ -98,6 +98,7 @@ SECTIONS
IRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.gnu.warning)
@@ -111,6 +111,7 @@ SECTIONS
SOFTIRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
HYPERVISOR_TEXT
KPROBES_TEXT
@@ -122,6 +122,7 @@ SECTIONS
ENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
HYPERVISOR_TEXT
@@ -52,6 +52,7 @@ SECTIONS
KPROBES_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
@@ -33,6 +33,7 @@ SECTIONS
#ifndef CONFIG_SCHEDULE_L1
SCHED_TEXT
#endif
+ CPUIDLE_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
@@ -70,6 +70,7 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
@@ -43,6 +43,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.text.__*)
@@ -63,6 +63,7 @@ SECTIONS
*(.text..tlbmiss)
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
#ifdef CONFIG_DEBUG_INFO
INIT_TEXT
@@ -29,6 +29,7 @@ SECTIONS
_stext = . ;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
#if defined(CONFIG_ROMKERNEL)
*(.int_redirect)
@@ -50,6 +50,7 @@ SECTIONS
_text = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
@@ -46,6 +46,7 @@ SECTIONS {
__end_ivt_text = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.gnu.linkonce.t*)
@@ -31,6 +31,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
@@ -45,6 +45,7 @@ SECTIONS {
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
. = ALIGN(16);
@@ -16,6 +16,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
@@ -16,6 +16,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
@@ -21,6 +21,7 @@ SECTIONS
.text : {
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -33,6 +33,7 @@ SECTIONS {
EXIT_TEXT
EXIT_CALL
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -55,6 +55,7 @@ SECTIONS
.text : {
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -30,6 +30,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
@@ -37,6 +37,7 @@ SECTIONS
.text : {
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
@@ -47,6 +47,7 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -69,6 +69,7 @@ SECTIONS
.text ALIGN(PAGE_SIZE) : {
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -52,6 +52,7 @@ SECTIONS
/* careful! __ftr_alt_* sections need to be close to .text */
*(.text .fixup __ftr_alt_* .ref.text)
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -35,6 +35,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -40,6 +40,7 @@ SECTIONS
_text = .; /* Text and read-only data */
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.text.*)
@@ -36,6 +36,7 @@ SECTIONS
TEXT_TEXT
EXTRA_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -49,6 +49,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -50,7 +50,7 @@ STD_ENTRY(smp_nap)
* When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
* as a result return to the function that called _cpu_idle().
*/
-STD_ENTRY(_cpu_idle)
+STD_ENTRY_SECTION(_cpu_idle, .cpuidle.text)
movei r1, 1
IRQ_ENABLE_LOAD(r2, r3)
mtspr INTERRUPT_CRITICAL_SECTION, r1
@@ -42,6 +42,7 @@ SECTIONS
.text : AT (ADDR(.text) - LOAD_OFFSET) {
HEAD_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -68,6 +68,7 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.stub .text.* .gnu.linkonce.t.*)
@@ -28,6 +28,7 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
/* .gnu.warning sections are handled specially by elf32.em. */
@@ -37,6 +37,7 @@ SECTIONS
.text : { /* Real text segment */
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
@@ -4,6 +4,10 @@
#include <asm/processor-flags.h>
#ifndef __ASSEMBLY__
+
+/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
+#define __cpuidle __attribute__((__section__(".cpuidle.text")))
+
/*
* Interrupt control:
*/
@@ -44,12 +48,12 @@ static inline void native_irq_enable(void)
asm volatile("sti": : :"memory");
}
-static inline void native_safe_halt(void)
+static inline __cpuidle void native_safe_halt(void)
{
asm volatile("sti; hlt": : :"memory");
}
-static inline void native_halt(void)
+static inline __cpuidle void native_halt(void)
{
asm volatile("hlt": : :"memory");
}
@@ -86,7 +90,7 @@ static inline notrace void arch_local_irq_enable(void)
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
-static inline void arch_safe_halt(void)
+static inline __cpuidle void arch_safe_halt(void)
{
native_safe_halt();
}
@@ -95,7 +99,7 @@ static inline void arch_safe_halt(void)
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
-static inline void halt(void)
+static inline __cpuidle void halt(void)
{
native_halt();
}
@@ -152,7 +152,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
-void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
+void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
unsigned int cpu = smp_processor_id();
struct cstate_entry *percpu_entry;
@@ -301,7 +301,7 @@ void arch_cpu_idle(void)
/*
* We use this if we don't have any better idle routine..
*/
-void default_idle(void)
+void __cpuidle default_idle(void)
{
trace_cpu_idle_rcuidle(1, smp_processor_id());
safe_halt();
@@ -416,7 +416,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
* with interrupts enabled and no flags, which is backwards compatible with the
* original MWAIT implementation.
*/
-static void mwait_idle(void)
+static __cpuidle void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
trace_cpu_idle_rcuidle(1, smp_processor_id());
@@ -97,6 +97,7 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
ENTRY_TEXT
@@ -93,6 +93,9 @@ SECTIONS
VMLINUX_SYMBOL(__sched_text_start) = .;
*(.sched.literal .sched.text)
VMLINUX_SYMBOL(__sched_text_end) = .;
+ VMLINUX_SYMBOL(__cpuidle_text_start) = .;
+ *(.cpuidle.literal .cpuidle.text)
+ VMLINUX_SYMBOL(__cpuidle_text_end) = .;
VMLINUX_SYMBOL(__lock_text_start) = .;
*(.spinlock.literal .spinlock.text)
VMLINUX_SYMBOL(__lock_text_end) = .;
@@ -31,6 +31,7 @@
#include <linux/sched.h> /* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
+#include <linux/cpu.h>
#include <acpi/processor.h>
/*
@@ -115,7 +116,7 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
* Callers should disable interrupts before the call and enable
* interrupts after return.
*/
-static void acpi_safe_halt(void)
+static void __cpuidle acpi_safe_halt(void)
{
if (!tif_need_resched()) {
safe_halt();
@@ -645,7 +646,7 @@ static int acpi_idle_bm_check(void)
*
* Caller disables interrupt before call and enables interrupt after return.
*/
-static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
@@ -14,6 +14,7 @@
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/tick.h>
+#include <linux/cpu.h>
#include "cpuidle.h"
@@ -178,8 +179,8 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
}
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static int __cpuidle poll_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
local_irq_enable();
if (!current_set_polling_and_test()) {
@@ -863,8 +863,8 @@ static struct cpuidle_state dnv_cstates[] = {
*
* Must be called under local_irq_disable().
*/
-static int intel_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int intel_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
unsigned long ecx = 1; /* break on interrupt flag */
struct cpuidle_state *state = &drv->states[index];
@@ -454,6 +454,12 @@
*(.spinlock.text) \
VMLINUX_SYMBOL(__lock_text_end) = .;
+#define CPUIDLE_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__cpuidle_text_start) = .; \
+ *(.cpuidle.text) \
+ VMLINUX_SYMBOL(__cpuidle_text_end) = .;
+
#define KPROBES_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__kprobes_text_start) = .; \
@@ -239,6 +239,11 @@ void cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
+/* Attach to any functions which should be considered cpuidle. */
+#define __cpuidle __attribute__((__section__(".cpuidle.text")))
+
+bool cpu_in_idle(unsigned long pc);
+
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
@@ -16,6 +16,9 @@
#include "sched.h"
+/* Linker adds these: start and end of __cpuidle functions */
+extern char __cpuidle_text_start[], __cpuidle_text_end[];
+
/**
* sched_idle_set_state - Record idle state for the current CPU.
* @idle_state: State to record.
@@ -53,7 +56,7 @@ static int __init cpu_idle_nopoll_setup(char *__unused)
__setup("hlt", cpu_idle_nopoll_setup);
#endif
-static inline int cpu_idle_poll(void)
+static noinline int __cpuidle cpu_idle_poll(void)
{
rcu_idle_enter();
trace_cpu_idle_rcuidle(0, smp_processor_id());
@@ -84,7 +87,7 @@ void __weak arch_cpu_idle(void)
*
* To use when the cpuidle framework cannot be used.
*/
-void default_idle_call(void)
+void __cpuidle default_idle_call(void)
{
if (current_clr_polling_and_test()) {
local_irq_enable();
@@ -271,6 +274,12 @@ static void cpu_idle_loop(void)
}
}
+bool cpu_in_idle(unsigned long pc)
+{
+ return pc >= (unsigned long)__cpuidle_text_start &&
+ pc < (unsigned long)__cpuidle_text_end;
+}
+
void cpu_startup_entry(enum cpuhp_state state)
{
/*
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
+#include <linux/cpu.h>
#ifdef arch_trigger_cpumask_backtrace
/* For reliability, we're prepared to waste bits here. */
@@ -87,11 +88,16 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- pr_warn("NMI backtrace for cpu %d\n", cpu);
- if (regs)
- show_regs(regs);
- else
- dump_stack();
+ if (regs && cpu_in_idle(instruction_pointer(regs))) {
+ pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
+ cpu, instruction_pointer(regs));
+ } else {
+ pr_warn("NMI backtrace for cpu %d\n", cpu);
+ if (regs)
+ show_regs(regs);
+ else
+ dump_stack();
+ }
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
@@ -888,7 +888,7 @@ static void check_section(const char *modname, struct elf_info *elf,
#define DATA_SECTIONS ".data", ".data.rel"
#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
- ".kprobes.text"
+ ".kprobes.text", ".cpuidle.text"
#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
".fixup", ".entry.text", ".exception.text", ".text.*", \
".coldtext"
@@ -364,6 +364,7 @@ is_mcounted_section_name(char const *const txtname)
strcmp(".spinlock.text", txtname) == 0 ||
strcmp(".irqentry.text", txtname) == 0 ||
strcmp(".kprobes.text", txtname) == 0 ||
+ strcmp(".cpuidle.text", txtname) == 0 ||
strcmp(".text.unlikely", txtname) == 0;
}
@@ -135,6 +135,7 @@ my %text_sections = (
".spinlock.text" => 1,
".irqentry.text" => 1,
".kprobes.text" => 1,
+ ".cpuidle.text" => 1,
".text.unlikely" => 1,
);