arch/arm/Kconfig | 3 +
arch/arm/include/asm/memory.h | 1 +
arch/arm/include/asm/suspend.h | 6 ++
arch/arm/kernel/cpu.c | 65 ++++++++++++++++++++++++++
arch/arm/kernel/swsusp.S | 92 +++++++++++++++++++++++++++++++++++++
arch/arm/kernel/vmlinux.lds.S | 3 +-
include/asm-generic/vmlinux.lds.h | 2 +-
7 files changed, 170 insertions(+), 2 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -198,6 +198,9 @@ config VECTORS_BASE
config ARCH_HAS_CPU_IDLE_WAIT
def_bool y
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool n
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -191,6 +191,7 @@ static inline void *phys_to_virt(unsigned long x)
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
/*
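__pa_symbol() follows the historical x86 definition: RELOC_HIDE() keeps the compiler from reasoning about arithmetic on a symbol's address, which is the accepted way to take the physical address of a linker- or C-visible symbol. A minimal sketch of the intended use, assuming only symbol addresses are ever passed in; symbol_to_pfn() is illustrative and not part of the patch:

	/* Illustrative helper: page frame number of a kernel symbol.
	 * pfn_is_nosave() in cpu.c below does exactly this for
	 * __nosave_begin; PAGE_SHIFT comes from <asm/page.h>.
	 */
	extern const void __nosave_begin;

	static inline unsigned long symbol_to_pfn(const void *sym)
	{
		return __pa_symbol(sym) >> PAGE_SHIFT;
	}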
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
new file mode 100644
--- /dev/null
+++ b/arch/arm/include/asm/suspend.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_ARM_SUSPEND_H
+#define __ASM_ARM_SUSPEND_H
+
+static inline int arch_prepare_suspend(void) { return 0; }
+
+#endif /* __ASM_ARM_SUSPEND_H */
diff --git a/arch/arm/kernel/cpu.c b/arch/arm/kernel/cpu.c
new file mode 100644
--- /dev/null
+++ b/arch/arm/kernel/cpu.c
@@ -0,0 +1,65 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Derived from work on ARM hibernation support by:
+ *
+ * Ubuntu project, hibernation support for mach-dove
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ * https://lkml.org/lkml/2010/6/18/4
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <asm/tlbflush.h>
+
+extern const void __nosave_begin, __nosave_end;
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
+
+ return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
+
+void save_processor_state(void)
+{
+ flush_thread();
+}
+
+void restore_processor_state(void)
+{
+ local_flush_tlb_all();
+}
+
+u8 __swsusp_arch_ctx[PAGE_SIZE] __aligned(PAGE_SIZE);	/* CPU state */
+u8 __swsusp_resume_stk[PAGE_SIZE/2] __nosavedata __aligned(8); /* resume stack */
+
+/*
+ * The hibernation core loads the image into the restore_pblist linked
+ * list, for swsusp_arch_resume() to copy back to the proper destinations.
+ *
+ * To make this work when resume is triggered from initramfs, the
+ * pagetables need to be switched to allow writes to all of kernel memory.
+ */
+void notrace __swsusp_arch_restore_prepare(void)
+{
+ cpu_switch_mm(swapper_pg_dir, current->active_mm);
+}
+
+void notrace __swsusp_arch_restore_image(void)
+{
+ /* restore_pblist is declared in <linux/suspend.h> */
+ struct pbe *pbe;
+
+ for (pbe = restore_pblist; pbe; pbe = pbe->next)
+ copy_page(pbe->orig_address, pbe->address);
+}
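For reference, restore_pblist and struct pbe come from <linux/suspend.h>: each page backup entry pairs the scratch page the image loader filled with that page's original destination, which is all the copy loop above relies on:

	/* Shape of the list consumed above, as declared in
	 * <linux/suspend.h>; reproduced here for reference only.
	 */
	struct pbe {
		void *address;		/* address of the loaded copy */
		void *orig_address;	/* original address of the page */
		struct pbe *next;
	};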
diff --git a/arch/arm/kernel/swsusp.S b/arch/arm/kernel/swsusp.S
new file mode 100644
--- /dev/null
+++ b/arch/arm/kernel/swsusp.S
@@ -0,0 +1,92 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Based on work by:
+ *
+ * Ubuntu project, hibernation support for mach-dove,
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ * https://lkml.org/lkml/2010/6/18/4
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/linkage.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/ptrace.h>
+
+/*
+ * Save the current CPU state before suspend / poweroff.
+ */
+ENTRY(swsusp_arch_suspend)
+ ldr r0, =__swsusp_arch_ctx
+ mrs r1, cpsr
+ str r1, [r0], #4 /* CPSR */
+ARM( msr cpsr_c, #SYSTEM_MODE )
+THUMB( mov r2, #SYSTEM_MODE )
+THUMB( msr cpsr_c, r2 )
+ stm r0!, {r4-r12,lr} /* nonvolatile regs */
+ str sp, [r0], #4
+ARM( msr cpsr_c, #SVC_MODE )
+THUMB( mov r2, #SVC_MODE )
+THUMB( msr cpsr_c, r2 )
+ mrs r2, spsr
+ stm r0!, {r2,lr} /* SVC SPSR, SVC regs */
+ str sp, [r0], #4
+ msr cpsr_c, r1 /* restore mode at entry */
+ push {r4, lr} /* pair keeps sp 8-byte aligned for the C call */
+ bl __save_processor_state
+ pop {r4, lr}
+ b swsusp_save
+ENDPROC(swsusp_arch_suspend)
+
+/*
+ * Restore the memory image from the pagelists, and load the CPU registers
+ * from saved state.
+ */
+ENTRY(swsusp_arch_resume)
+ bl __swsusp_arch_restore_prepare
+ /*
+ * Switch to a stack in nosavedata so that restoring the image
+ * cannot clobber the stack we are running on.
+ */
+ ldr sp, =(__swsusp_resume_stk + PAGE_SIZE / 2)
+ bl __swsusp_arch_restore_image
+
+ /*
+ * Restore the CPU registers.
+ */
+ ldr r0, =__swsusp_arch_ctx
+ ldr r1, [r0], #4 /* CPSR saved at suspend entry */
+ARM( msr cpsr_c, #SYSTEM_MODE )
+THUMB( mov r2, #SYSTEM_MODE )
+THUMB( msr cpsr_c, r2 )
+ /* r1 is applied only after the banked registers are reloaded */
+ ldm r0!, {r4-r12,lr}
+ ldr sp, [r0], #4
+ARM( msr cpsr_c, #SVC_MODE )
+THUMB( mov r2, #SVC_MODE )
+THUMB( msr cpsr_c, r2 )
+ ldm r0!, {r2,lr}
+ ldr sp, [r0], #4
+ msr spsr_cxsf, r2
+ msr cpsr_c, r1
+
+ /*
+ * From here on we have a valid stack again. Core state has not
+ * been restored yet; redirect to the machine-specific
+ * implementation to get that done.
+ * Resume has effectively succeeded at this point; if the
+ * machine-specific code still needs to fail, it must panic.
+ */
+ mov r1, #0
+ push {r1,lr}
+ bl __restore_processor_state /* restore core state */
+ pop {r0,pc}
+ENDPROC(swsusp_arch_resume)
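The fixed offsets that swsusp_arch_suspend stores and swsusp_arch_resume reloads amount to the following layout of __swsusp_arch_ctx. The struct is purely documentary, nothing in the patch defines it; one word per store, in store order:

	/* Hypothetical C view of __swsusp_arch_ctx: field order matches
	 * the str/stm sequence in swsusp.S above.
	 */
	struct swsusp_arch_ctx {
		u32 cpsr;		/* CPSR at suspend entry */
		u32 sys_regs[10];	/* r4-r12 and lr, SYSTEM mode */
		u32 sys_sp;		/* banked sp, SYSTEM (== user) mode */
		u32 svc_spsr;		/* SPSR of SVC mode */
		u32 svc_lr;		/* banked lr, SVC mode */
		u32 svc_sp;		/* banked sp, SVC mode */
	};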
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -153,7 +153,6 @@ SECTIONS
__init_end = .;
#endif
- NOSAVE_DATA
CACHELINE_ALIGNED_DATA(32)
/*
@@ -176,6 +175,8 @@ SECTIONS
}
_edata_loc = __data_loc + SIZEOF(.data);
+ NOSAVE_DATA
+
#ifdef CONFIG_HAVE_TCM
/*
* We align everything to a page boundary so we can
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -171,7 +171,7 @@
#define NOSAVE_DATA \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_begin) = .; \
- *(.data.nosave) \
+ .data.nosave : { *(.data.nosave) } \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_end) = .;
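Usage note: anything tagged __nosavedata (from <linux/init.h> in kernels of this vintage) lands in .data.nosave between the two symbols above. pfn_is_nosave() reports those pages to the hibernation core, so they are neither saved into the image nor overwritten on restore, which is what makes __swsusp_resume_stk safe to run on while memory is being rewritten. A short sketch; the variable is illustrative only:

	#include <linux/init.h>	/* __nosavedata */

	/* The restore pass never writes this page, so whatever the boot
	 * kernel puts here stays valid while the image is copied back.
	 */
	static u8 scratch_across_resume[64] __nosavedata;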