@@ -133,7 +133,7 @@ SYM_FUNC_START(_cpu_resume)
 	 */
 	bl	cpu_do_resume
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
 	mov	x0, sp
 	bl	kasan_unpoison_task_stack_below
 #endif
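Why #if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK rather than a plain #ifdef: at this point in the series CONFIG_KASAN_STACK is, to my understanding, an always-defined integer Kconfig symbol (1 with stack instrumentation, 0 without), so an #ifdef would be true in both configurations. A minimal preprocessor sketch of the distinction, assuming that 0-or-1 definition:

	/* Sketch only: assumes CONFIG_KASAN_STACK is always defined as 0 or 1. */
	#define CONFIG_KASAN_STACK 0	/* stack instrumentation compiled out */

	#ifdef CONFIG_KASAN_STACK
	/* Always taken, even for the value 0: #ifdef tests definedness only. */
	#endif

	#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
	/* Taken only when KASAN is enabled and the value is non-zero. */
	#endif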
@@ -112,7 +112,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
 	movq	pt_regs_r14(%rax), %r14
 	movq	pt_regs_r15(%rax), %r15
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
 	/*
 	 * The suspend path may have poisoned some areas deeper in the stack,
 	 * which we now need to unpoison.
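Both call sites hand the live stack pointer to kasan_unpoison_task_stack_below() as a watermark (x0 on arm64; the x86 call sits below the quoted context). For orientation, a simplified sketch of that callee, consistent with the mm/kasan/common.c hunks further down; the base calculation assumes THREAD_SIZE-sized and -aligned kernel stacks and is not a verbatim quote:

	/* Simplified sketch, kernel context assumed; not the verbatim body. */
	asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
	{
		/* Assumes the kernel stack is THREAD_SIZE sized and aligned. */
		void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

		/* Clear stale shadow poison left below the watermark. */
		unpoison_range(base, watermark - base);
	}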
@@ -77,8 +77,6 @@ static inline void kasan_disable_current(void) {}
 
 void kasan_unpoison_range(const void *address, size_t size);
 
-void kasan_unpoison_task_stack(struct task_struct *task);
-
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
@@ -123,8 +121,6 @@ void kasan_restore_multi_shot(bool enabled);
 
 static inline void kasan_unpoison_range(const void *address, size_t size) {}
 
-static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
-
 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
@@ -176,6 +172,12 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 
 #endif /* CONFIG_KASAN */
 
+#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+void kasan_unpoison_task_stack(struct task_struct *task);
+#else
+static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+#endif
+
 #ifdef CONFIG_KASAN_GENERIC
 
 void kasan_cache_shrink(struct kmem_cache *cache);
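The declaration-plus-stub pattern keeps C callers free of ifdefs: with stack instrumentation off, the empty static inline compiles away. A small illustration; the caller is hypothetical, named only for the example:

	#include <linux/kasan.h>
	#include <linux/sched.h>

	/* Hypothetical caller, invented for illustration only. */
	static void example_finish_resume(struct task_struct *task)
	{
		/*
		 * No #ifdef needed at the call site: without
		 * CONFIG_KASAN_STACK this expands to the empty inline
		 * stub and the call disappears entirely.
		 */
		kasan_unpoison_task_stack(task);
	}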
@@ -63,6 +63,7 @@ void kasan_unpoison_range(const void *address, size_t size)
 	unpoison_range(address, size);
 }
 
+#if CONFIG_KASAN_STACK
 static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
 {
 	void *base = task_stack_page(task);
@@ -89,6 +90,7 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 	unpoison_range(base, watermark - base);
 }
+#endif /* CONFIG_KASAN_STACK */
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
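For completeness, a sketch of the first helper now inside the guard, approximating the body the hunk elides; this is reconstructed from the visible context lines, not a verbatim quote:

	/* Approximate sketch of __kasan_unpoison_stack(), kernel context assumed. */
	static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
	{
		void *base = task_stack_page(task);	/* lowest stack address */

		/* Unpoison [base, sp): frames below sp may hold stale poison. */
		unpoison_range(base, sp - base);
	}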