[RFC,v3,24/36] kmsan: disable instrumentation of certain functions

Message ID 20191122112621.204798-25-glider@google.com (mailing list archive)
State New, archived
Series Add KernelMemorySanitizer infrastructure

Commit Message

Alexander Potapenko Nov. 22, 2019, 11:26 a.m. UTC
Some functions are called from handwritten assembly, and therefore don't
have their arguments' metadata fully set up by the instrumentation code.
Mark them with __no_sanitize_memory to prevent false positives from
spreading further.
Certain functions perform task switching, so the value of |current|
changes as they proceed. Because the KMSAN state pointer is read only
once at the beginning of a function, touching it after |current| has
changed may be dangerous.

Signed-off-by: Alexander Potapenko <glider@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: linux-mm@kvack.org
---
v3:
 - removed TODOs from comments

Change-Id: I684d23dac5a22eb0a4cea71993cb934302b17cea
---
 arch/x86/entry/common.c                |  1 +
 arch/x86/include/asm/irq_regs.h        |  1 +
 arch/x86/include/asm/syscall_wrapper.h |  1 +
 arch/x86/kernel/apic/apic.c            |  1 +
 arch/x86/kernel/dumpstack_64.c         |  1 +
 arch/x86/kernel/process_64.c           |  5 +++++
 arch/x86/kernel/traps.c                | 12 ++++++++++--
 arch/x86/kernel/uprobes.c              |  7 ++++++-
 kernel/profile.c                       |  1 +
 kernel/sched/core.c                    |  6 ++++++
 10 files changed, 33 insertions(+), 3 deletions(-)
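
For context, __no_sanitize_memory is a per-function attribute that tells a
KMSAN-enabled Clang not to instrument the annotated function, so the function
neither checks nor updates metadata. A minimal sketch of how such an attribute
could be defined is below; the exact definition and header location used by
this series are not shown here, so treat the snippet as an assumption rather
than the series' actual code:

#ifndef __has_feature
#define __has_feature(x) 0	/* fallback for compilers without __has_feature */
#endif

#if __has_feature(memory_sanitizer)
/* Ask Clang to skip KMSAN instrumentation for the annotated function. */
#define __no_sanitize_memory __attribute__((no_sanitize("kernel-memory")))
#else
/* Without KMSAN the annotation expands to nothing and costs nothing. */
#define __no_sanitize_memory
#endif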

Comments

Andrey Konovalov Nov. 29, 2019, 2:59 p.m. UTC | #1
On Fri, Nov 22, 2019 at 12:27 PM <glider@google.com> wrote:
>
> Some functions are called from handwritten assembly, and therefore don't
> have their arguments' metadata fully set up by the instrumentation code.
> Mark them with __no_sanitize_memory to prevent false positives from
> spreading further.
> Certain functions perform task switching, so the value of |current|
> changes as they proceed. Because the KMSAN state pointer is read only
> once at the beginning of a function, touching it after |current| has
> changed may be dangerous.
>
> Signed-off-by: Alexander Potapenko <glider@google.com>
> To: Alexander Potapenko <glider@google.com>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Vegard Nossum <vegard.nossum@oracle.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: linux-mm@kvack.org
> ---
> v3:
>  - removed TODOs from comments
>
> Change-Id: I684d23dac5a22eb0a4cea71993cb934302b17cea
> ---
>  arch/x86/entry/common.c                |  1 +
>  arch/x86/include/asm/irq_regs.h        |  1 +
>  arch/x86/include/asm/syscall_wrapper.h |  1 +
>  arch/x86/kernel/apic/apic.c            |  1 +
>  arch/x86/kernel/dumpstack_64.c         |  1 +
>  arch/x86/kernel/process_64.c           |  5 +++++
>  arch/x86/kernel/traps.c                | 12 ++++++++++--
>  arch/x86/kernel/uprobes.c              |  7 ++++++-
>  kernel/profile.c                       |  1 +
>  kernel/sched/core.c                    |  6 ++++++
>  10 files changed, 33 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
> index 3f8e22615812..0dd5b2acb355 100644
> --- a/arch/x86/entry/common.c
> +++ b/arch/x86/entry/common.c
> @@ -275,6 +275,7 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
>  }
>
>  #ifdef CONFIG_X86_64
> +__no_sanitize_memory
>  __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
>  {
>         struct thread_info *ti;
> diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
> index 187ce59aea28..d65a00bd6f02 100644
> --- a/arch/x86/include/asm/irq_regs.h
> +++ b/arch/x86/include/asm/irq_regs.h
> @@ -14,6 +14,7 @@
>
>  DECLARE_PER_CPU(struct pt_regs *, irq_regs);
>
> +__no_sanitize_memory
>  static inline struct pt_regs *get_irq_regs(void)
>  {
>         return __this_cpu_read(irq_regs);
> diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
> index e046a405743d..43910ce1b53b 100644
> --- a/arch/x86/include/asm/syscall_wrapper.h
> +++ b/arch/x86/include/asm/syscall_wrapper.h
> @@ -159,6 +159,7 @@
>         ALLOW_ERROR_INJECTION(__x64_sys##name, ERRNO);                  \
>         static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));     \
>         static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
> +       __no_sanitize_memory                                            \
>         asmlinkage long __x64_sys##name(const struct pt_regs *regs)     \
>         {                                                               \
>                 return __se_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\
> diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
> index 9e2dd2b296cd..7b24bda22c38 100644
> --- a/arch/x86/kernel/apic/apic.c
> +++ b/arch/x86/kernel/apic/apic.c
> @@ -1118,6 +1118,7 @@ static void local_apic_timer_interrupt(void)
>   * [ if a single-CPU system runs an SMP kernel then we call the local
>   *   interrupt as well. Thus we cannot inline the local irq ... ]
>   */
> +__no_sanitize_memory /* |regs| may be uninitialized */

The comment style around __no_sanitize_memory looks very different for
different call sites, perhaps it makes sense to unify it somehow.

>  __visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
>  {
>         struct pt_regs *old_regs = set_irq_regs(regs);
> diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
> index 753b8cfe8b8a..ba883d282a43 100644
> --- a/arch/x86/kernel/dumpstack_64.c
> +++ b/arch/x86/kernel/dumpstack_64.c
> @@ -143,6 +143,7 @@ static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
>         return true;
>  }
>
> +__no_sanitize_memory
>  int get_stack_info(unsigned long *stack, struct task_struct *task,
>                    struct stack_info *info, unsigned long *visit_mask)
>  {
> diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
> index af64519b2695..70e33150a83a 100644
> --- a/arch/x86/kernel/process_64.c
> +++ b/arch/x86/kernel/process_64.c
> @@ -500,6 +500,11 @@ void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
>   * Kprobes not supported here. Set the probe on schedule instead.
>   * Function graph tracer not supported too.
>   */
> +/*
> + * Avoid touching KMSAN state or reporting anything here, as __switch_to() does
> + * weird things with tasks.
> + */
> +__no_sanitize_memory
>  __visible __notrace_funcgraph struct task_struct *
>  __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
>  {
> diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
> index 4bb0f8447112..a94282d1f60b 100644
> --- a/arch/x86/kernel/traps.c
> +++ b/arch/x86/kernel/traps.c
> @@ -618,7 +618,10 @@ NOKPROBE_SYMBOL(do_int3);
>   * Help handler running on a per-cpu (IST or entry trampoline) stack
>   * to switch to the normal thread stack if the interrupted code was in
>   * user mode. The actual stack switch is done in entry_64.S
> + *
> + * This function switches the registers - don't instrument it with KMSAN!
>   */
> +__no_sanitize_memory
>  asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
>  {
>         struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
> @@ -634,6 +637,11 @@ struct bad_iret_stack {
>  };
>
>  asmlinkage __visible notrace
> +/*
> + * Dark magic happening here, let's not instrument this function.
> + * Also avoid copying any metadata by using raw __memmove().
> + */
> +__no_sanitize_memory
>  struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
>  {
>         /*
> @@ -648,10 +656,10 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
>                 (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
>
>         /* Copy the IRET target to the new stack. */
> -       memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
> +       __memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
>
>         /* Copy the remainder of the stack from the current stack. */
> -       memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
> +       __memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

Looks like this change should go into a separate patch.

>
>         BUG_ON(!user_mode(&new_stack->regs));
>         return new_stack;
> diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
> index 8cd745ef8c7b..bcd4bf5a909f 100644
> --- a/arch/x86/kernel/uprobes.c
> +++ b/arch/x86/kernel/uprobes.c
> @@ -8,6 +8,7 @@
>   *     Jim Keniston
>   */
>  #include <linux/kernel.h>
> +#include <linux/kmsan-checks.h>
>  #include <linux/sched.h>
>  #include <linux/ptrace.h>
>  #include <linux/uprobes.h>
> @@ -997,9 +998,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
>  int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
>  {
>         struct die_args *args = data;
> -       struct pt_regs *regs = args->regs;
> +       struct pt_regs *regs;
>         int ret = NOTIFY_DONE;
>
> +       kmsan_unpoison_shadow(args, sizeof(*args));
> +       regs = args->regs;
> +       if (regs)
> +               kmsan_unpoison_shadow(regs, sizeof(*regs));

This one as well.

>         /* We are only interested in userspace traps */
>         if (regs && !user_mode(regs))
>                 return NOTIFY_DONE;
> diff --git a/kernel/profile.c b/kernel/profile.c
> index af7c94bf5fa1..835a5b66d1a4 100644
> --- a/kernel/profile.c
> +++ b/kernel/profile.c
> @@ -399,6 +399,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
>  }
>  EXPORT_SYMBOL_GPL(profile_hits);
>
> +__no_sanitize_memory
>  void profile_tick(int type)
>  {
>         struct pt_regs *regs = get_irq_regs();
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index dd05a378631a..674d36fe9d44 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -475,6 +475,7 @@ void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
>                 put_task_struct(task);
>  }
>
> +__no_sanitize_memory /* context switching here */
>  void wake_up_q(struct wake_q_head *head)
>  {
>         struct wake_q_node *node = head->first;
> @@ -3180,6 +3181,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
>   * past. prev == current is still correct but we need to recalculate this_rq
>   * because prev may have moved to another CPU.
>   */
> +__no_sanitize_memory /* |current| changes here */
>  static struct rq *finish_task_switch(struct task_struct *prev)
>         __releases(rq->lock)
>  {
> @@ -3986,6 +3988,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
>   *
>   * WARNING: must be called with preemption disabled!
>   */
> +__no_sanitize_memory /* |current| changes here */
>  static void __sched notrace __schedule(bool preempt)
>  {
>         struct task_struct *prev, *next;
> @@ -4605,6 +4608,7 @@ int task_prio(const struct task_struct *p)
>   *
>   * Return: 1 if the CPU is currently idle. 0 otherwise.
>   */
> +__no_sanitize_memory /* nothing to report here */
>  int idle_cpu(int cpu)
>  {
>         struct rq *rq = cpu_rq(cpu);
> @@ -6544,6 +6548,7 @@ static struct kmem_cache *task_group_cache __read_mostly;
>  DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
>  DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
>
> +__no_sanitize_memory
>  void __init sched_init(void)
>  {
>         unsigned long ptr = 0;
> @@ -6716,6 +6721,7 @@ static inline int preempt_count_equals(int preempt_offset)
>         return (nested == preempt_offset);
>  }
>
> +__no_sanitize_memory /* expect the arguments to be initialized */
>  void __might_sleep(const char *file, int line, int preempt_offset)
>  {
>         /*
> --
> 2.24.0.432.g9d3f5f5b63-goog
>
Alexander Potapenko Dec. 18, 2019, 10:02 a.m. UTC | #2
> The comment style around __no_sanitize_memory looks very different for
> different call sites, perhaps it makes sense to unify it somehow.

Done in v4.

> > +/*
> > + * Dark magic happening here, let's not instrument this function.
> > + * Also avoid copying any metadata by using raw __memmove().
> > + */
> > +__no_sanitize_memory
> >  struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
> >  {
> >         /*
> > @@ -648,10 +656,10 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
> >                 (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
> >
> >         /* Copy the IRET target to the new stack. */
> > -       memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
> > +       __memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
> >
> >         /* Copy the remainder of the stack from the current stack. */
> > -       memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
> > +       __memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
>
> Looks like this change should go into a separate patch.

I disagree. Both the __no_sanitize_memory annotation and __memmove()
calls serve the same purpose: avoid calling KMSAN code from
fixup_bad_iret().
I don't think it makes sense to separate them.
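
To make the distinction concrete: under KMSAN the regular memmove() also has
to propagate shadow/origin metadata for the copied range, while __memmove()
copies only the bytes. A rough illustration with a hypothetical helper name
(not the series' actual interceptor) follows:

#include <linux/string.h>	/* __memmove() */

/* Hypothetical helper: copy KMSAN shadow and origins for a byte range. */
void kmsan_copy_metadata(void *dst, const void *src, size_t n);

/*
 * Illustration only: with KMSAN, memmove() conceptually behaves like this.
 * fixup_bad_iret() runs on a half-switched stack and must not execute the
 * metadata step, hence the raw __memmove() plus __no_sanitize_memory.
 */
static void *memmove_with_metadata(void *dst, const void *src, size_t n)
{
	void *ret = __memmove(dst, src, n);	/* copy the data itself */

	kmsan_copy_metadata(dst, src, n);	/* copy shadow + origins */
	return ret;
}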


> > +       kmsan_unpoison_shadow(args, sizeof(*args));
> > +       regs = args->regs;
> > +       if (regs)
> > +               kmsan_unpoison_shadow(regs, sizeof(*regs));
>
> This one as well.

Done in v4.
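
For reference, kmsan_unpoison_shadow() marks a byte range as initialized in
KMSAN's shadow so later reads of it do not produce reports. The hunk above
follows the general pattern for data handed over from uninstrumented
(assembly) entry code; a sketch of that pattern, with includes assumed from
this series:

#include <linux/kdebug.h>	/* struct die_args */
#include <linux/kmsan-checks.h>	/* kmsan_unpoison_shadow() (name per this series) */
#include <linux/notifier.h>
#include <linux/ptrace.h>	/* struct pt_regs */

/* Sketch of the pattern, not a replacement for the code in the patch. */
static int sketch_exception_notify(struct notifier_block *self,
				   unsigned long val, void *data)
{
	struct die_args *args = data;

	/* The caller is not instrumented, so the argument looks uninitialized. */
	kmsan_unpoison_shadow(args, sizeof(*args));
	if (args->regs)
		kmsan_unpoison_shadow(args->regs, sizeof(*args->regs));

	return NOTIFY_DONE;
}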

Patch

diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 3f8e22615812..0dd5b2acb355 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -275,6 +275,7 @@  __visible inline void syscall_return_slowpath(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_X86_64
+__no_sanitize_memory
 __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 {
 	struct thread_info *ti;
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
index 187ce59aea28..d65a00bd6f02 100644
--- a/arch/x86/include/asm/irq_regs.h
+++ b/arch/x86/include/asm/irq_regs.h
@@ -14,6 +14,7 @@ 
 
 DECLARE_PER_CPU(struct pt_regs *, irq_regs);
 
+__no_sanitize_memory
 static inline struct pt_regs *get_irq_regs(void)
 {
 	return __this_cpu_read(irq_regs);
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
index e046a405743d..43910ce1b53b 100644
--- a/arch/x86/include/asm/syscall_wrapper.h
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -159,6 +159,7 @@ 
 	ALLOW_ERROR_INJECTION(__x64_sys##name, ERRNO);			\
 	static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));	\
 	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+	__no_sanitize_memory						\
 	asmlinkage long __x64_sys##name(const struct pt_regs *regs)	\
 	{								\
 		return __se_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 9e2dd2b296cd..7b24bda22c38 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1118,6 +1118,7 @@  static void local_apic_timer_interrupt(void)
  * [ if a single-CPU system runs an SMP kernel then we call the local
  *   interrupt as well. Thus we cannot inline the local irq ... ]
  */
+__no_sanitize_memory /* |regs| may be uninitialized */
 __visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 753b8cfe8b8a..ba883d282a43 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -143,6 +143,7 @@  static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 	return true;
 }
 
+__no_sanitize_memory
 int get_stack_info(unsigned long *stack, struct task_struct *task,
 		   struct stack_info *info, unsigned long *visit_mask)
 {
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index af64519b2695..70e33150a83a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -500,6 +500,11 @@  void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
  * Kprobes not supported here. Set the probe on schedule instead.
  * Function graph tracer not supported too.
  */
+/*
+ * Avoid touching KMSAN state or reporting anything here, as __switch_to() does
+ * weird things with tasks.
+ */
+__no_sanitize_memory
 __visible __notrace_funcgraph struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 4bb0f8447112..a94282d1f60b 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -618,7 +618,10 @@  NOKPROBE_SYMBOL(do_int3);
  * Help handler running on a per-cpu (IST or entry trampoline) stack
  * to switch to the normal thread stack if the interrupted code was in
  * user mode. The actual stack switch is done in entry_64.S
+ *
+ * This function switches the registers - don't instrument it with KMSAN!
  */
+__no_sanitize_memory
 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
 	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
@@ -634,6 +637,11 @@  struct bad_iret_stack {
 };
 
 asmlinkage __visible notrace
+/*
+ * Dark magic happening here, let's not instrument this function.
+ * Also avoid copying any metadata by using raw __memmove().
+ */
+__no_sanitize_memory
 struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 {
 	/*
@@ -648,10 +656,10 @@  struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 		(struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
 	/* Copy the IRET target to the new stack. */
-	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
+	__memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
 
 	/* Copy the remainder of the stack from the current stack. */
-	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
+	__memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
 
 	BUG_ON(!user_mode(&new_stack->regs));
 	return new_stack;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 8cd745ef8c7b..bcd4bf5a909f 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -8,6 +8,7 @@ 
  *	Jim Keniston
  */
 #include <linux/kernel.h>
+#include <linux/kmsan-checks.h>
 #include <linux/sched.h>
 #include <linux/ptrace.h>
 #include <linux/uprobes.h>
@@ -997,9 +998,13 @@  int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	struct die_args *args = data;
-	struct pt_regs *regs = args->regs;
+	struct pt_regs *regs;
 	int ret = NOTIFY_DONE;
 
+	kmsan_unpoison_shadow(args, sizeof(*args));
+	regs = args->regs;
+	if (regs)
+		kmsan_unpoison_shadow(regs, sizeof(*regs));
 	/* We are only interested in userspace traps */
 	if (regs && !user_mode(regs))
 		return NOTIFY_DONE;
diff --git a/kernel/profile.c b/kernel/profile.c
index af7c94bf5fa1..835a5b66d1a4 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -399,6 +399,7 @@  void profile_hits(int type, void *__pc, unsigned int nr_hits)
 }
 EXPORT_SYMBOL_GPL(profile_hits);
 
+__no_sanitize_memory
 void profile_tick(int type)
 {
 	struct pt_regs *regs = get_irq_regs();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dd05a378631a..674d36fe9d44 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -475,6 +475,7 @@  void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
 		put_task_struct(task);
 }
 
+__no_sanitize_memory /* context switching here */
 void wake_up_q(struct wake_q_head *head)
 {
 	struct wake_q_node *node = head->first;
@@ -3180,6 +3181,7 @@  prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * past. prev == current is still correct but we need to recalculate this_rq
  * because prev may have moved to another CPU.
  */
+__no_sanitize_memory /* |current| changes here */
 static struct rq *finish_task_switch(struct task_struct *prev)
 	__releases(rq->lock)
 {
@@ -3986,6 +3988,7 @@  pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  *
  * WARNING: must be called with preemption disabled!
  */
+__no_sanitize_memory /* |current| changes here */
 static void __sched notrace __schedule(bool preempt)
 {
 	struct task_struct *prev, *next;
@@ -4605,6 +4608,7 @@  int task_prio(const struct task_struct *p)
  *
  * Return: 1 if the CPU is currently idle. 0 otherwise.
  */
+__no_sanitize_memory /* nothing to report here */
 int idle_cpu(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -6544,6 +6548,7 @@  static struct kmem_cache *task_group_cache __read_mostly;
 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
 
+__no_sanitize_memory
 void __init sched_init(void)
 {
 	unsigned long ptr = 0;
@@ -6716,6 +6721,7 @@  static inline int preempt_count_equals(int preempt_offset)
 	return (nested == preempt_offset);
 }
 
+__no_sanitize_memory /* expect the arguments to be initialized */
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
 	/*