
[RFC,08/14] fork: separate vmap stack allocation and free calls

Message ID 20240311164638.2015063-9-pasha.tatashin@soleen.com (mailing list archive)
State: New
Series: Dynamic Kernel Stacks

Commit Message

Pasha Tatashin March 11, 2024, 4:46 p.m. UTC
In preparation for the dynamic stacks, separate out the
__vmalloc_node_range() and vfree() calls from the vmap-based stack
allocations. The dynamic stacks will use their own variants of these
functions.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
---
 kernel/fork.c | 53 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 22 deletions(-)

Comments

Jeff Xie March 14, 2024, 3:18 p.m. UTC | #1
On Tue, Mar 12, 2024 at 12:47 AM Pasha Tatashin
<pasha.tatashin@soleen.com> wrote:
>
> In preparation for the dynamic stacks, separate out the
> __vmalloc_node_range and vfree calls from the vmap based stack
> allocations. The dynamic stacks will use their own variants of these
> functions.
>
> Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> ---
>  kernel/fork.c | 53 ++++++++++++++++++++++++++++++---------------------
>  1 file changed, 31 insertions(+), 22 deletions(-)
>
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 3004e6ce6c65..bbae5f705773 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -204,6 +204,29 @@ static bool try_release_thread_stack_to_cache(struct vm_struct *vm_area)
>         return false;
>  }
>
> +static inline struct vm_struct *alloc_vmap_stack(int node)
> +{
> +       void *stack;
> +
> +       /*
> +        * Allocated stacks are cached and later reused by new threads,
> +        * so memcg accounting is performed manually on assigning/releasing
> +        * stacks to tasks. Drop __GFP_ACCOUNT.
> +        */
> +       stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
> +                                    VMALLOC_START, VMALLOC_END,
> +                                    THREADINFO_GFP & ~__GFP_ACCOUNT,
> +                                    PAGE_KERNEL,
> +                                    0, node, __builtin_return_address(0));
> +
> +       return (stack) ? find_vm_area(stack) : NULL;
> +}
> +
> +static inline void free_vmap_stack(struct vm_struct *vm_area)
> +{
> +       vfree(vm_area->addr);
> +}
> +
>  static void thread_stack_free_rcu(struct rcu_head *rh)
>  {
>         struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
> @@ -212,7 +235,7 @@ static void thread_stack_free_rcu(struct rcu_head *rh)
>         if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
>                 return;
>
> -       vfree(vm_area->addr);
> +       free_vmap_stack(vm_area);
>  }

I've discovered that free_vmap_stack() can trigger a warning.
It appears that free_vmap_stack() should handle interrupt context and
task context separately, as vfree() does.

[root@JeffXie ]# poweroff
[root@JeffXie ]# umount: devtmpfs busy - remounted read-only
[   93.036872] EXT4-fs (vda): re-mounted
2e1f057b-471f-4c08-a7b8-611457b221f2 ro. Quota mode: none.
The system is going down NOW!
Sent SIGTERM to all processes
Sent SIGKILL to all processes
Requesting system poweroff
[   94.043540] ------------[ cut here ]------------
[   94.043977] WARNING: CPU: 0 PID: 0 at kernel/smp.c:786
smp_call_function_many_cond+0x4e5/0x550
[   94.044744] Modules linked in:
[   94.045024] CPU: 0 PID: 0 Comm: swapper/0 Not tainted
6.8.0-00014-g82270db6e1f0 #91
[   94.045697] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009),
BIOS 1.15.0-1 04/01/2014
[   94.046399] RIP: 0010:smp_call_function_many_cond+0x4e5/0x550
[   94.046914] Code: 48 8b 78 08 48 c7 c1 a0 84 16 81 4c 89 f6 e8 22
11 f6 ff 65 ff 0d 23 38 ec 7e 0f 85 a1 fc ff ff 0f 1f 44 00 00 e9 97
fc ff ff <0f> 0b e9 61
[   94.048509] RSP: 0018:ffffc90000003e48 EFLAGS: 00010206
[   94.048965] RAX: ffffffff82cb3fd0 RBX: ffff88811862cbc0 RCX: 0000000000000003
[   94.049598] RDX: 0000000000000100 RSI: 0000000000000000 RDI: 0000000000000000
[   94.050226] RBP: ffff8881052c5090 R08: 0000000000000000 R09: 0000000000000001
[   94.050861] R10: ffffffff82a060c0 R11: 0000000000008847 R12: ffff888102eb3500
[   94.051480] R13: ffff88811862b800 R14: ffff88811862cc38 R15: 0000000000000000
[   94.052109] FS:  0000000000000000(0000) GS:ffff888118600000(0000)
knlGS:0000000000000000
[   94.052812] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   94.053318] CR2: 00000000004759e0 CR3: 0000000002a2e000 CR4: 0000000000750ef0
[   94.053955] PKRU: 55555554
[   94.054203] Call Trace:
[   94.054433]  <IRQ>
[   94.054632]  ? __warn+0x84/0x140
[   94.054925]  ? smp_call_function_many_cond+0x4e5/0x550
[   94.055362]  ? report_bug+0x199/0x1b0
[   94.055697]  ? handle_bug+0x3c/0x70
[   94.056010]  ? exc_invalid_op+0x18/0x70
[   94.056350]  ? asm_exc_invalid_op+0x1a/0x20
[   94.056728]  ? smp_call_function_many_cond+0x4e5/0x550
[   94.057179]  ? __pfx_do_kernel_range_flush+0x10/0x10
[   94.057622]  on_each_cpu_cond_mask+0x24/0x40
[   94.057999]  flush_tlb_kernel_range+0x98/0xb0
[   94.058390]  free_unmap_vmap_area+0x2d/0x40
[   94.058768]  remove_vm_area+0x3a/0x70
[   94.059094]  free_vmap_stack+0x15/0x60
[   94.059427]  rcu_core+0x2bf/0x980
[   94.059735]  ? rcu_core+0x244/0x980
[   94.060046]  ? kvm_clock_get_cycles+0x18/0x30
[   94.060431]  __do_softirq+0xc2/0x292
[   94.060760]  irq_exit_rcu+0x6a/0x90
[   94.061074]  sysvec_apic_timer_interrupt+0x6e/0x90
[   94.061507]  </IRQ>
[   94.061704]  <TASK>
[   94.061903]  asm_sysvec_apic_timer_interrupt+0x1a/0x20
[   94.062367] RIP: 0010:default_idle+0xf/0x20
[   94.062746] Code: 4c 01 c7 4c 29 c2 e9 72 ff ff ff 90 90 90 90 90
90 90 90 90 90 90 90 90 90 90 90 f3 0f 1e fa eb 07 0f 00 2d 33 b4 2a
00 fb f4 <fa> c3 cc c0
[   94.064342] RSP: 0018:ffffffff82a03e70 EFLAGS: 00000212
[   94.064805] RAX: ffff888118628608 RBX: ffffffff82a0c980 RCX: 0000000000000000
[   94.065429] RDX: 4000000000000000 RSI: ffffffff82725be8 RDI: 000000000000a14c
[   94.066066] RBP: 0000000000000000 R08: 000000000000a14c R09: 0000000000000001
[   94.066705] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
[   94.067311] R13: 0000000000000000 R14: ffffffff82a0c030 R15: 00000000000000ac
[   94.067936]  default_idle_call+0x2c/0xd0
[   94.068284]  do_idle+0x1ce/0x210
[   94.068584]  cpu_startup_entry+0x2a/0x30
[   94.068931]  rest_init+0xc5/0xd0
[   94.069224]  arch_call_rest_init+0xe/0x30
[   94.069597]  start_kernel+0x58e/0x8d0
[   94.069929]  x86_64_start_reservations+0x18/0x30
[   94.070353]  x86_64_start_kernel+0xc6/0xe0
[   94.070725]  secondary_startup_64_no_verify+0x16d/0x17b
[   94.071189]  </TASK>
[   94.071392] ---[ end trace 0000000000000000 ]---
[   95.040718] e1000e: EEE TX LPI TIMER: 00000000
[   95.055005] ACPI: PM: Preparing to enter system sleep state S5
[   95.055619] reboot: Power down


 ./scripts/faddr2line ./vmlinux smp_call_function_many_cond+0x4e5/0x550
smp_call_function_many_cond+0x4e5/0x550:
smp_call_function_many_cond at kernel/smp.c:786 (discriminator 1)

 756 static void smp_call_function_many_cond(const struct cpumask *mask,
 757                                         smp_call_func_t func, void *info,
 758                                         unsigned int scf_flags,
 759                                         smp_cond_func_t cond_func)
[...]
 781          * When @wait we can deadlock when we interrupt between llist_add() and
 782          * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
 783          * csd_lock() on because the interrupt context uses the same csd
 784          * storage.
 785          */
 786         WARN_ON_ONCE(!in_task());   // <<< warning here
[...]
Pasha Tatashin March 14, 2024, 5:14 p.m. UTC | #2
> I've discovered that free_vmap_stack() can trigger a warning.
> It appears that free_vmap_stack() should handle interrupt context and
> task context separately, as vfree() does.

Hi Jeff,

Thank you for reporting this. Yes, it appears free_vmap_stack() may
get called from interrupt context, and yet we call remove_vm_area(),
which takes locks. I will fix it in the next version along the lines
you suggested, by adding an in_interrupt() case.

Thank you,
Pasha
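
A minimal sketch of that approach (an illustration only, not the
posted follow-up) would mirror what vfree() itself does and defer the
free when called from interrupt context, e.g. via the existing
vfree_atomic() helper:

static inline void free_vmap_stack(struct vm_struct *vm_area)
{
	/*
	 * remove_vm_area() takes locks and flushes TLBs, which is not
	 * safe from the softirq context seen in the trace above, so
	 * defer the free to a workqueue in that case.
	 */
	if (in_interrupt())
		vfree_atomic(vm_area->addr);
	else
		vfree(vm_area->addr);
}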

Christophe JAILLET March 17, 2024, 2:51 p.m. UTC | #3
On 11/03/2024 17:46, Pasha Tatashin wrote:
> [...]
> +	return (stack) ? find_vm_area(stack) : NULL;

Nit: superfluous ()
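
That is, with the superfluous parentheses dropped, the return would
read simply:

	return stack ? find_vm_area(stack) : NULL;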

Pasha Tatashin March 17, 2024, 3:15 p.m. UTC | #4
On Sun, Mar 17, 2024 at 10:52 AM Christophe JAILLET
<christophe.jaillet@wanadoo.fr> wrote:
>
> [...]
> > +     return (stack) ? find_vm_area(stack) : NULL;
>
> Nit: superfluous ()

Thank you.


Patch

diff --git a/kernel/fork.c b/kernel/fork.c
index 3004e6ce6c65..bbae5f705773 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -204,6 +204,29 @@  static bool try_release_thread_stack_to_cache(struct vm_struct *vm_area)
 	return false;
 }
 
+static inline struct vm_struct *alloc_vmap_stack(int node)
+{
+	void *stack;
+
+	/*
+	 * Allocated stacks are cached and later reused by new threads,
+	 * so memcg accounting is performed manually on assigning/releasing
+	 * stacks to tasks. Drop __GFP_ACCOUNT.
+	 */
+	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
+				     VMALLOC_START, VMALLOC_END,
+				     THREADINFO_GFP & ~__GFP_ACCOUNT,
+				     PAGE_KERNEL,
+				     0, node, __builtin_return_address(0));
+
+	return (stack) ? find_vm_area(stack) : NULL;
+}
+
+static inline void free_vmap_stack(struct vm_struct *vm_area)
+{
+	vfree(vm_area->addr);
+}
+
 static void thread_stack_free_rcu(struct rcu_head *rh)
 {
 	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
@@ -212,7 +235,7 @@  static void thread_stack_free_rcu(struct rcu_head *rh)
 	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
 		return;
 
-	vfree(vm_area->addr);
+	free_vmap_stack(vm_area);
 }
 
 static void thread_stack_delayed_free(struct task_struct *tsk)
@@ -235,7 +258,7 @@  static int free_vm_stack_cache(unsigned int cpu)
 		if (!vm_area)
 			continue;
 
-		vfree(vm_area->addr);
+		free_vmap_stack(vm_area);
 		cached_vm_stacks[i] = NULL;
 	}
 
@@ -265,7 +288,6 @@  static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 {
 	struct vm_struct *vm_area;
 	int i, j, nr_pages;
-	void *stack;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
 		vm_area = this_cpu_xchg(cached_stacks[i], NULL);
@@ -273,14 +295,13 @@  static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 			continue;
 
 		if (memcg_charge_kernel_stack(vm_area)) {
-			vfree(vm_area->addr);
+			free_vmap_stack(vm_area);
 			return -ENOMEM;
 		}
 
 		/* Reset stack metadata. */
 		kasan_unpoison_range(vm_area->addr, THREAD_SIZE);
-
-		stack = kasan_reset_tag(vm_area->addr);
+		tsk->stack = kasan_reset_tag(vm_area->addr);
 
 		/* Clear stale pointers from reused stack. */
 		nr_pages = vm_area->nr_pages;
@@ -288,26 +309,15 @@  static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 			clear_page(page_address(vm_area->pages[j]));
 
 		tsk->stack_vm_area = vm_area;
-		tsk->stack = stack;
 		return 0;
 	}
 
-	/*
-	 * Allocated stacks are cached and later reused by new threads,
-	 * so memcg accounting is performed manually on assigning/releasing
-	 * stacks to tasks. Drop __GFP_ACCOUNT.
-	 */
-	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
-				     VMALLOC_START, VMALLOC_END,
-				     THREADINFO_GFP & ~__GFP_ACCOUNT,
-				     PAGE_KERNEL,
-				     0, node, __builtin_return_address(0));
-	if (!stack)
+	vm_area = alloc_vmap_stack(node);
+	if (!vm_area)
 		return -ENOMEM;
 
-	vm_area = find_vm_area(stack);
 	if (memcg_charge_kernel_stack(vm_area)) {
-		vfree(stack);
+		free_vmap_stack(vm_area);
 		return -ENOMEM;
 	}
 	/*
@@ -316,8 +326,7 @@  static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	 * so cache the vm_struct.
 	 */
 	tsk->stack_vm_area = vm_area;
-	stack = kasan_reset_tag(stack);
-	tsk->stack = stack;
+	tsk->stack = kasan_reset_tag(vm_area->addr);
 	return 0;
 }