[mm,v3,26/38] kasan, vmalloc: don't unpoison VM_ALLOC pages before mapping

Message ID 1a2b5e3047faf05e5c11a9080c3f97a9b9b4c383.1639432170.git.andreyknvl@google.com (mailing list archive)
State New, archived
Series kasan, vmalloc, arm64: add vmalloc tagging support for SW/HW_TAGS

Commit Message

andrey.konovalov@linux.dev Dec. 13, 2021, 9:54 p.m. UTC
From: Andrey Konovalov <andreyknvl@google.com>

Make KASAN unpoison vmalloc mappings after they have been mapped in,
when that is possible: for vmalloc() (identified via VM_ALLOC) and
vm_map_ram().

The reasons for this are:

- For vmalloc() and vm_map_ram(): pages don't get unpoisoned in case
  mapping them fails.
- For vmalloc(): HW_TAGS KASAN needs pages to be mapped to set tags via
  kasan_unpoison_vmalloc().

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>

---

Changes v2->v3:
- Update patch description.
---
 mm/vmalloc.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)
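
To illustrate the ordering constraint described above, here is a minimal
userspace C sketch. The map_pages() and unpoison() helpers are hypothetical
stand-ins for vmap_pages_range() and kasan_unpoison_vmalloc(), not the kernel
implementation; the point is only that marking a region accessible (and,
under HW_TAGS, writing tags into its backing memory) has to happen after the
mapping has succeeded, so a failed mapping leaves the region poisoned.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for vmap_pages_range(): may fail. */
static bool map_pages(bool simulate_failure)
{
	return !simulate_failure;
}

/*
 * Hypothetical stand-in for kasan_unpoison_vmalloc(): under HW_TAGS this
 * step writes memory tags into the backing pages, so it requires the
 * pages to already be mapped.
 */
static void *unpoison(void *addr, size_t size)
{
	printf("unpoisoned %zu bytes at %p\n", size, addr);
	return addr;
}

/* Ordering used by the patch: map first, unpoison only on success. */
static void *map_then_unpoison(void *addr, size_t size, bool simulate_failure)
{
	if (!map_pages(simulate_failure))
		return NULL;	/* mapping failed: region stays poisoned */
	return unpoison(addr, size);
}

int main(void)
{
	char buf[64];

	/* Success: the region is mapped, then marked accessible. */
	map_then_unpoison(buf, sizeof(buf), false);

	/* Failure: no unpoisoning happens, unlike with the old ordering. */
	if (!map_then_unpoison(buf, sizeof(buf), true))
		printf("mapping failed, region left poisoned\n");
	return 0;
}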

Comments

Alexander Potapenko Dec. 16, 2021, 7:07 p.m. UTC | #1
On Mon, Dec 13, 2021 at 10:54 PM <andrey.konovalov@linux.dev> wrote:
>
> From: Andrey Konovalov <andreyknvl@google.com>
>
> Make KASAN unpoison vmalloc mappings after they have been mapped in,
> when that is possible: for vmalloc() (identified via VM_ALLOC) and
> vm_map_ram().

The subject says "don't unpoison VM_ALLOC pages", whereas the
description says "unpoison VM_ALLOC pages", or am I missing something?

> The reasons for this are:
>
> - For vmalloc() and vm_map_ram(): pages don't get unpoisoned in case
>   mapping them fails.
> - For vmalloc(): HW_TAGS KASAN needs pages to be mapped to set tags via
>   kasan_unpoison_vmalloc().
>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
>
> ---
>
> Changes v2->v3:
> - Update patch description.
> ---
>  mm/vmalloc.c | 26 ++++++++++++++++++++++----
>  1 file changed, 22 insertions(+), 4 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 58bd2f7f86d7..9a6862e274df 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2208,14 +2208,15 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
>                 mem = (void *)addr;
>         }
>
> -       mem = kasan_unpoison_vmalloc(mem, size);
> -
>         if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
>                                 pages, PAGE_SHIFT) < 0) {
>                 vm_unmap_ram(mem, count);
>                 return NULL;
>         }
>
> +       /* Mark the pages as accessible after they were mapped in. */
> +       mem = kasan_unpoison_vmalloc(mem, size);
> +
>         return mem;
>  }
>  EXPORT_SYMBOL(vm_map_ram);
> @@ -2443,7 +2444,14 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
>
>         setup_vmalloc_vm(area, va, flags, caller);
>
> -       area->addr = kasan_unpoison_vmalloc(area->addr, requested_size);
> +       /*
> +        * For VM_ALLOC mappings, __vmalloc_node_range() marks the pages as
> +        * accessible after they are mapped in.
> +        * Otherwise, as the pages can be mapped outside of vmalloc code,
> +        * mark them now as a best-effort approach.
> +        */
> +       if (!(flags & VM_ALLOC))
> +               area->addr = kasan_unpoison_vmalloc(area->addr, requested_size);
>
>         return area;
>  }
> @@ -3104,6 +3112,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
>         if (!addr)
>                 goto fail;
>
> +       /*
> +        * Mark the pages for VM_ALLOC mappings as accessible after they were
> +        * mapped in.
> +        */
> +       addr = kasan_unpoison_vmalloc(addr, real_size);
> +
>         /*
>          * In this function, newly allocated vm_struct has VM_UNINITIALIZED
>          * flag. It means that vm_struct is not fully initialized.
> @@ -3799,7 +3813,11 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>         }
>         spin_unlock(&vmap_area_lock);
>
> -       /* mark allocated areas as accessible */
> +       /*
> +        * Mark allocated areas as accessible.
> +        * As the pages are mapped outside of vmalloc code,
> +        * mark them now as a best-effort approach.
> +        */
>         for (area = 0; area < nr_vms; area++)
>                 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
>                                                          vms[area]->size);
> --
> 2.25.1
>
Andrey Konovalov Dec. 20, 2021, 9:35 p.m. UTC | #2
On Thu, Dec 16, 2021 at 8:08 PM Alexander Potapenko <glider@google.com> wrote:
>
> On Mon, Dec 13, 2021 at 10:54 PM <andrey.konovalov@linux.dev> wrote:
> >
> > From: Andrey Konovalov <andreyknvl@google.com>
> >
> > Make KASAN unpoison vmalloc mappings after they have been mapped in,
> > when that is possible: for vmalloc() (identified via VM_ALLOC) and
> > vm_map_ram().
>
> The subject says "don't unpoison VM_ALLOC pages", whereas the
> description says "unpoison VM_ALLOC pages", or am I missing something?

Yes :) The title says "don't unpoison *before*", and the body says
"unpoison *after*".

I'll reword the changelog in v4 to make it less confusing.

Thanks!

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 58bd2f7f86d7..9a6862e274df 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2208,14 +2208,15 @@  void *vm_map_ram(struct page **pages, unsigned int count, int node)
 		mem = (void *)addr;
 	}
 
-	mem = kasan_unpoison_vmalloc(mem, size);
-
 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
 				pages, PAGE_SHIFT) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
 	}
 
+	/* Mark the pages as accessible after they were mapped in. */
+	mem = kasan_unpoison_vmalloc(mem, size);
+
 	return mem;
 }
 EXPORT_SYMBOL(vm_map_ram);
@@ -2443,7 +2444,14 @@  static struct vm_struct *__get_vm_area_node(unsigned long size,
 
 	setup_vmalloc_vm(area, va, flags, caller);
 
-	area->addr = kasan_unpoison_vmalloc(area->addr, requested_size);
+	/*
+	 * For VM_ALLOC mappings, __vmalloc_node_range() marks the pages as
+	 * accessible after they are mapped in.
+	 * Otherwise, as the pages can be mapped outside of vmalloc code,
+	 * mark them now as a best-effort approach.
+	 */
+	if (!(flags & VM_ALLOC))
+		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size);
 
 	return area;
 }
@@ -3104,6 +3112,12 @@  void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	if (!addr)
 		goto fail;
 
+	/*
+	 * Mark the pages for VM_ALLOC mappings as accessible after they were
+	 * mapped in.
+	 */
+	addr = kasan_unpoison_vmalloc(addr, real_size);
+
 	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
 	 * flag. It means that vm_struct is not fully initialized.
@@ -3799,7 +3813,11 @@  struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	}
 	spin_unlock(&vmap_area_lock);
 
-	/* mark allocated areas as accessible */
+	/*
+	 * Mark allocated areas as accessible.
+	 * As the pages are mapped outside of vmalloc code,
+	 * mark them now as a best-effort approach.
+	 */
 	for (area = 0; area < nr_vms; area++)
 		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
 							 vms[area]->size);