
[2/3] mm: kasan: Reset the tag on pages intended for user

Message ID 20220517180945.756303-3-catalin.marinas@arm.com (mailing list archive)
State New, archived
Series kasan: Fix ordering between MTE tag colouring and page->flags

Commit Message

Catalin Marinas May 17, 2022, 6:09 p.m. UTC
On allocation kasan colours a page with a random tag and stores that tag
in page->flags so that a subsequent page_to_virt() reconstructs the
correct tagged pointer. However, when such a page is mapped in user-space
with PROT_MTE, the kernel's initial tag is overridden. Ensure that such
pages have their tag reset (match-all) at allocation time, since any later
clearing of the tag would race with other page_to_virt() dereferences.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
---
 include/linux/gfp.h | 10 +++++++---
 mm/page_alloc.c     |  9 ++++++---
 2 files changed, 13 insertions(+), 6 deletions(-)
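
For context, the sketch below is a stand-alone model of the mechanism the commit message describes: the allocator records a tag for each page, page_to_virt() later rebuilds a tagged pointer from it, and resetting the stored tag to the match-all value keeps that rebuilt pointer valid regardless of what user space does to the memory tags. The struct layout, bit positions and helper names (page_tag_set(), tagged_addr()) are made up for illustration; the kernel's real helpers are page_kasan_tag_reset() and the arm64 page_to_virt().

/*
 * Stand-alone model of the tag-in-page->flags mechanism; field layout and
 * helper names are illustrative only, not the kernel's actual code.
 */
#include <stdint.h>
#include <stdio.h>

#define KASAN_TAG_MASK		0xffULL
#define KASAN_TAG_SHIFT		48		/* arbitrary position for this model */
#define KASAN_TAG_KERNEL	0xffULL		/* match-all tag */

struct page_model {
	uint64_t flags;				/* stores the KASAN tag, as page->flags does */
};

/* Record a tag for the page, as the allocator does when colouring it. */
static void page_tag_set(struct page_model *page, uint64_t tag)
{
	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_SHIFT);
	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_SHIFT;
}

/* Reset to the match-all tag, the effect of page_kasan_tag_reset(). */
static void page_tag_reset(struct page_model *page)
{
	page_tag_set(page, KASAN_TAG_KERNEL);
}

/* Rebuild a tagged pointer from an untagged address, like page_to_virt(). */
static uint64_t tagged_addr(const struct page_model *page, uint64_t addr)
{
	uint64_t tag = (page->flags >> KASAN_TAG_SHIFT) & KASAN_TAG_MASK;

	return (addr & ~(0xffULL << 56)) | (tag << 56);
}

int main(void)
{
	struct page_model page = { 0 };

	page_tag_set(&page, 0x3a);		/* random colour at allocation */
	printf("coloured: %#llx\n",
	       (unsigned long long)tagged_addr(&page, 0xffff000012340000ULL));

	page_tag_reset(&page);			/* match-all: safe once user space owns the tags */
	printf("reset:    %#llx\n",
	       (unsigned long long)tagged_addr(&page, 0xffff000012340000ULL));
	return 0;
}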

Comments

Andrey Konovalov May 21, 2022, 10:15 p.m. UTC | #1
On Tue, May 17, 2022 at 8:09 PM Catalin Marinas <catalin.marinas@arm.com> wrote:
>
> On allocation kasan colours a page with a random tag and stores that tag
> in page->flags so that a subsequent page_to_virt() reconstructs the
> correct tagged pointer. However, when such a page is mapped in user-space
> with PROT_MTE, the kernel's initial tag is overridden. Ensure that such
> pages have their tag reset (match-all) at allocation time, since any later
> clearing of the tag would race with other page_to_virt() dereferences.
>
> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
> Cc: Andrey Konovalov <andreyknvl@gmail.com>
> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
> ---
>  include/linux/gfp.h | 10 +++++++---
>  mm/page_alloc.c     |  9 ++++++---
>  2 files changed, 13 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index 3e3d36fc2109..88b1d4fe4dcb 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -58,13 +58,15 @@ struct vm_area_struct;
>  #define ___GFP_SKIP_ZERO               0x1000000u
>  #define ___GFP_SKIP_KASAN_UNPOISON     0x2000000u
>  #define ___GFP_SKIP_KASAN_POISON       0x4000000u
> +#define ___GFP_PAGE_KASAN_TAG_RESET    0x8000000u

Let's name it ___GFP_RESET_KASAN_PAGE_TAG to be consistent with the rest.

Also, please add a comment above that explains the new flag's purpose.
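
As a sketch of how that could look, assuming the name suggested above; the comment wording is illustrative, taken from the commit message rather than from this thread:

/*
 * On allocation, reset the page's KASAN tag stored in page->flags to the
 * match-all value. Intended for pages that may later be mapped to user
 * space with PROT_MTE, where clearing the tag any later would race with
 * page_to_virt() users relying on the stored tag.
 */
#define ___GFP_RESET_KASAN_PAGE_TAG	0x8000000u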

>  #else
>  #define ___GFP_SKIP_ZERO               0
>  #define ___GFP_SKIP_KASAN_UNPOISON     0
>  #define ___GFP_SKIP_KASAN_POISON       0
> +#define ___GFP_PAGE_KASAN_TAG_RESET    0
>  #endif
>  #ifdef CONFIG_LOCKDEP
> -#define ___GFP_NOLOCKDEP       0x8000000u
> +#define ___GFP_NOLOCKDEP       0x10000000u
>  #else
>  #define ___GFP_NOLOCKDEP       0
>  #endif
> @@ -259,12 +261,13 @@ struct vm_area_struct;
>  #define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
>  #define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
>  #define __GFP_SKIP_KASAN_POISON   ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
> +#define __GFP_PAGE_KASAN_TAG_RESET ((__force gfp_t)___GFP_PAGE_KASAN_TAG_RESET)
>
>  /* Disable lockdep for GFP context tracking */
>  #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
>
>  /* Room for N __GFP_FOO bits */
> -#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
> +#define __GFP_BITS_SHIFT (28 + IS_ENABLED(CONFIG_LOCKDEP))
>  #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
>
>  /**
> @@ -343,7 +346,8 @@ struct vm_area_struct;
>  #define GFP_NOWAIT     (__GFP_KSWAPD_RECLAIM)
>  #define GFP_NOIO       (__GFP_RECLAIM)
>  #define GFP_NOFS       (__GFP_RECLAIM | __GFP_IO)
> -#define GFP_USER       (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
> +#define GFP_USER       (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
> +                        __GFP_PAGE_KASAN_TAG_RESET)

I guess we can also add both ___GFP_SKIP_KASAN_UNPOISON and
___GFP_SKIP_KASAN_POISON here then? Since we don't care about tags.

Or maybe we can add all three flags to GFP_HIGHUSER_MOVABLE instead?

>  #define GFP_DMA                __GFP_DMA
>  #define GFP_DMA32      __GFP_DMA32
>  #define GFP_HIGHUSER   (GFP_USER | __GFP_HIGHMEM)

In case we add __GFP_SKIP_KASAN_POISON to GFP_USER, we should drop it
from GFP_HIGHUSER_MOVABLE.
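
One possible reading of the second alternative, as a sketch only: leave GFP_USER untouched and group all three KASAN hints in GFP_HIGHUSER_MOVABLE, which already carries __GFP_SKIP_KASAN_POISON (flag name kept as in the posted patch):

#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE | \
				 __GFP_SKIP_KASAN_POISON | \
				 __GFP_SKIP_KASAN_UNPOISON | \
				 __GFP_PAGE_KASAN_TAG_RESET)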

> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 0e42038382c1..f9018a84f4e3 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2382,6 +2382,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
>         bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
>                         !should_skip_init(gfp_flags);
>         bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
> +       int i;
>
>         set_page_private(page, 0);
>         set_page_refcounted(page);
> @@ -2407,8 +2408,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
>          * should be initialized as well).
>          */
>         if (init_tags) {
> -               int i;
> -
>                 /* Initialize both memory and tags. */
>                 for (i = 0; i != 1 << order; ++i)
>                         tag_clear_highpage(page + i);
> @@ -2430,7 +2429,11 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
>         /* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
>         if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
>                 SetPageSkipKASanPoison(page);
> -
> +       /* if match-all page address required, reset the tag */

Please match the style of other comments: capitalize the first letter
and add a dot at the end.

I would also simply say: "Reset page tags if required."

> +       if (gfp_flags & __GFP_PAGE_KASAN_TAG_RESET) {
> +               for (i = 0; i != 1 << order; ++i)
> +                       page_kasan_tag_reset(page + i);
> +       };

I would add an empty line here.



>         set_page_owner(page, order, gfp_flags);
>         page_table_check_alloc(page, order);
>  }
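
Folding the style comments above into the tail of post_alloc_hook() gives something like the sketch below; this is an illustrative respin, not a posted v2, with the flag name kept as in the original patch and the stray semicolon after the closing brace dropped:

	/* Reset page tags if required. */
	if (gfp_flags & __GFP_PAGE_KASAN_TAG_RESET) {
		for (i = 0; i != 1 << order; ++i)
			page_kasan_tag_reset(page + i);
	}

	set_page_owner(page, order, gfp_flags);
	page_table_check_alloc(page, order);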

Patch

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3e3d36fc2109..88b1d4fe4dcb 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -58,13 +58,15 @@  struct vm_area_struct;
 #define ___GFP_SKIP_ZERO		0x1000000u
 #define ___GFP_SKIP_KASAN_UNPOISON	0x2000000u
 #define ___GFP_SKIP_KASAN_POISON	0x4000000u
+#define ___GFP_PAGE_KASAN_TAG_RESET	0x8000000u
 #else
 #define ___GFP_SKIP_ZERO		0
 #define ___GFP_SKIP_KASAN_UNPOISON	0
 #define ___GFP_SKIP_KASAN_POISON	0
+#define ___GFP_PAGE_KASAN_TAG_RESET	0
 #endif
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP	0x8000000u
+#define ___GFP_NOLOCKDEP	0x10000000u
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
@@ -259,12 +261,13 @@  struct vm_area_struct;
 #define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
 #define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
 #define __GFP_SKIP_KASAN_POISON   ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
+#define __GFP_PAGE_KASAN_TAG_RESET ((__force gfp_t)___GFP_PAGE_KASAN_TAG_RESET)
 
 /* Disable lockdep for GFP context tracking */
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (28 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
@@ -343,7 +346,8 @@  struct vm_area_struct;
 #define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
 #define GFP_NOIO	(__GFP_RECLAIM)
 #define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
-#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
+			 __GFP_PAGE_KASAN_TAG_RESET)
 #define GFP_DMA		__GFP_DMA
 #define GFP_DMA32	__GFP_DMA32
 #define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0e42038382c1..f9018a84f4e3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2382,6 +2382,7 @@  inline void post_alloc_hook(struct page *page, unsigned int order,
 	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
 			!should_skip_init(gfp_flags);
 	bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
+	int i;
 
 	set_page_private(page, 0);
 	set_page_refcounted(page);
@@ -2407,8 +2408,6 @@  inline void post_alloc_hook(struct page *page, unsigned int order,
 	 * should be initialized as well).
 	 */
 	if (init_tags) {
-		int i;
-
 		/* Initialize both memory and tags. */
 		for (i = 0; i != 1 << order; ++i)
 			tag_clear_highpage(page + i);
@@ -2430,7 +2429,11 @@  inline void post_alloc_hook(struct page *page, unsigned int order,
 	/* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
 	if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
 		SetPageSkipKASanPoison(page);
-
+	/* if match-all page address required, reset the tag */
+	if (gfp_flags & __GFP_PAGE_KASAN_TAG_RESET) {
+		for (i = 0; i != 1 << order; ++i)
+			page_kasan_tag_reset(page + i);
+	};
 	set_page_owner(page, order, gfp_flags);
 	page_table_check_alloc(page, order);
 }