[v6,07/18] khwasan: add tag related helper functions

Message ID 6cd298a90d02068969713f2fd440eae21227467b.1535462971.git.andreyknvl@google.com (mailing list archive)
State New, archived
Series khwasan: kernel hardware assisted address sanitizer

Commit Message

Andrey Konovalov Aug. 29, 2018, 11:35 a.m. UTC
This commit adds a few helper functions that are meant to be used to
work with tags embedded in the top byte of kernel pointers: to set, to
get, or to reset (set to 0xff) the top byte.
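
A minimal usage sketch (illustrative only, not part of the patch; it assumes
code inside mm/kasan with CONFIG_KASAN_HW enabled, where these internal
helpers are visible, and some kernel pointer ptr):

	void *tagged = set_tag(ptr, random_tag());  /* embed a random tag */
	u8 tag = get_tag(tagged);                   /* extract the top byte */
	void *untagged = reset_tag(tagged);         /* back to 0xff (native) */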

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 arch/arm64/mm/kasan_init.c |  2 ++
 include/linux/kasan.h      | 29 +++++++++++++++++
 mm/kasan/kasan.h           | 55 ++++++++++++++++++++++++++++++++
 mm/kasan/khwasan.c         | 65 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 151 insertions(+)

Comments

Dmitry Vyukov Sept. 12, 2018, 4:21 p.m. UTC | #1
On Wed, Aug 29, 2018 at 1:35 PM, Andrey Konovalov <andreyknvl@google.com> wrote:
> This commit adds a few helper functions that are meant to be used to
> work with tags embedded in the top byte of kernel pointers: to set, to
> get, or to reset (set to 0xff) the top byte.
>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> ---
>  arch/arm64/mm/kasan_init.c |  2 ++
>  include/linux/kasan.h      | 29 +++++++++++++++++
>  mm/kasan/kasan.h           | 55 ++++++++++++++++++++++++++++++++
>  mm/kasan/khwasan.c         | 65 ++++++++++++++++++++++++++++++++++++++
>  4 files changed, 151 insertions(+)
>
> diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> index 7a31e8ccbad2..e7f37c0b7e14 100644
> --- a/arch/arm64/mm/kasan_init.c
> +++ b/arch/arm64/mm/kasan_init.c
> @@ -250,6 +250,8 @@ void __init kasan_init(void)
>         memset(kasan_zero_page, KASAN_SHADOW_INIT, PAGE_SIZE);
>         cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
>
> +       khwasan_init();
> +
>         /* At this point kasan is fully initialized. Enable error messages */
>         init_task.kasan_depth = 0;
>         pr_info("KernelAddressSanitizer initialized\n");
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 1c31bb089154..1f852244e739 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -166,6 +166,35 @@ static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
>
>  #define KASAN_SHADOW_INIT 0xFF
>
> +void khwasan_init(void);
> +
> +void *khwasan_reset_tag(const void *addr);
> +
> +void *khwasan_preset_slub_tag(struct kmem_cache *cache, const void *addr);
> +void *khwasan_preset_slab_tag(struct kmem_cache *cache, unsigned int idx,
> +                                       const void *addr);
> +
> +#else /* CONFIG_KASAN_HW */
> +
> +static inline void khwasan_init(void) { }
> +
> +static inline void *khwasan_reset_tag(const void *addr)
> +{
> +       return (void *)addr;
> +}
> +
> +static inline void *khwasan_preset_slub_tag(struct kmem_cache *cache,
> +                                               const void *addr)
> +{
> +       return (void *)addr;
> +}
> +
> +static inline void *khwasan_preset_slab_tag(struct kmem_cache *cache,
> +                                       unsigned int idx, const void *addr)
> +{
> +       return (void *)addr;
> +}
> +
>  #endif /* CONFIG_KASAN_HW */
>
>  #endif /* LINUX_KASAN_H */
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index 19b950eaccff..a7cc27d96608 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -8,6 +8,10 @@
>  #define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
>  #define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
>
> +#define KHWASAN_TAG_KERNEL     0xFF /* native kernel pointers tag */
> +#define KHWASAN_TAG_INVALID    0xFE /* inaccessible memory tag */
> +#define KHWASAN_TAG_MAX        0xFD /* maximum value for random tags */
> +
>  #define KASAN_FREE_PAGE         0xFF  /* page was freed */
>  #define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
>  #define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
> @@ -126,6 +130,57 @@ static inline void quarantine_reduce(void) { }
>  static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
>  #endif
>
> +#ifdef CONFIG_KASAN_HW
> +
> +#define KHWASAN_TAG_SHIFT 56
> +#define KHWASAN_TAG_MASK (0xFFUL << KHWASAN_TAG_SHIFT)
> +
> +u8 random_tag(void);
> +
> +static inline void *set_tag(const void *addr, u8 tag)
> +{
> +       u64 a = (u64)addr;
> +
> +       a &= ~KHWASAN_TAG_MASK;
> +       a |= ((u64)tag << KHWASAN_TAG_SHIFT);
> +
> +       return (void *)a;
> +}
> +
> +static inline u8 get_tag(const void *addr)
> +{
> +       return (u8)((u64)addr >> KHWASAN_TAG_SHIFT);
> +}
> +
> +static inline void *reset_tag(const void *addr)
> +{
> +       return set_tag(addr, KHWASAN_TAG_KERNEL);
> +}
> +
> +#else /* CONFIG_KASAN_HW */
> +
> +static inline u8 random_tag(void)
> +{
> +       return 0;
> +}
> +
> +static inline void *set_tag(const void *addr, u8 tag)
> +{
> +       return (void *)addr;
> +}
> +
> +static inline u8 get_tag(const void *addr)
> +{
> +       return 0;
> +}
> +
> +static inline void *reset_tag(const void *addr)
> +{
> +       return (void *)addr;
> +}
> +
> +#endif /* CONFIG_KASAN_HW */
> +
>  /*
>   * Exported functions for interfaces called from assembly or from generated
>   * code. Declarations here to avoid warning about missing declarations.
> diff --git a/mm/kasan/khwasan.c b/mm/kasan/khwasan.c
> index e2c3a7f7fd1f..9d91bf3c8246 100644
> --- a/mm/kasan/khwasan.c
> +++ b/mm/kasan/khwasan.c
> @@ -38,6 +38,71 @@
>  #include "kasan.h"
>  #include "../slab.h"
>
> +static DEFINE_PER_CPU(u32, prng_state);
> +
> +void khwasan_init(void)
> +{
> +       int cpu;
> +
> +       for_each_possible_cpu(cpu)
> +               per_cpu(prng_state, cpu) = get_random_u32();
> +}
> +
> +/*
> + * If a preemption happens between this_cpu_read and this_cpu_write, the only
> + * side effect is that we'll give a few objects allocated in different contexts
> + * the same tag. Since KHWASAN is meant to be used as a probabilistic
> + * bug-detection debug feature, this doesn't have a significant negative impact.
> + *
> + * Ideally the tags would use strong randomness to prevent any attempts to
> + * predict them during explicit exploit attempts. But strong randomness is
> + * expensive, and we made an intentional trade-off to use a PRNG. This
> + * non-atomic RMW sequence in fact has a positive effect, since interrupts
> + * that randomly skew the PRNG at unpredictable points only do good.
> + */
> +u8 random_tag(void)
> +{
> +       u32 state = this_cpu_read(prng_state);
> +
> +       state = 1664525 * state + 1013904223;
> +       this_cpu_write(prng_state, state);
> +
> +       return (u8)(state % (KHWASAN_TAG_MAX + 1));
> +}
> +
> +void *khwasan_reset_tag(const void *addr)
> +{
> +       return reset_tag(addr);
> +}
> +
> +void *khwasan_preset_slub_tag(struct kmem_cache *cache, const void *addr)

Can't we do this in the existing kasan_init_slab_obj() hook? It looks
like it should do exactly this -- allow any one-time initialization
for objects. We could extend it to accept an index and return a new
pointer.
If that does not work for some reason, I would try to at least unify
the hook for slab/slub, e.g. pass idx=-1 from slub and then use
random_tag().
It also seems that we preset the tag for slab multiple times (from
slab_get_obj()). Using kasan_init_slab_obj() should resolve this too
(hopefully we don't call it multiple times).
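
For illustration, a unified hook could look roughly like this (a sketch with
a hypothetical extended signature, not something this patch defines):

	void *kasan_init_slab_obj(struct kmem_cache *cache, int idx,
					const void *addr)
	{
		if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
			return (void *)addr;
		/* idx == -1 would mean SLUB: no stable index, so fall
		 * back to a random tag. */
		return set_tag(addr, idx < 0 ? random_tag() : (u8)idx);
	}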


> +{
> +       /*
> +        * Since it's desirable to only call object constructors once during
> +        * slab allocation, we preassign tags to all such objects.
> +        * Also preassign tags for SLAB_TYPESAFE_BY_RCU slabs to avoid
> +        * use-after-free reports.
> +        */
> +       if (cache->ctor || cache->flags & SLAB_TYPESAFE_BY_RCU)
> +               return set_tag(addr, random_tag());
> +       return (void *)addr;
> +}
> +
> +void *khwasan_preset_slab_tag(struct kmem_cache *cache, unsigned int idx,
> +                               const void *addr)
> +{
> +       /*
> +        * See comment in khwasan_preset_slub_tag.
> +        * For the SLAB allocator we can't preassign tags randomly since the
> +        * freelist is stored as an array of indexes instead of a linked
> +        * list. Assign tags based on object indexes, so that objects that
> +        * are next to each other get different tags.
> +        */
> +       if (cache->ctor || cache->flags & SLAB_TYPESAFE_BY_RCU)
> +               return set_tag(addr, (u8)idx);
> +       return (void *)addr;
> +}
> +
>  void check_memory_region(unsigned long addr, size_t size, bool write,
>                                 unsigned long ret_ip)
>  {
> --
> 2.19.0.rc0.228.g281dcd1b4d0-goog
>
Andrey Konovalov Sept. 17, 2018, 6:59 p.m. UTC | #2
On Wed, Sep 12, 2018 at 6:21 PM, Dmitry Vyukov <dvyukov@google.com> wrote:
> On Wed, Aug 29, 2018 at 1:35 PM, Andrey Konovalov <andreyknvl@google.com> wrote:

>> +void *khwasan_preset_slub_tag(struct kmem_cache *cache, const void *addr)
>
> Can't we do this in the existing kasan_init_slab_obj() hook? It looks
> like it should do exactly this -- allow any one-time initialization
> for objects. We could extend it to accept an index and return a new
> pointer.
> If that does not work for some reason, I would try to at least unify
> the hook for slab/slub, e.g. pass idx=-1 from slub and then use
> random_tag().
> It also seems that we preset the tag for slab multiple times (from
> slab_get_obj()). Using kasan_init_slab_obj() should resolve this too
> (hopefully we don't call it multiple times).

The issue is that SLAB stores the freelist as an array of indexes instead
of using an actual linked list like SLUB does. So you can't store the tag
in the pointer while the object is in the freelist, since there's no
pointer. And, technically, we don't preset tags for SLAB; we just use
the index as the tag every time a pointer is used, so perhaps we should
rename the callback. As for unifying the callbacks, sure, we can do
that.
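
To illustrate the difference (simplified pseudo-declarations, not the actual
kernel structures; NR_OBJS is a placeholder):

	/* SLUB: the freelist is a linked list threaded through the free
	 * objects, so each link is a real pointer whose top byte can
	 * carry a tag. */
	struct slub_free_object { struct slub_free_object *next; };

	/* SLAB: the freelist is an array of object indexes stored in the
	 * slab page, so there is no pointer to tag until the object is
	 * handed out. */
	unsigned char slab_freelist[NR_OBJS]; /* indexes, not pointers */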
Dmitry Vyukov Sept. 18, 2018, 3:45 p.m. UTC | #3
On Mon, Sep 17, 2018 at 8:59 PM, Andrey Konovalov <andreyknvl@google.com> wrote:
> On Wed, Sep 12, 2018 at 6:21 PM, Dmitry Vyukov <dvyukov@google.com> wrote:
>> On Wed, Aug 29, 2018 at 1:35 PM, Andrey Konovalov <andreyknvl@google.com> wrote:
>
>>> +void *khwasan_preset_slub_tag(struct kmem_cache *cache, const void *addr)
>>
>> Can't we do this in the existing kasan_init_slab_obj() hook? It looks
>> like it should do exactly this -- allow any one-time initialization
>> for objects. We could extend it to accept an index and return a new
>> pointer.
>> If that does not work for some reason, I would try to at least unify
>> the hook for slab/slub, e.g. pass idx=-1 from slub and then use
>> random_tag().
>> It also seems that we preset the tag for slab multiple times (from
>> slab_get_obj()). Using kasan_init_slab_obj() should resolve this too
>> (hopefully we don't call it multiple times).
>
> The issue is that SLAB stores the freelist as an array of indexes instead
> of using an actual linked list like SLUB does. So you can't store the tag
> in the pointer while the object is in the freelist, since there's no
> pointer. And, technically, we don't preset tags for SLAB; we just use
> the index as the tag every time a pointer is used, so perhaps we should
> rename the callback. As for unifying the callbacks, sure, we can do
> that.

As per offline discussion: potentially we can use
kasan_init_slab_obj() if we add the tag in the kmalloc hook by using
obj_to_idx().
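
A rough sketch of that idea (hypothetical helper; obj_to_idx() is used as
named above, and the exact hook placement is illustrative):

	/* In the kmalloc hook: derive the tag from the object's index
	 * within its slab page instead of presetting it in the freelist. */
	static void *khwasan_slab_tag(struct kmem_cache *cache,
					struct page *page, const void *addr)
	{
		if (cache->ctor || cache->flags & SLAB_TYPESAFE_BY_RCU)
			return set_tag(addr, (u8)obj_to_idx(cache, page, addr));
		return (void *)addr;
	}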

Patch

diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 7a31e8ccbad2..e7f37c0b7e14 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -250,6 +250,8 @@ void __init kasan_init(void)
 	memset(kasan_zero_page, KASAN_SHADOW_INIT, PAGE_SIZE);
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
+	khwasan_init();
+
 	/* At this point kasan is fully initialized. Enable error messages */
 	init_task.kasan_depth = 0;
 	pr_info("KernelAddressSanitizer initialized\n");
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 1c31bb089154..1f852244e739 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -166,6 +166,35 @@ static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
 
 #define KASAN_SHADOW_INIT 0xFF
 
+void khwasan_init(void);
+
+void *khwasan_reset_tag(const void *addr);
+
+void *khwasan_preset_slub_tag(struct kmem_cache *cache, const void *addr);
+void *khwasan_preset_slab_tag(struct kmem_cache *cache, unsigned int idx,
+					const void *addr);
+
+#else /* CONFIG_KASAN_HW */
+
+static inline void khwasan_init(void) { }
+
+static inline void *khwasan_reset_tag(const void *addr)
+{
+	return (void *)addr;
+}
+
+static inline void *khwasan_preset_slub_tag(struct kmem_cache *cache,
+						const void *addr)
+{
+	return (void *)addr;
+}
+
+static inline void *khwasan_preset_slab_tag(struct kmem_cache *cache,
+					unsigned int idx, const void *addr)
+{
+	return (void *)addr;
+}
+
 #endif /* CONFIG_KASAN_HW */
 
 #endif /* LINUX_KASAN_H */
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 19b950eaccff..a7cc27d96608 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -8,6 +8,10 @@ 
 #define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
 #define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
 
+#define KHWASAN_TAG_KERNEL	0xFF /* native kernel pointers tag */
+#define KHWASAN_TAG_INVALID	0xFE /* inaccessible memory tag */
+#define KHWASAN_TAG_MAX		0xFD /* maximum value for random tags */
+
 #define KASAN_FREE_PAGE         0xFF  /* page was freed */
 #define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
 #define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
@@ -126,6 +130,57 @@ static inline void quarantine_reduce(void) { }
 static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
 #endif
 
+#ifdef CONFIG_KASAN_HW
+
+#define KHWASAN_TAG_SHIFT 56
+#define KHWASAN_TAG_MASK (0xFFUL << KHWASAN_TAG_SHIFT)
+
+u8 random_tag(void);
+
+static inline void *set_tag(const void *addr, u8 tag)
+{
+	u64 a = (u64)addr;
+
+	a &= ~KHWASAN_TAG_MASK;
+	a |= ((u64)tag << KHWASAN_TAG_SHIFT);
+
+	return (void *)a;
+}
+
+static inline u8 get_tag(const void *addr)
+{
+	return (u8)((u64)addr >> KHWASAN_TAG_SHIFT);
+}
+
+static inline void *reset_tag(const void *addr)
+{
+	return set_tag(addr, KHWASAN_TAG_KERNEL);
+}
+
+#else /* CONFIG_KASAN_HW */
+
+static inline u8 random_tag(void)
+{
+	return 0;
+}
+
+static inline void *set_tag(const void *addr, u8 tag)
+{
+	return (void *)addr;
+}
+
+static inline u8 get_tag(const void *addr)
+{
+	return 0;
+}
+
+static inline void *reset_tag(const void *addr)
+{
+	return (void *)addr;
+}
+
+#endif /* CONFIG_KASAN_HW */
+
 /*
  * Exported functions for interfaces called from assembly or from generated
  * code. Declarations here to avoid warning about missing declarations.
diff --git a/mm/kasan/khwasan.c b/mm/kasan/khwasan.c
index e2c3a7f7fd1f..9d91bf3c8246 100644
--- a/mm/kasan/khwasan.c
+++ b/mm/kasan/khwasan.c
@@ -38,6 +38,71 @@ 
 #include "kasan.h"
 #include "../slab.h"
 
+static DEFINE_PER_CPU(u32, prng_state);
+
+void khwasan_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(prng_state, cpu) = get_random_u32();
+}
+
+/*
+ * If a preemption happens between this_cpu_read and this_cpu_write, the only
+ * side effect is that we'll give a few objects allocated in different contexts
+ * the same tag. Since KHWASAN is meant to be used as a probabilistic
+ * bug-detection debug feature, this doesn't have a significant negative impact.
+ *
+ * Ideally the tags would use strong randomness to prevent any attempts to
+ * predict them during explicit exploit attempts. But strong randomness is
+ * expensive, and we made an intentional trade-off to use a PRNG. This
+ * non-atomic RMW sequence in fact has a positive effect, since interrupts
+ * that randomly skew the PRNG at unpredictable points only do good.
+ */
+u8 random_tag(void)
+{
+	u32 state = this_cpu_read(prng_state);
+
+	state = 1664525 * state + 1013904223;
+	this_cpu_write(prng_state, state);
+
+	return (u8)(state % (KHWASAN_TAG_MAX + 1));
+}
+
+void *khwasan_reset_tag(const void *addr)
+{
+	return reset_tag(addr);
+}
+
+void *khwasan_preset_slub_tag(struct kmem_cache *cache, const void *addr)
+{
+	/*
+	 * Since it's desirable to only call object constructors once during
+	 * slab allocation, we preassign tags to all such objects.
+	 * Also preassign tags for SLAB_TYPESAFE_BY_RCU slabs to avoid
+	 * use-after-free reports.
+	 */
+	if (cache->ctor || cache->flags & SLAB_TYPESAFE_BY_RCU)
+		return set_tag(addr, random_tag());
+	return (void *)addr;
+}
+
+void *khwasan_preset_slab_tag(struct kmem_cache *cache, unsigned int idx,
+				const void *addr)
+{
+	/*
+	 * See comment in khwasan_preset_slub_tag.
+	 * For the SLAB allocator we can't preassign tags randomly since the
+	 * freelist is stored as an array of indexes instead of a linked
+	 * list. Assign tags based on object indexes, so that objects that
+	 * are next to each other get different tags.
+	 */
+	if (cache->ctor || cache->flags & SLAB_TYPESAFE_BY_RCU)
+		return set_tag(addr, (u8)idx);
+	return (void *)addr;
+}
+
 void check_memory_region(unsigned long addr, size_t size, bool write,
 				unsigned long ret_ip)
 {