Message ID | 20210624034050.511391-3-dja@axtens.net |
---|---|
State | New |
Series | KASAN core changes for ppc64 radix KASAN |
On Thu, Jun 24, 2021 at 6:41 AM Daniel Axtens <dja@axtens.net> wrote:
>
> Allow architectures to define a kasan_arch_is_ready() hook that bails
> out of any function that's about to touch the shadow unless the arch
> says that it is ready for the memory to be accessed. This is fairly
> uninvasive and should have a negligible performance penalty.
>
> This will only work in outline mode, so an arch must specify
> ARCH_DISABLE_KASAN_INLINE if it requires this.
>
> Cc: Balbir Singh <bsingharora@gmail.com>
> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> Reviewed-by: Marco Elver <elver@google.com>
> Signed-off-by: Daniel Axtens <dja@axtens.net>
>
> --
>
> Both previous RFCs for ppc64 - by 2 different people - have
> needed this trick! See:
>  - https://lore.kernel.org/patchwork/patch/592820/ # ppc64 hash series
>  - https://patchwork.ozlabs.org/patch/795211/ # ppc radix series
>
> Build tested on arm64 with SW_TAGS and x86 with INLINE: the error fires
> if I add a kasan_arch_is_ready define.
> ---
>  mm/kasan/common.c  | 3 +++
>  mm/kasan/generic.c | 3 +++
>  mm/kasan/kasan.h   | 6 ++++++
>  mm/kasan/shadow.c  | 6 ++++++
>  4 files changed, 18 insertions(+)
>
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 10177cc26d06..2baf121fb8c5 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -331,6 +331,9 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
>         u8 tag;
>         void *tagged_object;
>
> +       if (!kasan_arch_is_ready())
> +               return false;
> +
>         tag = get_tag(object);
>         tagged_object = object;
>         object = kasan_reset_tag(object);
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 53cbf28859b5..c3f5ba7a294a 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -163,6 +163,9 @@ static __always_inline bool check_region_inline(unsigned long addr,
>                                                 size_t size, bool write,
>                                                 unsigned long ret_ip)
>  {
> +       if (!kasan_arch_is_ready())
> +               return true;
> +
>         if (unlikely(size == 0))
>                 return true;
>
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index 8f450bc28045..4dbc8def64f4 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -449,6 +449,12 @@ static inline void kasan_poison_last_granule(const void *address, size_t size) {
>
>  #endif /* CONFIG_KASAN_GENERIC */
>
> +#ifndef kasan_arch_is_ready
> +static inline bool kasan_arch_is_ready(void) { return true; }
> +#elif !defined(CONFIG_KASAN_GENERIC) || !defined(CONFIG_KASAN_OUTLINE)
> +#error kasan_arch_is_ready only works in KASAN generic outline mode!
> +#endif
> +
>  /*
>   * Exported functions for interfaces called from assembly or from generated
>   * code. Declarations here to avoid warning about missing declarations.
> diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
> index 082ee5b6d9a1..8d95ee52d019 100644
> --- a/mm/kasan/shadow.c
> +++ b/mm/kasan/shadow.c
> @@ -73,6 +73,9 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
>  {
>         void *shadow_start, *shadow_end;
>
> +       if (!kasan_arch_is_ready())
> +               return;
> +
>         /*
>          * Perform shadow offset calculation based on untagged address, as
>          * some of the callers (e.g. kasan_poison_object_data) pass tagged
> @@ -99,6 +102,9 @@ EXPORT_SYMBOL(kasan_poison);
>  #ifdef CONFIG_KASAN_GENERIC
>  void kasan_poison_last_granule(const void *addr, size_t size)
>  {
> +       if (!kasan_arch_is_ready())
> +               return;
> +
>         if (size & KASAN_GRANULE_MASK) {
>                 u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
>                 *shadow = size & KASAN_GRANULE_MASK;
> --
> 2.30.2

Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
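The fallback in mm/kasan/kasan.h (`#ifndef kasan_arch_is_ready`) implies that an architecture opts in by providing its own definition as a macro or inline function before the generic KASAN code sees the header. Below is a minimal sketch of what such an arch-side definition might look like; the `kasan_arch_ready` flag and the header placement are illustrative assumptions, not code from this series, and per the commit message the arch would also need to specify ARCH_DISABLE_KASAN_INLINE.

```c
/*
 * Hypothetical arch-side definition of the hook, e.g. in the arch's
 * <asm/kasan.h>.  The kasan_arch_ready flag is an assumption made for
 * this sketch; it is not introduced by the patch below.
 */
#include <linux/types.h>
#include <linux/compiler.h>

extern bool kasan_arch_ready;	/* set by the arch once shadow memory is mapped */

static inline bool kasan_arch_is_ready(void)
{
	return READ_ONCE(kasan_arch_ready);
}
/* Tell mm/kasan/kasan.h that the arch supplies the hook. */
#define kasan_arch_is_ready kasan_arch_is_ready
```

With inline instrumentation the compiler emits shadow loads directly and never goes through check_region_inline(), so the hook cannot protect those accesses; that is why the `#error` in kasan.h restricts the hook to KASAN generic outline mode. The patch as applied to the KASAN core follows.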
```diff
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 10177cc26d06..2baf121fb8c5 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -331,6 +331,9 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
 	u8 tag;
 	void *tagged_object;
 
+	if (!kasan_arch_is_ready())
+		return false;
+
 	tag = get_tag(object);
 	tagged_object = object;
 	object = kasan_reset_tag(object);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 53cbf28859b5..c3f5ba7a294a 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -163,6 +163,9 @@ static __always_inline bool check_region_inline(unsigned long addr,
 						size_t size, bool write,
 						unsigned long ret_ip)
 {
+	if (!kasan_arch_is_ready())
+		return true;
+
 	if (unlikely(size == 0))
 		return true;
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 8f450bc28045..4dbc8def64f4 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -449,6 +449,12 @@ static inline void kasan_poison_last_granule(const void *address, size_t size) {
 
 #endif /* CONFIG_KASAN_GENERIC */
 
+#ifndef kasan_arch_is_ready
+static inline bool kasan_arch_is_ready(void) { return true; }
+#elif !defined(CONFIG_KASAN_GENERIC) || !defined(CONFIG_KASAN_OUTLINE)
+#error kasan_arch_is_ready only works in KASAN generic outline mode!
+#endif
+
 /*
  * Exported functions for interfaces called from assembly or from generated
  * code. Declarations here to avoid warning about missing declarations.
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 082ee5b6d9a1..8d95ee52d019 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -73,6 +73,9 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 {
 	void *shadow_start, *shadow_end;
 
+	if (!kasan_arch_is_ready())
+		return;
+
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
 	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
@@ -99,6 +102,9 @@ EXPORT_SYMBOL(kasan_poison);
 #ifdef CONFIG_KASAN_GENERIC
 void kasan_poison_last_granule(const void *addr, size_t size)
 {
+	if (!kasan_arch_is_ready())
+		return;
+
 	if (size & KASAN_GRANULE_MASK) {
 		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
 		*shadow = size & KASAN_GRANULE_MASK;
```
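To see how the hook is meant to be exercised, here is a sketch of an arch-side init path flipping the readiness flag once the shadow is mapped; the flag and helper names are assumptions for illustration, not code from this series. Until the flag is set, the guarded paths above (kasan_poison(), kasan_poison_last_granule(), check_region_inline() and ____kasan_slab_free()) return early instead of dereferencing not-yet-mapped shadow.

```c
/*
 * Hypothetical arch kasan_init() -- a sketch only; the flag name and the
 * helper are assumptions, not taken from this series.
 */
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/types.h>

bool kasan_arch_ready;	/* read by the kasan_arch_is_ready() sketch above */

static void __init kasan_map_shadow(void)	/* hypothetical helper */
{
	/* ... allocate, map and zero the shadow for all kernel memory ... */
}

void __init kasan_init(void)
{
	kasan_map_shadow();

	/*
	 * Only now may the generic KASAN code touch the shadow; before this
	 * store every guarded function bails out early.
	 */
	WRITE_ONCE(kasan_arch_ready, true);
}
```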