Message ID | 20200518062730.4665-1-walter-zh.wu@mediatek.com (mailing list archive)
---|---
State | New, archived
Series | None
On Mon, May 18, 2020 at 8:27 AM Walter Wu <walter-zh.wu@mediatek.com> wrote: > > Move free track from slub alloc meta-data to slub free meta-data in > order to make struct kasan_free_meta size is 16 bytes. It is a good > size because it is the minimal redzone size and a good number of > alignment. > > For free track in generic KASAN, we do the modification in struct > kasan_alloc_meta and kasan_free_meta: > - remove free track from kasan_alloc_meta. > - add free track into kasan_free_meta. > > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437 > > Signed-off-by: Walter Wu <walter-zh.wu@mediatek.com> > Suggested-by: Dmitry Vyukov <dvyukov@google.com> > Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> > Cc: Dmitry Vyukov <dvyukov@google.com> > Cc: Alexander Potapenko <glider@google.com> > --- > mm/kasan/common.c | 33 ++++++++++----------------------- > mm/kasan/generic.c | 18 ++++++++++++++++++ > mm/kasan/kasan.h | 7 +++++++ > mm/kasan/report.c | 20 -------------------- > mm/kasan/tags.c | 37 +++++++++++++++++++++++++++++++++++++ > 5 files changed, 72 insertions(+), 43 deletions(-) > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c > index 8bc618289bb1..6500bc2bb70c 100644 > --- a/mm/kasan/common.c > +++ b/mm/kasan/common.c > @@ -51,7 +51,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags) > return stack_depot_save(entries, nr_entries, flags); > } > > -static inline void set_track(struct kasan_track *track, gfp_t flags) > +void kasan_set_track(struct kasan_track *track, gfp_t flags) > { > track->pid = current->pid; > track->stack = kasan_save_stack(flags); > @@ -249,9 +249,7 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, > *size += sizeof(struct kasan_alloc_meta); > > /* Add free meta. */ > - if (IS_ENABLED(CONFIG_KASAN_GENERIC) && > - (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || > - cache->object_size < sizeof(struct kasan_free_meta))) { > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) { Why do we need to increase object size unconditionally? We only store info in free track when the object is free, so I would assume we still can generally overlap free track and the object itself. We store free track at the same time we use the quarantine link, and the quarantine link was overlapped with the object just fine. With this change we indeed increase object size, which we do not want in general. 
> cache->kasan_info.free_meta_offset = *size; > *size += sizeof(struct kasan_free_meta); > } > @@ -299,24 +297,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache, > return (void *)object + cache->kasan_info.free_meta_offset; > } > > - > -static void kasan_set_free_info(struct kmem_cache *cache, > - void *object, u8 tag) > -{ > - struct kasan_alloc_meta *alloc_meta; > - u8 idx = 0; > - > - alloc_meta = get_alloc_info(cache, object); > - > -#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > - idx = alloc_meta->free_track_idx; > - alloc_meta->free_pointer_tag[idx] = tag; > - alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS; > -#endif > - > - set_track(&alloc_meta->free_track[idx], GFP_NOWAIT); > -} > - > void kasan_poison_slab(struct page *page) > { > unsigned long i; > @@ -396,6 +376,13 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, > alloc_info = get_alloc_info(cache, object); > __memset(alloc_info, 0, sizeof(*alloc_info)); > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) { > + struct kasan_free_meta *free_info; > + > + free_info = get_free_info(cache, object); > + __memset(free_info, 0, sizeof(*free_info)); If we overlap free track with object, this will not be needed as well, right? > + } > + > if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) > object = set_tag(object, > assign_tag(cache, object, true, false)); > @@ -492,7 +479,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, > KASAN_KMALLOC_REDZONE); > > if (cache->flags & SLAB_KASAN) > - set_track(&get_alloc_info(cache, object)->alloc_track, flags); > + kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags); > > return set_tag(object, tag); > } > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c > index 78d8e0a75a8a..988bc095b738 100644 > --- a/mm/kasan/generic.c > +++ b/mm/kasan/generic.c > @@ -345,3 +345,21 @@ void kasan_record_aux_stack(void *addr) > alloc_info->rcu_stack[1] = alloc_info->rcu_stack[0]; > alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT); > } > + > +void kasan_set_free_info(struct kmem_cache *cache, > + void *object, u8 tag) > +{ > + struct kasan_free_meta *free_meta; > + > + free_meta = get_free_info(cache, object); > + kasan_set_track(&free_meta->free_track, GFP_NOWAIT); > +} > + > +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > + void *object, u8 tag) > +{ > + struct kasan_free_meta *free_meta; > + > + free_meta = get_free_info(cache, object); > + return &free_meta->free_track; > +} > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h > index 870c5dd07756..87ee3626b8b0 100644 > --- a/mm/kasan/kasan.h > +++ b/mm/kasan/kasan.h > @@ -127,6 +127,9 @@ struct kasan_free_meta { > * Otherwise it might be used for the allocator freelist. 
> */ > struct qlist_node quarantine_link; > +#ifdef CONFIG_KASAN_GENERIC > + struct kasan_track free_track; > +#endif > }; > > struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, > @@ -168,6 +171,10 @@ void kasan_report_invalid_free(void *object, unsigned long ip); > struct page *kasan_addr_to_page(const void *addr); > > depot_stack_handle_t kasan_save_stack(gfp_t flags); > +void kasan_set_track(struct kasan_track *track, gfp_t flags); > +void kasan_set_free_info(struct kmem_cache *cache, void *object, u8 tag); > +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > + void *object, u8 tag); > > #if defined(CONFIG_KASAN_GENERIC) && \ > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB)) > diff --git a/mm/kasan/report.c b/mm/kasan/report.c > index 5ee66cf7e27c..7e9f9f6d5e85 100644 > --- a/mm/kasan/report.c > +++ b/mm/kasan/report.c > @@ -159,26 +159,6 @@ static void describe_object_addr(struct kmem_cache *cache, void *object, > (void *)(object_addr + cache->object_size)); > } > > -static struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > - void *object, u8 tag) > -{ > - struct kasan_alloc_meta *alloc_meta; > - int i = 0; > - > - alloc_meta = get_alloc_info(cache, object); > - > -#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > - for (i = 0; i < KASAN_NR_FREE_STACKS; i++) { > - if (alloc_meta->free_pointer_tag[i] == tag) > - break; > - } > - if (i == KASAN_NR_FREE_STACKS) > - i = alloc_meta->free_track_idx; > -#endif > - > - return &alloc_meta->free_track[i]; > -} > - > #ifdef CONFIG_KASAN_GENERIC > static void print_stack(depot_stack_handle_t stack) > { > diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c > index 25b7734e7013..201dee5d6ae0 100644 > --- a/mm/kasan/tags.c > +++ b/mm/kasan/tags.c > @@ -162,3 +162,40 @@ void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size) > kasan_poison_shadow((void *)addr, size, tag); > } > EXPORT_SYMBOL(__hwasan_tag_memory); > + > +void kasan_set_free_info(struct kmem_cache *cache, > + void *object, u8 tag) > +{ > + struct kasan_alloc_meta *alloc_meta; > + u8 idx = 0; > + > + alloc_meta = get_alloc_info(cache, object); > + > +#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > + idx = alloc_meta->free_track_idx; > + alloc_meta->free_pointer_tag[idx] = tag; > + alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS; > +#endif > + > + kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT); > +} > + > +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > + void *object, u8 tag) > +{ > + struct kasan_alloc_meta *alloc_meta; > + int i = 0; > + > + alloc_meta = get_alloc_info(cache, object); > + > +#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > + for (i = 0; i < KASAN_NR_FREE_STACKS; i++) { > + if (alloc_meta->free_pointer_tag[i] == tag) > + break; > + } > + if (i == KASAN_NR_FREE_STACKS) > + i = alloc_meta->free_track_idx; > +#endif > + > + return &alloc_meta->free_track[i]; > +} > -- > 2.18.0
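A minimal sketch of the overlap Dmitry describes, adapted from the hunk this patch removes from kasan_cache_create(). The function name kasan_cache_create_sketch and the comments are illustrative assumptions rather than kernel code, and the snippet assumes the kernel-internal types from mm/kasan/kasan.h and the slab headers are in scope. The point is that when none of the conditions hold, free_meta_offset stays 0, so get_free_info() returns the object pointer itself and the free metadata reuses the first bytes of the dead object instead of growing it:

```c
/* Sketch only: keep the original conditional rather than adding free meta
 * unconditionally for generic KASAN.
 */
void kasan_cache_create_sketch(struct kmem_cache *cache, unsigned int *size)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		/* Object memory cannot be reused, so put free meta in the redzone. */
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	/*
	 * Otherwise free_meta_offset stays 0, and get_free_info(), which
	 * returns (void *)object + cache->kasan_info.free_meta_offset,
	 * points at the object itself: quarantine_link and free_track then
	 * overlap the object only while it is free, and the object size
	 * does not grow.
	 */
}
```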
On Mon, 2020-05-18 at 12:18 +0200, Dmitry Vyukov wrote: > On Mon, May 18, 2020 at 8:27 AM Walter Wu <walter-zh.wu@mediatek.com> wrote: > > > > Move free track from slub alloc meta-data to slub free meta-data in > > order to make struct kasan_free_meta size is 16 bytes. It is a good > > size because it is the minimal redzone size and a good number of > > alignment. > > > > For free track in generic KASAN, we do the modification in struct > > kasan_alloc_meta and kasan_free_meta: > > - remove free track from kasan_alloc_meta. > > - add free track into kasan_free_meta. > > > > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437 > > > > Signed-off-by: Walter Wu <walter-zh.wu@mediatek.com> > > Suggested-by: Dmitry Vyukov <dvyukov@google.com> > > Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> > > Cc: Dmitry Vyukov <dvyukov@google.com> > > Cc: Alexander Potapenko <glider@google.com> > > --- > > mm/kasan/common.c | 33 ++++++++++----------------------- > > mm/kasan/generic.c | 18 ++++++++++++++++++ > > mm/kasan/kasan.h | 7 +++++++ > > mm/kasan/report.c | 20 -------------------- > > mm/kasan/tags.c | 37 +++++++++++++++++++++++++++++++++++++ > > 5 files changed, 72 insertions(+), 43 deletions(-) > > > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c > > index 8bc618289bb1..6500bc2bb70c 100644 > > --- a/mm/kasan/common.c > > +++ b/mm/kasan/common.c > > @@ -51,7 +51,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags) > > return stack_depot_save(entries, nr_entries, flags); > > } > > > > -static inline void set_track(struct kasan_track *track, gfp_t flags) > > +void kasan_set_track(struct kasan_track *track, gfp_t flags) > > { > > track->pid = current->pid; > > track->stack = kasan_save_stack(flags); > > @@ -249,9 +249,7 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, > > *size += sizeof(struct kasan_alloc_meta); > > > > /* Add free meta. */ > > - if (IS_ENABLED(CONFIG_KASAN_GENERIC) && > > - (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || > > - cache->object_size < sizeof(struct kasan_free_meta))) { > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) { > > Why do we need to increase object size unconditionally? > We only store info in free track when the object is free, so I would > assume we still can generally overlap free track and the object > itself. We store free track at the same time we use the quarantine > link, and the quarantine link was overlapped with the object just > fine. > With this change we indeed increase object size, which we do not want > in general. > If it doesn't add free meta, but we always store free track into the object, Is it safe? 
> > > cache->kasan_info.free_meta_offset = *size; > > *size += sizeof(struct kasan_free_meta); > > } > > @@ -299,24 +297,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache, > > return (void *)object + cache->kasan_info.free_meta_offset; > > } > > > > - > > -static void kasan_set_free_info(struct kmem_cache *cache, > > - void *object, u8 tag) > > -{ > > - struct kasan_alloc_meta *alloc_meta; > > - u8 idx = 0; > > - > > - alloc_meta = get_alloc_info(cache, object); > > - > > -#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > > - idx = alloc_meta->free_track_idx; > > - alloc_meta->free_pointer_tag[idx] = tag; > > - alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS; > > -#endif > > - > > - set_track(&alloc_meta->free_track[idx], GFP_NOWAIT); > > -} > > - > > void kasan_poison_slab(struct page *page) > > { > > unsigned long i; > > @@ -396,6 +376,13 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, > > alloc_info = get_alloc_info(cache, object); > > __memset(alloc_info, 0, sizeof(*alloc_info)); > > > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) { > > + struct kasan_free_meta *free_info; > > + > > + free_info = get_free_info(cache, object); > > + __memset(free_info, 0, sizeof(*free_info)); > > If we overlap free track with object, this will not be needed as well, right? > Should we not consider those objects which have adding free meta? If they exist, then we should init their meta data when object re-allocate. struct kasan_free_meta { struct qlist_node quarantine_link; struct kasan_track free_track; }; > > + } > > + > > if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) > > object = set_tag(object, > > assign_tag(cache, object, true, false)); > > @@ -492,7 +479,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, > > KASAN_KMALLOC_REDZONE); > > > > if (cache->flags & SLAB_KASAN) > > - set_track(&get_alloc_info(cache, object)->alloc_track, flags); > > + kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags); > > > > return set_tag(object, tag); > > } > > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c > > index 78d8e0a75a8a..988bc095b738 100644 > > --- a/mm/kasan/generic.c > > +++ b/mm/kasan/generic.c > > @@ -345,3 +345,21 @@ void kasan_record_aux_stack(void *addr) > > alloc_info->rcu_stack[1] = alloc_info->rcu_stack[0]; > > alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT); > > } > > + > > +void kasan_set_free_info(struct kmem_cache *cache, > > + void *object, u8 tag) > > +{ > > + struct kasan_free_meta *free_meta; > > + > > + free_meta = get_free_info(cache, object); > > + kasan_set_track(&free_meta->free_track, GFP_NOWAIT); > > +} > > + > > +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > > + void *object, u8 tag) > > +{ > > + struct kasan_free_meta *free_meta; > > + > > + free_meta = get_free_info(cache, object); > > + return &free_meta->free_track; > > +} > > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h > > index 870c5dd07756..87ee3626b8b0 100644 > > --- a/mm/kasan/kasan.h > > +++ b/mm/kasan/kasan.h > > @@ -127,6 +127,9 @@ struct kasan_free_meta { > > * Otherwise it might be used for the allocator freelist. 
> > */ > > struct qlist_node quarantine_link; > > +#ifdef CONFIG_KASAN_GENERIC > > + struct kasan_track free_track; > > +#endif > > }; > > > > struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, > > @@ -168,6 +171,10 @@ void kasan_report_invalid_free(void *object, unsigned long ip); > > struct page *kasan_addr_to_page(const void *addr); > > > > depot_stack_handle_t kasan_save_stack(gfp_t flags); > > +void kasan_set_track(struct kasan_track *track, gfp_t flags); > > +void kasan_set_free_info(struct kmem_cache *cache, void *object, u8 tag); > > +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > > + void *object, u8 tag); > > > > #if defined(CONFIG_KASAN_GENERIC) && \ > > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB)) > > diff --git a/mm/kasan/report.c b/mm/kasan/report.c > > index 5ee66cf7e27c..7e9f9f6d5e85 100644 > > --- a/mm/kasan/report.c > > +++ b/mm/kasan/report.c > > @@ -159,26 +159,6 @@ static void describe_object_addr(struct kmem_cache *cache, void *object, > > (void *)(object_addr + cache->object_size)); > > } > > > > -static struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > > - void *object, u8 tag) > > -{ > > - struct kasan_alloc_meta *alloc_meta; > > - int i = 0; > > - > > - alloc_meta = get_alloc_info(cache, object); > > - > > -#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > > - for (i = 0; i < KASAN_NR_FREE_STACKS; i++) { > > - if (alloc_meta->free_pointer_tag[i] == tag) > > - break; > > - } > > - if (i == KASAN_NR_FREE_STACKS) > > - i = alloc_meta->free_track_idx; > > -#endif > > - > > - return &alloc_meta->free_track[i]; > > -} > > - > > #ifdef CONFIG_KASAN_GENERIC > > static void print_stack(depot_stack_handle_t stack) > > { > > diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c > > index 25b7734e7013..201dee5d6ae0 100644 > > --- a/mm/kasan/tags.c > > +++ b/mm/kasan/tags.c > > @@ -162,3 +162,40 @@ void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size) > > kasan_poison_shadow((void *)addr, size, tag); > > } > > EXPORT_SYMBOL(__hwasan_tag_memory); > > + > > +void kasan_set_free_info(struct kmem_cache *cache, > > + void *object, u8 tag) > > +{ > > + struct kasan_alloc_meta *alloc_meta; > > + u8 idx = 0; > > + > > + alloc_meta = get_alloc_info(cache, object); > > + > > +#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > > + idx = alloc_meta->free_track_idx; > > + alloc_meta->free_pointer_tag[idx] = tag; > > + alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS; > > +#endif > > + > > + kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT); > > +} > > + > > +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > > + void *object, u8 tag) > > +{ > > + struct kasan_alloc_meta *alloc_meta; > > + int i = 0; > > + > > + alloc_meta = get_alloc_info(cache, object); > > + > > +#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > > + for (i = 0; i < KASAN_NR_FREE_STACKS; i++) { > > + if (alloc_meta->free_pointer_tag[i] == tag) > > + break; > > + } > > + if (i == KASAN_NR_FREE_STACKS) > > + i = alloc_meta->free_track_idx; > > +#endif > > + > > + return &alloc_meta->free_track[i]; > > +} > > -- > > 2.18.0
On Mon, 2020-05-18 at 19:27 +0800, Walter Wu wrote: > On Mon, 2020-05-18 at 12:18 +0200, Dmitry Vyukov wrote: > > On Mon, May 18, 2020 at 8:27 AM Walter Wu <walter-zh.wu@mediatek.com> wrote: > > > > > > Move free track from slub alloc meta-data to slub free meta-data in > > > order to make struct kasan_free_meta size is 16 bytes. It is a good > > > size because it is the minimal redzone size and a good number of > > > alignment. > > > > > > For free track in generic KASAN, we do the modification in struct > > > kasan_alloc_meta and kasan_free_meta: > > > - remove free track from kasan_alloc_meta. > > > - add free track into kasan_free_meta. > > > > > > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437 > > > > > > Signed-off-by: Walter Wu <walter-zh.wu@mediatek.com> > > > Suggested-by: Dmitry Vyukov <dvyukov@google.com> > > > Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> > > > Cc: Dmitry Vyukov <dvyukov@google.com> > > > Cc: Alexander Potapenko <glider@google.com> > > > --- > > > mm/kasan/common.c | 33 ++++++++++----------------------- > > > mm/kasan/generic.c | 18 ++++++++++++++++++ > > > mm/kasan/kasan.h | 7 +++++++ > > > mm/kasan/report.c | 20 -------------------- > > > mm/kasan/tags.c | 37 +++++++++++++++++++++++++++++++++++++ > > > 5 files changed, 72 insertions(+), 43 deletions(-) > > > > > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c > > > index 8bc618289bb1..6500bc2bb70c 100644 > > > --- a/mm/kasan/common.c > > > +++ b/mm/kasan/common.c > > > @@ -51,7 +51,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags) > > > return stack_depot_save(entries, nr_entries, flags); > > > } > > > > > > -static inline void set_track(struct kasan_track *track, gfp_t flags) > > > +void kasan_set_track(struct kasan_track *track, gfp_t flags) > > > { > > > track->pid = current->pid; > > > track->stack = kasan_save_stack(flags); > > > @@ -249,9 +249,7 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, > > > *size += sizeof(struct kasan_alloc_meta); > > > > > > /* Add free meta. */ > > > - if (IS_ENABLED(CONFIG_KASAN_GENERIC) && > > > - (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || > > > - cache->object_size < sizeof(struct kasan_free_meta))) { > > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) { > > > > Why do we need to increase object size unconditionally? > > We only store info in free track when the object is free, so I would > > assume we still can generally overlap free track and the object > > itself. We store free track at the same time we use the quarantine > > link, and the quarantine link was overlapped with the object just > > fine. > > With this change we indeed increase object size, which we do not want > > in general. > > > > If it doesn't add free meta, but we always store free track into the > object, Is it safe? 
> > > > > > cache->kasan_info.free_meta_offset = *size; > > > *size += sizeof(struct kasan_free_meta); > > > } > > > @@ -299,24 +297,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache, > > > return (void *)object + cache->kasan_info.free_meta_offset; > > > } > > > > > > - > > > -static void kasan_set_free_info(struct kmem_cache *cache, > > > - void *object, u8 tag) > > > -{ > > > - struct kasan_alloc_meta *alloc_meta; > > > - u8 idx = 0; > > > - > > > - alloc_meta = get_alloc_info(cache, object); > > > - > > > -#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > > > - idx = alloc_meta->free_track_idx; > > > - alloc_meta->free_pointer_tag[idx] = tag; > > > - alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS; > > > -#endif > > > - > > > - set_track(&alloc_meta->free_track[idx], GFP_NOWAIT); > > > -} > > > - > > > void kasan_poison_slab(struct page *page) > > > { > > > unsigned long i; > > > @@ -396,6 +376,13 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, > > > alloc_info = get_alloc_info(cache, object); > > > __memset(alloc_info, 0, sizeof(*alloc_info)); > > > > > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) { > > > + struct kasan_free_meta *free_info; > > > + > > > + free_info = get_free_info(cache, object); > > > + __memset(free_info, 0, sizeof(*free_info)); > > > > If we overlap free track with object, this will not be needed as well, right? > > I thought about it, I think you are right, because the free track must be stored when object is free, so even don't clean this meta data. It doesn't matter. Thanks for your review. If there are no other problems, I will send next patch. Thanks. > > Should we not consider those objects which have adding free meta? If > they exist, then we should init their meta data when object re-allocate. 
> > struct kasan_free_meta { > struct qlist_node quarantine_link; > struct kasan_track free_track; > }; > > > > > + } > > > + > > > if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) > > > object = set_tag(object, > > > assign_tag(cache, object, true, false)); > > > @@ -492,7 +479,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, > > > KASAN_KMALLOC_REDZONE); > > > > > > if (cache->flags & SLAB_KASAN) > > > - set_track(&get_alloc_info(cache, object)->alloc_track, flags); > > > + kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags); > > > > > > return set_tag(object, tag); > > > } > > > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c > > > index 78d8e0a75a8a..988bc095b738 100644 > > > --- a/mm/kasan/generic.c > > > +++ b/mm/kasan/generic.c > > > @@ -345,3 +345,21 @@ void kasan_record_aux_stack(void *addr) > > > alloc_info->rcu_stack[1] = alloc_info->rcu_stack[0]; > > > alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT); > > > } > > > + > > > +void kasan_set_free_info(struct kmem_cache *cache, > > > + void *object, u8 tag) > > > +{ > > > + struct kasan_free_meta *free_meta; > > > + > > > + free_meta = get_free_info(cache, object); > > > + kasan_set_track(&free_meta->free_track, GFP_NOWAIT); > > > +} > > > + > > > +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > > > + void *object, u8 tag) > > > +{ > > > + struct kasan_free_meta *free_meta; > > > + > > > + free_meta = get_free_info(cache, object); > > > + return &free_meta->free_track; > > > +} > > > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h > > > index 870c5dd07756..87ee3626b8b0 100644 > > > --- a/mm/kasan/kasan.h > > > +++ b/mm/kasan/kasan.h > > > @@ -127,6 +127,9 @@ struct kasan_free_meta { > > > * Otherwise it might be used for the allocator freelist. 
> > > */ > > > struct qlist_node quarantine_link; > > > +#ifdef CONFIG_KASAN_GENERIC > > > + struct kasan_track free_track; > > > +#endif > > > }; > > > > > > struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, > > > @@ -168,6 +171,10 @@ void kasan_report_invalid_free(void *object, unsigned long ip); > > > struct page *kasan_addr_to_page(const void *addr); > > > > > > depot_stack_handle_t kasan_save_stack(gfp_t flags); > > > +void kasan_set_track(struct kasan_track *track, gfp_t flags); > > > +void kasan_set_free_info(struct kmem_cache *cache, void *object, u8 tag); > > > +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > > > + void *object, u8 tag); > > > > > > #if defined(CONFIG_KASAN_GENERIC) && \ > > > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB)) > > > diff --git a/mm/kasan/report.c b/mm/kasan/report.c > > > index 5ee66cf7e27c..7e9f9f6d5e85 100644 > > > --- a/mm/kasan/report.c > > > +++ b/mm/kasan/report.c > > > @@ -159,26 +159,6 @@ static void describe_object_addr(struct kmem_cache *cache, void *object, > > > (void *)(object_addr + cache->object_size)); > > > } > > > > > > -static struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > > > - void *object, u8 tag) > > > -{ > > > - struct kasan_alloc_meta *alloc_meta; > > > - int i = 0; > > > - > > > - alloc_meta = get_alloc_info(cache, object); > > > - > > > -#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > > > - for (i = 0; i < KASAN_NR_FREE_STACKS; i++) { > > > - if (alloc_meta->free_pointer_tag[i] == tag) > > > - break; > > > - } > > > - if (i == KASAN_NR_FREE_STACKS) > > > - i = alloc_meta->free_track_idx; > > > -#endif > > > - > > > - return &alloc_meta->free_track[i]; > > > -} > > > - > > > #ifdef CONFIG_KASAN_GENERIC > > > static void print_stack(depot_stack_handle_t stack) > > > { > > > diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c > > > index 25b7734e7013..201dee5d6ae0 100644 > > > --- a/mm/kasan/tags.c > > > +++ b/mm/kasan/tags.c > > > @@ -162,3 +162,40 @@ void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size) > > > kasan_poison_shadow((void *)addr, size, tag); > > > } > > > EXPORT_SYMBOL(__hwasan_tag_memory); > > > + > > > +void kasan_set_free_info(struct kmem_cache *cache, > > > + void *object, u8 tag) > > > +{ > > > + struct kasan_alloc_meta *alloc_meta; > > > + u8 idx = 0; > > > + > > > + alloc_meta = get_alloc_info(cache, object); > > > + > > > +#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > > > + idx = alloc_meta->free_track_idx; > > > + alloc_meta->free_pointer_tag[idx] = tag; > > > + alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS; > > > +#endif > > > + > > > + kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT); > > > +} > > > + > > > +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, > > > + void *object, u8 tag) > > > +{ > > > + struct kasan_alloc_meta *alloc_meta; > > > + int i = 0; > > > + > > > + alloc_meta = get_alloc_info(cache, object); > > > + > > > +#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY > > > + for (i = 0; i < KASAN_NR_FREE_STACKS; i++) { > > > + if (alloc_meta->free_pointer_tag[i] == tag) > > > + break; > > > + } > > > + if (i == KASAN_NR_FREE_STACKS) > > > + i = alloc_meta->free_track_idx; > > > +#endif > > > + > > > + return &alloc_meta->free_track[i]; > > > +} > > > -- > > > 2.18.0 >
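To make the conclusion above concrete, here are the two helpers from this patch with explanatory comments added; the code itself is taken from the diff, only the comments are new. Every field of the free track is overwritten on each free, and, as Dmitry notes, the quarantine link is likewise written at free time, so nothing in kasan_free_meta is read before being rewritten and the extra __memset() in kasan_init_slab_obj() can be dropped:

```c
/* mm/kasan/common.c (from this patch) */
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	/* Both members are overwritten every time a track is recorded. */
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

/* mm/kasan/generic.c (from this patch) */
void kasan_set_free_info(struct kmem_cache *cache, void *object, u8 tag)
{
	struct kasan_free_meta *free_meta;

	free_meta = get_free_info(cache, object);
	/*
	 * Stale bytes left over from the previous life of the object are
	 * never consumed: the free track is fully written here before it
	 * can show up in a report.
	 */
	kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
}
```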
diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 8bc618289bb1..6500bc2bb70c 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -51,7 +51,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags) return stack_depot_save(entries, nr_entries, flags); } -static inline void set_track(struct kasan_track *track, gfp_t flags) +void kasan_set_track(struct kasan_track *track, gfp_t flags) { track->pid = current->pid; track->stack = kasan_save_stack(flags); @@ -249,9 +249,7 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, *size += sizeof(struct kasan_alloc_meta); /* Add free meta. */ - if (IS_ENABLED(CONFIG_KASAN_GENERIC) && - (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || - cache->object_size < sizeof(struct kasan_free_meta))) { + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) { cache->kasan_info.free_meta_offset = *size; *size += sizeof(struct kasan_free_meta); } @@ -299,24 +297,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache, return (void *)object + cache->kasan_info.free_meta_offset; } - -static void kasan_set_free_info(struct kmem_cache *cache, - void *object, u8 tag) -{ - struct kasan_alloc_meta *alloc_meta; - u8 idx = 0; - - alloc_meta = get_alloc_info(cache, object); - -#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY - idx = alloc_meta->free_track_idx; - alloc_meta->free_pointer_tag[idx] = tag; - alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS; -#endif - - set_track(&alloc_meta->free_track[idx], GFP_NOWAIT); -} - void kasan_poison_slab(struct page *page) { unsigned long i; @@ -396,6 +376,13 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, alloc_info = get_alloc_info(cache, object); __memset(alloc_info, 0, sizeof(*alloc_info)); + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) { + struct kasan_free_meta *free_info; + + free_info = get_free_info(cache, object); + __memset(free_info, 0, sizeof(*free_info)); + } + if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) object = set_tag(object, assign_tag(cache, object, true, false)); @@ -492,7 +479,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, KASAN_KMALLOC_REDZONE); if (cache->flags & SLAB_KASAN) - set_track(&get_alloc_info(cache, object)->alloc_track, flags); + kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags); return set_tag(object, tag); } diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 78d8e0a75a8a..988bc095b738 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -345,3 +345,21 @@ void kasan_record_aux_stack(void *addr) alloc_info->rcu_stack[1] = alloc_info->rcu_stack[0]; alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT); } + +void kasan_set_free_info(struct kmem_cache *cache, + void *object, u8 tag) +{ + struct kasan_free_meta *free_meta; + + free_meta = get_free_info(cache, object); + kasan_set_track(&free_meta->free_track, GFP_NOWAIT); +} + +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, + void *object, u8 tag) +{ + struct kasan_free_meta *free_meta; + + free_meta = get_free_info(cache, object); + return &free_meta->free_track; +} diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 870c5dd07756..87ee3626b8b0 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -127,6 +127,9 @@ struct kasan_free_meta { * Otherwise it might be used for the allocator freelist. 
*/ struct qlist_node quarantine_link; +#ifdef CONFIG_KASAN_GENERIC + struct kasan_track free_track; +#endif }; struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, @@ -168,6 +171,10 @@ void kasan_report_invalid_free(void *object, unsigned long ip); struct page *kasan_addr_to_page(const void *addr); depot_stack_handle_t kasan_save_stack(gfp_t flags); +void kasan_set_track(struct kasan_track *track, gfp_t flags); +void kasan_set_free_info(struct kmem_cache *cache, void *object, u8 tag); +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, + void *object, u8 tag); #if defined(CONFIG_KASAN_GENERIC) && \ (defined(CONFIG_SLAB) || defined(CONFIG_SLUB)) diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 5ee66cf7e27c..7e9f9f6d5e85 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -159,26 +159,6 @@ static void describe_object_addr(struct kmem_cache *cache, void *object, (void *)(object_addr + cache->object_size)); } -static struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, - void *object, u8 tag) -{ - struct kasan_alloc_meta *alloc_meta; - int i = 0; - - alloc_meta = get_alloc_info(cache, object); - -#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY - for (i = 0; i < KASAN_NR_FREE_STACKS; i++) { - if (alloc_meta->free_pointer_tag[i] == tag) - break; - } - if (i == KASAN_NR_FREE_STACKS) - i = alloc_meta->free_track_idx; -#endif - - return &alloc_meta->free_track[i]; -} - #ifdef CONFIG_KASAN_GENERIC static void print_stack(depot_stack_handle_t stack) { diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 25b7734e7013..201dee5d6ae0 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -162,3 +162,40 @@ void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size) kasan_poison_shadow((void *)addr, size, tag); } EXPORT_SYMBOL(__hwasan_tag_memory); + +void kasan_set_free_info(struct kmem_cache *cache, + void *object, u8 tag) +{ + struct kasan_alloc_meta *alloc_meta; + u8 idx = 0; + + alloc_meta = get_alloc_info(cache, object); + +#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY + idx = alloc_meta->free_track_idx; + alloc_meta->free_pointer_tag[idx] = tag; + alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS; +#endif + + kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT); +} + +struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, + void *object, u8 tag) +{ + struct kasan_alloc_meta *alloc_meta; + int i = 0; + + alloc_meta = get_alloc_info(cache, object); + +#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY + for (i = 0; i < KASAN_NR_FREE_STACKS; i++) { + if (alloc_meta->free_pointer_tag[i] == tag) + break; + } + if (i == KASAN_NR_FREE_STACKS) + i = alloc_meta->free_track_idx; +#endif + + return &alloc_meta->free_track[i]; +}
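For the software tag-based mode, the patch keeps the free-track ring inside kasan_alloc_meta. Below is the lookup from mm/kasan/tags.c in this patch with comments added (only the comments are new), showing how CONFIG_KASAN_SW_TAGS_IDENTIFY matches the pointer tag of a bad access against previously recorded frees:

```c
struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
					 void *object, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	int i = 0;

	alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	/* Look for the recorded free whose pointer tag matches the access. */
	for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
		if (alloc_meta->free_pointer_tag[i] == tag)
			break;
	}
	/* No exact match: fall back to the index tracked in free_track_idx. */
	if (i == KASAN_NR_FREE_STACKS)
		i = alloc_meta->free_track_idx;
#endif

	return &alloc_meta->free_track[i];
}
```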
Move the free track from the slub alloc meta-data to the slub free meta-data so that struct kasan_free_meta is 16 bytes. This is a good size because it is the minimal redzone size and gives good alignment.

For the free track in generic KASAN, modify struct kasan_alloc_meta and struct kasan_free_meta:
- remove the free track from kasan_alloc_meta.
- add the free track to kasan_free_meta.

[1] https://bugzilla.kernel.org/show_bug.cgi?id=198437

Signed-off-by: Walter Wu <walter-zh.wu@mediatek.com>
Suggested-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
---
 mm/kasan/common.c  | 33 ++++++++++-----------------------
 mm/kasan/generic.c | 18 ++++++++++++++++++
 mm/kasan/kasan.h   |  7 +++++++
 mm/kasan/report.c  | 20 --------------------
 mm/kasan/tags.c    | 37 +++++++++++++++++++++++++++++++++++++
 5 files changed, 72 insertions(+), 43 deletions(-)
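A rough size sketch behind the 16-byte figure, assuming a 64-bit build where depot_stack_handle_t is a 32-bit stack depot handle and qlist_node holds a single pointer; the byte counts in the comments depend on that assumption:

```c
struct kasan_track {
	u32 pid;                     /* 4 bytes */
	depot_stack_handle_t stack;  /* 4 bytes, stack depot handle */
};                                   /* 8 bytes total */

struct kasan_free_meta {
	struct qlist_node quarantine_link;  /* one pointer: 8 bytes */
#ifdef CONFIG_KASAN_GENERIC
	struct kasan_track free_track;      /* 8 bytes */
#endif
};  /* 16 bytes total, matching the minimal redzone size mentioned above */
```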