Message ID | 1404905415-9046-15-git-send-email-a.ryabinin@samsung.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Wed, Jul 09, 2014 at 03:30:08PM +0400, Andrey Ryabinin wrote: > Some code in slub could validly touch memory marked by kasan as unaccessible. > Even though slub.c doesn't instrumented, functions called in it are instrumented, > so to avoid false positive reports such places are protected by > kasan_disable_local()/kasan_enable_local() calls. > > Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com> > --- > mm/slub.c | 21 +++++++++++++++++++-- > 1 file changed, 19 insertions(+), 2 deletions(-) > > diff --git a/mm/slub.c b/mm/slub.c > index 6ddedf9..c8dbea7 100644 > --- a/mm/slub.c > +++ b/mm/slub.c > @@ -560,8 +560,10 @@ static void print_tracking(struct kmem_cache *s, void *object) > if (!(s->flags & SLAB_STORE_USER)) > return; > > + kasan_disable_local(); > print_track("Allocated", get_track(s, object, TRACK_ALLOC)); > print_track("Freed", get_track(s, object, TRACK_FREE)); > + kasan_enable_local(); I don't think that this is needed since print_track() doesn't call external function with object pointer. print_track() call pr_err(), but, before calling, it retrieve t->addrs[i] so memory access only occurs in slub.c. > } > > static void print_page_info(struct page *page) > @@ -604,6 +606,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) > unsigned int off; /* Offset of last byte */ > u8 *addr = page_address(page); > > + kasan_disable_local(); > + > print_tracking(s, p); > > print_page_info(page); > @@ -632,6 +636,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) > /* Beginning of the filler is the free pointer */ > print_section("Padding ", p + off, s->size - off); > > + kasan_enable_local(); > + > dump_stack(); > } And, I recommend that you put this hook on right place. At a glance, the problematic function is print_section() which have external function call, print_hex_dump(), with object pointer. If you disable kasan in print_section, all the below thing won't be needed, I guess. Thanks. 
> > @@ -1012,6 +1018,8 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, > struct page *page, > void *object, unsigned long addr) > { > + > + kasan_disable_local(); > if (!check_slab(s, page)) > goto bad; > > @@ -1028,6 +1036,7 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, > set_track(s, object, TRACK_ALLOC, addr); > trace(s, page, object, 1); > init_object(s, object, SLUB_RED_ACTIVE); > + kasan_enable_local(); > return 1; > > bad: > @@ -1041,6 +1050,7 @@ bad: > page->inuse = page->objects; > page->freelist = NULL; > } > + kasan_enable_local(); > return 0; > } > > @@ -1052,6 +1062,7 @@ static noinline struct kmem_cache_node *free_debug_processing( > > spin_lock_irqsave(&n->list_lock, *flags); > slab_lock(page); > + kasan_disable_local(); > > if (!check_slab(s, page)) > goto fail; > @@ -1088,6 +1099,7 @@ static noinline struct kmem_cache_node *free_debug_processing( > trace(s, page, object, 0); > init_object(s, object, SLUB_RED_INACTIVE); > out: > + kasan_enable_local(); > slab_unlock(page); > /* > * Keep node_lock to preserve integrity > @@ -1096,6 +1108,7 @@ out: > return n; > > fail: > + kasan_enable_local(); > slab_unlock(page); > spin_unlock_irqrestore(&n->list_lock, *flags); > slab_fix(s, "Object at 0x%p not freed", object); > @@ -1371,8 +1384,11 @@ static void setup_object(struct kmem_cache *s, struct page *page, > void *object) > { > setup_object_debug(s, page, object); > - if (unlikely(s->ctor)) > + if (unlikely(s->ctor)) { > + kasan_disable_local(); > s->ctor(object); > + kasan_enable_local(); > + } > } > static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) > @@ -1425,11 +1441,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page) > > if (kmem_cache_debug(s)) { > void *p; > - > + kasan_disable_local(); > slab_pad_check(s, page); > for_each_object(p, s, page_address(page), > page->objects) > check_object(s, page, p, SLUB_RED_INACTIVE); > + kasan_enable_local(); > } > > 
kmemcheck_free_shadow(page, compound_order(page)); > -- > 1.8.5.5 > > -- > To unsubscribe, send a message with 'unsubscribe linux-mm' in > the body to majordomo@kvack.org. For more info on Linux MM, > see: http://www.linux-mm.org/ . > Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
On 07/15/14 10:04, Joonsoo Kim wrote: > On Wed, Jul 09, 2014 at 03:30:08PM +0400, Andrey Ryabinin wrote: >> Some code in slub could validly touch memory marked by kasan as unaccessible. >> Even though slub.c doesn't instrumented, functions called in it are instrumented, >> so to avoid false positive reports such places are protected by >> kasan_disable_local()/kasan_enable_local() calls. >> >> Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com> >> --- >> mm/slub.c | 21 +++++++++++++++++++-- >> 1 file changed, 19 insertions(+), 2 deletions(-) >> >> diff --git a/mm/slub.c b/mm/slub.c >> index 6ddedf9..c8dbea7 100644 >> --- a/mm/slub.c >> +++ b/mm/slub.c >> @@ -560,8 +560,10 @@ static void print_tracking(struct kmem_cache *s, void *object) >> if (!(s->flags & SLAB_STORE_USER)) >> return; >> >> + kasan_disable_local(); >> print_track("Allocated", get_track(s, object, TRACK_ALLOC)); >> print_track("Freed", get_track(s, object, TRACK_FREE)); >> + kasan_enable_local(); > > I don't think that this is needed since print_track() doesn't call > external function with object pointer. print_track() call pr_err(), but, > before calling, it retrieve t->addrs[i] so memory access only occurs > in slub.c. > Agree. >> } >> >> static void print_page_info(struct page *page) >> @@ -604,6 +606,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) >> unsigned int off; /* Offset of last byte */ >> u8 *addr = page_address(page); >> >> + kasan_disable_local(); >> + >> print_tracking(s, p); >> >> print_page_info(page); >> @@ -632,6 +636,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) >> /* Beginning of the filler is the free pointer */ >> print_section("Padding ", p + off, s->size - off); >> >> + kasan_enable_local(); >> + >> dump_stack(); >> } > > And, I recommend that you put this hook on right place. 
> At a glance, the problematic function is print_section() which have > external function call, print_hex_dump(), with object pointer. > If you disable kasan in print_section, all the below thing won't be > needed, I guess. > Nope, at least memchr_inv() call in slab_pad_check will be a problem. I think putting disable/enable only where we strictly need them might be a problem for future maintenance of slub. If someone is going to add a new function call somewhere, he must ensure that it this call won't be a problem for kasan. > Thanks. > >> >> @@ -1012,6 +1018,8 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, >> struct page *page, >> void *object, unsigned long addr) >> { >> + >> + kasan_disable_local(); >> if (!check_slab(s, page)) >> goto bad; >> >> @@ -1028,6 +1036,7 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, >> set_track(s, object, TRACK_ALLOC, addr); >> trace(s, page, object, 1); >> init_object(s, object, SLUB_RED_ACTIVE); >> + kasan_enable_local(); >> return 1; >> >> bad: >> @@ -1041,6 +1050,7 @@ bad: >> page->inuse = page->objects; >> page->freelist = NULL; >> } >> + kasan_enable_local(); >> return 0; >> } >> >> @@ -1052,6 +1062,7 @@ static noinline struct kmem_cache_node *free_debug_processing( >> >> spin_lock_irqsave(&n->list_lock, *flags); >> slab_lock(page); >> + kasan_disable_local(); >> >> if (!check_slab(s, page)) >> goto fail; >> @@ -1088,6 +1099,7 @@ static noinline struct kmem_cache_node *free_debug_processing( >> trace(s, page, object, 0); >> init_object(s, object, SLUB_RED_INACTIVE); >> out: >> + kasan_enable_local(); >> slab_unlock(page); >> /* >> * Keep node_lock to preserve integrity >> @@ -1096,6 +1108,7 @@ out: >> return n; >> >> fail: >> + kasan_enable_local(); >> slab_unlock(page); >> spin_unlock_irqrestore(&n->list_lock, *flags); >> slab_fix(s, "Object at 0x%p not freed", object); >> @@ -1371,8 +1384,11 @@ static void setup_object(struct kmem_cache *s, struct page *page, >> void *object) >> { 
>> setup_object_debug(s, page, object); >> - if (unlikely(s->ctor)) >> + if (unlikely(s->ctor)) { >> + kasan_disable_local(); >> s->ctor(object); >> + kasan_enable_local(); >> + } >> } >> static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) >> @@ -1425,11 +1441,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page) >> >> if (kmem_cache_debug(s)) { >> void *p; >> - >> + kasan_disable_local(); >> slab_pad_check(s, page); >> for_each_object(p, s, page_address(page), >> page->objects) >> check_object(s, page, p, SLUB_RED_INACTIVE); >> + kasan_enable_local(); >> } >> >> kmemcheck_free_shadow(page, compound_order(page)); >> -- >> 1.8.5.5 >> >> -- >> To unsubscribe, send a message with 'unsubscribe linux-mm' in >> the body to majordomo@kvack.org. For more info on Linux MM, >> see: http://www.linux-mm.org/ . >> Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> >
On Tue, Jul 15, 2014 at 11:37:56AM +0400, Andrey Ryabinin wrote: > On 07/15/14 10:04, Joonsoo Kim wrote: > > On Wed, Jul 09, 2014 at 03:30:08PM +0400, Andrey Ryabinin wrote: > >> Some code in slub could validly touch memory marked by kasan as unaccessible. > >> Even though slub.c doesn't instrumented, functions called in it are instrumented, > >> so to avoid false positive reports such places are protected by > >> kasan_disable_local()/kasan_enable_local() calls. > >> > >> Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com> > >> --- > >> mm/slub.c | 21 +++++++++++++++++++-- > >> 1 file changed, 19 insertions(+), 2 deletions(-) > >> > >> diff --git a/mm/slub.c b/mm/slub.c > >> index 6ddedf9..c8dbea7 100644 > >> --- a/mm/slub.c > >> +++ b/mm/slub.c > >> @@ -560,8 +560,10 @@ static void print_tracking(struct kmem_cache *s, void *object) > >> if (!(s->flags & SLAB_STORE_USER)) > >> return; > >> > >> + kasan_disable_local(); > >> print_track("Allocated", get_track(s, object, TRACK_ALLOC)); > >> print_track("Freed", get_track(s, object, TRACK_FREE)); > >> + kasan_enable_local(); > > > > I don't think that this is needed since print_track() doesn't call > > external function with object pointer. print_track() call pr_err(), but, > > before calling, it retrieve t->addrs[i] so memory access only occurs > > in slub.c. > > > Agree. 
> > >> } > >> > >> static void print_page_info(struct page *page) > >> @@ -604,6 +606,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) > >> unsigned int off; /* Offset of last byte */ > >> u8 *addr = page_address(page); > >> > >> + kasan_disable_local(); > >> + > >> print_tracking(s, p); > >> > >> print_page_info(page); > >> @@ -632,6 +636,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) > >> /* Beginning of the filler is the free pointer */ > >> print_section("Padding ", p + off, s->size - off); > >> > >> + kasan_enable_local(); > >> + > >> dump_stack(); > >> } > > > > And, I recommend that you put this hook on right place. > > At a glance, the problematic function is print_section() which have > > external function call, print_hex_dump(), with object pointer. > > If you disable kasan in print_section, all the below thing won't be > > needed, I guess. > > > > Nope, at least memchr_inv() call in slab_pad_check will be a problem. > > I think putting disable/enable only where we strictly need them might be a problem for future maintenance of slub. > If someone is going to add a new function call somewhere, he must ensure that it this call won't be a problem > for kasan. I don't agree with this. If someone is going to add a slab_pad_check() in other places in slub.c, we should disable/enable kasan there, too. This looks same maintenance problem to me. Putting disable/enable only where we strictly need at least ensures that we don't need to care when using slub internal functions. And, if memchr_inv() is problem, I think that you also need to add hook into validate_slab_cache(). validate_slab_cache() -> validate_slab_slab() -> validate_slab() -> check_object() -> check_bytes_and_report() -> memchr_inv() Thanks.
On 07/15/14 12:18, Joonsoo Kim wrote: > On Tue, Jul 15, 2014 at 11:37:56AM +0400, Andrey Ryabinin wrote: >> On 07/15/14 10:04, Joonsoo Kim wrote: >>> On Wed, Jul 09, 2014 at 03:30:08PM +0400, Andrey Ryabinin wrote: >>>> Some code in slub could validly touch memory marked by kasan as unaccessible. >>>> Even though slub.c doesn't instrumented, functions called in it are instrumented, >>>> so to avoid false positive reports such places are protected by >>>> kasan_disable_local()/kasan_enable_local() calls. >>>> >>>> Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com> >>>> --- >>>> mm/slub.c | 21 +++++++++++++++++++-- >>>> 1 file changed, 19 insertions(+), 2 deletions(-) >>>> >>>> diff --git a/mm/slub.c b/mm/slub.c >>>> index 6ddedf9..c8dbea7 100644 >>>> --- a/mm/slub.c >>>> +++ b/mm/slub.c >>>> @@ -560,8 +560,10 @@ static void print_tracking(struct kmem_cache *s, void *object) >>>> if (!(s->flags & SLAB_STORE_USER)) >>>> return; >>>> >>>> + kasan_disable_local(); >>>> print_track("Allocated", get_track(s, object, TRACK_ALLOC)); >>>> print_track("Freed", get_track(s, object, TRACK_FREE)); >>>> + kasan_enable_local(); >>> >>> I don't think that this is needed since print_track() doesn't call >>> external function with object pointer. print_track() call pr_err(), but, >>> before calling, it retrieve t->addrs[i] so memory access only occurs >>> in slub.c. >>> >> Agree. 
>> >>>> } >>>> >>>> static void print_page_info(struct page *page) >>>> @@ -604,6 +606,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) >>>> unsigned int off; /* Offset of last byte */ >>>> u8 *addr = page_address(page); >>>> >>>> + kasan_disable_local(); >>>> + >>>> print_tracking(s, p); >>>> >>>> print_page_info(page); >>>> @@ -632,6 +636,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) >>>> /* Beginning of the filler is the free pointer */ >>>> print_section("Padding ", p + off, s->size - off); >>>> >>>> + kasan_enable_local(); >>>> + >>>> dump_stack(); >>>> } >>> >>> And, I recommend that you put this hook on right place. >>> At a glance, the problematic function is print_section() which have >>> external function call, print_hex_dump(), with object pointer. >>> If you disable kasan in print_section, all the below thing won't be >>> needed, I guess. >>> >> >> Nope, at least memchr_inv() call in slab_pad_check will be a problem. >> >> I think putting disable/enable only where we strictly need them might be a problem for future maintenance of slub. >> If someone is going to add a new function call somewhere, he must ensure that it this call won't be a problem >> for kasan. > > I don't agree with this. > > If someone is going to add a slab_pad_check() in other places in > slub.c, we should disable/enable kasan there, too. This looks same > maintenance problem to me. Putting disable/enable only where we > strictly need at least ensures that we don't need to care when using > slub internal functions. > > And, if memchr_inv() is problem, I think that you also need to add hook > into validate_slab_cache(). > > validate_slab_cache() -> validate_slab_slab() -> validate_slab() -> > check_object() -> check_bytes_and_report() -> memchr_inv() > > Thanks. > Ok, you convinced me. I'll do it.
On Tue, 15 Jul 2014, Joonsoo Kim wrote: > > I think putting disable/enable only where we strictly need them might be a problem for future maintenance of slub. > > If someone is going to add a new function call somewhere, he must ensure that it this call won't be a problem > > for kasan. > > I don't agree with this. > > If someone is going to add a slab_pad_check() in other places in > slub.c, we should disable/enable kasan there, too. This looks same > maintenance problem to me. Putting disable/enable only where we > strictly need at least ensures that we don't need to care when using > slub internal functions. > > And, if memchr_inv() is problem, I think that you also need to add hook > into validate_slab_cache(). > > validate_slab_cache() -> validate_slab_slab() -> validate_slab() -> > check_object() -> check_bytes_and_report() -> memchr_inv() I think adding disable/enable is good because it separates the payload access from metadata accesses. This may be useful for future checkers. Maybe call it something different so that this is more generic. metadata_access_enable() metadata_access_disable() ? Maybe someone else has a better idea?
On 07/15/14 18:26, Christoph Lameter wrote: > On Tue, 15 Jul 2014, Joonsoo Kim wrote: > >>> I think putting disable/enable only where we strictly need them might be a problem for future maintenance of slub. >>> If someone is going to add a new function call somewhere, he must ensure that it this call won't be a problem >>> for kasan. >> >> I don't agree with this. >> >> If someone is going to add a slab_pad_check() in other places in >> slub.c, we should disable/enable kasan there, too. This looks same >> maintenance problem to me. Putting disable/enable only where we >> strictly need at least ensures that we don't need to care when using >> slub internal functions. >> >> And, if memchr_inv() is problem, I think that you also need to add hook >> into validate_slab_cache(). >> >> validate_slab_cache() -> validate_slab_slab() -> validate_slab() -> >> check_object() -> check_bytes_and_report() -> memchr_inv() > > I think adding disable/enable is good because it separates the payload > access from metadata accesses. This may be useful for future checkers. > Maybe call it something different so that this is more generic. > > metadata_access_enable() > > metadata_access_disable() > > ? > It sounds like a good idea to me. However in this patch, besides from protecting metadata accesses, this calls also used in setup_objects for wrapping ctor call. It used there because all pages in allocate_slab are poisoned, so at the time when ctors are called all object's memory marked as poisoned. I think this could be solved by removing kasan_alloc_slab_pages() hook form allocate_slab() and adding kasan_slab_free() hook after ctor call. But I guess in that case padding at the end of slab will be unpoisoined. > Maybe someone else has a better idea? > > >
diff --git a/mm/slub.c b/mm/slub.c index 6ddedf9..c8dbea7 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -560,8 +560,10 @@ static void print_tracking(struct kmem_cache *s, void *object) if (!(s->flags & SLAB_STORE_USER)) return; + kasan_disable_local(); print_track("Allocated", get_track(s, object, TRACK_ALLOC)); print_track("Freed", get_track(s, object, TRACK_FREE)); + kasan_enable_local(); } static void print_page_info(struct page *page) @@ -604,6 +606,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) unsigned int off; /* Offset of last byte */ u8 *addr = page_address(page); + kasan_disable_local(); + print_tracking(s, p); print_page_info(page); @@ -632,6 +636,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) /* Beginning of the filler is the free pointer */ print_section("Padding ", p + off, s->size - off); + kasan_enable_local(); + dump_stack(); } @@ -1012,6 +1018,8 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page, void *object, unsigned long addr) { + + kasan_disable_local(); if (!check_slab(s, page)) goto bad; @@ -1028,6 +1036,7 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, set_track(s, object, TRACK_ALLOC, addr); trace(s, page, object, 1); init_object(s, object, SLUB_RED_ACTIVE); + kasan_enable_local(); return 1; bad: @@ -1041,6 +1050,7 @@ bad: page->inuse = page->objects; page->freelist = NULL; } + kasan_enable_local(); return 0; } @@ -1052,6 +1062,7 @@ static noinline struct kmem_cache_node *free_debug_processing( spin_lock_irqsave(&n->list_lock, *flags); slab_lock(page); + kasan_disable_local(); if (!check_slab(s, page)) goto fail; @@ -1088,6 +1099,7 @@ static noinline struct kmem_cache_node *free_debug_processing( trace(s, page, object, 0); init_object(s, object, SLUB_RED_INACTIVE); out: + kasan_enable_local(); slab_unlock(page); /* * Keep node_lock to preserve integrity @@ -1096,6 +1108,7 @@ out: return n; fail: + kasan_enable_local(); 
slab_unlock(page); spin_unlock_irqrestore(&n->list_lock, *flags); slab_fix(s, "Object at 0x%p not freed", object); @@ -1371,8 +1384,11 @@ static void setup_object(struct kmem_cache *s, struct page *page, void *object) { setup_object_debug(s, page, object); - if (unlikely(s->ctor)) + if (unlikely(s->ctor)) { + kasan_disable_local(); s->ctor(object); + kasan_enable_local(); + } } static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) @@ -1425,11 +1441,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page) if (kmem_cache_debug(s)) { void *p; - + kasan_disable_local(); slab_pad_check(s, page); for_each_object(p, s, page_address(page), page->objects) check_object(s, page, p, SLUB_RED_INACTIVE); + kasan_enable_local(); } kmemcheck_free_shadow(page, compound_order(page));
Some code in slub could validly touch memory marked by kasan as inaccessible. Even though slub.c isn't instrumented, functions called in it are instrumented, so to avoid false positive reports such places are protected by kasan_disable_local()/kasan_enable_local() calls. Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com> --- mm/slub.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-)