
[v6,08/18] khwasan: preassign tags to objects with ctors or SLAB_TYPESAFE_BY_RCU

Message ID 95b5beb7ec13b7e998efe84c9a7a5c1fa49a9fe3.1535462971.git.andreyknvl@google.com (mailing list archive)
State New, archived
Series khwasan: kernel hardware assisted address sanitizer

Commit Message

Andrey Konovalov Aug. 29, 2018, 11:35 a.m. UTC
An object constructor can initialize pointers within the object based
on the object's address. Since the object address might be tagged, we
need to assign a tag before calling the constructor.

The implemented approach is to assign tags to objects with constructors
when a slab is allocated, and to call the constructors once, as usual.
The downside is that such objects always keep the same tag when they are
reallocated, so we won't catch use-after-frees on them.

Also preassign tags to objects from SLAB_TYPESAFE_BY_RCU caches, since
they can be validly accessed after having been freed.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/slab.c | 6 +++++-
 mm/slub.c | 4 ++++
 2 files changed, 9 insertions(+), 1 deletion(-)
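
To make the constructor problem concrete, here is a minimal,
hypothetical sketch (not part of the patch): a cache whose constructor
stores the object's own address inside the object. The stored pointer
carries the tag of the address the constructor saw, so the tag must be
final before the constructor runs.

struct foo {
	struct list_head list;	/* links point back into the object */
};

static void foo_ctor(void *obj)
{
	struct foo *f = obj;

	/*
	 * INIT_LIST_HEAD() stores &f->list into the object itself. If
	 * KHWASAN retagged the object on a later allocation, this
	 * stored pointer would keep the stale tag and every access
	 * through it would be reported as a tag mismatch. Hence the
	 * tag is preassigned once, when the slab is created.
	 */
	INIT_LIST_HEAD(&f->list);
}

/*
 * The cache would be created with, e.g.:
 *	kmem_cache_create("foo", sizeof(struct foo), 0, 0, foo_ctor);
 * The constructor runs once per object, not once per allocation.
 */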

Comments

Christoph Lameter (Ampere) Sept. 4, 2018, 3:17 p.m. UTC | #1
For the slab pieces

Reviewed-by: Christoph Lameter <cl@linux.com>



Dmitry Vyukov Sept. 12, 2018, 4:36 p.m. UTC | #2
On Wed, Aug 29, 2018 at 1:35 PM, Andrey Konovalov <andreyknvl@google.com> wrote:
> @@ -1613,8 +1615,10 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
>         shuffle = shuffle_freelist(s, page);
>
>         if (!shuffle) {
> +               start = khwasan_preset_slub_tag(s, start);
>                 for_each_object_idx(p, idx, s, start, page->objects) {
>                         setup_object(s, page, p);
> +                       p = khwasan_preset_slub_tag(s, p);


As I commented in the previous patch, can't we do this in
kasan_init_slab_obj(), which should be called in all the right places
already?


>                         if (likely(idx < page->objects))
>                                 set_freepointer(s, p, p + s->size);
>                         else
> --
> 2.19.0.rc0.228.g281dcd1b4d0-goog
>
Andrey Konovalov Sept. 18, 2018, 4:50 p.m. UTC | #3
On Wed, Sep 12, 2018 at 6:36 PM, Dmitry Vyukov <dvyukov@google.com> wrote:
> On Wed, Aug 29, 2018 at 1:35 PM, Andrey Konovalov <andreyknvl@google.com> wrote:

>>         if (!shuffle) {
>> +               start = khwasan_preset_slub_tag(s, start);
>>                 for_each_object_idx(p, idx, s, start, page->objects) {
>>                         setup_object(s, page, p);
>> +                       p = khwasan_preset_slub_tag(s, p);
>
>
> As I commented in the previous patch, can't we do this in
> kasan_init_slab_obj(), which should be called in all the right places
> already?
>

As per offline discussion, will do in v7.
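
For reference, a hedged sketch of the direction agreed above (the
helpers get_alloc_info(), set_tag() and assign_tag() are assumed from
earlier patches in this series, and the actual v7 code may differ):
moving the preassignment into kasan_init_slab_obj() covers both SLAB
and SLUB, since both allocators already call it for every object when
a new slab page is set up.

void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	/*
	 * Assumed behavior: assign_tag() returns a deterministic tag
	 * for caches with constructors or SLAB_TYPESAFE_BY_RCU, so
	 * such objects keep the same tag across reallocations.
	 */
	object = set_tag(object, assign_tag(cache, object));

	return (void *)object;
}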

Patch

diff --git a/mm/slab.c b/mm/slab.c
index 6fdca9ec2ea4..3b4227059f2e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -403,7 +403,11 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 				 unsigned int idx)
 {
-	return page->s_mem + cache->size * idx;
+	void *obj;
+
+	obj = page->s_mem + cache->size * idx;
+	obj = khwasan_preset_slab_tag(cache, idx, obj);
+	return obj;
 }
 
 /*
diff --git a/mm/slub.c b/mm/slub.c
index 4206e1b616e7..086d6558a6b6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1531,12 +1531,14 @@ static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
 	/* First entry is used as the base of the freelist */
 	cur = next_freelist_entry(s, page, &pos, start, page_limit,
 				freelist_count);
+	cur = khwasan_preset_slub_tag(s, cur);
 	page->freelist = cur;
 
 	for (idx = 1; idx < page->objects; idx++) {
 		setup_object(s, page, cur);
 		next = next_freelist_entry(s, page, &pos, start, page_limit,
 			freelist_count);
+		next = khwasan_preset_slub_tag(s, next);
 		set_freepointer(s, cur, next);
 		cur = next;
 	}
@@ -1613,8 +1615,10 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	shuffle = shuffle_freelist(s, page);
 
 	if (!shuffle) {
+		start = khwasan_preset_slub_tag(s, start);
 		for_each_object_idx(p, idx, s, start, page->objects) {
 			setup_object(s, page, p);
+			p = khwasan_preset_slub_tag(s, p);
 			if (likely(idx < page->objects))
 				set_freepointer(s, p, p + s->size);
 			else