[v3] mempool: Do not use ksize() for poisoning

Message ID 20221025233421.you.825-kees@kernel.org (mailing list archive)
State Superseded
Series [v3] mempool: Do not use ksize() for poisoning

Commit Message

Kees Cook Oct. 25, 2022, 11:36 p.m. UTC
Nothing appears to be using ksize() within the kmalloc-backed mempools
except the mempool poisoning logic. Use the actual pool size instead
of ksize() to avoid needing any special handling of the memory as
required by KASAN, UBSAN_BOUNDS, or FORTIFY_SOURCE.

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Link: https://lore.kernel.org/lkml/f4fc52c4-7c18-1d76-0c7a-4058ea2486b9@suse.cz/
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
Signed-off-by: Kees Cook <keescook@chromium.org>
---
v3: remove ksize() calls instead of adding kmalloc_size_roundup() calls (vbabka)
v2: https://lore.kernel.org/lkml/20221018090323.never.897-kees@kernel.org/
v1: https://lore.kernel.org/lkml/20220923202822.2667581-14-keescook@chromium.org/
---
 mm/mempool.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
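
For context, the cast to size_t in this patch works because kmalloc-backed
mempools store the requested element size in pool_data at creation time.
Paraphrased from include/linux/mempool.h and mm/mempool.c (not part of this
patch), the relevant helpers look roughly like this:

	/* include/linux/mempool.h: the requested size is stashed in pool_data */
	static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
	{
		return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
				      (void *) size);
	}

	/* mm/mempool.c: allocation casts pool_data straight back to a size */
	void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
	{
		size_t size = (size_t)pool_data;
		return kmalloc(size, gfp_mask);
	}

So (size_t)pool->pool_data is exactly the size the pool's users asked for,
while ksize() reports the possibly larger allocated bucket size, which is
what required the special handling by KASAN, UBSAN_BOUNDS, and FORTIFY_SOURCE.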

Comments

Vlastimil Babka Oct. 26, 2022, 10:02 a.m. UTC | #1
On 10/26/22 01:36, Kees Cook wrote:
> Nothing appears to be using ksize() within the kmalloc-backed mempools
> except the mempool poisoning logic. Use the actual pool size instead
> of ksize() to avoid needing any special handling of the memory as
> required by KASAN, UBSAN_BOUNDS, or FORTIFY_SOURCE.
> 
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Link: https://lore.kernel.org/lkml/f4fc52c4-7c18-1d76-0c7a-4058ea2486b9@suse.cz/
> Cc: Andrey Konovalov <andreyknvl@gmail.com>
> Cc: David Rientjes <rientjes@google.com>
> Cc: Marco Elver <elver@google.com>
> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: linux-mm@kvack.org
> Signed-off-by: Kees Cook <keescook@chromium.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

> ---
> v3: remove ksize() calls instead of adding kmalloc_size_roundup() calls (vbabka)
> v2: https://lore.kernel.org/lkml/20221018090323.never.897-kees@kernel.org/
> v1: https://lore.kernel.org/lkml/20220923202822.2667581-14-keescook@chromium.org/
> ---
>  mm/mempool.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/mm/mempool.c b/mm/mempool.c
> index 96488b13a1ef..54204065037d 100644
> --- a/mm/mempool.c
> +++ b/mm/mempool.c
> @@ -58,7 +58,7 @@ static void check_element(mempool_t *pool, void *element)
>  {
>  	/* Mempools backed by slab allocator */
>  	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
> -		__check_element(pool, element, ksize(element));
> +		__check_element(pool, element, (size_t)pool->pool_data);
>  	} else if (pool->free == mempool_free_pages) {
>  		/* Mempools backed by page allocator */
>  		int order = (int)(long)pool->pool_data;
> @@ -81,7 +81,7 @@ static void poison_element(mempool_t *pool, void *element)
>  {
>  	/* Mempools backed by slab allocator */
>  	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
> -		__poison_element(element, ksize(element));
> +		__poison_element(element, (size_t)pool->pool_data);
>  	} else if (pool->alloc == mempool_alloc_pages) {
>  		/* Mempools backed by page allocator */
>  		int order = (int)(long)pool->pool_data;
> @@ -112,7 +112,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
>  static void kasan_unpoison_element(mempool_t *pool, void *element)
>  {
>  	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
> -		kasan_unpoison_range(element, __ksize(element));
> +		kasan_unpoison_range(element, (size_t)pool->pool_data);
>  	else if (pool->alloc == mempool_alloc_pages)
>  		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
>  				     false);
Vlastimil Babka Oct. 26, 2022, 10:08 a.m. UTC | #2
On 10/26/22 12:02, Vlastimil Babka wrote:
> On 10/26/22 01:36, Kees Cook wrote:
>> Nothing appears to be using ksize() within the kmalloc-backed mempools
>> except the mempool poisoning logic. Use the actual pool size instead
>> of ksize() to avoid needing any special handling of the memory as
>> required by KASAN, UBSAN_BOUNDS, or FORTIFY_SOURCE.
>> 
>> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
>> Link: https://lore.kernel.org/lkml/f4fc52c4-7c18-1d76-0c7a-4058ea2486b9@suse.cz/
>> Cc: Andrey Konovalov <andreyknvl@gmail.com>
>> Cc: David Rientjes <rientjes@google.com>
>> Cc: Marco Elver <elver@google.com>
>> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
>> Cc: Andrew Morton <akpm@linux-foundation.org>
>> Cc: linux-mm@kvack.org
>> Signed-off-by: Kees Cook <keescook@chromium.org>
> 
> Acked-by: Vlastimil Babka <vbabka@suse.cz>

Ah and since the subject was updated too, note this is supposed to
replace/fixup the patch in mm-unstable:

mempool-use-kmalloc_size_roundup-to-match-ksize-usage.patch

>> ---
>> v3: remove ksize() calls instead of adding kmalloc_size_roundup() calls (vbabka)
>> v2: https://lore.kernel.org/lkml/20221018090323.never.897-kees@kernel.org/
>> v1: https://lore.kernel.org/lkml/20220923202822.2667581-14-keescook@chromium.org/
>> ---
>>  mm/mempool.c | 6 +++---
>>  1 file changed, 3 insertions(+), 3 deletions(-)
>> 
>> diff --git a/mm/mempool.c b/mm/mempool.c
>> index 96488b13a1ef..54204065037d 100644
>> --- a/mm/mempool.c
>> +++ b/mm/mempool.c
>> @@ -58,7 +58,7 @@ static void check_element(mempool_t *pool, void *element)
>>  {
>>  	/* Mempools backed by slab allocator */
>>  	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
>> -		__check_element(pool, element, ksize(element));
>> +		__check_element(pool, element, (size_t)pool->pool_data);
>>  	} else if (pool->free == mempool_free_pages) {
>>  		/* Mempools backed by page allocator */
>>  		int order = (int)(long)pool->pool_data;
>> @@ -81,7 +81,7 @@ static void poison_element(mempool_t *pool, void *element)
>>  {
>>  	/* Mempools backed by slab allocator */
>>  	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
>> -		__poison_element(element, ksize(element));
>> +		__poison_element(element, (size_t)pool->pool_data);
>>  	} else if (pool->alloc == mempool_alloc_pages) {
>>  		/* Mempools backed by page allocator */
>>  		int order = (int)(long)pool->pool_data;
>> @@ -112,7 +112,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
>>  static void kasan_unpoison_element(mempool_t *pool, void *element)
>>  {
>>  	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
>> -		kasan_unpoison_range(element, __ksize(element));
>> +		kasan_unpoison_range(element, (size_t)pool->pool_data);
>>  	else if (pool->alloc == mempool_alloc_pages)
>>  		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
>>  				     false);
>
Andrey Konovalov Oct. 27, 2022, 7:15 p.m. UTC | #3
On Wed, Oct 26, 2022 at 1:36 AM Kees Cook <keescook@chromium.org> wrote:
>
> Nothing appears to be using ksize() within the kmalloc-backed mempools
> except the mempool poisoning logic. Use the actual pool size instead
> of ksize() to avoid needing any special handling of the memory as
> required by KASAN, UBSAN_BOUNDS, or FORTIFY_SOURCE.
>
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Link: https://lore.kernel.org/lkml/f4fc52c4-7c18-1d76-0c7a-4058ea2486b9@suse.cz/
> Cc: Andrey Konovalov <andreyknvl@gmail.com>
> Cc: David Rientjes <rientjes@google.com>
> Cc: Marco Elver <elver@google.com>
> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: linux-mm@kvack.org
> Signed-off-by: Kees Cook <keescook@chromium.org>
> ---
> v3: remove ksize() calls instead of adding kmalloc_size_roundup() calls (vbabka)
> v2: https://lore.kernel.org/lkml/20221018090323.never.897-kees@kernel.org/
> v1: https://lore.kernel.org/lkml/20220923202822.2667581-14-keescook@chromium.org/
> ---
>  mm/mempool.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/mm/mempool.c b/mm/mempool.c
> index 96488b13a1ef..54204065037d 100644
> --- a/mm/mempool.c
> +++ b/mm/mempool.c
> @@ -58,7 +58,7 @@ static void check_element(mempool_t *pool, void *element)
>  {
>         /* Mempools backed by slab allocator */
>         if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
> -               __check_element(pool, element, ksize(element));
> +               __check_element(pool, element, (size_t)pool->pool_data);
>         } else if (pool->free == mempool_free_pages) {
>                 /* Mempools backed by page allocator */
>                 int order = (int)(long)pool->pool_data;
> @@ -81,7 +81,7 @@ static void poison_element(mempool_t *pool, void *element)
>  {
>         /* Mempools backed by slab allocator */
>         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
> -               __poison_element(element, ksize(element));
> +               __poison_element(element, (size_t)pool->pool_data);
>         } else if (pool->alloc == mempool_alloc_pages) {
>                 /* Mempools backed by page allocator */
>                 int order = (int)(long)pool->pool_data;
> @@ -112,7 +112,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
>  static void kasan_unpoison_element(mempool_t *pool, void *element)
>  {
>         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
> -               kasan_unpoison_range(element, __ksize(element));
> +               kasan_unpoison_range(element, (size_t)pool->pool_data);
>         else if (pool->alloc == mempool_alloc_pages)
>                 kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
>                                      false);
> --
> 2.34.1
>

For the KASAN change:

Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>

Thanks!

Patch

diff --git a/mm/mempool.c b/mm/mempool.c
index 96488b13a1ef..54204065037d 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -58,7 +58,7 @@ static void check_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
 	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
-		__check_element(pool, element, ksize(element));
+		__check_element(pool, element, (size_t)pool->pool_data);
 	} else if (pool->free == mempool_free_pages) {
 		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
@@ -81,7 +81,7 @@ static void poison_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
-		__poison_element(element, ksize(element));
+		__poison_element(element, (size_t)pool->pool_data);
 	} else if (pool->alloc == mempool_alloc_pages) {
 		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
@@ -112,7 +112,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_unpoison_range(element, __ksize(element));
+		kasan_unpoison_range(element, (size_t)pool->pool_data);
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
 				     false);
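
As a usage illustration (hypothetical caller, not from this patch): a pool
created with mempool_create_kmalloc_pool(16, 256) stores 256 in pool_data,
so when an element goes back into the pool's reserve, the poisoning and
KASAN hooks above now cover exactly those 256 bytes rather than whatever
larger size ksize() would report for the underlying kmalloc bucket.

	/* hypothetical example: the element size 256 ends up in pool->pool_data */
	mempool_t *pool = mempool_create_kmalloc_pool(16, 256);
	void *elem = mempool_alloc(pool, GFP_KERNEL);
	/* ... use at most 256 bytes of elem ... */
	mempool_free(elem, pool);	/* may poison exactly 256 bytes */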