[04/13] mm: Use array_size() helpers for kmalloc()

Message ID 20180509004229.36341-5-keescook@chromium.org (mailing list archive)
State New, archived

Commit Message

Kees Cook May 9, 2018, 12:42 a.m. UTC
Instead of open-coded multiplication, use the new array_size() helper
to detect overflow in kmalloc()-family functions.

Signed-off-by: Kees Cook <keescook@chromium.org>
---
 include/linux/slab.h | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)
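
For reference, array_size() comes from the new include/linux/overflow.h (patch 01/13 of this series) and saturates to SIZE_MAX when the multiplication would overflow, so callers only need a single comparison to catch the error case. A minimal sketch of its semantics (illustrative; the real helper is built on check_mul_overflow() and is marked __must_check):

static inline size_t array_size(size_t n, size_t size)
{
	size_t bytes;

	/* Saturate to SIZE_MAX rather than letting n * size wrap. */
	if (check_mul_overflow(n, size, &bytes))
		return SIZE_MAX;
	return bytes;
}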

Comments

Matthew Wilcox May 9, 2018, 11:34 a.m. UTC | #1
On Tue, May 08, 2018 at 05:42:20PM -0700, Kees Cook wrote:
> @@ -499,6 +500,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
>   */
>  static __always_inline void *kmalloc(size_t size, gfp_t flags)
>  {
> +	if (size == SIZE_MAX)
> +		return NULL;
>  	if (__builtin_constant_p(size)) {
>  		if (size > KMALLOC_MAX_CACHE_SIZE)
>  			return kmalloc_large(size, flags);

I don't like the add-checking-to-every-call-site part of this patch.
Fine, the compiler will optimise it away if it can calculate it at compile
time, but there are a lot of situations where it can't.  You aren't
adding any safety by doing this; trying to allocate SIZE_MAX bytes is
guaranteed to fail, and it doesn't need to fail quickly.

> @@ -624,11 +629,13 @@ int memcg_update_all_caches(int num_memcgs);
>   */
>  static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
>  {
> -	if (size != 0 && n > SIZE_MAX / size)
> +	size_t bytes = array_size(n, size);
> +
> +	if (bytes == SIZE_MAX)
>  		return NULL;
>  	if (__builtin_constant_p(n) && __builtin_constant_p(size))
> -		return kmalloc(n * size, flags);
> -	return __kmalloc(n * size, flags);
> +		return kmalloc(bytes, flags);
> +	return __kmalloc(bytes, flags);
>  }
>  
>  /**
> @@ -639,7 +646,9 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
>   */
>  static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
>  {
> -	return kmalloc_array(n, size, flags | __GFP_ZERO);
> +	size_t bytes = array_size(n, size);
> +
> +	return kmalloc(bytes, flags | __GFP_ZERO);
>  }

Hmm.  I wonder why we have the kmalloc/__kmalloc "optimisation"
in kmalloc_array, but not kcalloc.  Bet we don't really need it in
kmalloc_array.  I'll do some testing.
Kees Cook May 9, 2018, 5:58 p.m. UTC | #2
On Wed, May 9, 2018 at 4:34 AM, Matthew Wilcox <willy@infradead.org> wrote:
> On Tue, May 08, 2018 at 05:42:20PM -0700, Kees Cook wrote:
>> @@ -499,6 +500,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
>>   */
>>  static __always_inline void *kmalloc(size_t size, gfp_t flags)
>>  {
>> +     if (size == SIZE_MAX)
>> +             return NULL;
>>       if (__builtin_constant_p(size)) {
>>               if (size > KMALLOC_MAX_CACHE_SIZE)
>>                       return kmalloc_large(size, flags);
>
> I don't like the add-checking-to-every-call-site part of this patch.
> Fine, the compiler will optimise it away if it can calculate it at compile
> time, but there are a lot of situations where it can't.  You aren't
> adding any safety by doing this; trying to allocate SIZE_MAX bytes is
> guaranteed to fail, and it doesn't need to fail quickly.

Fun bit of paranoia: I added early checks to devm_kmalloc() too in
another patch after 0-day started yelling about other things, and this
morning while removing the SIZE_MAX checks based on your feedback, I
discovered:

void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
...

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size = sizeof(struct devres) + size;
	struct devres *dr;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
...

which is exactly the pattern I was worried about: SIZE_MAX plus some
small number would overflow. :(

So, I've added an explicit overflow check in devm_kmalloc() now.
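
A sketch of what such a guard can look like in alloc_dr(), using check_add_overflow() from the same overflow.h (illustrative; the exact shape and placement in the follow-up patch may differ):

	size_t tot_size;

	/* Refuse sizes where adding the devres header would wrap. */
	if (unlikely(check_add_overflow(sizeof(struct devres), size,
					&tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);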

Thoughts: making {struct,array,array3}_size() return "SIZE_MAX -
something" could help for generic cases (like above), but it might
still be possible in a buggy situation for an attacker to choose
factors that do NOT overflow, but reach something like "SIZE_MAX - 1"
and then the later addition will wrap it around. I'm leaning towards
doing it anyway, though, since not all factors in a given bug may have
very high granularity, giving us better protection than SIZE_MAX.
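
Concretely (example values invented here, not from the thread), the factors need not overflow at all for the later addition to wrap:

	size_t bytes = array_size(1, SIZE_MAX - 1);	 /* no overflow: bytes == SIZE_MAX - 1 */
	size_t tot_size = sizeof(struct devres) + bytes; /* wraps to sizeof(struct devres) - 2 */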

However, since now we're separating overflow checking from saturation
(i.e. we could calculate a non-overflowing value that lands in the
saturation zone), we can't sanely do the check_*_overflow() cases,
since the "saturated" results from array_size() aren't SIZE_MAX any
more...

I can't decide which is a safer failure case...

> Hmm.  I wonder why we have the kmalloc/__kmalloc "optimisation"
> in kmalloc_array, but not kcalloc.  Bet we don't really need it in
> kmalloc_array.  I'll do some testing.

Not sure; my new patches drop it entirely, since they redefine
*calloc() and *_array*() in terms of the helpers now...

I'll send a v2 shortly (without the treewide changes, since those are
huge and only changed slightly, fixing some glitches 0-day noticed).

-Kees
Rasmus Villemoes May 9, 2018, 6 p.m. UTC | #3
On 2018-05-09 13:34, Matthew Wilcox wrote:
> On Tue, May 08, 2018 at 05:42:20PM -0700, Kees Cook wrote:
>> @@ -499,6 +500,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
>>   */
>>  static __always_inline void *kmalloc(size_t size, gfp_t flags)
>>  {
>> +	if (size == SIZE_MAX)
>> +		return NULL;
>>  	if (__builtin_constant_p(size)) {
>>  		if (size > KMALLOC_MAX_CACHE_SIZE)
>>  			return kmalloc_large(size, flags);
> 
> I don't like the add-checking-to-every-call-site part of this patch.
> Fine, the compiler will optimise it away if it can calculate it at compile
> time, but there are a lot of situations where it can't.  You aren't
> adding any safety by doing this; trying to allocate SIZE_MAX bytes is
> guaranteed to fail, and it doesn't need to fail quickly.

Yeah, agree that we don't want to add a size check to all callers,
including those where the size doesn't even come from one of the new
*_size helpers; that just adds bloat. It's true that the overflow case
does not have to fail quickly, but I was worried that the saturating
helpers would end up making gcc emit a cmov instruction, thus stalling
the regular path. But it seems that it actually ends up doing a forward
jump, sets %rdi to SIZE_MAX, then jumps back to the call of __kmalloc,
so it should be ok.

With __builtin_constant_p(size) && size == SIZE_MAX, gcc could be smart
enough to elide those two instructions and have the jo go directly to
the caller's error handling, but at least gcc 5.4 doesn't seem to be
that smart. So let's just omit that part for now.

But in the case of the kmalloc_array functions, with a direct call of
__builtin_mul_overflow(), gcc does combine the "return NULL" with the
caller's error handling, thus avoiding the six-byte "%rdi = -1; jmp
back;" thunk. That, along with the churn factor, might be an argument
for leaving the current callers of *_array alone. But if we are going to
keep those longer-term, we might as well convert kmalloc(a * b) into
kmalloc_array(a, b) instead of kmalloc(array_size(a, b)). In any case, I
do see the usefulness of the struct_size helper, and agree that we
definitely should not introduce a new *_struct variant that needs to be
implemented in all families.

>> @@ -624,11 +629,13 @@ int memcg_update_all_caches(int num_memcgs);
>>   */
>>  static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
>>  {
>> -	if (size != 0 && n > SIZE_MAX / size)
>> +	size_t bytes = array_size(n, size);
>> +
>> +	if (bytes == SIZE_MAX)
>>  		return NULL;
>>  	if (__builtin_constant_p(n) && __builtin_constant_p(size))
>> -		return kmalloc(n * size, flags);
>> -	return __kmalloc(n * size, flags);
>> +		return kmalloc(bytes, flags);
>> +	return __kmalloc(bytes, flags);
>>  }
>>  
>>  /**
>> @@ -639,7 +646,9 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
>>   */
>>  static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
>>  {
>> -	return kmalloc_array(n, size, flags | __GFP_ZERO);
>> +	size_t bytes = array_size(n, size);
>> +
>> +	return kmalloc(bytes, flags | __GFP_ZERO);
>>  }
> 
> Hmm.  I wonder why we have the kmalloc/__kmalloc "optimisation"
> in kmalloc_array, but not kcalloc.  Bet we don't really need it in
> kmalloc_array.  I'll do some testing.
> 

Huh? Because kcalloc is implemented in terms of kmalloc_array? And can't
we just keep it that way?

Rasmus
Kees Cook May 9, 2018, 6:07 p.m. UTC | #4
On Wed, May 9, 2018 at 11:00 AM, Rasmus Villemoes
<linux@rasmusvillemoes.dk> wrote:
> On 2018-05-09 13:34, Matthew Wilcox wrote:
>> On Tue, May 08, 2018 at 05:42:20PM -0700, Kees Cook wrote:
>>> @@ -499,6 +500,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
>>>   */
>>>  static __always_inline void *kmalloc(size_t size, gfp_t flags)
>>>  {
>>> +    if (size == SIZE_MAX)
>>> +            return NULL;
>>>      if (__builtin_constant_p(size)) {
>>>              if (size > KMALLOC_MAX_CACHE_SIZE)
>>>                      return kmalloc_large(size, flags);
>>
>> I don't like the add-checking-to-every-call-site part of this patch.
>> Fine, the compiler will optimise it away if it can calculate it at compile
>> time, but there are a lot of situations where it can't.  You aren't
>> adding any safety by doing this; trying to allocate SIZE_MAX bytes is
>> guaranteed to fail, and it doesn't need to fail quickly.
>
> Yeah, agree that we don't want to add a size check to all callers,
> including those where the size doesn't even come from one of the new
> *_size helpers; that just adds bloat. It's true that the overflow case
> does not have to fail quickly, but I was worried that the saturating
> helpers would end up making gcc emit a cmov instruction, thus stalling
> the regular path. But it seems that it actually ends up doing a forward
> jump, sets %rdi to SIZE_MAX, then jumps back to the call of __kmalloc,
> so it should be ok.

Okay, consensus is to remove new SIZE_MAX checks, then?

> With __builtin_constant_p(size) && size == SIZE_MAX, gcc could be smart
> enough to elide those two instructions and have the jo go directly to
> the caller's error handling, but at least gcc 5.4 doesn't seem to be
> that smart. So let's just omit that part for now.
>
> But in the case of the kmalloc_array functions, with a direct call of
> __builtin_mul_overflow(), gcc does combine the "return NULL" with the
> caller's error handling, thus avoiding the six-byte "%rdi = -1; jmp
> back;" thunk. That, along with the churn factor, might be an argument
> for leaving the current callers of *_array alone. But if we are going to
> keep those longer-term, we might as well convert kmalloc(a * b) into
> kmalloc_array(a, b) instead of kmalloc(array_size(a, b)). In any case, I
> do see the usefulness of the struct_size helper, and agree that we
> definitely should not introduce a new *_struct variant that needs to be
> implemented in all families.

I'd like to drop *calloc() and *_array() to simplify APIs (and improve
developer sanity). Are you suggesting we should not use the overflow
helpers in kmalloc_array(), instead leaving the existing open-coded
overflow check?

-Kees
Rasmus Villemoes May 9, 2018, 6:39 p.m. UTC | #5
On 2018-05-09 20:07, Kees Cook wrote:
> On Wed, May 9, 2018 at 11:00 AM, Rasmus Villemoes
> <linux@rasmusvillemoes.dk> wrote:
> Okay, consensus is to remove new SIZE_MAX checks, then?

Yes, don't add such checks to the static inlines. But the out-of-line
implementations do need an audit (as you've observed) for unsafe
arithmetic on the passed-in size.

>> With __builtin_constant_p(size) && size == SIZE_MAX, gcc could be smart
>> enough to elide those two instructions and have the jo go directly to
>> the caller's error handling, but at least gcc 5.4 doesn't seem to be
>> that smart. So let's just omit that part for now.
>>
>> But in the case of the kmalloc_array functions, with a direct call of
>> __builtin_mul_overflow(), gcc does combine the "return NULL" with the
>> caller's error handling, thus avoiding the six-byte "%rdi = -1; jmp
>> back;" thunk. That, along with the churn factor, might be an argument
>> for leaving the current callers of *_array alone. But if we are going to
>> keep those longer-term, we might as well convert kmalloc(a * b) into
>> kmalloc_array(a, b) instead of kmalloc(array_size(a, b)). In any case, I
>> do see the usefulness of the struct_size helper, and agree that we
>> definitely should not introduce a new *_struct variant that needs to be
>> implemented in all families.
> 
> I'd like to drop *calloc() and *_array() to simplify APIs (and improve
> developer sanity). Are you suggesting we should not use the overflow
> helpers in kmalloc_array(), instead leaving the existing open-coded
> overflow check?

No, quite the contrary. I suggest using check_mul_overflow() directly in
kmalloc_array (and by implication, kcalloc), and also all other *_array
or *_calloc that are static inlines. That's separate from converting
kmalloc(a*b) to use some safer variant, and should not be controversial
(and can generate better code for all the existing callers).
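
A minimal sketch of that suggestion (assuming the check_mul_overflow() helper from this series; illustrative, not the final patch):

static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	/* gcc can fold this branch into the caller's error handling. */
	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

kcalloc() can then stay a thin wrapper that passes flags | __GFP_ZERO and inherits the check.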

Now, what kmalloc(a*b) should be converted to is a question of the
long-term plans for *_array. If you want to remove it completely,
eventually, it doesn't make sense to coccinel (yeah, that's a verb) in
new users.

And a third question is whether and when to mechanically change all
(pre-)existing kmalloc_array() into kmalloc(array_size()). I don't have
an opinion on the latter two.

Rasmus

Patch

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 81ebd71f8c03..d03e0726e136 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -13,6 +13,7 @@ 
 #define	_LINUX_SLAB_H
 
 #include <linux/gfp.h>
+#include <linux/overflow.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
@@ -499,6 +500,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+	if (size == SIZE_MAX)
+		return NULL;
 	if (__builtin_constant_p(size)) {
 		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);
@@ -539,6 +542,8 @@ static __always_inline unsigned int kmalloc_size(unsigned int n)
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	if (size == SIZE_MAX)
+		return NULL;
 #ifndef CONFIG_SLOB
 	if (__builtin_constant_p(size) &&
 		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
@@ -624,11 +629,13 @@ int memcg_update_all_caches(int num_memcgs);
  */
 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
-	if (size != 0 && n > SIZE_MAX / size)
+	size_t bytes = array_size(n, size);
+
+	if (bytes == SIZE_MAX)
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
-		return kmalloc(n * size, flags);
-	return __kmalloc(n * size, flags);
+		return kmalloc(bytes, flags);
+	return __kmalloc(bytes, flags);
 }
 
 /**
@@ -639,7 +646,9 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  */
 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 {
-	return kmalloc_array(n, size, flags | __GFP_ZERO);
+	size_t bytes = array_size(n, size);
+
+	return kmalloc(bytes, flags | __GFP_ZERO);
 }
 
 /*
@@ -657,16 +666,22 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
 				       int node)
 {
-	if (size != 0 && n > SIZE_MAX / size)
+	size_t bytes = array_size(n, size);
+
+	if (bytes == SIZE_MAX)
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
-		return kmalloc_node(n * size, flags, node);
-	return __kmalloc_node(n * size, flags, node);
+		return kmalloc_node(bytes, flags, node);
+	return __kmalloc_node(bytes, flags, node);
 }
 
 static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
 {
-	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
+	size_t bytes = array_size(n, size);
+
+	if (bytes == SIZE_MAX)
+		return NULL;
+	return kmalloc_node(bytes, flags | __GFP_ZERO, node);
 }
 }