
[v3,4/8] slab: Add __alloc_size attributes for better bounds checking

Message ID 20210930222704.2631604-5-keescook@chromium.org (mailing list archive)
State New, archived
Series Add __alloc_size()

Commit Message

Kees Cook Sept. 30, 2021, 10:27 p.m. UTC
As already done in GrapheneOS, add the __alloc_size attribute for regular
kmalloc interfaces, to provide additional hinting for better bounds
checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
optimizations.

Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andy Whitcroft <apw@canonical.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Dwaipayan Ray <dwaipayanray1@gmail.com>
Cc: Joe Perches <joe@perches.com>
Cc: Lukas Bulwahn <lukas.bulwahn@gmail.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Co-developed-by: Daniel Micay <danielmicay@gmail.com>
Signed-off-by: Daniel Micay <danielmicay@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
---
 include/linux/slab.h | 61 ++++++++++++++++++++++++--------------------
 1 file changed, 33 insertions(+), 28 deletions(-)
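
For context on what the annotation buys: __alloc_size() expands to the compiler's alloc_size function attribute, which lets __builtin_object_size() (and warnings such as -Wstringop-overflow) see the requested allocation size. A minimal userspace sketch of the effect; my_alloc() here is a hypothetical helper, only the attribute itself is real:

#include <stdlib.h>
#include <string.h>

/* Hypothetical allocator wrapper; argument 1 is the allocation size. */
__attribute__((alloc_size(1)))
static void *my_alloc(size_t n)
{
	return malloc(n);
}

void demo(const char *src)
{
	char *p = my_alloc(8);

	/*
	 * With the attribute, __builtin_object_size(p, 0) folds to 8 at -O2,
	 * so fortified or bounds-checking code can reject this 16-byte copy
	 * at compile time instead of only at run time.
	 */
	if (p)
		memcpy(p, src, 16);
	free(p);
}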

Comments

Andrew Morton Oct. 6, 2021, 1:47 a.m. UTC | #1
On Thu, 30 Sep 2021 15:27:00 -0700 Kees Cook <keescook@chromium.org> wrote:

> As already done in GrapheneOS, add the __alloc_size attribute for regular
> kmalloc interfaces, to provide additional hinting for better bounds
> checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
> optimizations.

x86_64 allmodconfig:

In file included from ./arch/x86/include/asm/preempt.h:7,
                 from ./include/linux/preempt.h:78,
                 from ./include/linux/spinlock.h:55,
                 from ./include/linux/mmzone.h:8,
                 from ./include/linux/gfp.h:6,
                 from ./include/linux/mm.h:10,
                 from ./include/linux/mman.h:5,
                 from lib/test_kasan_module.c:10:
In function 'check_copy_size',
    inlined from 'copy_user_test' at ./include/linux/uaccess.h:191:6:
./include/linux/thread_info.h:213:4: error: call to '__bad_copy_to' declared with attribute error: copy destination size is too small
  213 |    __bad_copy_to();
      |    ^~~~~~~~~~~~~~~
In function 'check_copy_size',
    inlined from 'copy_user_test' at ./include/linux/uaccess.h:199:6:
./include/linux/thread_info.h:211:4: error: call to '__bad_copy_from' declared with attribute error: copy source size is too small
  211 |    __bad_copy_from();
      |    ^~~~~~~~~~~~~~~~~
make[1]: *** [lib/test_kasan_module.o] Error 1
make: *** [lib] Error 2
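
The failing check lives in the fortified copy_{to,from}_user() path: check_copy_size() compares __builtin_object_size() of the kernel buffer against the copy length, and with kmalloc() now carrying __alloc_size() the compiler can prove that copy_user_test()'s deliberately oversized copies exceed the 128 - KASAN_GRANULE_SIZE allocation. A simplified, standalone model of that check (abridged from the shape of include/linux/thread_info.h, not verbatim):

#include <stddef.h>

extern void __bad_copy_from(void)
	__attribute__((error("copy source size is too small")));
extern void __bad_copy_to(void)
	__attribute__((error("copy destination size is too small")));

static inline __attribute__((always_inline)) int
check_copy_size(const void *addr, size_t bytes, int is_source)
{
	size_t sz = __builtin_object_size(addr, 0);

	/*
	 * Fires only when the compiler can prove both the object size and
	 * the (constant) copy length; __alloc_size() on kmalloc() is what
	 * makes sz provable for heap buffers.
	 */
	if (sz != (size_t)-1 && __builtin_constant_p(bytes) && sz < bytes) {
		if (is_source)
			__bad_copy_from();
		else
			__bad_copy_to();
		return 0;
	}
	return 1;
}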
Kees Cook Oct. 6, 2021, 3:06 a.m. UTC | #2
On Tue, Oct 05, 2021 at 06:47:17PM -0700, Andrew Morton wrote:
> On Thu, 30 Sep 2021 15:27:00 -0700 Kees Cook <keescook@chromium.org> wrote:
> 
> > As already done in GrapheneOS, add the __alloc_size attribute for regular
> > kmalloc interfaces, to provide additional hinting for better bounds
> > checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
> > optimizations.
> 
> x86_64 allmodconfig:

What compiler and version?

> 
> In file included from ./arch/x86/include/asm/preempt.h:7,
>                  from ./include/linux/preempt.h:78,
>                  from ./include/linux/spinlock.h:55,
>                  from ./include/linux/mmzone.h:8,
>                  from ./include/linux/gfp.h:6,
>                  from ./include/linux/mm.h:10,
>                  from ./include/linux/mman.h:5,
>                  from lib/test_kasan_module.c:10:
> In function 'check_copy_size',
>     inlined from 'copy_user_test' at ./include/linux/uaccess.h:191:6:
> ./include/linux/thread_info.h:213:4: error: call to '__bad_copy_to' declared with attribute error: copy destination size is too small
>   213 |    __bad_copy_to();
>       |    ^~~~~~~~~~~~~~~
> In function 'check_copy_size',
>     inlined from 'copy_user_test' at ./include/linux/uaccess.h:199:6:
> ./include/linux/thread_info.h:211:4: error: call to '__bad_copy_from' declared with attribute error: copy source size is too small
>   211 |    __bad_copy_from();
>       |    ^~~~~~~~~~~~~~~~~
> make[1]: *** [lib/test_kasan_module.o] Error 1
> make: *** [lib] Error 2

Hah, yes, it caught an intentionally bad copy. This may bypass the
check, as I've had to do in LKDTM before. I will test...

diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
index 7ebf433edef3..9fb2fb2937da 100644
--- a/lib/test_kasan_module.c
+++ b/lib/test_kasan_module.c
@@ -19,7 +19,12 @@ static noinline void __init copy_user_test(void)
 {
 	char *kmem;
 	char __user *usermem;
-	size_t size = 128 - KASAN_GRANULE_SIZE;
+	/*
+	 * This is marked volatile to avoid __alloc_size()
+	 * noticing the intentionally out-of-bounds copies
+	 * being done on the allocation.
+	 */
+	volatile size_t size = 128 - KASAN_GRANULE_SIZE;
 	int __maybe_unused unused;
 
 	kmem = kmalloc(size, GFP_KERNEL);
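
The workaround relies on a volatile size no longer being a compile-time constant, so neither the __alloc_size() hint nor the constant-length branch of check_copy_size() can fire. A tiny sketch of that difference (granule size hard-coded purely for illustration):

#include <stddef.h>

size_t plain_size(void)
{
	size_t size = 128 - 8;		/* stand-in for 128 - KASAN_GRANULE_SIZE */

	return size;			/* compiler tracks this as the constant 120 */
}

size_t volatile_size(void)
{
	volatile size_t size = 128 - 8;

	return size;			/* must be re-read; no longer folds to a constant */
}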
Jann Horn Oct. 6, 2021, 3:22 a.m. UTC | #3
On Wed, Oct 6, 2021 at 5:06 AM Kees Cook <keescook@chromium.org> wrote:
> On Tue, Oct 05, 2021 at 06:47:17PM -0700, Andrew Morton wrote:
> > On Thu, 30 Sep 2021 15:27:00 -0700 Kees Cook <keescook@chromium.org> wrote:
> >
> > > As already done in GrapheneOS, add the __alloc_size attribute for regular
> > > kmalloc interfaces, to provide additional hinting for better bounds
> > > checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
> > > optimizations.
> >
> > x86_64 allmodconfig:
>
> What compiler and version?
>
> >
> > In file included from ./arch/x86/include/asm/preempt.h:7,
> >                  from ./include/linux/preempt.h:78,
> >                  from ./include/linux/spinlock.h:55,
> >                  from ./include/linux/mmzone.h:8,
> >                  from ./include/linux/gfp.h:6,
> >                  from ./include/linux/mm.h:10,
> >                  from ./include/linux/mman.h:5,
> >                  from lib/test_kasan_module.c:10:
> > In function 'check_copy_size',
> >     inlined from 'copy_user_test' at ./include/linux/uaccess.h:191:6:
> > ./include/linux/thread_info.h:213:4: error: call to '__bad_copy_to' declared with attribute error: copy destination size is too small
> >   213 |    __bad_copy_to();
> >       |    ^~~~~~~~~~~~~~~
> > In function 'check_copy_size',
> >     inlined from 'copy_user_test' at ./include/linux/uaccess.h:199:6:
> > ./include/linux/thread_info.h:211:4: error: call to '__bad_copy_from' declared with attribute error: copy source size is too small
> >   211 |    __bad_copy_from();
> >       |    ^~~~~~~~~~~~~~~~~
> > make[1]: *** [lib/test_kasan_module.o] Error 1
> > make: *** [lib] Error 2
>
> Hah, yes, it caught an intentionally bad copy. This may bypass the
> check, as I've had to do in LKDTM before. I will test...
>
> diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
> index 7ebf433edef3..9fb2fb2937da 100644
> --- a/lib/test_kasan_module.c
> +++ b/lib/test_kasan_module.c
> @@ -19,7 +19,12 @@ static noinline void __init copy_user_test(void)
>  {
>         char *kmem;
>         char __user *usermem;
> -       size_t size = 128 - KASAN_GRANULE_SIZE;
> +       /*
> +        * This is marked volatile to avoid __alloc_size()
> +        * noticing the intentionally out-of-bounds copies
> +        * being done on the allocation.
> +        */
> +       volatile size_t size = 128 - KASAN_GRANULE_SIZE;

Maybe OPTIMIZER_HIDE_VAR()? The normal version of that abuses an empty
asm statement to hide the value from the compiler.
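
For reference, the kernel's OPTIMIZER_HIDE_VAR() (include/linux/compiler.h) is roughly the empty-asm construct below; the exact constraints may differ between trees, so treat this as a sketch:

#include <stddef.h>

/*
 * Roughly the kernel definition: an empty asm that pretends to rewrite the
 * variable, so the compiler must forget whatever value it had tracked.
 */
#define OPTIMIZER_HIDE_VAR(var) __asm__("" : "=r" (var) : "0" (var))

size_t hidden_size(void)
{
	size_t size = 128 - 8;		/* stand-in for 128 - KASAN_GRANULE_SIZE */

	OPTIMIZER_HIDE_VAR(size);	/* value tracking (and __alloc_size hints) cut off here */
	return size;
}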
Kees Cook Oct. 6, 2021, 3:56 a.m. UTC | #4
On Wed, Oct 06, 2021 at 05:22:06AM +0200, Jann Horn wrote:
> On Wed, Oct 6, 2021 at 5:06 AM Kees Cook <keescook@chromium.org> wrote:
> > On Tue, Oct 05, 2021 at 06:47:17PM -0700, Andrew Morton wrote:
> > > On Thu, 30 Sep 2021 15:27:00 -0700 Kees Cook <keescook@chromium.org> wrote:
> > >
> > > > As already done in GrapheneOS, add the __alloc_size attribute for regular
> > > > kmalloc interfaces, to provide additional hinting for better bounds
> > > > checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
> > > > optimizations.
> > >
> > > x86_64 allmodconfig:
> >
> > What compiler and version?
> >
> > >
> > > In file included from ./arch/x86/include/asm/preempt.h:7,
> > >                  from ./include/linux/preempt.h:78,
> > >                  from ./include/linux/spinlock.h:55,
> > >                  from ./include/linux/mmzone.h:8,
> > >                  from ./include/linux/gfp.h:6,
> > >                  from ./include/linux/mm.h:10,
> > >                  from ./include/linux/mman.h:5,
> > >                  from lib/test_kasan_module.c:10:
> > > In function 'check_copy_size',
> > >     inlined from 'copy_user_test' at ./include/linux/uaccess.h:191:6:
> > > ./include/linux/thread_info.h:213:4: error: call to '__bad_copy_to' declared with attribute error: copy destination size is too small
> > >   213 |    __bad_copy_to();
> > >       |    ^~~~~~~~~~~~~~~
> > > In function 'check_copy_size',
> > >     inlined from 'copy_user_test' at ./include/linux/uaccess.h:199:6:
> > > ./include/linux/thread_info.h:211:4: error: call to '__bad_copy_from' declared with attribute error: copy source size is too small
> > >   211 |    __bad_copy_from();
> > >       |    ^~~~~~~~~~~~~~~~~
> > > make[1]: *** [lib/test_kasan_module.o] Error 1
> > > make: *** [lib] Error 2
> >
> > Hah, yes, it caught an intentionally bad copy. This may bypass the
> > check, as I've had to do in LKDTM before. I will test...
> >
> > diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
> > index 7ebf433edef3..9fb2fb2937da 100644
> > --- a/lib/test_kasan_module.c
> > +++ b/lib/test_kasan_module.c
> > @@ -19,7 +19,12 @@ static noinline void __init copy_user_test(void)
> >  {
> >         char *kmem;
> >         char __user *usermem;
> > -       size_t size = 128 - KASAN_GRANULE_SIZE;
> > +       /*
> > +        * This is marked volatile to avoid __alloc_size()
> > +        * noticing the intentionally out-of-bounds copies
> > +        * being done on the allocation.
> > +        */
> > +       volatile size_t size = 128 - KASAN_GRANULE_SIZE;
> 
> Maybe OPTIMIZER_HIDE_VAR()? The normal version of that abuses an empty
> asm statement to hide the value from the compiler.

Oh! I hadn't seen that before. Is that better than volatile in this
case?
Jann Horn Oct. 6, 2021, 4:52 a.m. UTC | #5
On Wed, Oct 6, 2021 at 5:56 AM Kees Cook <keescook@chromium.org> wrote:
> On Wed, Oct 06, 2021 at 05:22:06AM +0200, Jann Horn wrote:
> > On Wed, Oct 6, 2021 at 5:06 AM Kees Cook <keescook@chromium.org> wrote:
> > > On Tue, Oct 05, 2021 at 06:47:17PM -0700, Andrew Morton wrote:
> > > > On Thu, 30 Sep 2021 15:27:00 -0700 Kees Cook <keescook@chromium.org> wrote:
> > > >
> > > > > As already done in GrapheneOS, add the __alloc_size attribute for regular
> > > > > kmalloc interfaces, to provide additional hinting for better bounds
> > > > > checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
> > > > > optimizations.
> > > >
> > > > x86_64 allmodconfig:
> > >
> > > What compiler and version?
> > >
> > > >
> > > > In file included from ./arch/x86/include/asm/preempt.h:7,
> > > >                  from ./include/linux/preempt.h:78,
> > > >                  from ./include/linux/spinlock.h:55,
> > > >                  from ./include/linux/mmzone.h:8,
> > > >                  from ./include/linux/gfp.h:6,
> > > >                  from ./include/linux/mm.h:10,
> > > >                  from ./include/linux/mman.h:5,
> > > >                  from lib/test_kasan_module.c:10:
> > > > In function 'check_copy_size',
> > > >     inlined from 'copy_user_test' at ./include/linux/uaccess.h:191:6:
> > > > ./include/linux/thread_info.h:213:4: error: call to '__bad_copy_to' declared with attribute error: copy destination size is too small
> > > >   213 |    __bad_copy_to();
> > > >       |    ^~~~~~~~~~~~~~~
> > > > In function 'check_copy_size',
> > > >     inlined from 'copy_user_test' at ./include/linux/uaccess.h:199:6:
> > > > ./include/linux/thread_info.h:211:4: error: call to '__bad_copy_from' declared with attribute error: copy source size is too small
> > > >   211 |    __bad_copy_from();
> > > >       |    ^~~~~~~~~~~~~~~~~
> > > > make[1]: *** [lib/test_kasan_module.o] Error 1
> > > > make: *** [lib] Error 2
> > >
> > > Hah, yes, it caught an intentionally bad copy. This may bypass the
> > > check, as I've had to do in LKDTM before. I will test...
> > >
> > > diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
> > > index 7ebf433edef3..9fb2fb2937da 100644
> > > --- a/lib/test_kasan_module.c
> > > +++ b/lib/test_kasan_module.c
> > > @@ -19,7 +19,12 @@ static noinline void __init copy_user_test(void)
> > >  {
> > >         char *kmem;
> > >         char __user *usermem;
> > > -       size_t size = 128 - KASAN_GRANULE_SIZE;
> > > +       /*
> > > +        * This is marked volatile to avoid __alloc_size()
> > > +        * noticing the intentionally out-of-bounds copies
> > > +        * being done on the allocation.
> > > +        */
> > > +       volatile size_t size = 128 - KASAN_GRANULE_SIZE;
> >
> > Maybe OPTIMIZER_HIDE_VAR()? The normal version of that abuses an empty
> > asm statement to hide the value from the compiler.
>
> Oh! I hadn't seen that before. Is that better than volatile in this
> case?

It forces the compiler to assume an arbitrary modification of the
value, but doesn't force allocation of a stack slot like "volatile"
does, so it probably generates nicer code? Not that it really matters here...

It also has the difference that you can explicitly specify where the
compiler's analysis should cut off, instead of just doing it on every
access to a variable. But I guess maybe in this case, that's an
argument in favor of "volatile"? I don't know... I guess maybe
"volatile" does make sense here.

Patch

diff --git a/include/linux/slab.h b/include/linux/slab.h
index d9f14125d7a2..844b776deecf 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -181,7 +181,7 @@  int kmem_cache_shrink(struct kmem_cache *s);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags);
+void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
 size_t __ksize(const void *objp);
@@ -425,7 +425,7 @@  static __always_inline unsigned int __kmalloc_index(size_t size,
 #define kmalloc_index(s) __kmalloc_index(s, true)
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *s, void *objp);
 
@@ -449,11 +449,12 @@  static __always_inline void kfree_bulk(size_t size, void **p)
 }
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
+							 __alloc_size(1);
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
 									 __malloc;
 #else
-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __kmalloc(size, flags);
 }
@@ -466,23 +467,23 @@  static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 
 #ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
-				   __assume_slab_alignment __malloc;
+				   __assume_slab_alignment __alloc_size(3);
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-					 int node, size_t size) __assume_slab_alignment __malloc;
+					 int node, size_t size) __assume_slab_alignment
+								__alloc_size(4);
 #else
-static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-							 gfp_t gfpflags, int node,
-							 size_t size)
+static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+						 gfp_t gfpflags, int node, size_t size)
 {
 	return kmem_cache_alloc_trace(s, gfpflags, size);
 }
 #endif /* CONFIG_NUMA */
 
 #else /* CONFIG_TRACING */
-static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags,
-						    size_t size)
+static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
+								    gfp_t flags, size_t size)
 {
 	void *ret = kmem_cache_alloc(s, flags);
 
@@ -501,19 +502,20 @@  static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
 #endif /* CONFIG_TRACING */
 
 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
-									 __malloc;
+									 __alloc_size(1);
 
 #ifdef CONFIG_TRACING
 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-				__assume_page_alignment __malloc;
+				__assume_page_alignment __alloc_size(1);
 #else
-static __always_inline void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
+								 unsigned int order)
 {
 	return kmalloc_order(size, flags, order);
 }
 #endif
 
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
 {
 	unsigned int order = get_order(size);
 	return kmalloc_order_trace(size, flags, order);
@@ -573,7 +575,7 @@  static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  *	Try really hard to succeed the allocation but fail
  *	eventually.
  */
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 #ifndef CONFIG_SLOB
@@ -595,7 +597,7 @@  static __always_inline void *kmalloc(size_t size, gfp_t flags)
 	return __kmalloc(size, flags);
 }
 
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 #ifndef CONFIG_SLOB
 	if (__builtin_constant_p(size) &&
@@ -619,7 +621,7 @@  static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
 	size_t bytes;
 
@@ -637,8 +639,10 @@  static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  * @new_size: new size of a single member of the array
  * @flags: the type of memory to allocate (see kmalloc)
  */
-static inline void * __must_check krealloc_array(void *p, size_t new_n, size_t new_size,
-						 gfp_t flags)
+static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
+								    size_t new_n,
+								    size_t new_size,
+								    gfp_t flags)
 {
 	size_t bytes;
 
@@ -654,7 +658,7 @@  static inline void * __must_check krealloc_array(void *p, size_t new_n, size_t n
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
 {
 	return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
@@ -667,12 +671,13 @@  static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
+extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
+				   __alloc_size(1);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
 
-static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
-				       int node)
+static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+							  int node)
 {
 	size_t bytes;
 
@@ -683,7 +688,7 @@  static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
 	return __kmalloc_node(bytes, flags, node);
 }
 
-static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
 {
 	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
 }
@@ -691,7 +696,7 @@  static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
 
 #ifdef CONFIG_NUMA
 extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
-					 unsigned long caller);
+					 unsigned long caller) __alloc_size(1);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
 			_RET_IP_)
@@ -716,7 +721,7 @@  static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline void *kzalloc(size_t size, gfp_t flags)
+static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
 {
 	return kmalloc(size, flags | __GFP_ZERO);
 }
@@ -727,7 +732,7 @@  static inline void *kzalloc(size_t size, gfp_t flags)
  * @flags: the type of memory to allocate (see kmalloc).
  * @node: memory node from which to allocate
  */
-static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
+static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
 {
 	return kmalloc_node(size, flags | __GFP_ZERO, node);
 }