@@ -685,6 +685,7 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
return __kmalloc_noprof(size, flags);
}
#define kmalloc_sized(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__))
+#define kmalloc_aligned(size, align, gfp) alloc_hooks(kmalloc_noprof(size, gfp))
#define __size_force_positive(x) \
({ \
@@ -701,7 +702,10 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
signed char: kmalloc_sized(__size_force_positive(p), gfp), \
signed short: kmalloc_sized(__size_force_positive(p), gfp), \
signed int: kmalloc_sized(__size_force_positive(p), gfp), \
- signed long: kmalloc_sized(__size_force_positive(p), gfp))
+ signed long: kmalloc_sized(__size_force_positive(p), gfp), \
+ default: (typeof(__force_ptr_expr(p)))kmalloc_aligned( \
+ sizeof(*__force_ptr_expr(p)), \
+ __alignof__(*__force_ptr_expr(p)), gfp))
#define kmem_buckets_alloc(_b, _size, _flags) \
alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
@@ -816,14 +820,11 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_
/**
* kzalloc - allocate memory. The memory is set to zero.
- * @size: how many bytes of memory are required.
+ * @p: either a pointer to an object to be allocated, or the number of bytes of memory required.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
-{
- return kmalloc_noprof(size, flags | __GFP_ZERO);
-}
-#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__))
+#define kzalloc(_p, _flags) kmalloc(_p, (_flags)|__GFP_ZERO)
+#define kzalloc_noprof(_size, _flags) kmalloc_noprof(_size, (_flags)|__GFP_ZERO)
#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
There is currently no way for the slab allocator to know what type is being
allocated, and this hampers the development of any logic that would need that
information, including basic type checking, alignment analysis, etc. Allow the
size argument to optionally be a variable, from which the type (and thereby
the size, alignment, or any other attribute) can be determined at compile
time. This allows for the incremental replacement of the classic code pattern:

	obj = kmalloc(sizeof(*obj), gfp);

with:

	obj = kmalloc(obj, gfp);

As an additional build-time safety feature, the return value of kmalloc() also
becomes typed, so that the assignment and the first argument cannot drift
apart, doing away with the other, more fragile, classic code pattern:

	obj = kmalloc(sizeof(struct the_object), gfp);

which likewise becomes:

	obj = kmalloc(obj, gfp);

And any accidental variable drift will not be masked by the traditional
default "void *" return value:

	obj = kmalloc(something_else, gfp);

	error: assignment to 'struct the_object *' from incompatible pointer
	type 'struct foo *' [-Wincompatible-pointer-types]
	   71 |         obj = kmalloc(something_else, gfp);
	      |             ^

This also opens the door for a proposed heap hardening feature that would
randomize the starting offset of the allocated object within its power-of-2
bucket. Without being able to introspect the type for its alignment needs,
this cannot be done safely (or at least not without significant memory usage
overhead). For example, a 132 byte structure with an 8 byte alignment could be
randomized into 15 locations within the 256 byte bucket: (256 - 132) / 8.

Signed-off-by: Kees Cook <kees@kernel.org>
---
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: linux-mm@kvack.org
---
 include/linux/slab.h | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
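
For anyone less familiar with the C11 _Generic selection used in the hunks
above, here is a minimal userspace sketch of the same dispatch idea. It is
only an illustration under assumed names: alloc_obj() and force_ptr() are
made up and are not part of this patch or of any kernel API. The macro
accepts either a byte count or a typed pointer, derives the allocation size
from the pointee type in the pointer case, and casts the result to the
pointer's own type so that a mismatched assignment trips
-Wincompatible-pointer-types, as described above. It builds with gcc or
clang at -std=c11 (it uses the __typeof__ extension):

	#include <stdio.h>
	#include <stdlib.h>

	/*
	 * Map integer arguments to a dummy (char *)0 so that the default
	 * branch below can take sizeof(*force_ptr(p)) and
	 * __typeof__(force_ptr(p)) for any argument type without ever
	 * evaluating p.
	 */
	#define force_ptr(p)				\
		_Generic((p),				\
			unsigned long: (char *)0,	\
			default: (p))

	/*
	 * Size form: a plain malloc() of that many bytes (void * result).
	 * Pointer form: the size comes from the pointee type and the result
	 * is cast to the pointer's own type. Only unsigned long is handled
	 * as a size here for brevity; the patch lists each integer type.
	 */
	#define alloc_obj(p)						\
		_Generic((p),						\
			unsigned long: malloc((unsigned long)(p)),	\
			default: (__typeof__(force_ptr(p)))		\
				malloc(sizeof(*force_ptr(p))))

	struct the_object {
		int a;
		long b;
	};

	int main(void)
	{
		/* Classic size-based form still works: */
		void *buf = alloc_obj(128UL);

		/*
		 * Type-based form: "obj" only appears inside sizeof() and
		 * __typeof__(), so it is never evaluated and may legally be
		 * uninitialized at this point.
		 */
		struct the_object *obj;
		obj = alloc_obj(obj);

		printf("allocated %zu bytes for obj\n", sizeof(*obj));
		free(buf);
		free(obj);
		return 0;
	}

The kernel version is necessarily more involved (alloc_hooks() wrapping, an
explicit branch per integer type, __size_force_positive() for the signed size
branches, and kmalloc_aligned() carrying __alignof__() for the proposed offset
randomization), but the selection mechanism is the same.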