@@ -834,6 +834,9 @@ void *__kmalloc_large_noprof(size_t size, gfp_t flags)
void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
__assume_page_alignment __alloc_size(1);
+void *__kmalloc_node_inline(size_t size, kmem_buckets *b, gfp_t flags,
+                            int node, unsigned long caller);
+
/**
* kmalloc - allocate kernel memory
* @size: how many bytes of memory are required.
@@ -4319,6 +4319,13 @@ void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flag
}
EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
+__always_inline void *__kmalloc_node_inline(size_t size, kmem_buckets *b,
+                                            gfp_t flags, int node,
+                                            unsigned long caller)
+{
+        return __do_kmalloc_node(PASS_BUCKET_PARAMS(size, b), flags, node, caller);
+}
+
void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
@@ -642,9 +642,9 @@ void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
* It doesn't really make sense to fallback to vmalloc for sub page
* requests
*/
-        ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
+        ret = __kmalloc_node_inline(size, PASS_BUCKET_PARAM(b),
                                     kmalloc_gfp_adjust(flags, size),
-                                    node);
+                                    node, _RET_IP_);
if (ret || size <= PAGE_SIZE)
return ret;
As revealed by this writeup[1], because __kmalloc_node (now renamed to
__kmalloc_node_noprof) is an exported symbol and will never get inlined,
using it in kvmalloc_node (now __kvmalloc_node_noprof) makes the
_RET_IP_ inside it always point to the same address:

    upper_caller
      kvmalloc
        kvmalloc_node
          kvmalloc_node_noprof
            __kvmalloc_node_noprof      <-- all macros all the way down here
              __kmalloc_node_noprof
                __do_kmalloc_node(.., _RET_IP_)
                  ...                   <-- _RET_IP_ points to

That means every kmalloc invoked via kvmalloc feeds the same caller
address into the cache randomization hash (CONFIG_RANDOM_KMALLOC_CACHES),
so all such allocations land in the same kmalloc cache copy and the
hardening becomes ineffective.

The root cause of this problem, IMHO, is that _RET_IP_ alone cannot
identify the actual allocation site when kmalloc is called inside
wrappers or helper functions, and I believe similar cases exist in
other functions. I haven't come up with a good general solution, so
let's fix this specific case first.

For __kvmalloc_node_noprof, replace __kmalloc_node_noprof with an
inline version, so that _RET_IP_ takes the return address of kvmalloc
and can differentiate each kvmalloc call site:

    upper_caller
      kvmalloc
        kvmalloc_node
          kvmalloc_node_noprof
            __kvmalloc_node_noprof      <-- all macros all the way down here
              __kmalloc_node_inline(.., _RET_IP_)
                ...                     <-- _RET_IP_ points to

Thanks to Tamás Koczka for the report and discussion!

Link: https://github.com/google/security-research/pull/83/files#diff-1604319b55a48c39a210ee52034ed7ff5b9cdc3d704d2d9e34eb230d19fae235R200 [1]
Signed-off-by: GONG Ruiqi <gongruiqi1@huawei.com>
---
 include/linux/slab.h | 3 +++
 mm/slub.c            | 7 +++++++
 mm/util.c            | 4 ++--
 3 files changed, 12 insertions(+), 2 deletions(-)
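A note for reviewers, not meant for the commit log: below is a
self-contained userspace sketch of the failure mode. The constants and
the mix function are invented (pick_cache_copy() only stands in, roughly,
for the hash_64(caller ^ random_kmalloc_seed, ...) computation that
CONFIG_RANDOM_KMALLOC_CACHES performs in kmalloc_type()), but it shows
why a constant _RET_IP_ collapses all kvmalloc() allocations into one
cache copy, while a per-call-site _RET_IP_ spreads them across the
copies:

/* ret_ip_sketch.c -- illustrative only; names and constants are invented */
#include <stdio.h>
#include <stdint.h>

#define CACHE_COPIES 16  /* stand-in for RANDOM_KMALLOC_CACHES_NR + 1 */

static const uint64_t boot_seed = 0x9ae16a3b2f90404fULL; /* "per-boot" seed */

/* Toy mixer standing in for hash_64(caller ^ random_kmalloc_seed, ...). */
static unsigned int pick_cache_copy(uint64_t caller)
{
        uint64_t h = (caller ^ boot_seed) * 0x9e3779b97f4a7c15ULL;

        return (unsigned int)(h >> 32) % CACHE_COPIES;
}

int main(void)
{
        /* Before the fix: _RET_IP_ always resolves to the one call site
         * inside __kvmalloc_node_noprof(), so every kvmalloc() user
         * shares a single cache copy. */
        uint64_t fixed_ip = 0xffffffff81234567ULL;

        /* After the fix: _RET_IP_ is taken in __kvmalloc_node_noprof()
         * itself and passed down, so it differs per kvmalloc() call site. */
        uint64_t per_site_ips[] = { 0xffffffff81aa0010ULL,
                                    0xffffffff81bb0020ULL,
                                    0xffffffff81cc0030ULL };
        unsigned int i;

        printf("fixed _RET_IP_ -> copy %u (same for all users)\n",
               pick_cache_copy(fixed_ip));
        for (i = 0; i < 3; i++)
                printf("call site %u   -> copy %u\n", i,
                       pick_cache_copy(per_site_ips[i]));
        return 0;
}

Compiled with plain gcc, the first printf reports one fixed copy index
for every caller, while the loop typically reports differing ones; that
is the distinction this patch restores for kvmalloc() users.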