@@ -222,10 +222,23 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#endif /* CONFIG_MEM_ALLOC_PROFILING */
+/* See https://lore.kernel.org/all/202501310832.kiAeOt2z-lkp@intel.com/ */
+#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION >= 140000 && CONFIG_CLANG_VERSION < 150000
+static inline bool store_current_tag(void)
+{
+ return true;
+}
+#else
+static inline bool store_current_tag(void)
+{
+ return mem_alloc_profiling_enabled();
+}
+#endif
+
#define alloc_hooks_tag(_tag, _do_alloc) \
({ \
typeof(_do_alloc) _res; \
- if (mem_alloc_profiling_enabled()) { \
+ if (store_current_tag()) { \
struct alloc_tag * __maybe_unused _old; \
_old = alloc_tag_save(_tag); \
_res = _do_alloc; \
Additional condition in the allocation hooks causes Clang version 14
(tested on 14.0.6) to treat the allocated object size as unknown at
compile-time (__builtin_object_size(obj, 1) returns -1) even though both
branches of that condition yield the same result. Other versions of
Clang (tested with 13.0.1, 15.0.7, 16.0.6 and 17.0.6) compile the same
code without issues. Add a build-time Clang version check which removes
this condition and effectively restores the unconditional tag
store/restore flow when compiled with clang-14.

Fixes: 07438779313c ("alloc_tag: avoid current->alloc_tag manipulations when profiling is disabled")
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202501310832.kiAeOt2z-lkp@intel.com/
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/alloc_tag.h | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

base-commit: 60c828cf80c07394762a1edfaff63bea55cc8e45