@@ -68,22 +68,26 @@ static void check_poison_mem(unsigned char *mem, size_t bytes)
static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
unsigned char *start;
unsigned char *end;
+ int pattern = PAGE_POISON;
if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
return;
- start = memchr_inv(mem, PAGE_POISON, bytes);
+ if (static_branch_unlikely(&init_on_free))
+ pattern = 0;
+
+ start = memchr_inv(mem, pattern, bytes);
if (!start)
return;
for (end = mem + bytes - 1; end > start; end--) {
- if (*end != PAGE_POISON)
+ if (*end != pattern)
break;
}
if (!__ratelimit(&ratelimit))
return;
- else if (start == end && single_bit_flip(*start, PAGE_POISON))
+ else if (start == end && single_bit_flip(*start, pattern))
pr_err("pagealloc: single bit error\n");
else
pr_err("pagealloc: memory corruption\n");
The linux-next commit "mm: security: introduce init_on_alloc=1 and init_on_free=1 boot options" [1] introduced a false positive when init_on_free=1 and page_poison=on, because page_poison expects the pattern 0xaa when allocating pages, but those pages have already been overwritten with 0 by init_on_free=1. It is not possible to swap the order of kernel_init_free_pages() and kernel_poison_pages() in free_pages_prepare(), because at least on powerpc the former will call clear_page(), and the subsequent access by kernel_poison_pages() would then trigger "kernel access of bad area" errors. Fix it by treating init_on_free=1 the same as CONFIG_PAGE_POISONING_ZERO=y. [1] https://patchwork.kernel.org/patch/10999465/ Signed-off-by: Qian Cai <cai@lca.pw> --- mm/page_poison.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-)