@@ -825,7 +825,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
error = PTR_ERR(folio);
goto out;
}
- folio_zero_user(folio, ALIGN_DOWN(addr, hpage_size));
+ folio_zero_user(folio, addr);
__folio_mark_uptodate(folio);
error = hugetlb_add_to_page_cache(folio, mapping, index);
if (unlikely(error)) {
@@ -6802,6 +6802,7 @@ static void clear_gigantic_page(struct folio *folio, unsigned long addr,
int i;
might_sleep();
+ addr = ALIGN_DOWN(addr, folio_size(folio));
for (i = 0; i < nr_pages; i++) {
cond_resched();
clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
When clearing a gigantic page, clear_gigantic_page() zeroes the page from the first subpage to the last subpage — that is, it needs the aligned base address itself — so we don't need to align the address down in the caller, as the real (unaligned) address will be passed to process_huge_page(). Fixes: 78fefd04c123 ("mm: memory: convert clear_huge_page() to folio_zero_user()") Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> --- fs/hugetlbfs/inode.c | 2 +- mm/memory.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-)