@@ -3693,7 +3693,7 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	copy_user_huge_page(new_page, old_page, address, vma,
 			    pages_per_huge_page(h));
-	__SetPageUptodate(new_page);
+	SetPageUptodate(new_page);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
 				haddr + huge_page_size(h));
@@ -3879,7 +3879,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			goto out;
 		}
 		clear_huge_page(page, address, pages_per_huge_page(h));
-		__SetPageUptodate(page);
+		SetPageUptodate(page);
 		new_page = true;
 
 		if (vma->vm_flags & VM_MAYSHARE) {
@@ -4180,11 +4180,11 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	}
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
+	 * The memory barrier inside SetPageUptodate makes sure that
 	 * preceding stores to the page contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	SetPageUptodate(page);
 
 	mapping = dst_vma->vm_file->f_mapping;
 	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
We don't want to expose a hugetlb page to the fast gup running on a
remote CPU before the local non-atomic op __SetPageUptodate() is
visible first.

For a hugetlb page, there is no memory barrier between the non-atomic
op and set_huge_pte_at(). Therefore, the page can appear to the fast
gup before the flag does. There is no evidence this would cause any
problem, but there is no point risking the race either.

This patch simply replaces 3 uses of the non-atomic op with its atomic
version throughout mm/hugetlb.c. The only use left, in
hugetlbfs_fallocate(), is safe because huge_add_to_page_cache() serves
as a valid write barrier.

Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 mm/hugetlb.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
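
For reference, the two helpers differ only in whether PG_uptodate is
set with an atomic or a non-atomic bitop. A minimal sketch, modeled on
include/linux/page-flags.h (simplified; VM_BUG_ON_PAGE checks omitted,
not the exact kernel definitions):

static __always_inline void __SetPageUptodate(struct page *page)
{
	smp_wmb();	/* order page-content stores before the flag */
	__set_bit(PG_uptodate, &page->flags);	/* non-atomic RMW */
}

static __always_inline void SetPageUptodate(struct page *page)
{
	smp_wmb();	/* order page-content stores before the flag */
	set_bit(PG_uptodate, &page->flags);	/* atomic RMW */
}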