@@ -1638,7 +1638,7 @@ extern void dump_page(struct page *page);
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
-			    unsigned long addr,
+			    unsigned long haddr, unsigned long fault_address,
 			    unsigned int pages_per_huge_page);
 extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned long addr, struct vm_area_struct *vma,
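The prototype now carries two addresses: haddr, the huge-page-aligned base that the clearing loop walks, and fault_address, the exact user address that triggered the fault. A minimal caller sketch for the THP case, purely as illustration and not part of the patch (it assumes the usual haddr = address & HPAGE_PMD_MASK relationship used on the do_huge_pmd_anonymous_page path):

	unsigned long haddr = address & HPAGE_PMD_MASK;	/* aligned base of the huge page */
	clear_huge_page(page, haddr, address, HPAGE_PMD_NR);	/* also pass the exact fault address */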
@@ -644,7 +644,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	if (unlikely(!pgtable))
 		return VM_FAULT_OOM;
 
-	clear_huge_page(page, haddr, HPAGE_PMD_NR);
+	clear_huge_page(page, haddr, address, HPAGE_PMD_NR);
 	__SetPageUptodate(page);
 
 	spin_lock(&mm->page_table_lock);
@@ -2718,7 +2718,8 @@ retry:
 			ret = VM_FAULT_SIGBUS;
 			goto out;
 		}
-		clear_huge_page(page, haddr, pages_per_huge_page(h));
+		clear_huge_page(page, haddr, fault_address,
+				pages_per_huge_page(h));
 		__SetPageUptodate(page);
 
 		if (vma->vm_flags & VM_MAYSHARE) {
@@ -3984,19 +3984,20 @@ static void clear_gigantic_page(struct page *page,
 	}
 }
 void clear_huge_page(struct page *page,
-		     unsigned long addr, unsigned int pages_per_huge_page)
+		     unsigned long haddr, unsigned long fault_address,
+		     unsigned int pages_per_huge_page)
 {
 	int i;
 
 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-		clear_gigantic_page(page, addr, pages_per_huge_page);
+		clear_gigantic_page(page, haddr, pages_per_huge_page);
 		return;
 	}
 
 	might_sleep();
 	for (i = 0; i < pages_per_huge_page; i++) {
 		cond_resched();
-		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
+		clear_user_highpage(page + i, haddr + i * PAGE_SIZE);
 	}
 }
 
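As posted, clear_huge_page() only threads fault_address through: the loop above still clears subpages in order starting from haddr and never reads the new argument. One plausible way a follow-up could consume it, sketched here purely as an illustration and as my assumption rather than anything this diff contains, is to clear the subpage containing fault_address last so the memory the faulting thread touches first is still warm in the cache:

/*
 * Illustrative sketch only (assumed follow-up use of fault_address,
 * not from this patch): defer the faulting subpage to the end of the
 * clearing loop so it stays cache-hot for the faulting thread.
 */
void clear_huge_page(struct page *page,
		     unsigned long haddr, unsigned long fault_address,
		     unsigned int pages_per_huge_page)
{
	int i, target = (fault_address - haddr) >> PAGE_SHIFT;

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, haddr, pages_per_huge_page);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		if (i == target)	/* postpone the faulting subpage */
			continue;
		cond_resched();
		clear_user_highpage(page + i, haddr + i * PAGE_SIZE);
	}
	/* clear it last so the faulting access hits warm cache lines */
	clear_user_highpage(page + target, haddr + target * PAGE_SIZE);
}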