@@ -2579,6 +2579,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_ANON 0x8000 /* don't do file mappings */
#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
+#define FOLL_HUGE 0x40000 /* only return huge mappings */
/*
* NOTE on FOLL_LONGTERM:
@@ -361,9 +361,11 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
if (page)
return page;
}
- if (likely(!pmd_trans_huge(pmdval)))
+ if (likely(!pmd_trans_huge(pmdval))) {
+ if (flags & FOLL_HUGE)
+ return ERR_PTR(-EFAULT);
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
-
+ }
if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
return no_page_table(vma, flags);
@@ -382,6 +384,8 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
}
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(ptl);
+ if (flags & FOLL_HUGE)
+ return ERR_PTR(-EFAULT);
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
@@ -513,6 +517,8 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
struct page *page;
struct mm_struct *mm = vma->vm_mm;
+ VM_BUG_ON((flags & (FOLL_SPLIT | FOLL_HUGE)) == (FOLL_SPLIT | FOLL_HUGE));
+
ctx->page_mask = 0;
/* make this handle hugepd */
@@ -685,6 +691,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
return -EFAULT;
+ if (gup_flags & FOLL_HUGE && !transparent_hugepage_enabled(vma))
+ return -EFAULT;
+
if (write) {
if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE))
Sometimes a caller needs to look up a transparent huge page mapped at a given address. Instead of being handed a normal page and having to test it for hugeness, save some cycles by filtering out PTE-level mappings during the walk itself. Signed-off-by: Mircea Cirjaliu <mcirjaliu@bitdefender.com> --- include/linux/mm.h | 1 + mm/gup.c | 13 +++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-)