[RFC,v2,11/19] mm/gup: Pass follow_page_context further down the call stack

Message ID 20190809225833.6657-12-ira.weiny@intel.com
State RFC
Series RDMA/FS DAX truncate proposal V1,000,002 ;-)

Commit Message

Ira Weiny Aug. 9, 2019, 10:58 p.m. UTC
From: Ira Weiny <ira.weiny@intel.com>

In preparation for passing more information (vaddr_pin) into
follow_page_pte(), follow_devmap_pud(), and follow_devmap_pmd(), pass
the whole follow_page_context down the call stack instead of a bare
dev_pagemap pointer, and move its definition to mm/internal.h.

Signed-off-by: Ira Weiny <ira.weiny@intel.com>
---
 include/linux/huge_mm.h | 17 -----------------
 mm/gup.c                | 31 +++++++++++++++----------------
 mm/huge_memory.c        |  6 ++++--
 mm/internal.h           | 28 ++++++++++++++++++++++++++++
 4 files changed, 47 insertions(+), 35 deletions(-)
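
[ Not part of the commit: a minimal userspace toy of the pattern this
patch applies, for reviewers skimming the diff.  All names below are
stand-ins rather than kernel code, and struct vaddr_pin itself is
introduced earlier in this series. ]

/*
 * Toy sketch: thread one context struct down the call stack instead of
 * a bare out-parameter, so new fields (vaddr_pin) ride along without
 * further signature churn.  Compiles standalone with any C compiler.
 */
#include <stdio.h>

struct follow_ctx {
	int pgmap;		/* stands in for struct dev_pagemap * */
	unsigned int page_mask;
	void *vaddr_pin;	/* the field later patches will consume */
};

static void leaf(struct follow_ctx *ctx)
{
	int *pgmap = &ctx->pgmap;	/* callee recovers the old out-param */

	*pgmap = 42;			/* existing "*pgmap" users unchanged */
}

static void mid(struct follow_ctx *ctx)
{
	leaf(ctx);			/* call sites keep a single argument */
}

int main(void)
{
	struct follow_ctx ctx = { 0, 0, NULL };

	mid(&ctx);
	printf("pgmap=%d\n", ctx.pgmap);
	return 0;
}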

Comments

John Hubbard Aug. 10, 2019, 12:18 a.m. UTC | #1
On 8/9/19 3:58 PM, ira.weiny@intel.com wrote:
> From: Ira Weiny <ira.weiny@intel.com>
> 
> In preparation for passing more information (vaddr_pin) into
> follow_page_pte(), follow_devmap_pud(), and follow_devmap_pmd(), pass
> the whole follow_page_context down the call stack instead of a bare
> dev_pagemap pointer, and move its definition to mm/internal.h.
> 
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>
> ---
>  include/linux/huge_mm.h | 17 -----------------
>  mm/gup.c                | 31 +++++++++++++++----------------
>  mm/huge_memory.c        |  6 ++++--
>  mm/internal.h           | 28 ++++++++++++++++++++++++++++
>  4 files changed, 47 insertions(+), 35 deletions(-)
> 
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 45ede62aa85b..b01a20ce0bb9 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -233,11 +233,6 @@ static inline int hpage_nr_pages(struct page *page)
>  	return 1;
>  }
>  
> -struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
> -		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
> -struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
> -		pud_t *pud, int flags, struct dev_pagemap **pgmap);
> -
>  extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
>  
>  extern struct page *huge_zero_page;
> @@ -375,18 +370,6 @@ static inline void mm_put_huge_zero_page(struct mm_struct *mm)
>  	return;
>  }
>  
> -static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
> -	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
> -{
> -	return NULL;
> -}
> -
> -static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
> -	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
> -{
> -	return NULL;
> -}
> -
>  static inline bool thp_migration_supported(void)
>  {
>  	return false;
> diff --git a/mm/gup.c b/mm/gup.c
> index 504af3e9a942..a7a9d2f5278c 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -24,11 +24,6 @@
>  
>  #include "internal.h"
>  
> -struct follow_page_context {
> -	struct dev_pagemap *pgmap;
> -	unsigned int page_mask;
> -};
> -
>  /**
>   * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
>   * @pages:  array of pages to be maybe marked dirty, and definitely released.
> @@ -172,8 +167,9 @@ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
>  
>  static struct page *follow_page_pte(struct vm_area_struct *vma,
>  		unsigned long address, pmd_t *pmd, unsigned int flags,
> -		struct dev_pagemap **pgmap)
> +		struct follow_page_context *ctx)
>  {
> +	struct dev_pagemap **pgmap = &ctx->pgmap;
>  	struct mm_struct *mm = vma->vm_mm;
>  	struct page *page;
>  	spinlock_t *ptl;
> @@ -363,13 +359,13 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
>  	}
>  	if (pmd_devmap(pmdval)) {
>  		ptl = pmd_lock(mm, pmd);
> -		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
> +		page = follow_devmap_pmd(vma, address, pmd, flags, ctx);
>  		spin_unlock(ptl);
>  		if (page)
>  			return page;
>  	}
>  	if (likely(!pmd_trans_huge(pmdval)))
> -		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
> +		return follow_page_pte(vma, address, pmd, flags, ctx);
>  
>  	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
>  		return no_page_table(vma, flags);
> @@ -389,7 +385,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
>  	}
>  	if (unlikely(!pmd_trans_huge(*pmd))) {
>  		spin_unlock(ptl);
> -		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
> +		return follow_page_pte(vma, address, pmd, flags, ctx);
>  	}
>  	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
>  		int ret;
> @@ -419,7 +415,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
>  		}
>  
>  		return ret ? ERR_PTR(ret) :
> -			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
> +			follow_page_pte(vma, address, pmd, flags, ctx);
>  	}
>  	page = follow_trans_huge_pmd(vma, address, pmd, flags);
>  	spin_unlock(ptl);
> @@ -456,7 +452,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
>  	}
>  	if (pud_devmap(*pud)) {
>  		ptl = pud_lock(mm, pud);
> -		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
> +		page = follow_devmap_pud(vma, address, pud, flags, ctx);
>  		spin_unlock(ptl);
>  		if (page)
>  			return page;
> @@ -786,7 +782,8 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
>  static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
>  		unsigned long start, unsigned long nr_pages,
>  		unsigned int gup_flags, struct page **pages,
> -		struct vm_area_struct **vmas, int *nonblocking)
> +		struct vm_area_struct **vmas, int *nonblocking,
> +		struct vaddr_pin *vaddr_pin)

I didn't expect to see more vaddr_pin arg passing, based on the commit
description. Did you want this as part of patch 9 or 10 instead? If not,
then let's mention it in the commit description.

>  {
>  	long ret = 0, i = 0;
>  	struct vm_area_struct *vma = NULL;
> @@ -797,6 +794,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
>  
>  	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
>  
> +	ctx.vaddr_pin = vaddr_pin;
> +
>  	/*
>  	 * If FOLL_FORCE is set then do not force a full fault as the hinting
>  	 * fault information is unrelated to the reference behaviour of a task
> @@ -1025,7 +1024,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
>  	lock_dropped = false;
>  	for (;;) {
>  		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
> -				       vmas, locked);
> +				       vmas, locked, vaddr_pin);
>  		if (!locked)
>  			/* VM_FAULT_RETRY couldn't trigger, bypass */
>  			return ret;
> @@ -1068,7 +1067,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
>  		lock_dropped = true;
>  		down_read(&mm->mmap_sem);
>  		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
> -				       pages, NULL, NULL);
> +				       pages, NULL, NULL, vaddr_pin);
>  		if (ret != 1) {
>  			BUG_ON(ret > 1);
>  			if (!pages_done)
> @@ -1226,7 +1225,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
>  	 * not result in a stack expansion that recurses back here.
>  	 */
>  	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
> -				NULL, NULL, nonblocking);
> +				NULL, NULL, nonblocking, NULL);
>  }
>  
>  /*
> @@ -1311,7 +1310,7 @@ struct page *get_dump_page(unsigned long addr)
>  
>  	if (__get_user_pages(current, current->mm, addr, 1,
>  			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
> -			     NULL) < 1)
> +			     NULL, NULL) < 1)
>  		return NULL;
>  	flush_cache_page(vma, addr, page_to_pfn(page));
>  	return page;
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index bc1a07a55be1..7e09f2f17ed8 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -916,8 +916,9 @@ static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
>  }
>  
>  struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
> -		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
> +		pmd_t *pmd, int flags, struct follow_page_context *ctx)
>  {
> +	struct dev_pagemap **pgmap = &ctx->pgmap;
>  	unsigned long pfn = pmd_pfn(*pmd);
>  	struct mm_struct *mm = vma->vm_mm;
>  	struct page *page;
> @@ -1068,8 +1069,9 @@ static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
>  }
>  
>  struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
> -		pud_t *pud, int flags, struct dev_pagemap **pgmap)
> +		pud_t *pud, int flags, struct follow_page_context *ctx)
>  {
> +	struct dev_pagemap **pgmap = &ctx->pgmap;
>  	unsigned long pfn = pud_pfn(*pud);
>  	struct mm_struct *mm = vma->vm_mm;
>  	struct page *page;
> diff --git a/mm/internal.h b/mm/internal.h
> index 0d5f720c75ab..46ada5279856 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -12,6 +12,34 @@
>  #include <linux/pagemap.h>
>  #include <linux/tracepoint-defs.h>
>  
> +struct follow_page_context {
> +	struct dev_pagemap *pgmap;
> +	unsigned int page_mask;
> +	struct vaddr_pin *vaddr_pin;
> +};
> +
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
> +		pmd_t *pmd, int flags, struct follow_page_context *ctx);
> +struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
> +		pud_t *pud, int flags, struct follow_page_context *ctx);
> +#else
> +static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
> +	unsigned long addr, pmd_t *pmd, int flags,
> +	struct follow_page_context *ctx)
> +{
> +	return NULL;
> +}
> +
> +static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
> +	unsigned long addr, pud_t *pud, int flags,
> +	struct follow_page_context *ctx)
> +{
> +	return NULL;
> +}
> +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
> +
> +
>  /*
>   * The set of flags that only affect watermark checking and reclaim
>   * behaviour. This is used by the MM to obey the caller constraints
> 

thanks,
--
John Hubbard
NVIDIA
Ira Weiny Aug. 12, 2019, 7:01 p.m. UTC | #2
On Fri, Aug 09, 2019 at 05:18:31PM -0700, John Hubbard wrote:
> On 8/9/19 3:58 PM, ira.weiny@intel.com wrote:
> > From: Ira Weiny <ira.weiny@intel.com>
> > 
> > In preparation for passing more information (vaddr_pin) into
> > follow_page_pte(), follow_devmap_pud(), and follow_devmap_pmd(), pass
> > the whole follow_page_context down the call stack instead of a bare
> > dev_pagemap pointer, and move its definition to mm/internal.h.
> > 
> > Signed-off-by: Ira Weiny <ira.weiny@intel.com>

[snip]

> > @@ -786,7 +782,8 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
> >  static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
> >  		unsigned long start, unsigned long nr_pages,
> >  		unsigned int gup_flags, struct page **pages,
> > -		struct vm_area_struct **vmas, int *nonblocking)
> > +		struct vm_area_struct **vmas, int *nonblocking,
> > +		struct vaddr_pin *vaddr_pin)
> 
> I didn't expect to see more vaddr_pin arg passing, based on the commit
> description. Did you want this as part of patch 9 or 10 instead? If not,
> then let's mention it in the commit description.

Yeah, that does seem out of place now that I look at it.  I'll add it to the
commit message, because this patch is really about getting vaddr_pin into the
context _and_ passing it down the stack.  With all the rebasing I may have
squashed something I did not mean to.  But I think this patch is ok as it
stands, because it is not too complicated to see what is going on.
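
To make that concrete, the intended flow is roughly the following
(userspace toy with a hypothetical stand-in for struct vaddr_pin,
which the earlier patches in this series define):

#include <stdio.h>

/* Hypothetical stand-in; the real struct vaddr_pin is defined earlier
 * in the series. */
struct vaddr_pin { int id; };

struct follow_page_context {
	void *pgmap;
	unsigned int page_mask;
	struct vaddr_pin *vaddr_pin;
};

/* Leaf helper: after this patch it can reach ctx->vaddr_pin. */
static void follow_page_pte_toy(struct follow_page_context *ctx)
{
	if (ctx->vaddr_pin)
		printf("leaf sees vaddr_pin id=%d\n", ctx->vaddr_pin->id);
}

static void follow_page_mask_toy(struct follow_page_context *ctx)
{
	follow_page_pte_toy(ctx);	/* context rides down unchanged */
}

/* Top of the stack: the new argument lands in the context once. */
static long get_user_pages_toy(struct vaddr_pin *vaddr_pin)
{
	struct follow_page_context ctx = { NULL, 0, NULL };

	ctx.vaddr_pin = vaddr_pin;	/* the hunk John flagged above */
	follow_page_mask_toy(&ctx);
	return 0;
}

int main(void)
{
	struct vaddr_pin pin = { 1 };

	get_user_pages_toy(&pin);
	return 0;
}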

Thanks,
Ira

Patch

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 45ede62aa85b..b01a20ce0bb9 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -233,11 +233,6 @@ static inline int hpage_nr_pages(struct page *page)
 	return 1;
 }
 
-struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
-struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
-		pud_t *pud, int flags, struct dev_pagemap **pgmap);
-
 extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
 
 extern struct page *huge_zero_page;
@@ -375,18 +370,6 @@ static inline void mm_put_huge_zero_page(struct mm_struct *mm)
 	return;
 }
 
-static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
-	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
-{
-	return NULL;
-}
-
-static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
-	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
-{
-	return NULL;
-}
-
 static inline bool thp_migration_supported(void)
 {
 	return false;
diff --git a/mm/gup.c b/mm/gup.c
index 504af3e9a942..a7a9d2f5278c 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -24,11 +24,6 @@
 
 #include "internal.h"
 
-struct follow_page_context {
-	struct dev_pagemap *pgmap;
-	unsigned int page_mask;
-};
-
 /**
  * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
  * @pages:  array of pages to be maybe marked dirty, and definitely released.
@@ -172,8 +167,9 @@ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd, unsigned int flags,
-		struct dev_pagemap **pgmap)
+		struct follow_page_context *ctx)
 {
+	struct dev_pagemap **pgmap = &ctx->pgmap;
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page;
 	spinlock_t *ptl;
@@ -363,13 +359,13 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	}
 	if (pmd_devmap(pmdval)) {
 		ptl = pmd_lock(mm, pmd);
-		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
+		page = follow_devmap_pmd(vma, address, pmd, flags, ctx);
 		spin_unlock(ptl);
 		if (page)
 			return page;
 	}
 	if (likely(!pmd_trans_huge(pmdval)))
-		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
+		return follow_page_pte(vma, address, pmd, flags, ctx);
 
 	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
 		return no_page_table(vma, flags);
@@ -389,7 +385,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	}
 	if (unlikely(!pmd_trans_huge(*pmd))) {
 		spin_unlock(ptl);
-		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
+		return follow_page_pte(vma, address, pmd, flags, ctx);
 	}
 	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
 		int ret;
@@ -419,7 +415,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 		}
 
 		return ret ? ERR_PTR(ret) :
-			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
+			follow_page_pte(vma, address, pmd, flags, ctx);
 	}
 	page = follow_trans_huge_pmd(vma, address, pmd, flags);
 	spin_unlock(ptl);
@@ -456,7 +452,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 	}
 	if (pud_devmap(*pud)) {
 		ptl = pud_lock(mm, pud);
-		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
+		page = follow_devmap_pud(vma, address, pud, flags, ctx);
 		spin_unlock(ptl);
 		if (page)
 			return page;
@@ -786,7 +782,8 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, unsigned long nr_pages,
 		unsigned int gup_flags, struct page **pages,
-		struct vm_area_struct **vmas, int *nonblocking)
+		struct vm_area_struct **vmas, int *nonblocking,
+		struct vaddr_pin *vaddr_pin)
 {
 	long ret = 0, i = 0;
 	struct vm_area_struct *vma = NULL;
@@ -797,6 +794,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
 
+	ctx.vaddr_pin = vaddr_pin;
+
 	/*
 	 * If FOLL_FORCE is set then do not force a full fault as the hinting
 	 * fault information is unrelated to the reference behaviour of a task
@@ -1025,7 +1024,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 	lock_dropped = false;
 	for (;;) {
 		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
-				       vmas, locked);
+				       vmas, locked, vaddr_pin);
 		if (!locked)
 			/* VM_FAULT_RETRY couldn't trigger, bypass */
 			return ret;
@@ -1068,7 +1067,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 		lock_dropped = true;
 		down_read(&mm->mmap_sem);
 		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
-				       pages, NULL, NULL);
+				       pages, NULL, NULL, vaddr_pin);
 		if (ret != 1) {
 			BUG_ON(ret > 1);
 			if (!pages_done)
@@ -1226,7 +1225,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 	 * not result in a stack expansion that recurses back here.
 	 */
 	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
-				NULL, NULL, nonblocking);
+				NULL, NULL, nonblocking, NULL);
 }
 
 /*
@@ -1311,7 +1310,7 @@ struct page *get_dump_page(unsigned long addr)
 
 	if (__get_user_pages(current, current->mm, addr, 1,
 			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
-			     NULL) < 1)
+			     NULL, NULL) < 1)
 		return NULL;
 	flush_cache_page(vma, addr, page_to_pfn(page));
 	return page;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bc1a07a55be1..7e09f2f17ed8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -916,8 +916,9 @@ static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
 }
 
 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
+		pmd_t *pmd, int flags, struct follow_page_context *ctx)
 {
+	struct dev_pagemap **pgmap = &ctx->pgmap;
 	unsigned long pfn = pmd_pfn(*pmd);
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page;
@@ -1068,8 +1069,9 @@ static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
 }
 
 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
-		pud_t *pud, int flags, struct dev_pagemap **pgmap)
+		pud_t *pud, int flags, struct follow_page_context *ctx)
 {
+	struct dev_pagemap **pgmap = &ctx->pgmap;
 	unsigned long pfn = pud_pfn(*pud);
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page;
diff --git a/mm/internal.h b/mm/internal.h
index 0d5f720c75ab..46ada5279856 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -12,6 +12,34 @@
 #include <linux/pagemap.h>
 #include <linux/tracepoint-defs.h>
 
+struct follow_page_context {
+	struct dev_pagemap *pgmap;
+	unsigned int page_mask;
+	struct vaddr_pin *vaddr_pin;
+};
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmd, int flags, struct follow_page_context *ctx);
+struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
+		pud_t *pud, int flags, struct follow_page_context *ctx);
+#else
+static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
+	unsigned long addr, pmd_t *pmd, int flags,
+	struct follow_page_context *ctx)
+{
+	return NULL;
+}
+
+static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
+	unsigned long addr, pud_t *pud, int flags,
+	struct follow_page_context *ctx)
+{
+	return NULL;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+
 /*
  * The set of flags that only affect watermark checking and reclaim
  * behaviour. This is used by the MM to obey the caller constraints