diff mbox series

xen/gntdev: Fix the abuse of underlying struct page in DMA-buf import

Message ID 20240104185327.177376-1-olekstysh@gmail.com (mailing list archive)
State Superseded
Headers show
Series xen/gntdev: Fix the abuse of underlying struct page in DMA-buf import | expand

Commit Message

Oleksandr Tyshchenko Jan. 4, 2024, 6:53 p.m. UTC
From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

DO NOT access the underlying struct page of an sg table exported
by DMA-buf in dmabuf_imp_to_refs(), this is not allowed.
Please see drivers/dma-buf/dma-buf.c:mangle_sg_table() for details.

Fortunately, here (for special Xen device) we can avoid using
pages and calculate gfns directly from dma addresses provided by
the sg table.

Suggested-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
---
Please note, I didn't manage to test the patch against the latest master branch
on real HW (patch was only build tested there). Patch was tested on Arm64
guests using Linux v5.10.41 from vendor's BSP, this is the environment where
running this use-case is possible and to which I have access (Xen PV display
with zero-copy and backend domain as a buffer provider - be-alloc=1, so dma-buf
import part was involved). A little bit old, but the dma-buf import code
in gntdev-dmabuf.c hasn't been changed much since that time, all context
remains almost the same according to my code inspection.
---
---
 drivers/xen/gntdev-dmabuf.c | 42 +++++++++++++++----------------------
 1 file changed, 17 insertions(+), 25 deletions(-)

Comments

Stefano Stabellini Jan. 5, 2024, 12:58 a.m. UTC | #1
On Thu, 4 Jan 2024, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> 
> DO NOT access the underlying struct page of an sg table exported
> by DMA-buf in dmabuf_imp_to_refs(), this is not allowed.
> Please see drivers/dma-buf/dma-buf.c:mangle_sg_table() for details.
> 
> Fortunately, here (for special Xen device) we can avoid using
> pages and calculate gfns directly from dma addresses provided by
> the sg table.
> 
> Suggested-by: Daniel Vetter <daniel@ffwll.ch>
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>


> ---
> Please note, I didn't manage to test the patch against the latest master branch
> on real HW (patch was only build tested there). Patch was tested on Arm64
> guests using Linux v5.10.41 from vendor's BSP, this is the environment where
> running this use-case is possible and to which I have an access (Xen PV display
> with zero-copy and backend domain as a buffer provider - be-alloc=1, so dma-buf
> import part was involved). A little bit old, but the dma-buf import code
> in gntdev-dmabuf.c hasn't been changed much since that time, all context
> remains allmost the same according to my code inspection.
> ---
> ---
>  drivers/xen/gntdev-dmabuf.c | 42 +++++++++++++++----------------------
>  1 file changed, 17 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
> index 4440e626b797..0dde49fca9a5 100644
> --- a/drivers/xen/gntdev-dmabuf.c
> +++ b/drivers/xen/gntdev-dmabuf.c
> @@ -11,6 +11,7 @@
>  #include <linux/kernel.h>
>  #include <linux/errno.h>
>  #include <linux/dma-buf.h>
> +#include <linux/dma-direct.h>
>  #include <linux/slab.h>
>  #include <linux/types.h>
>  #include <linux/uaccess.h>
> @@ -50,7 +51,7 @@ struct gntdev_dmabuf {
>  
>  	/* Number of pages this buffer has. */
>  	int nr_pages;
> -	/* Pages of this buffer. */
> +	/* Pages of this buffer (only for dma-buf export). */
>  	struct page **pages;
>  };
>  
> @@ -484,7 +485,7 @@ static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
>  /* DMA buffer import support. */
>  
>  static int
> -dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
> +dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
>  				int count, int domid)
>  {
>  	grant_ref_t priv_gref_head;
> @@ -507,7 +508,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
>  		}
>  
>  		gnttab_grant_foreign_access_ref(cur_ref, domid,
> -						xen_page_to_gfn(pages[i]), 0);
> +						gfns[i], 0);
>  		refs[i] = cur_ref;
>  	}
>  
> @@ -529,7 +530,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
>  
>  static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
>  {
> -	kfree(gntdev_dmabuf->pages);
>  	kfree(gntdev_dmabuf->u.imp.refs);
>  	kfree(gntdev_dmabuf);
>  }
> @@ -549,12 +549,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
>  	if (!gntdev_dmabuf->u.imp.refs)
>  		goto fail;
>  
> -	gntdev_dmabuf->pages = kcalloc(count,
> -				       sizeof(gntdev_dmabuf->pages[0]),
> -				       GFP_KERNEL);
> -	if (!gntdev_dmabuf->pages)
> -		goto fail;
> -
>  	gntdev_dmabuf->nr_pages = count;
>  
>  	for (i = 0; i < count; i++)
> @@ -576,7 +570,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
>  	struct dma_buf *dma_buf;
>  	struct dma_buf_attachment *attach;
>  	struct sg_table *sgt;
> -	struct sg_page_iter sg_iter;
> +	struct sg_dma_page_iter sg_iter;
> +	unsigned long *gfns;
>  	int i;
>  
>  	dma_buf = dma_buf_get(fd);
> @@ -624,26 +619,23 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
>  
>  	gntdev_dmabuf->u.imp.sgt = sgt;
>  
> -	/* Now convert sgt to array of pages and check for page validity. */
> +	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
> +	if (!gfns)
> +		goto fail_unmap;
> +
> +	/* Now convert sgt to array of gfns without accessing underlying pages. */
>  	i = 0;
> -	for_each_sgtable_page(sgt, &sg_iter, 0) {
> -		struct page *page = sg_page_iter_page(&sg_iter);
> -		/*
> -		 * Check if page is valid: this can happen if we are given
> -		 * a page from VRAM or other resources which are not backed
> -		 * by a struct page.
> -		 */
> -		if (!pfn_valid(page_to_pfn(page))) {
> -			ret = ERR_PTR(-EINVAL);
> -			goto fail_unmap;
> -		}
> +	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
> +		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
> +		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
>  
> -		gntdev_dmabuf->pages[i++] = page;
> +		gfns[i++] = pfn_to_gfn(pfn);
>  	}
>  
> -	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
> +	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
>  						      gntdev_dmabuf->u.imp.refs,
>  						      count, domid));
> +	kfree(gfns);
>  	if (IS_ERR(ret))
>  		goto fail_end_access;
>  
> -- 
> 2.34.1
>
Christian König Jan. 5, 2024, 9:38 a.m. UTC | #2
Am 04.01.24 um 19:53 schrieb Oleksandr Tyshchenko:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>
> DO NOT access the underlying struct page of an sg table exported
> by DMA-buf in dmabuf_imp_to_refs(), this is not allowed.
> Please see drivers/dma-buf/dma-buf.c:mangle_sg_table() for details.
>
> Fortunately, here (for special Xen device) we can avoid using
> pages and calculate gfns directly from dma addresses provided by
> the sg table.
>
> Suggested-by: Daniel Vetter <daniel@ffwll.ch>
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

I can't say that I can judge the full technical background, but that 
looks reasonable to me.

Acked-by: Christian König <christian.koenig@amd.com>

> ---
> Please note, I didn't manage to test the patch against the latest master branch
> on real HW (patch was only build tested there). Patch was tested on Arm64
> guests using Linux v5.10.41 from vendor's BSP, this is the environment where
> running this use-case is possible and to which I have an access (Xen PV display
> with zero-copy and backend domain as a buffer provider - be-alloc=1, so dma-buf
> import part was involved). A little bit old, but the dma-buf import code
> in gntdev-dmabuf.c hasn't been changed much since that time, all context
> remains allmost the same according to my code inspection.
> ---
> ---
>   drivers/xen/gntdev-dmabuf.c | 42 +++++++++++++++----------------------
>   1 file changed, 17 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
> index 4440e626b797..0dde49fca9a5 100644
> --- a/drivers/xen/gntdev-dmabuf.c
> +++ b/drivers/xen/gntdev-dmabuf.c
> @@ -11,6 +11,7 @@
>   #include <linux/kernel.h>
>   #include <linux/errno.h>
>   #include <linux/dma-buf.h>
> +#include <linux/dma-direct.h>
>   #include <linux/slab.h>
>   #include <linux/types.h>
>   #include <linux/uaccess.h>
> @@ -50,7 +51,7 @@ struct gntdev_dmabuf {
>   
>   	/* Number of pages this buffer has. */
>   	int nr_pages;
> -	/* Pages of this buffer. */
> +	/* Pages of this buffer (only for dma-buf export). */
>   	struct page **pages;
>   };
>   
> @@ -484,7 +485,7 @@ static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
>   /* DMA buffer import support. */
>   
>   static int
> -dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
> +dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
>   				int count, int domid)
>   {
>   	grant_ref_t priv_gref_head;
> @@ -507,7 +508,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
>   		}
>   
>   		gnttab_grant_foreign_access_ref(cur_ref, domid,
> -						xen_page_to_gfn(pages[i]), 0);
> +						gfns[i], 0);
>   		refs[i] = cur_ref;
>   	}
>   
> @@ -529,7 +530,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
>   
>   static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
>   {
> -	kfree(gntdev_dmabuf->pages);
>   	kfree(gntdev_dmabuf->u.imp.refs);
>   	kfree(gntdev_dmabuf);
>   }
> @@ -549,12 +549,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
>   	if (!gntdev_dmabuf->u.imp.refs)
>   		goto fail;
>   
> -	gntdev_dmabuf->pages = kcalloc(count,
> -				       sizeof(gntdev_dmabuf->pages[0]),
> -				       GFP_KERNEL);
> -	if (!gntdev_dmabuf->pages)
> -		goto fail;
> -
>   	gntdev_dmabuf->nr_pages = count;
>   
>   	for (i = 0; i < count; i++)
> @@ -576,7 +570,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
>   	struct dma_buf *dma_buf;
>   	struct dma_buf_attachment *attach;
>   	struct sg_table *sgt;
> -	struct sg_page_iter sg_iter;
> +	struct sg_dma_page_iter sg_iter;
> +	unsigned long *gfns;
>   	int i;
>   
>   	dma_buf = dma_buf_get(fd);
> @@ -624,26 +619,23 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
>   
>   	gntdev_dmabuf->u.imp.sgt = sgt;
>   
> -	/* Now convert sgt to array of pages and check for page validity. */
> +	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
> +	if (!gfns)
> +		goto fail_unmap;
> +
> +	/* Now convert sgt to array of gfns without accessing underlying pages. */
>   	i = 0;
> -	for_each_sgtable_page(sgt, &sg_iter, 0) {
> -		struct page *page = sg_page_iter_page(&sg_iter);
> -		/*
> -		 * Check if page is valid: this can happen if we are given
> -		 * a page from VRAM or other resources which are not backed
> -		 * by a struct page.
> -		 */
> -		if (!pfn_valid(page_to_pfn(page))) {
> -			ret = ERR_PTR(-EINVAL);
> -			goto fail_unmap;
> -		}
> +	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
> +		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
> +		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
>   
> -		gntdev_dmabuf->pages[i++] = page;
> +		gfns[i++] = pfn_to_gfn(pfn);
>   	}
>   
> -	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
> +	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
>   						      gntdev_dmabuf->u.imp.refs,
>   						      count, domid));
> +	kfree(gfns);
>   	if (IS_ERR(ret))
>   		goto fail_end_access;
>
kernel test robot Jan. 6, 2024, 1:56 p.m. UTC | #3
Hi Oleksandr,

kernel test robot noticed the following build warnings:

[auto build test WARNING on xen-tip/linux-next]
[also build test WARNING on linus/master v6.7-rc8 next-20240105]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Oleksandr-Tyshchenko/xen-gntdev-Fix-the-abuse-of-underlying-struct-page-in-DMA-buf-import/20240105-025516
base:   https://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git linux-next
patch link:    https://lore.kernel.org/r/20240104185327.177376-1-olekstysh%40gmail.com
patch subject: [PATCH] xen/gntdev: Fix the abuse of underlying struct page in DMA-buf import
config: x86_64-allmodconfig (https://download.01.org/0day-ci/archive/20240106/202401062122.it6zvLG0-lkp@intel.com/config)
compiler: ClangBuiltLinux clang version 17.0.6 (https://github.com/llvm/llvm-project 6009708b4367171ccdbf4b5905cb6a803753fe18)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240106/202401062122.it6zvLG0-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202401062122.it6zvLG0-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/xen/gntdev-dmabuf.c:623:6: warning: variable 'ret' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
     623 |         if (!gfns)
         |             ^~~~~
   drivers/xen/gntdev-dmabuf.c:660:9: note: uninitialized use occurs here
     660 |         return ret;
         |                ^~~
   drivers/xen/gntdev-dmabuf.c:623:2: note: remove the 'if' if its condition is always false
     623 |         if (!gfns)
         |         ^~~~~~~~~~
     624 |                 goto fail_unmap;
         |                 ~~~~~~~~~~~~~~~
   drivers/xen/gntdev-dmabuf.c:569:43: note: initialize the variable 'ret' to silence this warning
     569 |         struct gntdev_dmabuf *gntdev_dmabuf, *ret;
         |                                                  ^
         |                                                   = NULL
   1 warning generated.


vim +623 drivers/xen/gntdev-dmabuf.c

   564	
   565	static struct gntdev_dmabuf *
   566	dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
   567			   int fd, int count, int domid)
   568	{
   569		struct gntdev_dmabuf *gntdev_dmabuf, *ret;
   570		struct dma_buf *dma_buf;
   571		struct dma_buf_attachment *attach;
   572		struct sg_table *sgt;
   573		struct sg_dma_page_iter sg_iter;
   574		unsigned long *gfns;
   575		int i;
   576	
   577		dma_buf = dma_buf_get(fd);
   578		if (IS_ERR(dma_buf))
   579			return ERR_CAST(dma_buf);
   580	
   581		gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
   582		if (IS_ERR(gntdev_dmabuf)) {
   583			ret = gntdev_dmabuf;
   584			goto fail_put;
   585		}
   586	
   587		gntdev_dmabuf->priv = priv;
   588		gntdev_dmabuf->fd = fd;
   589	
   590		attach = dma_buf_attach(dma_buf, dev);
   591		if (IS_ERR(attach)) {
   592			ret = ERR_CAST(attach);
   593			goto fail_free_obj;
   594		}
   595	
   596		gntdev_dmabuf->u.imp.attach = attach;
   597	
   598		sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
   599		if (IS_ERR(sgt)) {
   600			ret = ERR_CAST(sgt);
   601			goto fail_detach;
   602		}
   603	
   604		/* Check that we have zero offset. */
   605		if (sgt->sgl->offset) {
   606			ret = ERR_PTR(-EINVAL);
   607			pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
   608				 sgt->sgl->offset);
   609			goto fail_unmap;
   610		}
   611	
   612		/* Check number of pages that imported buffer has. */
   613		if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
   614			ret = ERR_PTR(-EINVAL);
   615			pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
   616				 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
   617			goto fail_unmap;
   618		}
   619	
   620		gntdev_dmabuf->u.imp.sgt = sgt;
   621	
   622		gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
 > 623		if (!gfns)
   624			goto fail_unmap;
   625	
   626		/* Now convert sgt to array of gfns without accessing underlying pages. */
   627		i = 0;
   628		for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
   629			dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
   630			unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
   631	
   632			gfns[i++] = pfn_to_gfn(pfn);
   633		}
   634	
   635		ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
   636							      gntdev_dmabuf->u.imp.refs,
   637							      count, domid));
   638		kfree(gfns);
   639		if (IS_ERR(ret))
   640			goto fail_end_access;
   641	
   642		pr_debug("Imported DMA buffer with fd %d\n", fd);
   643	
   644		mutex_lock(&priv->lock);
   645		list_add(&gntdev_dmabuf->next, &priv->imp_list);
   646		mutex_unlock(&priv->lock);
   647	
   648		return gntdev_dmabuf;
   649	
   650	fail_end_access:
   651		dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
   652	fail_unmap:
   653		dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
   654	fail_detach:
   655		dma_buf_detach(dma_buf, attach);
   656	fail_free_obj:
   657		dmabuf_imp_free_storage(gntdev_dmabuf);
   658	fail_put:
   659		dma_buf_put(dma_buf);
   660		return ret;
   661	}
   662
diff mbox series

Patch

diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 4440e626b797..0dde49fca9a5 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -11,6 +11,7 @@ 
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
@@ -50,7 +51,7 @@  struct gntdev_dmabuf {
 
 	/* Number of pages this buffer has. */
 	int nr_pages;
-	/* Pages of this buffer. */
+	/* Pages of this buffer (only for dma-buf export). */
 	struct page **pages;
 };
 
@@ -484,7 +485,7 @@  static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
 /* DMA buffer import support. */
 
 static int
-dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
+dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
 				int count, int domid)
 {
 	grant_ref_t priv_gref_head;
@@ -507,7 +508,7 @@  dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
 		}
 
 		gnttab_grant_foreign_access_ref(cur_ref, domid,
-						xen_page_to_gfn(pages[i]), 0);
+						gfns[i], 0);
 		refs[i] = cur_ref;
 	}
 
@@ -529,7 +530,6 @@  static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
 
 static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
 {
-	kfree(gntdev_dmabuf->pages);
 	kfree(gntdev_dmabuf->u.imp.refs);
 	kfree(gntdev_dmabuf);
 }
@@ -549,12 +549,6 @@  static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
 	if (!gntdev_dmabuf->u.imp.refs)
 		goto fail;
 
-	gntdev_dmabuf->pages = kcalloc(count,
-				       sizeof(gntdev_dmabuf->pages[0]),
-				       GFP_KERNEL);
-	if (!gntdev_dmabuf->pages)
-		goto fail;
-
 	gntdev_dmabuf->nr_pages = count;
 
 	for (i = 0; i < count; i++)
@@ -576,7 +570,8 @@  dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 	struct dma_buf *dma_buf;
 	struct dma_buf_attachment *attach;
 	struct sg_table *sgt;
-	struct sg_page_iter sg_iter;
+	struct sg_dma_page_iter sg_iter;
+	unsigned long *gfns;
 	int i;
 
 	dma_buf = dma_buf_get(fd);
@@ -624,26 +619,23 @@  dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 
 	gntdev_dmabuf->u.imp.sgt = sgt;
 
-	/* Now convert sgt to array of pages and check for page validity. */
+	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
+	if (!gfns)
+		goto fail_unmap;
+
+	/* Now convert sgt to array of gfns without accessing underlying pages. */
 	i = 0;
-	for_each_sgtable_page(sgt, &sg_iter, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-		/*
-		 * Check if page is valid: this can happen if we are given
-		 * a page from VRAM or other resources which are not backed
-		 * by a struct page.
-		 */
-		if (!pfn_valid(page_to_pfn(page))) {
-			ret = ERR_PTR(-EINVAL);
-			goto fail_unmap;
-		}
+	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
+		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
+		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
 
-		gntdev_dmabuf->pages[i++] = page;
+		gfns[i++] = pfn_to_gfn(pfn);
 	}
 
-	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
+	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
 						      gntdev_dmabuf->u.imp.refs,
 						      count, domid));
+	kfree(gfns);
 	if (IS_ERR(ret))
 		goto fail_end_access;