
[v2] xen/gntdev: Fix the abuse of underlying struct page in DMA-buf import

Message ID 20240107103426.2038075-1-olekstysh@gmail.com (mailing list archive)
State Superseded
Commit 2bca26103d130a7179976a816809456555064e44
Series [v2] xen/gntdev: Fix the abuse of underlying struct page in DMA-buf import

Commit Message

Oleksandr Tyshchenko Jan. 7, 2024, 10:34 a.m. UTC
From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

DO NOT access the underlying struct page of an sg table exported
by DMA-buf in dmabuf_imp_to_refs(); this is not allowed.
Please see drivers/dma-buf/dma-buf.c:mangle_sg_table() for details.

Fortunately, here (for the special Xen device) we can avoid using
pages and calculate gfns directly from the DMA addresses provided
by the sg table.
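
In short, the import path can derive each gfn without touching struct
pages, roughly like this (a condensed sketch of the loop added by this
patch; see the diff below):

  for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
          dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
          unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));

          gfns[i++] = pfn_to_gfn(pfn);
  }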

Suggested-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
Please note that I didn't manage to test the patch against the latest master
branch on real HW (it was only build-tested there). The patch was tested on
Arm64 guests using Linux v5.10.41 from the vendor's BSP; this is the
environment where running this use case is possible and to which I have
access (Xen PV display with zero-copy and the backend domain as a buffer
provider - be-alloc=1, so the dma-buf import part was involved). A bit old,
but the dma-buf import code in gntdev-dmabuf.c hasn't changed much since
then, and the context remains almost the same according to my code
inspection.

  v2:
   - add R-b and A-b
   - fix build warning noticed by kernel test robot by initializing
     "ret" in case of error
     https://lore.kernel.org/oe-kbuild-all/202401062122.it6zvLG0-lkp@intel.com/
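
     For reference, the error path now sets "ret" before jumping to the
     cleanup label, as in the hunk below:

       gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
       if (!gfns) {
               ret = ERR_PTR(-ENOMEM);
               goto fail_unmap;
       }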
---
---
 drivers/xen/gntdev-dmabuf.c | 44 ++++++++++++++++---------------------
 1 file changed, 19 insertions(+), 25 deletions(-)

Comments

Daniel Vetter Jan. 8, 2024, 12:05 p.m. UTC | #1
On Sun, 7 Jan 2024 at 11:35, Oleksandr Tyshchenko <olekstysh@gmail.com> wrote:
>
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>
> DO NOT access the underlying struct page of an sg table exported
> by DMA-buf in dmabuf_imp_to_refs(); this is not allowed.
> Please see drivers/dma-buf/dma-buf.c:mangle_sg_table() for details.
>
> Fortunately, here (for the special Xen device) we can avoid using
> pages and calculate gfns directly from the DMA addresses provided
> by the sg table.
>
> Suggested-by: Daniel Vetter <daniel@ffwll.ch>
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> Acked-by: Christian König <christian.koenig@amd.com>
> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> ---
> Please note that I didn't manage to test the patch against the latest master
> branch on real HW (it was only build-tested there). The patch was tested on
> Arm64 guests using Linux v5.10.41 from the vendor's BSP; this is the
> environment where running this use case is possible and to which I have
> access (Xen PV display with zero-copy and the backend domain as a buffer
> provider - be-alloc=1, so the dma-buf import part was involved). A bit old,
> but the dma-buf import code in gntdev-dmabuf.c hasn't changed much since
> then, and the context remains almost the same according to my code
> inspection.
>
>   v2:
>    - add R-b and A-b
>    - fix build warning noticed by kernel test robot by initializing
>      "ret" in case of error
>      https://lore.kernel.org/oe-kbuild-all/202401062122.it6zvLG0-lkp@intel.com/
> ---
> ---
>  drivers/xen/gntdev-dmabuf.c | 44 ++++++++++++++++---------------------
>  1 file changed, 19 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
> index 4440e626b797..272c0ab01ef5 100644
> --- a/drivers/xen/gntdev-dmabuf.c
> +++ b/drivers/xen/gntdev-dmabuf.c
> @@ -11,6 +11,7 @@
>  #include <linux/kernel.h>
>  #include <linux/errno.h>
>  #include <linux/dma-buf.h>
> +#include <linux/dma-direct.h>
>  #include <linux/slab.h>
>  #include <linux/types.h>
>  #include <linux/uaccess.h>
> @@ -50,7 +51,7 @@ struct gntdev_dmabuf {
>
>         /* Number of pages this buffer has. */
>         int nr_pages;
> -       /* Pages of this buffer. */
> +       /* Pages of this buffer (only for dma-buf export). */
>         struct page **pages;
>  };
>
> @@ -484,7 +485,7 @@ static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
>  /* DMA buffer import support. */
>
>  static int
> -dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
> +dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
>                                 int count, int domid)
>  {
>         grant_ref_t priv_gref_head;
> @@ -507,7 +508,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
>                 }
>
>                 gnttab_grant_foreign_access_ref(cur_ref, domid,
> -                                               xen_page_to_gfn(pages[i]), 0);
> +                                               gfns[i], 0);
>                 refs[i] = cur_ref;
>         }
>
> @@ -529,7 +530,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
>
>  static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
>  {
> -       kfree(gntdev_dmabuf->pages);
>         kfree(gntdev_dmabuf->u.imp.refs);
>         kfree(gntdev_dmabuf);
>  }
> @@ -549,12 +549,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
>         if (!gntdev_dmabuf->u.imp.refs)
>                 goto fail;
>
> -       gntdev_dmabuf->pages = kcalloc(count,
> -                                      sizeof(gntdev_dmabuf->pages[0]),
> -                                      GFP_KERNEL);
> -       if (!gntdev_dmabuf->pages)
> -               goto fail;
> -
>         gntdev_dmabuf->nr_pages = count;
>
>         for (i = 0; i < count; i++)
> @@ -576,7 +570,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
>         struct dma_buf *dma_buf;
>         struct dma_buf_attachment *attach;
>         struct sg_table *sgt;
> -       struct sg_page_iter sg_iter;
> +       struct sg_dma_page_iter sg_iter;
> +       unsigned long *gfns;
>         int i;
>
>         dma_buf = dma_buf_get(fd);
> @@ -624,26 +619,25 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
>
>         gntdev_dmabuf->u.imp.sgt = sgt;
>
> -       /* Now convert sgt to array of pages and check for page validity. */
> +       gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
> +       if (!gfns) {
> +               ret = ERR_PTR(-ENOMEM);
> +               goto fail_unmap;
> +       }
> +
> +       /* Now convert sgt to array of gfns without accessing underlying pages. */
>         i = 0;
> -       for_each_sgtable_page(sgt, &sg_iter, 0) {
> -               struct page *page = sg_page_iter_page(&sg_iter);
> -               /*
> -                * Check if page is valid: this can happen if we are given
> -                * a page from VRAM or other resources which are not backed
> -                * by a struct page.
> -                */
> -               if (!pfn_valid(page_to_pfn(page))) {
> -                       ret = ERR_PTR(-EINVAL);
> -                       goto fail_unmap;
> -               }
> +       for_each_sgtable_dma_page(sgt, &sg_iter, 0) {

Maybe add a comment here to explain why this is done and why it's ok?
Either way:

Acked-by: Daniel Vetter <daniel@ffwll.ch>


> +               dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
> +               unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
>
> -               gntdev_dmabuf->pages[i++] = page;
> +               gfns[i++] = pfn_to_gfn(pfn);
>         }
>
> -       ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
> +       ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
>                                                       gntdev_dmabuf->u.imp.refs,
>                                                       count, domid));
> +       kfree(gfns);
>         if (IS_ERR(ret))
>                 goto fail_end_access;
>
> --
> 2.34.1
>
Oleksandr Tyshchenko Jan. 9, 2024, 5:03 p.m. UTC | #2
On 08.01.24 14:05, Daniel Vetter wrote:

Hello Daniel


> On Sun, 7 Jan 2024 at 11:35, Oleksandr Tyshchenko <olekstysh@gmail.com> wrote:
>>
>> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>>
>> DO NOT access the underlying struct page of an sg table exported
>> by DMA-buf in dmabuf_imp_to_refs(); this is not allowed.
>> Please see drivers/dma-buf/dma-buf.c:mangle_sg_table() for details.
>>
>> Fortunately, here (for the special Xen device) we can avoid using
>> pages and calculate gfns directly from the DMA addresses provided
>> by the sg table.
>>
>> Suggested-by: Daniel Vetter <daniel@ffwll.ch>
>> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>> Acked-by: Christian König <christian.koenig@amd.com>
>> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
>> ---
>> Please note that I didn't manage to test the patch against the latest master
>> branch on real HW (it was only build-tested there). The patch was tested on
>> Arm64 guests using Linux v5.10.41 from the vendor's BSP; this is the
>> environment where running this use case is possible and to which I have
>> access (Xen PV display with zero-copy and the backend domain as a buffer
>> provider - be-alloc=1, so the dma-buf import part was involved). A bit old,
>> but the dma-buf import code in gntdev-dmabuf.c hasn't changed much since
>> then, and the context remains almost the same according to my code
>> inspection.
>>
>>    v2:
>>     - add R-b and A-b
>>     - fix build warning noticed by kernel test robot by initializing
>>       "ret" in case of error
>>       https://lore.kernel.org/oe-kbuild-all/202401062122.it6zvLG0-lkp@intel.com/
>> ---
>> ---
>>   drivers/xen/gntdev-dmabuf.c | 44 ++++++++++++++++---------------------
>>   1 file changed, 19 insertions(+), 25 deletions(-)
>>
>> diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
>> index 4440e626b797..272c0ab01ef5 100644
>> --- a/drivers/xen/gntdev-dmabuf.c
>> +++ b/drivers/xen/gntdev-dmabuf.c
>> @@ -11,6 +11,7 @@
>>   #include <linux/kernel.h>
>>   #include <linux/errno.h>
>>   #include <linux/dma-buf.h>
>> +#include <linux/dma-direct.h>
>>   #include <linux/slab.h>
>>   #include <linux/types.h>
>>   #include <linux/uaccess.h>
>> @@ -50,7 +51,7 @@ struct gntdev_dmabuf {
>>
>>          /* Number of pages this buffer has. */
>>          int nr_pages;
>> -       /* Pages of this buffer. */
>> +       /* Pages of this buffer (only for dma-buf export). */
>>          struct page **pages;
>>   };
>>
>> @@ -484,7 +485,7 @@ static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
>>   /* DMA buffer import support. */
>>
>>   static int
>> -dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
>> +dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
>>                                  int count, int domid)
>>   {
>>          grant_ref_t priv_gref_head;
>> @@ -507,7 +508,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
>>                  }
>>
>>                  gnttab_grant_foreign_access_ref(cur_ref, domid,
>> -                                               xen_page_to_gfn(pages[i]), 0);
>> +                                               gfns[i], 0);
>>                  refs[i] = cur_ref;
>>          }
>>
>> @@ -529,7 +530,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
>>
>>   static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
>>   {
>> -       kfree(gntdev_dmabuf->pages);
>>          kfree(gntdev_dmabuf->u.imp.refs);
>>          kfree(gntdev_dmabuf);
>>   }
>> @@ -549,12 +549,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
>>          if (!gntdev_dmabuf->u.imp.refs)
>>                  goto fail;
>>
>> -       gntdev_dmabuf->pages = kcalloc(count,
>> -                                      sizeof(gntdev_dmabuf->pages[0]),
>> -                                      GFP_KERNEL);
>> -       if (!gntdev_dmabuf->pages)
>> -               goto fail;
>> -
>>          gntdev_dmabuf->nr_pages = count;
>>
>>          for (i = 0; i < count; i++)
>> @@ -576,7 +570,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
>>          struct dma_buf *dma_buf;
>>          struct dma_buf_attachment *attach;
>>          struct sg_table *sgt;
>> -       struct sg_page_iter sg_iter;
>> +       struct sg_dma_page_iter sg_iter;
>> +       unsigned long *gfns;
>>          int i;
>>
>>          dma_buf = dma_buf_get(fd);
>> @@ -624,26 +619,25 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
>>
>>          gntdev_dmabuf->u.imp.sgt = sgt;
>>
>> -       /* Now convert sgt to array of pages and check for page validity. */
>> +       gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
>> +       if (!gfns) {
>> +               ret = ERR_PTR(-ENOMEM);
>> +               goto fail_unmap;
>> +       }
>> +
>> +       /* Now convert sgt to array of gfns without accessing underlying pages. */
>>          i = 0;
>> -       for_each_sgtable_page(sgt, &sg_iter, 0) {
>> -               struct page *page = sg_page_iter_page(&sg_iter);
>> -               /*
>> -                * Check if page is valid: this can happen if we are given
>> -                * a page from VRAM or other resources which are not backed
>> -                * by a struct page.
>> -                */
>> -               if (!pfn_valid(page_to_pfn(page))) {
>> -                       ret = ERR_PTR(-EINVAL);
>> -                       goto fail_unmap;
>> -               }
>> +       for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
> 
> Maybe add a comment here to explain why this is done and why it's ok?


Makes sense, will do for v3.
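
Something along these lines, perhaps (exact wording to be settled in v3):

    /*
     * We only need gfns to grant the backend domain access to the buffer,
     * and those can be derived from the DMA addresses in the imported sg
     * table via dma_to_phys()/bfn_to_pfn()/pfn_to_gfn(), so there is no
     * need to touch the underlying struct pages, which a DMA-buf importer
     * must not do anyway (see mangle_sg_table() in drivers/dma-buf/dma-buf.c).
     */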


> Either way:
> 
> Acked-by: Daniel Vetter <daniel@ffwll.ch>


Thanks!


[snip]

Patch

diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 4440e626b797..272c0ab01ef5 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -11,6 +11,7 @@ 
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
@@ -50,7 +51,7 @@  struct gntdev_dmabuf {
 
 	/* Number of pages this buffer has. */
 	int nr_pages;
-	/* Pages of this buffer. */
+	/* Pages of this buffer (only for dma-buf export). */
 	struct page **pages;
 };
 
@@ -484,7 +485,7 @@  static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
 /* DMA buffer import support. */
 
 static int
-dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
+dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
 				int count, int domid)
 {
 	grant_ref_t priv_gref_head;
@@ -507,7 +508,7 @@  dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
 		}
 
 		gnttab_grant_foreign_access_ref(cur_ref, domid,
-						xen_page_to_gfn(pages[i]), 0);
+						gfns[i], 0);
 		refs[i] = cur_ref;
 	}
 
@@ -529,7 +530,6 @@  static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
 
 static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
 {
-	kfree(gntdev_dmabuf->pages);
 	kfree(gntdev_dmabuf->u.imp.refs);
 	kfree(gntdev_dmabuf);
 }
@@ -549,12 +549,6 @@  static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
 	if (!gntdev_dmabuf->u.imp.refs)
 		goto fail;
 
-	gntdev_dmabuf->pages = kcalloc(count,
-				       sizeof(gntdev_dmabuf->pages[0]),
-				       GFP_KERNEL);
-	if (!gntdev_dmabuf->pages)
-		goto fail;
-
 	gntdev_dmabuf->nr_pages = count;
 
 	for (i = 0; i < count; i++)
@@ -576,7 +570,8 @@  dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 	struct dma_buf *dma_buf;
 	struct dma_buf_attachment *attach;
 	struct sg_table *sgt;
-	struct sg_page_iter sg_iter;
+	struct sg_dma_page_iter sg_iter;
+	unsigned long *gfns;
 	int i;
 
 	dma_buf = dma_buf_get(fd);
@@ -624,26 +619,25 @@  dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 
 	gntdev_dmabuf->u.imp.sgt = sgt;
 
-	/* Now convert sgt to array of pages and check for page validity. */
+	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
+	if (!gfns) {
+		ret = ERR_PTR(-ENOMEM);
+		goto fail_unmap;
+	}
+
+	/* Now convert sgt to array of gfns without accessing underlying pages. */
 	i = 0;
-	for_each_sgtable_page(sgt, &sg_iter, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-		/*
-		 * Check if page is valid: this can happen if we are given
-		 * a page from VRAM or other resources which are not backed
-		 * by a struct page.
-		 */
-		if (!pfn_valid(page_to_pfn(page))) {
-			ret = ERR_PTR(-EINVAL);
-			goto fail_unmap;
-		}
+	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
+		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
+		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
 
-		gntdev_dmabuf->pages[i++] = page;
+		gfns[i++] = pfn_to_gfn(pfn);
 	}
 
-	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
+	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
 						      gntdev_dmabuf->u.imp.refs,
 						      count, domid));
+	kfree(gfns);
 	if (IS_ERR(ret))
 		goto fail_end_access;