
[v2,3/4] drm/i915/dmabuf: dmabuf cleanup

Message ID 20221028155029.494736-3-matthew.auld@intel.com (mailing list archive)
State New, archived
Series [v2,1/4] drm/i915/dmabuf: fix sg_table handling in map_dma_buf

Commit Message

Matthew Auld Oct. 28, 2022, 3:50 p.m. UTC
From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>

Minor cleanup of some variables for consistency.

Normalize struct sg_table variables to sgt.
Normalize struct dma_buf_attachment variables to attach.
Fix checkpatch issues: use sizeof(*ptr) instead of sizeof(struct ...)
and !ptr instead of ptr == NULL.
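
For example, the allocation check in i915_gem_map_dma_buf goes from

	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {

to

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {

(condensed from the diff below).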

Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 47 ++++++++++++----------
 1 file changed, 25 insertions(+), 22 deletions(-)

Comments

Matthew Auld Oct. 28, 2022, 3:53 p.m. UTC | #1
On Fri, 28 Oct 2022 at 16:51, Matthew Auld <matthew.auld@intel.com> wrote:
>
> [snip]
>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>


Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 05ebbdfd3b3b..8342e01a0d27 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -25,43 +25,46 @@  static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 	return to_intel_bo(buf->priv);
 }
 
-static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach,
 					     enum dma_data_direction dir)
 {
-	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-	struct sg_table *st;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
+	struct sg_table *sgt;
 	struct scatterlist *src, *dst;
 	int ret, i;
 
-	/* Copy sg so that we make an independent mapping */
-	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (st == NULL) {
+	/*
+	 * Make a copy of the object's sgt, so that we can make an independent
+	 * mapping
+	 */
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
+	ret = sg_alloc_table(sgt, obj->mm.pages->orig_nents, GFP_KERNEL);
 	if (ret)
 		goto err_free;
 
 	src = obj->mm.pages->sgl;
-	dst = st->sgl;
+	dst = sgt->sgl;
 	for (i = 0; i < obj->mm.pages->orig_nents; i++) {
 		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);
 	}
 
-	ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
+	ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (ret)
 		goto err_free_sg;
 
-	return st;
+	return sgt;
 
 err_free_sg:
-	sg_free_table(st);
+	sg_free_table(sgt);
 err_free:
-	kfree(st);
+	kfree(sgt);
 err:
 	return ERR_PTR(ret);
 }
@@ -236,15 +239,15 @@  struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct sg_table *pages;
+	struct sg_table *sgt;
 	unsigned int sg_page_sizes;
 
 	assert_object_held(obj);
 
-	pages = dma_buf_map_attachment(obj->base.import_attach,
-				       DMA_BIDIRECTIONAL);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
+	sgt = dma_buf_map_attachment(obj->base.import_attach,
+				     DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt))
+		return PTR_ERR(sgt);
 
 	/*
 	 * DG1 is special here since it still snoops transactions even with
@@ -261,16 +264,16 @@  static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 	    (!HAS_LLC(i915) && !IS_DG1(i915)))
 		wbinvd_on_all_cpus();
 
-	sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
-	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+	sg_page_sizes = i915_sg_dma_sizes(sgt->sgl);
+	__i915_gem_object_set_pages(obj, sgt, sg_page_sizes);
 
 	return 0;
 }
 
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
-					     struct sg_table *pages)
+					     struct sg_table *sgt)
 {
-	dma_buf_unmap_attachment(obj->base.import_attach, pages,
+	dma_buf_unmap_attachment(obj->base.import_attach, sgt,
 				 DMA_BIDIRECTIONAL);
 }
 
@@ -313,7 +316,7 @@  struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	get_dma_buf(dma_buf);
 
 	obj = i915_gem_object_alloc();
-	if (obj == NULL) {
+	if (!obj) {
 		ret = -ENOMEM;
 		goto fail_detach;
 	}