diff mbox series

drm/i915: individualize fences before adding

Message ID 20220516155108.2103-1-nirmoy.das@intel.com (mailing list archive)
State New, archived
Headers show
Series drm/i915: individualize fences before adding | expand

Commit Message

Nirmoy Das May 16, 2022, 3:51 p.m. UTC
_i915_vma_move_to_active() can receive more than one fence when
multiple batch buffers are submitted, so make sure to individualize
the fences before adding them to the dma_resv object.

v2: make sure to reserve fence slots before adding.

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
---
 drivers/gpu/drm/i915/i915_vma.c | 44 +++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 16 deletions(-)

Comments

Nirmoy Das May 16, 2022, 4:30 p.m. UTC | #1
Please ignore this revision. I will send another one tomorrow.


Nirmoy

On 5/16/2022 5:51 PM, Nirmoy Das wrote:
> _i915_vma_move_to_active() can receive more than one fence when
> multiple batch buffers are submitted, so make sure to individualize
> the fences before adding them to the dma_resv object.
>
> v2: make sure to reserve fence slots before adding.
>
> Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614
> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_vma.c | 44 +++++++++++++++++++++------------
>   1 file changed, 28 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 4f6db539571a..b622e51bf132 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -23,6 +23,7 @@
>    */
>   
>   #include <linux/sched/mm.h>
> +#include <linux/dma-fence-array.h>
>   #include <drm/drm_gem.h>
>   
>   #include "display/intel_frontbuffer.h"
> @@ -1833,28 +1834,39 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
>   			intel_frontbuffer_put(front);
>   		}
>   
> -		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
> -			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
> -			if (unlikely(err))
> -				return err;
> -		}
> -
>   		if (fence) {
> -			dma_resv_add_fence(vma->obj->base.resv, fence,
> -					   DMA_RESV_USAGE_WRITE);
> +			int idx;
> +			struct dma_fence *curr;
> +
> +			dma_fence_array_for_each(curr, idx, fence) {
> +				if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
> +					err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
> +					if (unlikely(err))
> +						return err;
> +				}
> +
> +				dma_resv_add_fence(vma->obj->base.resv, curr,
> +						   DMA_RESV_USAGE_WRITE);
> +			}
>   			obj->write_domain = I915_GEM_DOMAIN_RENDER;
>   			obj->read_domains = 0;
>   		}
>   	} else {
> -		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
> -			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
> -			if (unlikely(err))
> -				return err;
> -		}
> -
>   		if (fence) {
> -			dma_resv_add_fence(vma->obj->base.resv, fence,
> -					   DMA_RESV_USAGE_READ);
> +			int idx;
> +			struct dma_fence *curr;
> +
> +			dma_fence_array_for_each(curr, idx, fence) {
> +				if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
> +					err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
> +					if (unlikely(err))
> +						return err;
> +				}
> +
> +
> +				dma_resv_add_fence(vma->obj->base.resv, curr,
> +						DMA_RESV_USAGE_READ);
> +			}
>   			obj->write_domain = 0;
>   		}
>   	}
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4f6db539571a..b622e51bf132 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -23,6 +23,7 @@ 
  */
 
 #include <linux/sched/mm.h>
+#include <linux/dma-fence-array.h>
 #include <drm/drm_gem.h>
 
 #include "display/intel_frontbuffer.h"
@@ -1833,28 +1834,39 @@  int _i915_vma_move_to_active(struct i915_vma *vma,
 			intel_frontbuffer_put(front);
 		}
 
-		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
-			if (unlikely(err))
-				return err;
-		}
-
 		if (fence) {
-			dma_resv_add_fence(vma->obj->base.resv, fence,
-					   DMA_RESV_USAGE_WRITE);
+			int idx;
+			struct dma_fence *curr;
+
+			dma_fence_array_for_each(curr, idx, fence) {
+				if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+					err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+					if (unlikely(err))
+						return err;
+				}
+
+				dma_resv_add_fence(vma->obj->base.resv, curr,
+						   DMA_RESV_USAGE_WRITE);
+			}
 			obj->write_domain = I915_GEM_DOMAIN_RENDER;
 			obj->read_domains = 0;
 		}
 	} else {
-		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
-			if (unlikely(err))
-				return err;
-		}
-
 		if (fence) {
-			dma_resv_add_fence(vma->obj->base.resv, fence,
-					   DMA_RESV_USAGE_READ);
+			int idx;
+			struct dma_fence *curr;
+
+			dma_fence_array_for_each(curr, idx, fence) {
+				if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+					err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+					if (unlikely(err))
+						return err;
+				}
+
+
+				dma_resv_add_fence(vma->obj->base.resv, curr,
+						DMA_RESV_USAGE_READ);
+			}
 			obj->write_domain = 0;
 		}
 	}