[08/20] drm/i915: Always defer fenced work to the worker

Message ID 20200706061926.6687-9-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Series [01/20] drm/i915: Preallocate stashes for vma page-directories

Commit Message

Chris Wilson July 6, 2020, 6:19 a.m. UTC
Currently, if an error is raised, we always call the cleanup locally
[and skip the main work callback]. However, some future users may need
to take a mutex in order to clean up, and so we cannot execute the
cleanup immediately as we may still be in interrupt context.

With the execute-immediate flag, this should still result in immediate
cleanup of an error in most cases.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_sw_fence_work.c | 25 +++++++++++------------
 1 file changed, 12 insertions(+), 13 deletions(-)
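
To illustrate why the cleanup has to leave the signal callback, here is a
minimal, hypothetical dma_fence_work user whose release (cleanup) hook
sleeps. The ops layout (.name/.work/.release) follows i915_sw_fence_work.h
as I read it; my_work, its lock and its state are made up for this sketch:

#include <linux/mutex.h>
#include <linux/slab.h>

#include "i915_sw_fence_work.h"

struct my_work {
	struct dma_fence_work base;
	struct mutex *lock;	/* hypothetical lock guarding the state below */
	void *state;
};

static int my_work_fn(struct dma_fence_work *f)
{
	struct my_work *w = container_of(f, typeof(*w), base);

	/* The main callback; runs from the worker in process context. */
	return w->state ? 0 : -EINVAL;
}

static void my_work_release(struct dma_fence_work *f)
{
	struct my_work *w = container_of(f, typeof(*w), base);

	/*
	 * Cleanup that needs to sleep: a mutex must not be taken from a
	 * fence signal callback (potentially interrupt context), which is
	 * why even the error path now has to be deferred to the worker
	 * before this hook is called.
	 */
	mutex_lock(w->lock);
	kfree(w->state);
	w->state = NULL;
	mutex_unlock(w->lock);
}

static const struct dma_fence_work_ops my_ops = {
	.name = "my-work",
	.work = my_work_fn,
	.release = my_work_release,
};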

Comments

Tvrtko Ursulin July 8, 2020, 12:18 p.m. UTC | #1
On 06/07/2020 07:19, Chris Wilson wrote:
> Currently, if an error is raised, we always call the cleanup locally
> [and skip the main work callback]. However, some future users may need
> to take a mutex in order to clean up, and so we cannot execute the
> cleanup immediately as we may still be in interrupt context.
> 
> With the execute-immediate flag, this should still result in immediate
> cleanup of an error in most cases.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_sw_fence_work.c | 25 +++++++++++------------
>   1 file changed, 12 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
> index a3a81bb8f2c3..29f63ebc24e8 100644
> --- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
> +++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
> @@ -16,11 +16,14 @@ static void fence_complete(struct dma_fence_work *f)
>   static void fence_work(struct work_struct *work)
>   {
>   	struct dma_fence_work *f = container_of(work, typeof(*f), work);
> -	int err;
>   
> -	err = f->ops->work(f);
> -	if (err)
> -		dma_fence_set_error(&f->dma, err);
> +	if (!f->dma.error) {
> +		int err;
> +
> +		err = f->ops->work(f);
> +		if (err)
> +			dma_fence_set_error(&f->dma, err);
> +	}
>   
>   	fence_complete(f);
>   	dma_fence_put(&f->dma);
> @@ -36,15 +39,11 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
>   		if (fence->error)
>   			dma_fence_set_error(&f->dma, fence->error);
>   
> -		if (!f->dma.error) {
> -			dma_fence_get(&f->dma);
> -			if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
> -				fence_work(&f->work);
> -			else
> -				queue_work(system_unbound_wq, &f->work);
> -		} else {
> -			fence_complete(f);
> -		}
> +		dma_fence_get(&f->dma);
> +		if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
> +			fence_work(&f->work);
> +		else
> +			queue_work(system_unbound_wq, &f->work);

Right, the commit wording really confused me since it is obviously still
deferring stuff to the worker. By "fenced work" I understand you actually
mean something more like "never signal non-immediate work from the notify
callback" (even in the error case).

Regards,

Tvrtko

>   		break;
>   
>   	case FENCE_FREE:
>
Chris Wilson July 8, 2020, 12:25 p.m. UTC | #2
Quoting Tvrtko Ursulin (2020-07-08 13:18:21)
> 
> On 06/07/2020 07:19, Chris Wilson wrote:
> > Currently, if an error is raised, we always call the cleanup locally
> > [and skip the main work callback]. However, some future users may need
> > to take a mutex in order to clean up, and so we cannot execute the
> > cleanup immediately as we may still be in interrupt context.
> > 
> > With the execute-immediate flag, this should still result in immediate
> > cleanup of an error in most cases.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >   drivers/gpu/drm/i915/i915_sw_fence_work.c | 25 +++++++++++------------
> >   1 file changed, 12 insertions(+), 13 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
> > index a3a81bb8f2c3..29f63ebc24e8 100644
> > --- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
> > +++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
> > @@ -16,11 +16,14 @@ static void fence_complete(struct dma_fence_work *f)
> >   static void fence_work(struct work_struct *work)
> >   {
> >       struct dma_fence_work *f = container_of(work, typeof(*f), work);
> > -     int err;
> >   
> > -     err = f->ops->work(f);
> > -     if (err)
> > -             dma_fence_set_error(&f->dma, err);
> > +     if (!f->dma.error) {
> > +             int err;
> > +
> > +             err = f->ops->work(f);
> > +             if (err)
> > +                     dma_fence_set_error(&f->dma, err);
> > +     }
> >   
> >       fence_complete(f);
> >       dma_fence_put(&f->dma);
> > @@ -36,15 +39,11 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
> >               if (fence->error)
> >                       dma_fence_set_error(&f->dma, fence->error);
> >   
> > -             if (!f->dma.error) {
> > -                     dma_fence_get(&f->dma);
> > -                     if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
> > -                             fence_work(&f->work);
> > -                     else
> > -                             queue_work(system_unbound_wq, &f->work);
> > -             } else {
> > -                     fence_complete(f);
> > -             }
> > +             dma_fence_get(&f->dma);
> > +             if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
> > +                     fence_work(&f->work);
> > +             else
> > +                     queue_work(system_unbound_wq, &f->work);
> 
> Right, the commit wording really confused me since it is obviously still
> deferring stuff to the worker. By "fenced work" I understand you actually
> mean something more like "never signal non-immediate work from the notify
> callback" (even in the error case).

Work that had to wait for a fence should always take the worker to avoid
being run in interrupt context (from the fence signal callback), even in
the case of errors [so that the work can take its carefully considered
mutexes]. I anticipate that most errors will be generated before we start
waiting for fences, and those will remain immediately executed (when asked
to do so by the caller).
-Chris
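
To make "when asked to do so by the caller" concrete, here is a rough
caller-side sketch reusing the hypothetical my_work/my_ops above. It is
not taken from any in-tree user and assumes the dma_fence_work helpers
from i915_sw_fence_work.h: dma_fence_work_init(), an await on the
embedded chain via i915_sw_fence_await_dma_fence(), and
dma_fence_work_commit_imm() as the caller's opt-in to immediate
execution:

static int my_work_submit(struct dma_fence *prior_fence, struct mutex *lock)
{
	struct my_work *w;
	int err;

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return -ENOMEM;

	dma_fence_work_init(&w->base, &my_ops);
	w->lock = lock;

	/* An error raised before any waiting has started... */
	err = i915_sw_fence_await_dma_fence(&w->base.chain, prior_fence,
					    0, GFP_KERNEL);
	if (err < 0)
		dma_fence_set_error(&w->base.dma, err);

	/*
	 * ...is still cleaned up on the spot: with nothing awaited, the
	 * caller's opt-in lets fence_work() run in place, skipping
	 * ops->work but still calling ops->release.  Work that did have
	 * to wait on prior_fence is now always signalled from the
	 * worker, never from the notify callback.
	 */
	dma_fence_work_commit_imm(&w->base);
	return 0;
}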

Patch

diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index a3a81bb8f2c3..29f63ebc24e8 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -16,11 +16,14 @@  static void fence_complete(struct dma_fence_work *f)
 static void fence_work(struct work_struct *work)
 {
 	struct dma_fence_work *f = container_of(work, typeof(*f), work);
-	int err;
 
-	err = f->ops->work(f);
-	if (err)
-		dma_fence_set_error(&f->dma, err);
+	if (!f->dma.error) {
+		int err;
+
+		err = f->ops->work(f);
+		if (err)
+			dma_fence_set_error(&f->dma, err);
+	}
 
 	fence_complete(f);
 	dma_fence_put(&f->dma);
@@ -36,15 +39,11 @@  fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 		if (fence->error)
 			dma_fence_set_error(&f->dma, fence->error);
 
-		if (!f->dma.error) {
-			dma_fence_get(&f->dma);
-			if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
-				fence_work(&f->work);
-			else
-				queue_work(system_unbound_wq, &f->work);
-		} else {
-			fence_complete(f);
-		}
+		dma_fence_get(&f->dma);
+		if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
+			fence_work(&f->work);
+		else
+			queue_work(system_unbound_wq, &f->work);
 		break;
 
 	case FENCE_FREE: