
Revert "drm/msm/gpu: Push gpu lock down past runpm"

Message ID 20240109182218.193804-1-robdclark@gmail.com (mailing list archive)
State Not Applicable
Series Revert "drm/msm/gpu: Push gpu lock down past runpm"

Commit Message

Rob Clark Jan. 9, 2024, 6:22 p.m. UTC
From: Rob Clark <robdclark@chromium.org>

This reverts commit abe2023b4cea192ab266b351fd38dc9dbd846df0.

Changing the locking order means that scheduler/msm_job_run() can race
with the recovery kthread worker, with the result that the GPU gets an
extra runpm get when we are trying to power it off, leaving the GPU in
an unrecovered state.

I'll need to come up with a different scheme for appeasing lockdep.

Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/msm_gpu.c        | 11 +++++------
 drivers/gpu/drm/msm/msm_ringbuffer.c |  7 +++++--
 2 files changed, 10 insertions(+), 8 deletions(-)

Comments

Daniel Vetter Jan. 10, 2024, 10:50 a.m. UTC | #1
On Tue, Jan 09, 2024 at 10:22:17AM -0800, Rob Clark wrote:
> From: Rob Clark <robdclark@chromium.org>
> 
> This reverts commit abe2023b4cea192ab266b351fd38dc9dbd846df0.
> 
> Changing the locking order means that scheduler/msm_job_run() can race
> with the recovery kthread worker, with the result that the GPU gets an
> extra runpm get when we are trying to power it off, leaving the GPU in
> an unrecovered state.

The recovery kthread is supposed to stop all the relevant schedulers,
which should remove any possible race conditions. So unless there's more
going on, or you have your own recovery kthread (don't; reuse the one from
the scheduler with your own work items, that's why you can provide it),
this looks like an incomplete/incorrect explanation ... ?

Slightly confused
-Sima

> 
> I'll need to come up with a different scheme for appeasing lockdep.
> 
> Signed-off-by: Rob Clark <robdclark@chromium.org>
> ---
>  drivers/gpu/drm/msm/msm_gpu.c        | 11 +++++------
>  drivers/gpu/drm/msm/msm_ringbuffer.c |  7 +++++--
>  2 files changed, 10 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
> index 095390774f22..655002b21b0d 100644
> --- a/drivers/gpu/drm/msm/msm_gpu.c
> +++ b/drivers/gpu/drm/msm/msm_gpu.c
> @@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
>  	struct msm_ringbuffer *ring = submit->ring;
>  	unsigned long flags;
>  
> -	pm_runtime_get_sync(&gpu->pdev->dev);
> +	WARN_ON(!mutex_is_locked(&gpu->lock));
>  
> -	mutex_lock(&gpu->lock);
> +	pm_runtime_get_sync(&gpu->pdev->dev);
>  
>  	msm_gpu_hw_init(gpu);
>  
> +	submit->seqno = submit->hw_fence->seqno;
> +
>  	update_sw_cntrs(gpu);
>  
>  	/*
> @@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
>  	gpu->funcs->submit(gpu, submit);
>  	gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
>  
> -	hangcheck_timer_reset(gpu);
> -
> -	mutex_unlock(&gpu->lock);
> -
>  	pm_runtime_put(&gpu->pdev->dev);
> +	hangcheck_timer_reset(gpu);
>  }
>  
>  /*
> diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
> index e0ed27739449..548f5266a7d3 100644
> --- a/drivers/gpu/drm/msm/msm_ringbuffer.c
> +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
> @@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
>  
>  	msm_fence_init(submit->hw_fence, fctx);
>  
> -	submit->seqno = submit->hw_fence->seqno;
> -
>  	mutex_lock(&priv->lru.lock);
>  
>  	for (i = 0; i < submit->nr_bos; i++) {
> @@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
>  
>  	mutex_unlock(&priv->lru.lock);
>  
> +	/* TODO move submit path over to using a per-ring lock.. */
> +	mutex_lock(&gpu->lock);
> +
>  	msm_gpu_submit(gpu, submit);
>  
> +	mutex_unlock(&gpu->lock);
> +
>  	return dma_fence_get(submit->hw_fence);
>  }
>  
> -- 
> 2.43.0
>
Rob Clark Jan. 10, 2024, 2:54 p.m. UTC | #2
On Wed, Jan 10, 2024 at 2:50 AM Daniel Vetter <daniel@ffwll.ch> wrote:
>
> On Tue, Jan 09, 2024 at 10:22:17AM -0800, Rob Clark wrote:
> > From: Rob Clark <robdclark@chromium.org>
> >
> > This reverts commit abe2023b4cea192ab266b351fd38dc9dbd846df0.
> >
> > Changing the locking order means that scheduler/msm_job_run() can race
> > with the recovery kthread worker, with the result that the GPU gets an
> > extra runpm get when we are trying to power it off, leaving the GPU in
> > an unrecovered state.
>
> The recovery kthread is supposed to stop all the relevant schedulers,
> which should remove any possible race conditions. So unless there's more
> going on, or you have your own recovery kthread (don't; reuse the one from
> the scheduler with your own work items, that's why you can provide it),
> this looks like an incomplete/incorrect explanation ... ?
>
> Slightly confused

msm still uses its own recovery, which pre-dates the scheduler
conversion.  At one point (a year or two back?) I started looking at
integrating recovery w/ the scheduler; at the time I think you talked me
out of it, but I don't remember the reason.

BR,
-R

> -Sima
>
> >
> > I'll need to come up with a different scheme for appeasing lockdep.
> >
> > Signed-off-by: Rob Clark <robdclark@chromium.org>
> > ---
> >  drivers/gpu/drm/msm/msm_gpu.c        | 11 +++++------
> >  drivers/gpu/drm/msm/msm_ringbuffer.c |  7 +++++--
> >  2 files changed, 10 insertions(+), 8 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
> > index 095390774f22..655002b21b0d 100644
> > --- a/drivers/gpu/drm/msm/msm_gpu.c
> > +++ b/drivers/gpu/drm/msm/msm_gpu.c
> > @@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
> >       struct msm_ringbuffer *ring = submit->ring;
> >       unsigned long flags;
> >
> > -     pm_runtime_get_sync(&gpu->pdev->dev);
> > +     WARN_ON(!mutex_is_locked(&gpu->lock));
> >
> > -     mutex_lock(&gpu->lock);
> > +     pm_runtime_get_sync(&gpu->pdev->dev);
> >
> >       msm_gpu_hw_init(gpu);
> >
> > +     submit->seqno = submit->hw_fence->seqno;
> > +
> >       update_sw_cntrs(gpu);
> >
> >       /*
> > @@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
> >       gpu->funcs->submit(gpu, submit);
> >       gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
> >
> > -     hangcheck_timer_reset(gpu);
> > -
> > -     mutex_unlock(&gpu->lock);
> > -
> >       pm_runtime_put(&gpu->pdev->dev);
> > +     hangcheck_timer_reset(gpu);
> >  }
> >
> >  /*
> > diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
> > index e0ed27739449..548f5266a7d3 100644
> > --- a/drivers/gpu/drm/msm/msm_ringbuffer.c
> > +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
> > @@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
> >
> >       msm_fence_init(submit->hw_fence, fctx);
> >
> > -     submit->seqno = submit->hw_fence->seqno;
> > -
> >       mutex_lock(&priv->lru.lock);
> >
> >       for (i = 0; i < submit->nr_bos; i++) {
> > @@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
> >
> >       mutex_unlock(&priv->lru.lock);
> >
> > +     /* TODO move submit path over to using a per-ring lock.. */
> > +     mutex_lock(&gpu->lock);
> > +
> >       msm_gpu_submit(gpu, submit);
> >
> > +     mutex_unlock(&gpu->lock);
> > +
> >       return dma_fence_get(submit->hw_fence);
> >  }
> >
> > --
> > 2.43.0
> >
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch
Daniel Vetter Jan. 11, 2024, 2:19 p.m. UTC | #3
On Wed, Jan 10, 2024 at 06:54:53AM -0800, Rob Clark wrote:
> On Wed, Jan 10, 2024 at 2:50 AM Daniel Vetter <daniel@ffwll.ch> wrote:
> >
> > On Tue, Jan 09, 2024 at 10:22:17AM -0800, Rob Clark wrote:
> > > From: Rob Clark <robdclark@chromium.org>
> > >
> > > This reverts commit abe2023b4cea192ab266b351fd38dc9dbd846df0.
> > >
> > > Changing the locking order means that scheduler/msm_job_run() can race
> > > with the recovery kthread worker, with the result that the GPU gets an
> > > extra runpm get when we are trying to power it off, leaving the GPU in
> > > an unrecovered state.
> >
> > The recovery kthread is supposed to stop all the relevant schedulers,
> > which should remove any possible race conditions. So unless there's more
> > going on, or you have your own recovery kthread (don't; reuse the one from
> > the scheduler with your own work items, that's why you can provide it),
> > this looks like an incomplete/incorrect explanation ... ?
> >
> > Slightly confused
> 
> msm still uses its own recovery, which pre-dates the scheduler
> conversion.  At one point (a year or two back?) I started looking at
> integrating recovery w/ the scheduler; at the time I think you talked me
> out of it, but I don't remember the reason.

hm ... most scheduler discussions I remember were around the "allocate your
own workqueue and hand that to the scheduler to avoid races/deadlocks"
design, which iirc Boris implemented a while ago. Once you have that
workqueue you can then also process any other error condition on there
with the exact same locking design (like hw errors or page faults or
whatever), not just drm/sched tdr.

I don't remember anything else that ever came up, at least at a
fundamental level ...

So if that discussion was older than 78efe21b6f8e ("drm/sched: Allow using
a dedicated workqueue for the timeout/fault tdr"), you should be covered.
Fingers crossed :-)
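
For reference, wiring up that dedicated tdr workqueue looks roughly like
the sketch below. This is a hedged illustration, not actual msm code:
drm_sched_init()'s signature has changed across kernel versions (this
follows the ~v6.7 form, which carries 78efe21b6f8e's timeout_wq
parameter), and my_ring_sched_init() is an invented helper.

#include <linux/device.h>
#include <linux/workqueue.h>
#include <drm/gpu_scheduler.h>

/* Every ring's scheduler is handed the *same* ordered workqueue, so
 * all timeout/fault handling for the GPU is serialized on one thread. */
static int my_ring_sched_init(struct drm_gpu_scheduler *sched,
			      const struct drm_sched_backend_ops *ops,
			      struct workqueue_struct *tdr_wq,
			      const char *name, struct device *dev)
{
	return drm_sched_init(sched, ops,
			      16,			/* hw_submission limit */
			      0,			/* hang_limit */
			      msecs_to_jiffies(500),	/* timeout */
			      tdr_wq,			/* dedicated tdr wq */
			      NULL,			/* score */
			      name, dev);
}

The caller would create tdr_wq once per GPU, e.g. with
alloc_ordered_workqueue("gpu-tdr", 0), and pass that same queue to
every ring's scheduler.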

Meanwhile, if you do not use drm/sched tdr at all, then doing the exact same
design but just on your own workqueue should also work. The critical thing
is really only:
- have one single-threaded workqueue for all gpu recovery
- bracket each handler in there with drm_sched_stop()/drm_sched_start() for
  all affected engines

No more races!
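
Concretely, that bracketing pattern might look like the minimal sketch
below. Illustrative names only (my_gpu, my_recover_worker and NR_RINGS
are not actual msm code), using the v6.7-era drm_sched_stop() and
drm_sched_start() signatures:

#include <linux/container_of.h>
#include <linux/workqueue.h>
#include <drm/gpu_scheduler.h>

#define NR_RINGS 4	/* illustrative ring count */

struct my_gpu {
	/* single-threaded queue from alloc_ordered_workqueue(); *all*
	 * recovery work for this GPU funnels through it */
	struct workqueue_struct *recovery_wq;
	struct drm_gpu_scheduler sched[NR_RINGS];
	struct work_struct recover_work;
};

static void my_recover_worker(struct work_struct *work)
{
	struct my_gpu *gpu = container_of(work, struct my_gpu, recover_work);
	int i;

	/* park every affected scheduler first, so no msm_job_run()
	 * can be in flight while we touch the hardware */
	for (i = 0; i < NR_RINGS; i++)
		drm_sched_stop(&gpu->sched[i], NULL);

	/* ... reset the GPU, resubmit unsignaled jobs, etc ... */

	/* unpark the schedulers; job submission resumes */
	for (i = 0; i < NR_RINGS; i++)
		drm_sched_start(&gpu->sched[i], true);
}

Hangcheck, hw-error and page-fault handlers would then all be queued on
the same ordered workqueue, e.g. queue_work(gpu->recovery_wq,
&gpu->recover_work), rather than racing the submit path over gpu->lock.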

Cheers, Sima

> 
> BR,
> -R
> 
> > -Sima
> >
> > >
> > > I'll need to come up with a different scheme for appeasing lockdep.
> > >
> > > Signed-off-by: Rob Clark <robdclark@chromium.org>
> > > ---
> > >  drivers/gpu/drm/msm/msm_gpu.c        | 11 +++++------
> > >  drivers/gpu/drm/msm/msm_ringbuffer.c |  7 +++++--
> > >  2 files changed, 10 insertions(+), 8 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
> > > index 095390774f22..655002b21b0d 100644
> > > --- a/drivers/gpu/drm/msm/msm_gpu.c
> > > +++ b/drivers/gpu/drm/msm/msm_gpu.c
> > > @@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
> > >       struct msm_ringbuffer *ring = submit->ring;
> > >       unsigned long flags;
> > >
> > > -     pm_runtime_get_sync(&gpu->pdev->dev);
> > > +     WARN_ON(!mutex_is_locked(&gpu->lock));
> > >
> > > -     mutex_lock(&gpu->lock);
> > > +     pm_runtime_get_sync(&gpu->pdev->dev);
> > >
> > >       msm_gpu_hw_init(gpu);
> > >
> > > +     submit->seqno = submit->hw_fence->seqno;
> > > +
> > >       update_sw_cntrs(gpu);
> > >
> > >       /*
> > > @@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
> > >       gpu->funcs->submit(gpu, submit);
> > >       gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
> > >
> > > -     hangcheck_timer_reset(gpu);
> > > -
> > > -     mutex_unlock(&gpu->lock);
> > > -
> > >       pm_runtime_put(&gpu->pdev->dev);
> > > +     hangcheck_timer_reset(gpu);
> > >  }
> > >
> > >  /*
> > > diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
> > > index e0ed27739449..548f5266a7d3 100644
> > > --- a/drivers/gpu/drm/msm/msm_ringbuffer.c
> > > +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
> > > @@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
> > >
> > >       msm_fence_init(submit->hw_fence, fctx);
> > >
> > > -     submit->seqno = submit->hw_fence->seqno;
> > > -
> > >       mutex_lock(&priv->lru.lock);
> > >
> > >       for (i = 0; i < submit->nr_bos; i++) {
> > > @@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
> > >
> > >       mutex_unlock(&priv->lru.lock);
> > >
> > > +     /* TODO move submit path over to using a per-ring lock.. */
> > > +     mutex_lock(&gpu->lock);
> > > +
> > >       msm_gpu_submit(gpu, submit);
> > >
> > > +     mutex_unlock(&gpu->lock);
> > > +
> > >       return dma_fence_get(submit->hw_fence);
> > >  }
> > >
> > > --
> > > 2.43.0
> > >
> >
> > --
> > Daniel Vetter
> > Software Engineer, Intel Corporation
> > http://blog.ffwll.ch

Patch

diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 095390774f22..655002b21b0d 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -751,12 +751,14 @@  void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned long flags;
 
-	pm_runtime_get_sync(&gpu->pdev->dev);
+	WARN_ON(!mutex_is_locked(&gpu->lock));
 
-	mutex_lock(&gpu->lock);
+	pm_runtime_get_sync(&gpu->pdev->dev);
 
 	msm_gpu_hw_init(gpu);
 
+	submit->seqno = submit->hw_fence->seqno;
+
 	update_sw_cntrs(gpu);
 
 	/*
@@ -781,11 +783,8 @@  void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	gpu->funcs->submit(gpu, submit);
 	gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
 
-	hangcheck_timer_reset(gpu);
-
-	mutex_unlock(&gpu->lock);
-
 	pm_runtime_put(&gpu->pdev->dev);
+	hangcheck_timer_reset(gpu);
 }
 
 /*
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index e0ed27739449..548f5266a7d3 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -21,8 +21,6 @@  static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 
 	msm_fence_init(submit->hw_fence, fctx);
 
-	submit->seqno = submit->hw_fence->seqno;
-
 	mutex_lock(&priv->lru.lock);
 
 	for (i = 0; i < submit->nr_bos; i++) {
@@ -35,8 +33,13 @@  static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 
 	mutex_unlock(&priv->lru.lock);
 
+	/* TODO move submit path over to using a per-ring lock.. */
+	mutex_lock(&gpu->lock);
+
 	msm_gpu_submit(gpu, submit);
 
+	mutex_unlock(&gpu->lock);
+
 	return dma_fence_get(submit->hw_fence);
 }