[1/3,v2] drm/v3d: Take a lock across GPU scheduler job creation and queuing.

Message ID 20180606174851.12433-1-eric@anholt.net (mailing list archive)
State New, archived

Commit Message

Eric Anholt June 6, 2018, 5:48 p.m. UTC
Between creation and queueing of a job, you need to prevent any other
job from being created and queued.  Otherwise the scheduler's fences
may be signaled out of seqno order.

v2: move mutex unlock to the error label.

Signed-off-by: Eric Anholt <eric@anholt.net>
Fixes: 57692c94dcbe ("drm/v3d: Introduce a new DRM driver for Broadcom V3D V3.x+")
---
 drivers/gpu/drm/v3d/v3d_drv.h | 5 +++++
 drivers/gpu/drm/v3d/v3d_gem.c | 4 ++++
 2 files changed, 9 insertions(+)
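
The race the commit message describes sits between drm_sched_job_init(), which allocates the job's scheduled fence (and with it the next seqno), and drm_sched_entity_push_job(), which queues the job to the entity. Below is a condensed, illustrative sketch of the render-job half of v3d_submit_cl_ioctl() as it looks with this patch applied; argument lists follow the driver at the time, the bin-job half and most error handling are omitted, so this is not meant to compile on its own:

	/* Serialize scheduled-fence seqno allocation and job queuing. */
	mutex_lock(&v3d->sched_lock);

	ret = drm_sched_job_init(&exec->render.base,
				 &v3d->queue[V3D_RENDER].sched,
				 &v3d_priv->sched_entity[V3D_RENDER],
				 v3d_priv);
	if (ret)
		goto fail_unreserve;	/* the label drops sched_lock */

	kref_get(&exec->refcount); /* put by scheduler job completion */
	drm_sched_entity_push_job(&exec->render.base,
				  &v3d_priv->sched_entity[V3D_RENDER]);

	mutex_unlock(&v3d->sched_lock);

Without sched_lock held across both calls, a second submitter could run drm_sched_job_init() (taking a later seqno) and push its job before the first submitter pushes its own, leaving the scheduler's fences signaled out of seqno order.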

Comments

Lucas Stach June 7, 2018, 8:37 a.m. UTC | #1
On Wednesday, 06.06.2018 at 10:48 -0700, Eric Anholt wrote:
> Between creation and queueing of a job, you need to prevent any other
> job from being created and queued.  Otherwise the scheduler's fences
> may be signaled out of seqno order.
> 
> v2: move mutex unlock to the error label.
> 
> Signed-off-by: Eric Anholt <eric@anholt.net>
> Fixes: 57692c94dcbe ("drm/v3d: Introduce a new DRM driver for Broadcom V3D V3.x+")

Reviewed-by: Lucas Stach <l.stach@pengutronix.de>

Patch

diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index a043ac3aae98..26005abd9c5d 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -85,6 +85,11 @@ struct v3d_dev {
 	 */
 	struct mutex reset_lock;
 
+	/* Lock taken when creating and pushing the GPU scheduler
+	 * jobs, to keep the sched-fence seqnos in order.
+	 */
+	struct mutex sched_lock;
+
 	struct {
 		u32 num_allocated;
 		u32 pages_allocated;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b513f9189caf..269fe16379c0 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -550,6 +550,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto fail;
 
+	mutex_lock(&v3d->sched_lock);
 	if (exec->bin.start != exec->bin.end) {
 		ret = drm_sched_job_init(&exec->bin.base,
 					 &v3d->queue[V3D_BIN].sched,
@@ -576,6 +577,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	kref_get(&exec->refcount); /* put by scheduler job completion */
 	drm_sched_entity_push_job(&exec->render.base,
 				  &v3d_priv->sched_entity[V3D_RENDER]);
+	mutex_unlock(&v3d->sched_lock);
 
 	v3d_attach_object_fences(exec);
 
@@ -594,6 +596,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	return 0;
 
 fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
 	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
 fail:
 	v3d_exec_put(exec);
@@ -615,6 +618,7 @@ v3d_gem_init(struct drm_device *dev)
 	spin_lock_init(&v3d->job_lock);
 	mutex_init(&v3d->bo_lock);
 	mutex_init(&v3d->reset_lock);
+	mutex_init(&v3d->sched_lock);
 
 	/* Note: We don't allocate address 0.  Various bits of HW
 	 * treat 0 as special, such as the occlusion query counters