[2/2] drm/msm: subclass work object for vblank events

Message ID 1541031545-20520-2-git-send-email-jsanka@codeaurora.org (mailing list archive)
State New, archived
Series [1/2] drm/msm: use common display thread for dispatching vblank events

Commit Message

Jeykumar Sankaran Nov. 1, 2018, 12:19 a.m. UTC
msm maintains a separate structure to define vblank
work and a list to track events submitted to the
display worker thread. We can avoid this redundant
list and its protection mechanism if we subclass
the work object to encapsulate the vblank event
parameters.

Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
---
 drivers/gpu/drm/msm/msm_drv.c | 70 ++++++++++++-------------------------------
 drivers/gpu/drm/msm/msm_drv.h |  7 -----
 2 files changed, 19 insertions(+), 58 deletions(-)

Comments

Sean Paul Nov. 1, 2018, 7:18 p.m. UTC | #1
On Wed, Oct 31, 2018 at 05:19:05PM -0700, Jeykumar Sankaran wrote:
> msm maintains a separate structure to define vblank
> work and a list to track events submitted to the
> display worker thread. We can avoid this redundant
> list and its protection mechanism if we subclass
> the work object to encapsulate the vblank event
> parameters.
> 
> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> ---
>  drivers/gpu/drm/msm/msm_drv.c | 70 ++++++++++++-------------------------------
>  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>  2 files changed, 19 insertions(+), 58 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index 1f384b3..67a96ee 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>  	return val;
>  }
>  
> -struct vblank_event {
> -	struct list_head node;
> +struct msm_vblank_work {
> +	struct kthread_work work;
>  	int crtc_id;
>  	bool enable;
> +	struct msm_drm_private *priv;
>  };
>  
>  static void vblank_ctrl_worker(struct kthread_work *work)
>  {
> -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
> -						struct msm_vblank_ctrl, work);
> -	struct msm_drm_private *priv = container_of(vbl_ctrl,
> -					struct msm_drm_private, vblank_ctrl);
> +	struct msm_vblank_work *vbl_work = container_of(work,
> +						struct msm_vblank_work, work);
> +	struct msm_drm_private *priv = vbl_work->priv;
>  	struct msm_kms *kms = priv->kms;
> -	struct vblank_event *vbl_ev, *tmp;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> -		list_del(&vbl_ev->node);
> -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> -
> -		if (vbl_ev->enable)
> -			kms->funcs->enable_vblank(kms,
> -						priv->crtcs[vbl_ev->crtc_id]);
> -		else
> -			kms->funcs->disable_vblank(kms,
> -						priv->crtcs[vbl_ev->crtc_id]);
>  
> -		kfree(vbl_ev);
> -
> -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	}
> +	if (vbl_work->enable)
> +		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
> +	else
> +		kms->funcs->disable_vblank(kms,	priv->crtcs[vbl_work->crtc_id]);
>  
> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> +	kfree(vbl_work);
>  }
>  
>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>  					int crtc_id, bool enable)
>  {
> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> -	struct vblank_event *vbl_ev;
> -	unsigned long flags;
> +	struct msm_vblank_work *vbl_work;
>  
> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> -	if (!vbl_ev)
> +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
> +	if (!vbl_work)
>  		return -ENOMEM;
>  
> -	vbl_ev->crtc_id = crtc_id;
> -	vbl_ev->enable = enable;
> +	kthread_init_work(&vbl_work->work, vblank_ctrl_worker);
>  
> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> +	vbl_work->crtc_id = crtc_id;
> +	vbl_work->enable = enable;
> +	vbl_work->priv = priv;
>  
> -	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
> +	kthread_queue_work(&priv->disp_thread.worker, &vbl_work->work);

So I think this can get even more simplified. In the short term, you can just
use the systemwq to do the enable and disable.

In the long term, the enable_vblank/disable_vblank functions should be
optimized so they don't sleep. I took a quick look at them; perhaps this is
all because of the crtc_lock mutex? That lock seems a bit suspicious to me,
especially being dropped around the pm_runtime calls in
_dpu_crtc_vblank_enable_no_lock(). I think we could probably rely on the modeset
locks for some of these functions, and perhaps convert it to a spinlock if we
can't get rid of it entirely.

Sean
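
A minimal sketch of that short-term direction, assuming msm_vblank_work is
converted from a kthread_work to a plain work_struct (msm_vblank_work_fn is
an illustrative name, not from the patch):

struct msm_vblank_work {
	struct work_struct work;	/* was: struct kthread_work */
	int crtc_id;
	bool enable;
	struct msm_drm_private *priv;
};

static void msm_vblank_work_fn(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work =
			container_of(work, struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
	else
		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

	kfree(vbl_work);
}

/* in vblank_ctrl_queue_work(), in place of the kthread worker calls: */
	INIT_WORK(&vbl_work->work, msm_vblank_work_fn);
	schedule_work(&vbl_work->work);	/* runs on the system workqueue */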

>  
>  	return 0;
>  }
> @@ -269,20 +252,8 @@ static int msm_drm_uninit(struct device *dev)
>  	struct msm_drm_private *priv = ddev->dev_private;
>  	struct msm_kms *kms = priv->kms;
>  	struct msm_mdss *mdss = priv->mdss;
> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> -	struct vblank_event *vbl_ev, *tmp;
>  	int i;
>  
> -	/* We must cancel and cleanup any pending vblank enable/disable
> -	 * work before drm_irq_uninstall() to avoid work re-enabling an
> -	 * irq after uninstall has disabled it.
> -	 */
> -	kthread_flush_work(&vbl_ctrl->work);
> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> -		list_del(&vbl_ev->node);
> -		kfree(vbl_ev);
> -	}
> -
>  	kthread_flush_worker(&priv->disp_thread.worker);
>  	kthread_stop(priv->disp_thread.thread);
>  	priv->disp_thread.thread = NULL;
> @@ -474,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>  	priv->wq = alloc_ordered_workqueue("msm", 0);
>  
>  	INIT_LIST_HEAD(&priv->inactive_list);
> -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
> -	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
> -	spin_lock_init(&priv->vblank_ctrl.lock);
>  
>  	drm_mode_config_init(ddev);
>  
> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> index e81b1fa..b91e306 100644
> --- a/drivers/gpu/drm/msm/msm_drv.h
> +++ b/drivers/gpu/drm/msm/msm_drv.h
> @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>  	PLANE_PROP_MAX_NUM
>  };
>  
> -struct msm_vblank_ctrl {
> -	struct kthread_work work;
> -	struct list_head event_list;
> -	spinlock_t lock;
> -};
> -
>  #define MSM_GPU_MAX_RINGS 4
>  #define MAX_H_TILES_PER_DISPLAY 2
>  
> @@ -226,7 +220,6 @@ struct msm_drm_private {
>  	struct notifier_block vmap_notifier;
>  	struct shrinker shrinker;
>  
> -	struct msm_vblank_ctrl vblank_ctrl;
>  	struct drm_atomic_state *pm_state;
>  };
>  
> -- 
> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> a Linux Foundation Collaborative Project
> 
> _______________________________________________
> Freedreno mailing list
> Freedreno@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/freedreno
Jeykumar Sankaran Nov. 2, 2018, 11:38 p.m. UTC | #2
On 2018-11-01 12:18, Sean Paul wrote:
> On Wed, Oct 31, 2018 at 05:19:05PM -0700, Jeykumar Sankaran wrote:
>> msm maintains a separate structure to define vblank
>> work and a list to track events submitted to the
>> display worker thread. We can avoid this redundant
>> list and its protection mechanism if we subclass
>> the work object to encapsulate the vblank event
>> parameters.
>> 
>> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>> ---
>>  drivers/gpu/drm/msm/msm_drv.c | 70 ++++++++++++-------------------------------
>>  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>>  2 files changed, 19 insertions(+), 58 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
>> index 1f384b3..67a96ee 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.c
>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>> @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>>  	return val;
>>  }
>> 
>> -struct vblank_event {
>> -	struct list_head node;
>> +struct msm_vblank_work {
>> +	struct kthread_work work;
>>  	int crtc_id;
>>  	bool enable;
>> +	struct msm_drm_private *priv;
>>  };
>> 
>>  static void vblank_ctrl_worker(struct kthread_work *work)
>>  {
>> -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
>> -						struct msm_vblank_ctrl, work);
>> -	struct msm_drm_private *priv = container_of(vbl_ctrl,
>> -					struct msm_drm_private, vblank_ctrl);
>> +	struct msm_vblank_work *vbl_work = container_of(work,
>> +						struct msm_vblank_work, work);
>> +	struct msm_drm_private *priv = vbl_work->priv;
>>  	struct msm_kms *kms = priv->kms;
>> -	struct vblank_event *vbl_ev, *tmp;
>> -	unsigned long flags;
>> -
>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
>> -		list_del(&vbl_ev->node);
>> -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> -
>> -		if (vbl_ev->enable)
>> -			kms->funcs->enable_vblank(kms,
>> -						priv->crtcs[vbl_ev->crtc_id]);
>> -		else
>> -			kms->funcs->disable_vblank(kms,
>> -						priv->crtcs[vbl_ev->crtc_id]);
>> 
>> -		kfree(vbl_ev);
>> -
>> -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	}
>> +	if (vbl_work->enable)
>> +		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
>> +	else
>> +		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
>> 
>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> +	kfree(vbl_work);
>>  }
>> 
>>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>>  					int crtc_id, bool enable)
>>  {
>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> -	struct vblank_event *vbl_ev;
>> -	unsigned long flags;
>> +	struct msm_vblank_work *vbl_work;
>> 
>> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> -	if (!vbl_ev)
>> +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
>> +	if (!vbl_work)
>>  		return -ENOMEM;
>> 
>> -	vbl_ev->crtc_id = crtc_id;
>> -	vbl_ev->enable = enable;
>> +	kthread_init_work(&vbl_work->work, vblank_ctrl_worker);
>> 
>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> +	vbl_work->crtc_id = crtc_id;
>> +	vbl_work->enable = enable;
>> +	vbl_work->priv = priv;
>> 
>> -	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
>> +	kthread_queue_work(&priv->disp_thread.worker, &vbl_work->work);
> 
> So I think this can get even more simplified. In the short term, you can
> just use the systemwq to do the enable and disable.

you mean priv->wq?

> 
> In the long term, the enable_vblank/disable_vblank functions should be
> optimized so they don't sleep. I took a quick look at them; perhaps this is
> all because of the crtc_lock mutex? That lock seems a bit suspicious to me,
> especially being dropped around the pm_runtime calls in
> _dpu_crtc_vblank_enable_no_lock(). I think we could probably rely on the
> modeset locks for some of these functions, and perhaps convert it to a
> spinlock if we can't get rid of it entirely.

crtc_lock has a history of usage in the downstream driver. It was
introduced to protect vblank variables when vblank requests were handled
in the user thread (not the display thread). When event threads were
introduced to receive encoder events, the lock was further expanded to
protect a few more variables. It was also needed to synchronize CRTC
accesses between debugfs dump calls and the display thread.

I would like to deal with this cleanup later, once we lose these extra
threads.

Thanks and Regards,
Jeykumar S.

> 
> Sean
> 
>> 
>>  	return 0;
>>  }
>> @@ -269,20 +252,8 @@ static int msm_drm_uninit(struct device *dev)
>>  	struct msm_drm_private *priv = ddev->dev_private;
>>  	struct msm_kms *kms = priv->kms;
>>  	struct msm_mdss *mdss = priv->mdss;
>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> -	struct vblank_event *vbl_ev, *tmp;
>>  	int i;
>> 
>> -	/* We must cancel and cleanup any pending vblank enable/disable
>> -	 * work before drm_irq_uninstall() to avoid work re-enabling an
>> -	 * irq after uninstall has disabled it.
>> -	 */
>> -	kthread_flush_work(&vbl_ctrl->work);
>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
>> -		list_del(&vbl_ev->node);
>> -		kfree(vbl_ev);
>> -	}
>> -
>>  	kthread_flush_worker(&priv->disp_thread.worker);
>>  	kthread_stop(priv->disp_thread.thread);
>>  	priv->disp_thread.thread = NULL;
>> @@ -474,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>>  	priv->wq = alloc_ordered_workqueue("msm", 0);
>> 
>>  	INIT_LIST_HEAD(&priv->inactive_list);
>> -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
>> -	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
>> -	spin_lock_init(&priv->vblank_ctrl.lock);
>> 
>>  	drm_mode_config_init(ddev);
>> 
>> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
>> index e81b1fa..b91e306 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.h
>> +++ b/drivers/gpu/drm/msm/msm_drv.h
>> @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>>  	PLANE_PROP_MAX_NUM
>>  };
>> 
>> -struct msm_vblank_ctrl {
>> -	struct kthread_work work;
>> -	struct list_head event_list;
>> -	spinlock_t lock;
>> -};
>> -
>>  #define MSM_GPU_MAX_RINGS 4
>>  #define MAX_H_TILES_PER_DISPLAY 2
>> 
>> @@ -226,7 +220,6 @@ struct msm_drm_private {
>>  	struct notifier_block vmap_notifier;
>>  	struct shrinker shrinker;
>> 
>> -	struct msm_vblank_ctrl vblank_ctrl;
>>  	struct drm_atomic_state *pm_state;
>>  };
>> 
>> --
>> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
>> a Linux Foundation Collaborative Project
>> 
>> _______________________________________________
>> Freedreno mailing list
>> Freedreno@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/freedreno
Sean Paul Nov. 5, 2018, 5:24 p.m. UTC | #3
On Fri, Nov 02, 2018 at 04:38:48PM -0700, Jeykumar Sankaran wrote:
> On 2018-11-01 12:18, Sean Paul wrote:
> > On Wed, Oct 31, 2018 at 05:19:05PM -0700, Jeykumar Sankaran wrote:
> > > msm maintains a separate structure to define vblank
> > > work and a list to track events submitted to the
> > > display worker thread. We can avoid this redundant
> > > list and its protection mechanism if we subclass
> > > the work object to encapsulate the vblank event
> > > parameters.
> > > 
> > > Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> > > ---
> > >  drivers/gpu/drm/msm/msm_drv.c | 70 ++++++++++++-------------------------------
> > >  drivers/gpu/drm/msm/msm_drv.h |  7 -----
> > >  2 files changed, 19 insertions(+), 58 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> > > index 1f384b3..67a96ee 100644
> > > --- a/drivers/gpu/drm/msm/msm_drv.c
> > > +++ b/drivers/gpu/drm/msm/msm_drv.c

/snip

> > >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
> > >  					int crtc_id, bool enable)
> > >  {
> > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > > -	struct vblank_event *vbl_ev;
> > > -	unsigned long flags;
> > > +	struct msm_vblank_work *vbl_work;
> > > 
> > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> > > -	if (!vbl_ev)
> > > +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
> > > +	if (!vbl_work)
> > >  		return -ENOMEM;
> > > 
> > > -	vbl_ev->crtc_id = crtc_id;
> > > -	vbl_ev->enable = enable;
> > > +	kthread_init_work(&vbl_work->work, vblank_ctrl_worker);
> > > 
> > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > +	vbl_work->crtc_id = crtc_id;
> > > +	vbl_work->enable = enable;
> > > +	vbl_work->priv = priv;
> > > 
> > > -	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
> > > +	kthread_queue_work(&priv->disp_thread.worker, &vbl_work->work);
> > 
> > So I think this can get even more simplified. In the short term, you can
> > just use the systemwq to do the enable and disable.
> 
> you mean priv->wq?
> 

I meant the system workqueue; we probably don't need our own for this.


> > 
> > In the long term, the enable_vblank/disable_vblank functions should be
> > optimized so they don't sleep. I took a quick look at them; perhaps this is
> > all because of the crtc_lock mutex? That lock seems a bit suspicious to me,
> > especially being dropped around the pm_runtime calls in
> > _dpu_crtc_vblank_enable_no_lock(). I think we could probably rely on the
> > modeset locks for some of these functions, and perhaps convert it to a
> > spinlock if we can't get rid of it entirely.
> 
> crtc_lock has a history of usage in the downstream driver. It was
> introduced to protect vblank variables when vblank requests were handled
> in the user thread (not the display thread). When event threads were
> introduced to receive encoder events, the lock was further expanded to
> protect a few more variables. It was also needed to synchronize CRTC
> accesses between debugfs dump calls and the display thread.

The debugfs case can be solved pretty easily by using the modeset locks. I
haven't looked closely at the event threads; could we convert crtc_lock to a
spinlock and then make vblank enable/disable synchronous?

Sean
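
A rough sketch of that debugfs direction, holding the per-CRTC modeset lock
instead of crtc_lock while dumping state (the function name and the fields
printed are illustrative, not the actual dpu debugfs code):

static int dpu_crtc_debugfs_status_show(struct seq_file *s, void *data)
{
	struct drm_crtc *crtc = s->private;

	/* serialize against concurrent modesets instead of taking crtc_lock */
	drm_modeset_lock(&crtc->mutex, NULL);

	seq_printf(s, "crtc %s enabled: %d\n", crtc->name,
		   crtc->state ? crtc->state->enable : false);

	drm_modeset_unlock(&crtc->mutex);

	return 0;
}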

> 
> I would like to deal with this cleanup later, once we lose these extra
> threads.
> 
> Thanks and Regards,
> Jeykumar S.
> 
> > 
> > Sean
> > 
> > > 
> > >  	return 0;
> > >  }
> > > @@ -269,20 +252,8 @@ static int msm_drm_uninit(struct device *dev)
> > >  	struct msm_drm_private *priv = ddev->dev_private;
> > >  	struct msm_kms *kms = priv->kms;
> > >  	struct msm_mdss *mdss = priv->mdss;
> > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > > -	struct vblank_event *vbl_ev, *tmp;
> > >  	int i;
> > > 
> > > -	/* We must cancel and cleanup any pending vblank enable/disable
> > > -	 * work before drm_irq_uninstall() to avoid work re-enabling an
> > > -	 * irq after uninstall has disabled it.
> > > -	 */
> > > -	kthread_flush_work(&vbl_ctrl->work);
> > > -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> > > -		list_del(&vbl_ev->node);
> > > -		kfree(vbl_ev);
> > > -	}
> > > -
> > >  	kthread_flush_worker(&priv->disp_thread.worker);
> > >  	kthread_stop(priv->disp_thread.thread);
> > >  	priv->disp_thread.thread = NULL;
> > > @@ -474,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
> > >  	priv->wq = alloc_ordered_workqueue("msm", 0);
> > > 
> > >  	INIT_LIST_HEAD(&priv->inactive_list);
> > > -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
> > > -	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
> > > -	spin_lock_init(&priv->vblank_ctrl.lock);
> > > 
> > >  	drm_mode_config_init(ddev);
> > > 
> > > diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> > > index e81b1fa..b91e306 100644
> > > --- a/drivers/gpu/drm/msm/msm_drv.h
> > > +++ b/drivers/gpu/drm/msm/msm_drv.h
> > > @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
> > >  	PLANE_PROP_MAX_NUM
> > >  };
> > > 
> > > -struct msm_vblank_ctrl {
> > > -	struct kthread_work work;
> > > -	struct list_head event_list;
> > > -	spinlock_t lock;
> > > -};
> > > -
> > >  #define MSM_GPU_MAX_RINGS 4
> > >  #define MAX_H_TILES_PER_DISPLAY 2
> > > 
> > > @@ -226,7 +220,6 @@ struct msm_drm_private {
> > >  	struct notifier_block vmap_notifier;
> > >  	struct shrinker shrinker;
> > > 
> > > -	struct msm_vblank_ctrl vblank_ctrl;
> > >  	struct drm_atomic_state *pm_state;
> > >  };
> > > 
> > > --
> > > The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> > > a Linux Foundation Collaborative Project
> > > 
> > > _______________________________________________
> > > Freedreno mailing list
> > > Freedreno@lists.freedesktop.org
> > > https://lists.freedesktop.org/mailman/listinfo/freedreno
> 
> -- 
> Jeykumar S
Jeykumar Sankaran Nov. 5, 2018, 9:23 p.m. UTC | #4
On 2018-11-05 09:24, Sean Paul wrote:
> On Fri, Nov 02, 2018 at 04:38:48PM -0700, Jeykumar Sankaran wrote:
>> On 2018-11-01 12:18, Sean Paul wrote:
>> > On Wed, Oct 31, 2018 at 05:19:05PM -0700, Jeykumar Sankaran wrote:
>> > > msm maintains a separate structure to define vblank
>> > > work and a list to track events submitted to the
>> > > display worker thread. We can avoid this redundant
>> > > list and its protection mechanism if we subclass
>> > > the work object to encapsulate the vblank event
>> > > parameters.
>> > >
>> > > Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>> > > ---
>> > >  drivers/gpu/drm/msm/msm_drv.c | 70 ++++++++++++-------------------------------
>> > >  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>> > >  2 files changed, 19 insertions(+), 58 deletions(-)
>> > >
>> > > diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
>> > > index 1f384b3..67a96ee 100644
>> > > --- a/drivers/gpu/drm/msm/msm_drv.c
>> > > +++ b/drivers/gpu/drm/msm/msm_drv.c
> 
> /snip
> 
>> > >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>> > >  					int crtc_id, bool enable)
>> > >  {
>> > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > > -	struct vblank_event *vbl_ev;
>> > > -	unsigned long flags;
>> > > +	struct msm_vblank_work *vbl_work;
>> > >
>> > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> > > -	if (!vbl_ev)
>> > > +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
>> > > +	if (!vbl_work)
>> > >  		return -ENOMEM;
>> > >
>> > > -	vbl_ev->crtc_id = crtc_id;
>> > > -	vbl_ev->enable = enable;
>> > > +	kthread_init_work(&vbl_work->work, vblank_ctrl_worker);
>> > >
>> > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> > > +	vbl_work->crtc_id = crtc_id;
>> > > +	vbl_work->enable = enable;
>> > > +	vbl_work->priv = priv;
>> > >
>> > > -	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
>> > > +	kthread_queue_work(&priv->disp_thread.worker, &vbl_work->work);
>> >
>> > So I think this can get even more simplified. In the short term, you can
>> > just use the systemwq to do the enable and disable.
>> 
>> you mean priv->wq?
>> 
> 
> I meant the system workqueue; we probably don't need our own for this.
> 
> 
>> >
>> > In the long term, the enable_vblank/disable_vblank functions should be
>> > optimized so they don't sleep. I took a quick look at them; perhaps this is
>> > all because of the crtc_lock mutex? That lock seems a bit suspicious to me,
>> > especially being dropped around the pm_runtime calls in
>> > _dpu_crtc_vblank_enable_no_lock(). I think we could probably rely on the
>> > modeset locks for some of these functions, and perhaps convert it to a
>> > spinlock if we can't get rid of it entirely.
>> 
>> crtc_lock has a history of usage in the downstream driver. It was
>> introduced to protect vblank variables when vblank requests were handled
>> in the user thread (not the display thread). When event threads were
>> introduced to receive encoder events, the lock was further expanded to
>> protect a few more variables. It was also needed to synchronize CRTC
>> accesses between debugfs dump calls and the display thread.
> 
> The debugfs case can be solved pretty easily by using the modeset locks. I
> haven't looked closely at the event threads; could we convert crtc_lock to a
> spinlock and then make vblank enable/disable synchronous?
I did a little digging into why vblank enable/disable was made
asynchronous in the first place. It looks like Rob was also using
priv->wq to queue vblank requests before display threads were introduced
by the DPU driver.

The only reason I can think of was to support smart panels, where we
wait for the CTL_START interrupt instead of PING_PONG_DONE, which is
needed for fence releases. I need to confirm with Rob on the MDP5
behaviour before switching to sync.

For now, I will submit a patch to use the system wq.
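
If the sync conversion does pan out, the queue/worker indirection could
collapse into a direct call. A rough sketch, assuming enable_vblank() is
made non-sleeping and a drm_driver vblank hook of this shape (names
illustrative):

static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	/* only safe once enable_vblank() no longer sleeps */
	kms->funcs->enable_vblank(kms, priv->crtcs[pipe]);

	return 0;
}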
> 
> Sean
> 
>> 
>> I would like to deal with this cleanup later, once we lose these extra
>> threads.
>> 
>> Thanks and Regards,
>> Jeykumar S.
>> 
>> >
>> > Sean
>> >
>> > >
>> > >  	return 0;
>> > >  }
>> > > @@ -269,20 +252,8 @@ static int msm_drm_uninit(struct device *dev)
>> > >  	struct msm_drm_private *priv = ddev->dev_private;
>> > >  	struct msm_kms *kms = priv->kms;
>> > >  	struct msm_mdss *mdss = priv->mdss;
>> > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > > -	struct vblank_event *vbl_ev, *tmp;
>> > >  	int i;
>> > >
>> > > -	/* We must cancel and cleanup any pending vblank enable/disable
>> > > -	 * work before drm_irq_uninstall() to avoid work re-enabling an
>> > > -	 * irq after uninstall has disabled it.
>> > > -	 */
>> > > -	kthread_flush_work(&vbl_ctrl->work);
>> > > -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
>> > > -		list_del(&vbl_ev->node);
>> > > -		kfree(vbl_ev);
>> > > -	}
>> > > -
>> > >  	kthread_flush_worker(&priv->disp_thread.worker);
>> > >  	kthread_stop(priv->disp_thread.thread);
>> > >  	priv->disp_thread.thread = NULL;
>> > > @@ -474,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>> > >  	priv->wq = alloc_ordered_workqueue("msm", 0);
>> > >
>> > >  	INIT_LIST_HEAD(&priv->inactive_list);
>> > > -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
>> > > -	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
>> > > -	spin_lock_init(&priv->vblank_ctrl.lock);
>> > >
>> > >  	drm_mode_config_init(ddev);
>> > >
>> > > diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
>> > > index e81b1fa..b91e306 100644
>> > > --- a/drivers/gpu/drm/msm/msm_drv.h
>> > > +++ b/drivers/gpu/drm/msm/msm_drv.h
>> > > @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>> > >  	PLANE_PROP_MAX_NUM
>> > >  };
>> > >
>> > > -struct msm_vblank_ctrl {
>> > > -	struct kthread_work work;
>> > > -	struct list_head event_list;
>> > > -	spinlock_t lock;
>> > > -};
>> > > -
>> > >  #define MSM_GPU_MAX_RINGS 4
>> > >  #define MAX_H_TILES_PER_DISPLAY 2
>> > >
>> > > @@ -226,7 +220,6 @@ struct msm_drm_private {
>> > >  	struct notifier_block vmap_notifier;
>> > >  	struct shrinker shrinker;
>> > >
>> > > -	struct msm_vblank_ctrl vblank_ctrl;
>> > >  	struct drm_atomic_state *pm_state;
>> > >  };
>> > >
>> > > --
>> > > The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
>> > > a Linux Foundation Collaborative Project
>> > >
>> > > _______________________________________________
>> > > Freedreno mailing list
>> > > Freedreno@lists.freedesktop.org
>> > > https://lists.freedesktop.org/mailman/listinfo/freedreno
>> 
>> --
>> Jeykumar S

Patch

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 1f384b3..67a96ee 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -203,61 +203,44 @@  u32 msm_readl(const void __iomem *addr)
 	return val;
 }
 
-struct vblank_event {
-	struct list_head node;
+struct msm_vblank_work {
+	struct kthread_work work;
 	int crtc_id;
 	bool enable;
+	struct msm_drm_private *priv;
 };
 
 static void vblank_ctrl_worker(struct kthread_work *work)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
-						struct msm_vblank_ctrl, work);
-	struct msm_drm_private *priv = container_of(vbl_ctrl,
-					struct msm_drm_private, vblank_ctrl);
+	struct msm_vblank_work *vbl_work = container_of(work,
+						struct msm_vblank_work, work);
+	struct msm_drm_private *priv = vbl_work->priv;
 	struct msm_kms *kms = priv->kms;
-	struct vblank_event *vbl_ev, *tmp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
-
-		if (vbl_ev->enable)
-			kms->funcs->enable_vblank(kms,
-						priv->crtcs[vbl_ev->crtc_id]);
-		else
-			kms->funcs->disable_vblank(kms,
-						priv->crtcs[vbl_ev->crtc_id]);
 
-		kfree(vbl_ev);
-
-		spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	}
+	if (vbl_work->enable)
+		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
+	else
+		kms->funcs->disable_vblank(kms,	priv->crtcs[vbl_work->crtc_id]);
 
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	kfree(vbl_work);
 }
 
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 					int crtc_id, bool enable)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev;
-	unsigned long flags;
+	struct msm_vblank_work *vbl_work;
 
-	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
-	if (!vbl_ev)
+	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
+	if (!vbl_work)
 		return -ENOMEM;
 
-	vbl_ev->crtc_id = crtc_id;
-	vbl_ev->enable = enable;
+	kthread_init_work(&vbl_work->work, vblank_ctrl_worker);
 
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	vbl_work->crtc_id = crtc_id;
+	vbl_work->enable = enable;
+	vbl_work->priv = priv;
 
-	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
+	kthread_queue_work(&priv->disp_thread.worker, &vbl_work->work);
 
 	return 0;
 }
@@ -269,20 +252,8 @@  static int msm_drm_uninit(struct device *dev)
 	struct msm_drm_private *priv = ddev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_mdss *mdss = priv->mdss;
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev, *tmp;
 	int i;
 
-	/* We must cancel and cleanup any pending vblank enable/disable
-	 * work before drm_irq_uninstall() to avoid work re-enabling an
-	 * irq after uninstall has disabled it.
-	 */
-	kthread_flush_work(&vbl_ctrl->work);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		kfree(vbl_ev);
-	}
-
 	kthread_flush_worker(&priv->disp_thread.worker);
 	kthread_stop(priv->disp_thread.thread);
 	priv->disp_thread.thread = NULL;
@@ -474,9 +445,6 @@  static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	priv->wq = alloc_ordered_workqueue("msm", 0);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
-	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	drm_mode_config_init(ddev);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e81b1fa..b91e306 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -77,12 +77,6 @@  enum msm_mdp_plane_property {
 	PLANE_PROP_MAX_NUM
 };
 
-struct msm_vblank_ctrl {
-	struct kthread_work work;
-	struct list_head event_list;
-	spinlock_t lock;
-};
-
 #define MSM_GPU_MAX_RINGS 4
 #define MAX_H_TILES_PER_DISPLAY 2
 
@@ -226,7 +220,6 @@  struct msm_drm_private {
 	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
 
-	struct msm_vblank_ctrl vblank_ctrl;
 	struct drm_atomic_state *pm_state;
 };