
[3/4] drm/msm: Fix debugfs deadlock

Message ID 20210331221630.488498-4-robdclark@gmail.com (mailing list archive)
State Not Applicable, archived
Series drm/msm: Shrinker (and related) fixes

Commit Message

Rob Clark March 31, 2021, 10:16 p.m. UTC
From: Rob Clark <robdclark@chromium.org>

In normal cases the gem obj lock is acquired before mm_lock.  The
exception is iterating the various object lists.  In the shrinker path,
deadlock is avoided by using msm_gem_trylock() and skipping over objects
that cannot be locked.  But for debugfs the straightforward fix is to
split out a separate list of all objects, protected by its own lock.

Fixes: d984457b31c4 ("drm/msm: Add priv->mm_lock to protect active/inactive lists")
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/msm_debugfs.c | 14 +++-----------
 drivers/gpu/drm/msm/msm_drv.c     |  3 +++
 drivers/gpu/drm/msm/msm_drv.h     |  8 +++++++-
 drivers/gpu/drm/msm/msm_gem.c     | 14 +++++++++++++-
 drivers/gpu/drm/msm/msm_gem.h     | 13 ++++++++++---
 5 files changed, 36 insertions(+), 16 deletions(-)
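
A minimal sketch of the two patterns the commit message describes (the
function name is hypothetical and the purge step is elided;
msm_gem_trylock()/msm_gem_unlock() and the list/lock names follow this
series):

	/* Shrinker path: mm_lock is held, so each object is only
	 * trylocked and skipped if contended, to avoid inverting the
	 * normal "obj lock, then mm_lock" ordering.
	 */
	static unsigned long scan_inactive(struct msm_drm_private *priv)
	{
		struct msm_gem_object *msm_obj;
		unsigned long freed = 0;

		mutex_lock(&priv->mm_lock);
		list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
			if (!msm_gem_trylock(&msm_obj->base))
				continue;  /* contended: skip, don't deadlock */
			/* ... purge the object, bump "freed" ... */
			msm_gem_unlock(&msm_obj->base);
		}
		mutex_unlock(&priv->mm_lock);

		return freed;
	}

	/* debugfs path (in the diff below): no trylock games needed;
	 * it just walks the new priv->objects list under priv->obj_lock.
	 */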

Comments

Doug Anderson March 31, 2021, 11:13 p.m. UTC | #1
Hi,

On Wed, Mar 31, 2021 at 3:14 PM Rob Clark <robdclark@gmail.com> wrote:
>
> @@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
>  static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
>  {
>         struct msm_drm_private *priv = dev->dev_private;
> -       struct msm_gpu *gpu = priv->gpu;
>         int ret;
>
> -       ret = mutex_lock_interruptible(&priv->mm_lock);
> +       ret = mutex_lock_interruptible(&priv->obj_lock);
>         if (ret)
>                 return ret;
>
> -       if (gpu) {
> -               seq_printf(m, "Active Objects (%s):\n", gpu->name);
> -               msm_gem_describe_objects(&gpu->active_list, m);
> -       }
> -
> -       seq_printf(m, "Inactive Objects:\n");
> -       msm_gem_describe_objects(&priv->inactive_dontneed, m);
> -       msm_gem_describe_objects(&priv->inactive_willneed, m);
> +       msm_gem_describe_objects(&priv->objects, m);

I guess we no longer sort by Active and Inactive, but that doesn't
really matter?


> @@ -174,7 +174,13 @@ struct msm_drm_private {
>         struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
>         struct msm_perf_state *perf;
>
> -       /*
> +       /**
> +        * List of all GEM objects (mainly for debugfs, protected by obj_lock

It wouldn't hurt to talk about lock ordering here? Like: "If we need
the obj_lock and a gem_lock at the same time, we always grab the
obj_lock first."

> @@ -60,13 +60,20 @@ struct msm_gem_object {
>          */
>         uint8_t vmap_count;
>
> -       /* And object is either:
> -        *  inactive - on priv->inactive_list
> +       /**
> +        * Node in list of all objects (mainly for debugfs, protected by
> +        * struct_mutex

Not "struct_mutex" in comment, right? Maybe "obj_lock" I think?

-Doug
Rob Clark March 31, 2021, 11:31 p.m. UTC | #2
On Wed, Mar 31, 2021 at 4:13 PM Doug Anderson <dianders@chromium.org> wrote:
>
> Hi,
>
> On Wed, Mar 31, 2021 at 3:14 PM Rob Clark <robdclark@gmail.com> wrote:
> >
> > @@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
> >  static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
> >  {
> >         struct msm_drm_private *priv = dev->dev_private;
> > -       struct msm_gpu *gpu = priv->gpu;
> >         int ret;
> >
> > -       ret = mutex_lock_interruptible(&priv->mm_lock);
> > +       ret = mutex_lock_interruptible(&priv->obj_lock);
> >         if (ret)
> >                 return ret;
> >
> > -       if (gpu) {
> > -               seq_printf(m, "Active Objects (%s):\n", gpu->name);
> > -               msm_gem_describe_objects(&gpu->active_list, m);
> > -       }
> > -
> > -       seq_printf(m, "Inactive Objects:\n");
> > -       msm_gem_describe_objects(&priv->inactive_dontneed, m);
> > -       msm_gem_describe_objects(&priv->inactive_willneed, m);
> > +       msm_gem_describe_objects(&priv->objects, m);
>
> I guess we no longer sort by Active and Inactive, but that doesn't
> really matter?

It turned out to be less useful to sort by active/inactive than to
just have the summary at the bottom, which the last patch adds.  We
can already tell from the per-object entries whether an object is
active/purgeable/purged.

I did initially try to come up with an approach that let me keep this,
but it would basically amount to re-writing the gem_submit path
(because you cannot do any memory allocation under mm_lock).
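
To spell out the deadlock: a GFP_KERNEL allocation made while holding
mm_lock can enter direct reclaim, and reclaim can call back into this
driver's shrinker, which takes mm_lock itself.  Roughly (an
illustration, not driver code):

	mutex_lock(&priv->mm_lock);
	p = kmalloc(size, GFP_KERNEL);	/* may enter direct reclaim ...    */
					/* ... which invokes our shrinker  */
					/* ... which takes priv->mm_lock   */
					/* -> self-deadlock                */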

>
> > @@ -174,7 +174,13 @@ struct msm_drm_private {
> >         struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
> >         struct msm_perf_state *perf;
> >
> > -       /*
> > +       /**
> > +        * List of all GEM objects (mainly for debugfs, protected by obj_lock
>
> It wouldn't hurt to talk about lock ordering here? Like: "If we need
> the obj_lock and a gem_lock at the same time, we always grab the
> obj_lock first."

good point

>
> > @@ -60,13 +60,20 @@ struct msm_gem_object {
> >          */
> >         uint8_t vmap_count;
> >
> > -       /* And object is either:
> > -        *  inactive - on priv->inactive_list
> > +       /**
> > +        * Node in list of all objects (mainly for debugfs, protected by
> > +        * struct_mutex
>
> Not "struct_mutex" in comment, right? Maybe "obj_lock" I think?

oh, right, forgot to fix that from an earlier iteration

BR,
-R
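
For reference, one possible shape for the kerneldoc comment once the
lock-ordering note is folded in (illustrative wording, not from this
revision of the patch):

	/**
	 * List of all GEM objects (mainly for debugfs, protected by
	 * obj_lock; acquire obj_lock before any per-object gem lock)
	 */
	struct list_head objects;
	struct mutex obj_lock;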

Patch

diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 85ad0babc326..d611cc8e54a4 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -111,23 +111,15 @@  static const struct file_operations msm_gpu_fops = {
 static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_gpu *gpu = priv->gpu;
 	int ret;
 
-	ret = mutex_lock_interruptible(&priv->mm_lock);
+	ret = mutex_lock_interruptible(&priv->obj_lock);
 	if (ret)
 		return ret;
 
-	if (gpu) {
-		seq_printf(m, "Active Objects (%s):\n", gpu->name);
-		msm_gem_describe_objects(&gpu->active_list, m);
-	}
-
-	seq_printf(m, "Inactive Objects:\n");
-	msm_gem_describe_objects(&priv->inactive_dontneed, m);
-	msm_gem_describe_objects(&priv->inactive_willneed, m);
+	msm_gem_describe_objects(&priv->objects, m);
 
-	mutex_unlock(&priv->mm_lock);
+	mutex_unlock(&priv->obj_lock);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 3462b0ea14c6..1ef1cd0cc714 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -474,6 +474,9 @@  static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 
 	priv->wq = alloc_ordered_workqueue("msm", 0);
 
+	INIT_LIST_HEAD(&priv->objects);
+	mutex_init(&priv->obj_lock);
+
 	INIT_LIST_HEAD(&priv->inactive_willneed);
 	INIT_LIST_HEAD(&priv->inactive_dontneed);
 	INIT_LIST_HEAD(&priv->inactive_purged);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 3ead5755f695..d69f4263bd4e 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -174,7 +174,13 @@  struct msm_drm_private {
 	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
 	struct msm_perf_state *perf;
 
-	/*
+	/**
+	 * List of all GEM objects (mainly for debugfs, protected by obj_lock
+	 */
+	struct list_head objects;
+	struct mutex obj_lock;
+
+	/**
 	 * Lists of inactive GEM objects.  Every bo is either in one of the
 	 * inactive lists (depending on whether or not it is shrinkable) or
 	 * gpu->active_list (for the gpu it is active on[1])
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 74a92eedc992..c184ea68a6d0 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -961,7 +961,7 @@  void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 	size_t size = 0;
 
 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
-	list_for_each_entry(msm_obj, list, mm_list) {
+	list_for_each_entry(msm_obj, list, node) {
 		struct drm_gem_object *obj = &msm_obj->base;
 		seq_puts(m, "   ");
 		msm_gem_describe(obj, m);
@@ -980,6 +980,10 @@  void msm_gem_free_object(struct drm_gem_object *obj)
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 
+	mutex_lock(&priv->obj_lock);
+	list_del(&msm_obj->node);
+	mutex_unlock(&priv->obj_lock);
+
 	mutex_lock(&priv->mm_lock);
 	if (msm_obj->dontneed)
 		mark_unpurgable(msm_obj);
@@ -1170,6 +1174,10 @@  static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
 	mutex_unlock(&priv->mm_lock);
 
+	mutex_lock(&priv->obj_lock);
+	list_add_tail(&msm_obj->node, &priv->objects);
+	mutex_unlock(&priv->obj_lock);
+
 	return obj;
 
 fail:
@@ -1240,6 +1248,10 @@  struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
 	mutex_unlock(&priv->mm_lock);
 
+	mutex_lock(&priv->obj_lock);
+	list_add_tail(&msm_obj->node, &priv->objects);
+	mutex_unlock(&priv->obj_lock);
+
 	return obj;
 
 fail:
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 0feabae75d3d..49956196025e 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -60,13 +60,20 @@  struct msm_gem_object {
 	 */
 	uint8_t vmap_count;
 
-	/* And object is either:
-	 *  inactive - on priv->inactive_list
+	/**
+	 * Node in list of all objects (mainly for debugfs, protected by
+	 * struct_mutex
+	 */
+	struct list_head node;
+
+	/**
+	 * An object is either:
+	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
+	 *     (depending on purgability status)
 	 *  active   - on one one of the gpu's active_list..  well, at
 	 *     least for now we don't have (I don't think) hw sync between
 	 *     2d and 3d one devices which have both, meaning we need to
 	 *     block on submit if a bo is already on other ring
-	 *
 	 */
 	struct list_head mm_list;