
[RFC,5/5] drm/i915: Add sysfs toggle to enable per-client engine stats

Message ID: 20191025142131.17378-6-tvrtko.ursulin@linux.intel.com
State: New, archived
Series: Per client engine busyness (all aboard the sysfs train!)

Commit Message

Tvrtko Ursulin Oct. 25, 2019, 2:21 p.m. UTC
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

By default we are not collecting any per-engine and per-context
statistics.

Add a new sysfs toggle to enable this facility:

$ echo 1 >/sys/class/drm/card0/clients/enable_stats

v2: Rebase.
v3: sysfs_attr_init.
v4: Scheduler caps.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h   |  4 ++
 drivers/gpu/drm/i915/i915_sysfs.c | 73 +++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)

Comments

Chris Wilson Oct. 25, 2019, 2:49 p.m. UTC | #1
Quoting Tvrtko Ursulin (2019-10-25 15:21:31)
> +       ret = i915_mutex_lock_interruptible(&i915->drm);
> +       if (ret)
> +               return ret;
> +
> +       if (val && !i915->clients.stats.enabled)
> +               enable = true;
> +       else if (!val && i915->clients.stats.enabled)
> +               disable = true;

The struct_mutex is just for atomically enabling/disabling stats, right?
Only one user may toggle the state at a time.

I'd wrap it in an i915->spinlock just so the locking is clear from the
outset.
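
For illustration only, a minimal sketch of that suggestion, assuming a
hypothetical clients.stats.lock spinlock (not part of this patch) guarding
the enabled flag, with the engine walk moved outside the critical section:

	bool toggle = false;

	/* clients.stats.lock is a hypothetical spinlock guarding .enabled. */
	spin_lock(&i915->clients.stats.lock);
	if (val != i915->clients.stats.enabled) {
		i915->clients.stats.enabled = val;
		toggle = true;
	}
	spin_unlock(&i915->clients.stats.lock);

	if (!toggle)
		return count;

	/* ... then enable/disable the per-engine stats as below ... */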

> +       if (!enable && !disable)
> +               goto out;
> +
> +       for_each_engine(engine, i915, id) {

A quick s/for_each_engine/for_each_uabi_engine/
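
That iterator walks only the user-visible engines and takes no engine id,
so the loop would become roughly the sketch below (the enum intel_engine_id
id local is then unused):

	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		if (enable)
			WARN_ON_ONCE(intel_enable_engine_stats(engine));
		else if (disable)
			intel_disable_engine_stats(engine);
	}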

> +               if (enable)
> +                       WARN_ON_ONCE(intel_enable_engine_stats(engine));
> +               else if (disable)
> +                       intel_disable_engine_stats(engine);
> +       }
> +
> +       i915->clients.stats.enabled = val;

Now, as for the interface: do we want a toggle approach, or to enable
stats only while the file is open (and refcounted)?
-Chris
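
A rough sketch of the refcounted alternative, assuming hypothetical
clients.stats.mutex and clients.stats.count fields (neither in this patch),
where the first reference enables engine stats and the last one disables
them:

	static void client_stats_get(struct drm_i915_private *i915)
	{
		struct intel_engine_cs *engine;

		mutex_lock(&i915->clients.stats.mutex);
		if (i915->clients.stats.count++ == 0) {
			for_each_uabi_engine(engine, i915)
				WARN_ON_ONCE(intel_enable_engine_stats(engine));
		}
		mutex_unlock(&i915->clients.stats.mutex);
	}

	static void client_stats_put(struct drm_i915_private *i915)
	{
		struct intel_engine_cs *engine;

		mutex_lock(&i915->clients.stats.mutex);
		if (--i915->clients.stats.count == 0) {
			for_each_uabi_engine(engine, i915)
				intel_disable_engine_stats(engine);
		}
		mutex_unlock(&i915->clients.stats.mutex);
	}

client_stats_get() would then be called wherever a reference is taken
(e.g. on open) and client_stats_put() on release, instead of exposing a
global toggle.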

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 45f0e2455322..3d2459e9fff4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1397,6 +1397,10 @@  struct drm_i915_private {
 	struct i915_drm_clients {
 		struct kobject *root;
 		atomic_t serial;
+		struct {
+			bool enabled;
+			struct device_attribute attr;
+		} stats;
 	} clients;
 
 	struct i915_hdcp_comp_master *hdcp_master;
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a9f27f4fc245..b061baf5da49 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -569,9 +569,67 @@  static void i915_setup_error_capture(struct device *kdev) {}
 static void i915_teardown_error_capture(struct device *kdev) {}
 #endif
 
+static ssize_t
+show_client_stats(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_i915_private *i915 =
+		container_of(attr, struct drm_i915_private, clients.stats.attr);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", i915->clients.stats.enabled);
+}
+
+static ssize_t
+store_client_stats(struct device *kdev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct drm_i915_private *i915 =
+		container_of(attr, struct drm_i915_private, clients.stats.attr);
+	bool disable = false;
+	bool enable = false;
+	bool val = false;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int ret;
+
+	 /* Use RCS as proxy for all engines. */
+	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS))
+		return -EINVAL;
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		return ret;
+
+	ret = i915_mutex_lock_interruptible(&i915->drm);
+	if (ret)
+		return ret;
+
+	if (val && !i915->clients.stats.enabled)
+		enable = true;
+	else if (!val && i915->clients.stats.enabled)
+		disable = true;
+
+	if (!enable && !disable)
+		goto out;
+
+	for_each_engine(engine, i915, id) {
+		if (enable)
+			WARN_ON_ONCE(intel_enable_engine_stats(engine));
+		else if (disable)
+			intel_disable_engine_stats(engine);
+	}
+
+	i915->clients.stats.enabled = val;
+
+out:
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return count;
+}
+
 void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 {
 	struct device *kdev = dev_priv->drm.primary->kdev;
+	struct device_attribute *attr;
 	int ret;
 
 	dev_priv->clients.root =
@@ -579,6 +637,18 @@  void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 	if (!dev_priv->clients.root)
 		DRM_ERROR("Per-client sysfs setup failed\n");
 
+	attr = &dev_priv->clients.stats.attr;
+	sysfs_attr_init(&attr->attr);
+	attr->attr.name = "enable_stats";
+	attr->attr.mode = 0664;
+	attr->show = show_client_stats;
+	attr->store = store_client_stats;
+
+	ret = sysfs_create_file(dev_priv->clients.root,
+				(struct attribute *)attr);
+	if (ret)
+		DRM_ERROR("Per-client sysfs setup failed! (%d)\n", ret);
+
 #ifdef CONFIG_PM
 	if (HAS_RC6(dev_priv)) {
 		ret = sysfs_merge_group(&kdev->kobj,
@@ -640,6 +710,9 @@  void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
 	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
 #endif
 
+	sysfs_remove_file(dev_priv->clients.root,
+			  (struct attribute *)&dev_priv->clients.stats.attr);
+
 	if (dev_priv->clients.root)
 		kobject_put(dev_priv->clients.root);
 }