
[RFC,8/8] drm/i915: Expose per-engine client busyness

Message ID 20191219180019.25562-9-tvrtko.ursulin@linux.intel.com (mailing list archive)
State New, archived
Series Per client engine busyness

Commit Message

Tvrtko Ursulin Dec. 19, 2019, 6 p.m. UTC
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Expose per-client and per-engine busyness under the previously added sysfs
client root.

The new files, one per engine class, are located under the 'busy'
directory. Each contains a monotonically increasing, nanosecond-resolution
total of the time the client's jobs have been executing on the GPU.

This enables userspace to create a top-like tool for GPU utilization:

==========================================================================
intel-gpu-top -  935/ 935 MHz;    0% RC6; 14.73 Watts;     1097 irqs/s

      IMC reads:     1401 MiB/s
     IMC writes:        4 MiB/s

          ENGINE      BUSY                                 MI_SEMA MI_WAIT
     Render/3D/0   63.73% |███████████████████           |      3%      0%
       Blitter/0    9.53% |██▊                           |      6%      0%
         Video/0   39.32% |███████████▊                  |     16%      0%
         Video/1   15.62% |████▋                         |      0%      0%
  VideoEnhance/0    0.00% |                              |      0%      0%

  PID            NAME     RCS          BCS          VCS         VECS
 4084        gem_wsim |█████▌     ||█          ||           ||           |
 4086        gem_wsim |█▌         ||           ||███        ||           |
==========================================================================
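
For illustration, a minimal userspace sampling sketch (not part of this
patch): the clients root path and client id are hypothetical, and the
per-class file name follows the uabi_class_names table added below. It
samples one busy file twice and turns the nanosecond delta into a
utilisation percentage:

/*
 * Hypothetical sampling example, not part of the patch. The clients
 * root location and the client id are made up; the per-class file
 * names ("0".."3") follow the uabi_class_names table added below.
 */
#include <stdio.h>
#include <unistd.h>

static unsigned long long read_busy_ns(const char *path)
{
	unsigned long long val = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%llu", &val) != 1)
			val = 0;
		fclose(f);
	}

	return val;
}

int main(void)
{
	/* Hypothetical client id 0, engine class 0 (render). */
	const char *path = "/sys/i915/clients/0/busy/0";
	const unsigned int period = 2; /* seconds */
	unsigned long long t0, t1;

	t0 = read_busy_ns(path);
	sleep(period);
	t1 = read_busy_ns(path);

	/* Values are total busy nanoseconds, so delta / period = load. */
	printf("Render/3D: %.2f%%\n",
	       (double)(t1 - t0) / (period * 1e9) * 100.0);

	return 0;
}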

v2: Use intel_context_engine_get_busy_time.
v3: New directory structure.
v4: Rebase.
v5: sysfs_attr_init.
v6: Small tidy in i915_gem_add_client.
v7: Rebase to be engine class based.
v8:
 * Always enable stats.
 * Walk all client contexts.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h |  9 ++++
 drivers/gpu/drm/i915/i915_gem.c | 89 ++++++++++++++++++++++++++++++++-
 2 files changed, 97 insertions(+), 1 deletion(-)

Comments

Chris Wilson Dec. 19, 2019, 9:04 p.m. UTC | #1
Quoting Tvrtko Ursulin (2019-12-19 18:00:19)
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> Expose per-client and per-engine busyness under the previously added sysfs
> client root.
> 
> The new files, one per engine class, are located under the 'busy'
> directory. Each contains a monotonically increasing, nanosecond-resolution
> total of the time the client's jobs have been executing on the GPU.
> 
> This enables userspace to create a top-like tool for GPU utilization:
> 
> ==========================================================================
> intel-gpu-top -  935/ 935 MHz;    0% RC6; 14.73 Watts;     1097 irqs/s
> 
>       IMC reads:     1401 MiB/s
>      IMC writes:        4 MiB/s
> 
>           ENGINE      BUSY                                 MI_SEMA MI_WAIT
>      Render/3D/0   63.73% |███████████████████           |      3%      0%
>        Blitter/0    9.53% |██▊                           |      6%      0%
>          Video/0   39.32% |███████████▊                  |     16%      0%
>          Video/1   15.62% |████▋                         |      0%      0%
>   VideoEnhance/0    0.00% |                              |      0%      0%
> 
>   PID            NAME     RCS          BCS          VCS         VECS
>  4084        gem_wsim |█████▌     ||█          ||           ||           |
>  4086        gem_wsim |█▌         ||           ||███        ||           |
> ==========================================================================
> 
> v2: Use intel_context_engine_get_busy_time.
> v3: New directory structure.
> v4: Rebase.
> v5: sysfs_attr_init.
> v6: Small tidy in i915_gem_add_client.
> v7: Rebase to be engine class based.
> v8:
>  * Always enable stats.
>  * Walk all client contexts.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Other than splitting it out into i915_drm_client.c (et al), it works
for me.

However, it's about as useful as top, but without any means to
kill/stop/reprioritise clients :(

To give me actionable data, do we not need more of a perf interface
where events are sent for client start/stop so that observers can
record the context utilisation within their sample periods? I'm thinking
of the "perf stat wsim..." use case where it gives me a breakdown of
each workload.
-Chris
Chris Wilson Dec. 19, 2019, 9:23 p.m. UTC | #2
Quoting Tvrtko Ursulin (2019-12-19 18:00:19)
> +       for (i = 0;
> +            client->busy_root && i < ARRAY_SIZE(uabi_class_names);
> +            i++) {
> +               struct i915_engine_busy_attribute *i915_attr =
> +                       &client->attr.busy[i];
> +
> +               i915_attr->client = client;
> +               i915_attr->engine_class = i;
> +
> +               attr = &i915_attr->attr;
> +
> +               sysfs_attr_init(&attr->attr);
> +
> +               attr->attr.name = uabi_class_names[i];
> +               attr->attr.mode = 0444;
> +               attr->show = show_client_busy;
> +
> +               ret = sysfs_create_file(client->busy_root,
> +                                       (struct attribute *)attr);

Do we need to hold a reference from the open file to the
i915_drm_client?

	fd = open("/sys/i915/clients/0/0", O_RDONLY);

	v[0] = read_u64(fd);
	sleep(2); rewind(fd);
	v[1] = read_u64(fd);

	close(fd);

I was wondering whether poll("/sys/i915/clients") would return events
for new clients, and so whether we could do something like

if (poll("/sys/i915/clients", timeout) > 0) {
	for_each_new_client:
		client = open("/sys/i915/client/$id");
}

for_each_client:
	printf("%s: {rcs:%llu, ...}", client->name, read_u64(client->rcs));

Might be a bit heavy on the fds :)
-Chris
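
[Editorial illustration, not part of the thread: a literal, compilable
rendering of the open/read/rewind sketch above. The path is hypothetical
and follows the layout added by this patch; pread() at offset 0 stands in
for the rewind. Whether the second read observes a fresh value is the
question addressed in the follow-up below.]

/* Compilable rendering of the sketch above; the path is hypothetical. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static unsigned long long read_u64(int fd)
{
	char buf[32] = { 0 };

	/* Reading at offset 0 each time stands in for the rewind. */
	if (pread(fd, buf, sizeof(buf) - 1, 0) <= 0)
		return 0;

	return strtoull(buf, NULL, 10);
}

int main(void)
{
	unsigned long long v[2];
	int fd = open("/sys/i915/clients/0/busy/0", O_RDONLY);

	if (fd < 0)
		return 1;

	v[0] = read_u64(fd);
	sleep(2);
	v[1] = read_u64(fd);

	close(fd);

	printf("busy delta: %llu ns\n", v[1] - v[0]);

	return 0;
}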
Chris Wilson Dec. 19, 2019, 11:16 p.m. UTC | #3
Quoting Chris Wilson (2019-12-19 21:23:27)
> Quoting Tvrtko Ursulin (2019-12-19 18:00:19)
> > +       for (i = 0;
> > +            client->busy_root && i < ARRAY_SIZE(uabi_class_names);
> > +            i++) {
> > +               struct i915_engine_busy_attribute *i915_attr =
> > +                       &client->attr.busy[i];
> > +
> > +               i915_attr->client = client;
> > +               i915_attr->engine_class = i;
> > +
> > +               attr = &i915_attr->attr;
> > +
> > +               sysfs_attr_init(&attr->attr);
> > +
> > +               attr->attr.name = uabi_class_names[i];
> > +               attr->attr.mode = 0444;
> > +               attr->show = show_client_busy;
> > +
> > +               ret = sysfs_create_file(client->busy_root,
> > +                                       (struct attribute *)attr);
> 
> Do we need to hold a reference from the open file to the
> i915_drm_client?
> 
>         fd = open("/sys/i915/clients/0/0", O_RDONLY);
> 
>         v[0] = read_u64(fd);
>         sleep(2); rewind(fd);
>         v[1] = read_u64(fd);
> 
>         close(fd);

No. Since the sysfs_show() takes a sprintf snapshot of the value, the
above will simply return the same value over and over again. No new
dereferences into our state.
-Chris
Tvrtko Ursulin Dec. 20, 2019, 8:07 a.m. UTC | #4
On 19/12/2019 21:04, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-12-19 18:00:19)
>> [snip]
> 
> Other than splitting it out into i915_drm_client.c (et al), it works
> for me.
> 
> However, it's about as useful as top, but without any means to
> kill/stop/reprioritise clients :(

Killing a client is a job for kill(2), no? Since the pid is already 
exported, it is just a matter of adding some code to intel_gpu_top.

Unless we also want to cover killing work belonging to exited clients, 
which would probably be nice for feature completeness. For that we 
probably want an ioctl, but it would be the first one to allow direct 
action on unrelated clients, even if restricted to CAP_SYS_ADMIN for 
instance.
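
(For illustration, a minimal sketch of such an intel_gpu_top addition,
reading the per-client 'pid' attribute already exposed by the series;
the client directory path is taken as an argument and error handling is
kept to a minimum:)

/* Sketch only; relies on the per-client "pid" attribute from this series. */
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

static int kill_client(const char *client_dir)
{
	char path[256];
	int pid = -1;
	FILE *f;

	snprintf(path, sizeof(path), "%s/pid", client_dir);

	f = fopen(path, "r");
	if (!f)
		return -1;

	if (fscanf(f, "%d", &pid) != 1)
		pid = -1;
	fclose(f);

	if (pid <= 0)
		return -1;

	/* Plain kill(2) on the exported pid, no new i915 uAPI needed. */
	return kill((pid_t)pid, SIGTERM);
}

int main(int argc, char **argv)
{
	return (argc > 1 && kill_client(argv[1]) == 0) ? 0 : 1;
}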

> To give me actionable data, do we not need more of a perf interface
> where events are sent for client start/stop so that observers can
> record the context utilisation within their sample periods? I'm thinking
> of the "perf stat wsim..." use case where it gives me a breakdown of
> each workload.

It is doable, I think. I had a prototype back when I initially started 
playing with this. In short, what is required is a separate PMU "node" 
and a map of pid to client. With that I was able to query GPU time for 
each pid as profiled by perf. I should have a sketch in a branch 
somewhere.

But IIRC I wasn't sure it was a good replacement for this sysfs 
interface when just thinking about intel_gpu_top. Details escape me now.

Regards,

Tvrtko

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ffd638a071f..8cba3cfb5910 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -189,6 +189,13 @@  struct drm_i915_private;
 struct i915_mm_struct;
 struct i915_mmu_object;
 struct i915_drm_clients;
+struct i915_drm_client;
+
+struct i915_engine_busy_attribute {
+	struct device_attribute attr;
+	struct i915_drm_client *client;
+	unsigned int engine_class;
+};
 
 struct drm_i915_file_private {
 	struct kref kref;
@@ -238,9 +245,11 @@  struct drm_i915_file_private {
 		struct list_head ctx_list;
 
 		struct kobject *root;
+		struct kobject *busy_root;
 		struct {
 			struct device_attribute pid;
 			struct device_attribute name;
+			struct i915_engine_busy_attribute busy[MAX_ENGINE_CLASS];
 		} attr;
 	} client;
 };
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 564e21902dff..98cee37931f6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1546,13 +1546,56 @@  show_client_pid(struct device *kdev, struct device_attribute *attr, char *buf)
 		return snprintf(buf, PAGE_SIZE, "-");
 }
 
+static u64 busy_add(struct i915_gem_context *ctx, unsigned int engine_class)
+{
+	struct i915_gem_engines *engines = rcu_dereference(ctx->engines);
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
+	u64 total = 0;
+
+	for_each_gem_engine(ce, engines, it) {
+		if (ce->engine->uabi_class == engine_class)
+			total += ktime_to_ns(intel_context_get_busy_time(ce));
+	}
+
+	return total;
+}
+
+static ssize_t
+show_client_busy(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct i915_engine_busy_attribute *i915_attr =
+		container_of(attr, typeof(*i915_attr), attr);
+	struct list_head *list = &i915_attr->client->ctx_list;
+	unsigned int engine_class = i915_attr->engine_class;
+	struct i915_gem_context *ctx;
+	u64 total = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ctx, list, client_link)
+		total += busy_add(ctx, engine_class);
+	rcu_read_unlock();
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n", total);
+}
+
+static const char *uabi_class_names[] = {
+	[I915_ENGINE_CLASS_RENDER] = "0",
+	[I915_ENGINE_CLASS_COPY] = "1",
+	[I915_ENGINE_CLASS_VIDEO] = "2",
+	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = "3",
+};
+
 int
 __i915_gem_register_client(struct i915_drm_clients *clients,
 			   struct i915_drm_client *client,
 			   struct task_struct *task)
 {
+	struct drm_i915_private *i915 =
+		container_of(clients, typeof(*i915), clients);
 	struct device_attribute *attr;
-	int ret = -ENOMEM;
+	struct intel_engine_cs *engine;
+	int i, ret = -ENOMEM;
 	char idstr[32];
 
 	if (!clients->root)
@@ -1587,10 +1630,47 @@  __i915_gem_register_client(struct i915_drm_clients *clients,
 	if (ret)
 		goto err_attr;
 
+	if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS) {
+		client->busy_root =
+			kobject_create_and_add("busy", client->root);
+		if (!client->busy_root)
+			goto err_attr;
+	}
+
+	for (i = 0;
+	     client->busy_root && i < ARRAY_SIZE(uabi_class_names);
+	     i++) {
+		struct i915_engine_busy_attribute *i915_attr =
+			&client->attr.busy[i];
+
+		i915_attr->client = client;
+		i915_attr->engine_class = i;
+
+		attr = &i915_attr->attr;
+
+		sysfs_attr_init(&attr->attr);
+
+		attr->attr.name = uabi_class_names[i];
+		attr->attr.mode = 0444;
+		attr->show = show_client_busy;
+
+		ret = sysfs_create_file(client->busy_root,
+					(struct attribute *)attr);
+		if (ret)
+			goto err_busy;
+	}
+
 	client->pid = get_task_pid(task, PIDTYPE_PID);
 
+	if (client->busy_root) {
+		for_each_uabi_engine(engine, i915)
+			WARN_ON_ONCE(intel_enable_engine_stats(engine));
+	}
+
 	return 0;
 
+err_busy:
+	kobject_put(client->busy_root);
 err_attr:
 	kobject_put(client->root);
 err_client:
@@ -1603,10 +1683,17 @@  void __i915_gem_unregister_client(struct i915_drm_client *client)
 {
 	struct drm_i915_file_private *fpriv =
 		container_of(client, typeof(*fpriv), client);
+	struct intel_engine_cs *engine;
 
 	if (!client->name)
 		return; /* intel_fbdev_init registers a client before sysfs */
 
+	if (client->busy_root) {
+		for_each_uabi_engine(engine, fpriv->i915)
+			intel_disable_engine_stats(engine);
+	}
+
+	kobject_put(fetch_and_zero(&client->busy_root));
 	kobject_put(fetch_and_zero(&client->root));
 	put_pid(fetch_and_zero(&client->pid));
 	kfree(fetch_and_zero(&client->name));