@@ -480,6 +480,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
print_batch_pool_stats(m, dev_priv);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_request *request;
struct task_struct *task;
memset(&stats, 0, sizeof(stats));
@@ -493,8 +495,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
* still alive (e.g. get_pid(current) => fork() => exit()).
* Therefore, we need to protect this ->comm access using RCU.
*/
+ request = list_first_entry_or_null(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_PID);
+ task = pid_task(request && request->ctx->pid ?
+ request->ctx->pid : file->pid,
+ PIDTYPE_PID);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
}
@@ -681,12 +688,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
seq_printf(m, "%s requests: %d\n", ring->name, count);
list_for_each_entry(req, &ring->request_list, link) {
+ struct pid *pid = req->ctx->pid;
struct task_struct *task;
rcu_read_lock();
- task = NULL;
- if (req->pid)
- task = pid_task(req->pid, PIDTYPE_PID);
+ task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
seq_printf(m, " %x @ %d: %s [%d]\n",
req->fence.seqno,
(int) (jiffies - req->emitted_jiffies),
@@ -1953,13 +1959,10 @@ static int i915_context_status(struct seq_file *m, void *unused)
continue;
seq_puts(m, "HW context ");
- if (IS_ERR(ctx->file_priv)) {
- seq_puts(m, "(deleted) ");
- } else if (ctx->file_priv) {
- struct pid *pid = ctx->file_priv->file->pid;
+ if (ctx->pid) {
struct task_struct *task;
- task = get_pid_task(pid, PIDTYPE_PID);
+ task = get_pid_task(ctx->pid, PIDTYPE_PID);
if (task) {
seq_printf(m, "(%s [%d]) ",
task->comm, task->pid);
@@ -565,6 +565,7 @@ struct drm_i915_error_state {
struct drm_i915_error_request {
long jiffies;
+ pid_t pid;
u32 seqno;
u32 head;
u32 tail;
@@ -878,6 +879,7 @@ struct intel_context {
struct drm_i915_file_private *file_priv;
struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
+ struct pid *pid;
unsigned flags;
#define CONTEXT_NO_ZEROMAP (1<<0)
@@ -147,6 +147,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ctx->legacy_hw_ctx.rcs_state)
drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+
+ put_pid(ctx->pid);
list_del(&ctx->link);
kfree(ctx);
}
@@ -256,6 +258,9 @@ __create_hw_context(struct drm_device *dev,
ret = DEFAULT_CONTEXT_HANDLE;
ctx->file_priv = file_priv;
+ if (file_priv)
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+
ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
@@ -298,8 +298,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
list_add_tail(&req->client_list, &file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
- req->pid = get_pid(task_pid(current));
-
return 0;
}
@@ -315,9 +313,6 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
list_del(&request->client_list);
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
-
- put_pid(request->pid);
- request->pid = NULL;
}
static void __i915_gem_request_release(struct drm_i915_gem_request *request)
@@ -86,9 +86,6 @@ struct drm_i915_gem_request {
/** file_priv list entry for this request */
struct list_head client_list;
- /** process identifier submitting this request */
- struct pid *pid;
-
/** Execlist link in the submission queue.*/
struct list_head execlist_link; /* guarded by engine->execlist_lock */
};
@@ -457,7 +457,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
dev_priv->ring[i].name,
error->ring[i].num_requests);
for (j = 0; j < error->ring[i].num_requests; j++) {
- err_printf(m, " seqno 0x%08x, emitted %ld, head 0x%08x tail 0x%08x\n",
+ err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x tail 0x%08x\n",
+ error->ring[i].requests[j].pid,
error->ring[i].requests[j].seqno,
error->ring[i].requests[j].jiffies,
error->ring[i].requests[j].head,
@@ -983,6 +984,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
if (request) {
struct i915_address_space *vm;
struct intel_ring *ring;
+ struct pid *pid;
vm = request->ctx->ppgtt ?
&request->ctx->ppgtt->base :
@@ -1002,11 +1004,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_error_object_create(dev_priv,
engine->scratch.vma);
- if (request->pid) {
+ pid = request->ctx->pid;
+ if (pid) {
struct task_struct *task;
rcu_read_lock();
- task = pid_task(request->pid, PIDTYPE_PID);
+ task = pid_task(pid, PIDTYPE_PID);
if (task) {
strcpy(error->ring[i].comm, task->comm);
error->ring[i].pid = task->pid;
@@ -1070,6 +1073,10 @@ static void i915_gem_record_rings(struct drm_device *dev,
erq->jiffies = request->emitted_jiffies;
erq->head = request->head;
erq->tail = request->tail;
+
+ rcu_read_lock();
+ erq->pid = request->ctx ? pid_nr(request->ctx->pid) : 0;
+ rcu_read_unlock();
}
}
}
Since contexts are not currently shared between userspace processes, we have an exact correspondence between context creator and guilty batch submitter. Therefore we can save some per-batch work by inspecting the context->pid upon error instead. Note that we take the pid of the context's creator rather than the file's pid in order to better track fds passed over sockets. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- drivers/gpu/drm/i915/i915_debugfs.c | 21 ++++++++++++--------- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem_context.c | 5 +++++ drivers/gpu/drm/i915/i915_gem_request.c | 5 ----- drivers/gpu/drm/i915/i915_gem_request.h | 3 --- drivers/gpu/drm/i915/i915_gpu_error.c | 13 ++++++++++--- 6 files changed, 29 insertions(+), 20 deletions(-)