Message ID | 20180419071746.15996-1-chris@chris-wilson.co.uk (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 19/04/2018 08:17, Chris Wilson wrote: > In the next patch, we want to store the intel_context pointer inside > i915_request, as it is frequently access via a convoluted dance when > submitting the request to hw. Having two context pointers inside > i915_request leads to confusion so first rename the existing > i915_gem_context pointer to i915_request.gem_context. Did you do this manually or with spatch? If spatch then please paste the rule in commit message. > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> > Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> > --- > drivers/gpu/drm/i915/gvt/scheduler.c | 4 +-- > drivers/gpu/drm/i915/i915_debugfs.c | 4 +-- > drivers/gpu/drm/i915/i915_gem.c | 10 +++---- > drivers/gpu/drm/i915/i915_gpu_error.c | 18 +++++++----- > drivers/gpu/drm/i915/i915_request.c | 8 ++--- > drivers/gpu/drm/i915/i915_request.h | 2 +- > drivers/gpu/drm/i915/i915_trace.h | 6 ++-- > drivers/gpu/drm/i915/intel_engine_cs.c | 2 +- > drivers/gpu/drm/i915/intel_guc_submission.c | 7 +++-- > drivers/gpu/drm/i915/intel_lrc.c | 29 ++++++++++--------- > drivers/gpu/drm/i915/intel_ringbuffer.c | 11 +++---- > .../gpu/drm/i915/selftests/intel_hangcheck.c | 5 +++- > drivers/gpu/drm/i915/selftests/intel_lrc.c | 2 +- > 13 files changed, 58 insertions(+), 50 deletions(-) > > diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c > index f3d21849b0cb..f64cccb2e793 100644 > --- a/drivers/gpu/drm/i915/gvt/scheduler.c > +++ b/drivers/gpu/drm/i915/gvt/scheduler.c > @@ -205,7 +205,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) > > static inline bool is_gvt_request(struct i915_request *req) > { > - return i915_gem_context_force_single_submission(req->ctx); > + return i915_gem_context_force_single_submission(req->gem_context); > } > > static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) > @@ -305,7 +305,7 @@ static int 
copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) > struct i915_request *req = workload->req; > > if (IS_KABYLAKE(req->i915) && > - is_inhibit_context(req->ctx, req->engine->id)) > + is_inhibit_context(req->gem_context, req->engine->id)) > intel_vgpu_restore_inhibit_context(vgpu, req); > > /* allocate shadow ring buffer */ > diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c > index e0274f41bc76..792f69e44ba5 100644 > --- a/drivers/gpu/drm/i915/i915_debugfs.c > +++ b/drivers/gpu/drm/i915/i915_debugfs.c > @@ -539,8 +539,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data) > struct i915_request, > client_link); > rcu_read_lock(); > - task = pid_task(request && request->ctx->pid ? > - request->ctx->pid : file->pid, > + task = pid_task(request && request->gem_context->pid ? > + request->gem_context->pid : file->pid, > PIDTYPE_PID); > print_file_stats(m, task ? task->comm : "<unknown>", stats); > rcu_read_unlock(); > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c > index 795ca83aed7a..4dba735505d4 100644 > --- a/drivers/gpu/drm/i915/i915_gem.c > +++ b/drivers/gpu/drm/i915/i915_gem.c > @@ -3108,7 +3108,7 @@ static void skip_request(struct i915_request *request) > static void engine_skip_context(struct i915_request *request) > { > struct intel_engine_cs *engine = request->engine; > - struct i915_gem_context *hung_ctx = request->ctx; > + struct i915_gem_context *hung_ctx = request->gem_context; > struct intel_timeline *timeline; > unsigned long flags; > > @@ -3118,7 +3118,7 @@ static void engine_skip_context(struct i915_request *request) > spin_lock(&timeline->lock); > > list_for_each_entry_continue(request, &engine->timeline->requests, link) > - if (request->ctx == hung_ctx) > + if (request->gem_context == hung_ctx) > skip_request(request); > > list_for_each_entry(request, &timeline->requests, link) > @@ -3164,11 +3164,11 @@ i915_gem_reset_request(struct intel_engine_cs 
*engine, > } > > if (stalled) { > - i915_gem_context_mark_guilty(request->ctx); > + i915_gem_context_mark_guilty(request->gem_context); > skip_request(request); > > /* If this context is now banned, skip all pending requests. */ > - if (i915_gem_context_is_banned(request->ctx)) > + if (i915_gem_context_is_banned(request->gem_context)) > engine_skip_context(request); > } else { > /* > @@ -3178,7 +3178,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine, > */ > request = i915_gem_find_active_request(engine); > if (request) { > - i915_gem_context_mark_innocent(request->ctx); > + i915_gem_context_mark_innocent(request->gem_context); > dma_fence_set_error(&request->fence, -EAGAIN); > > /* Rewind the engine to replay the incomplete rq */ > diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c > index 671ffa37614e..269574b7254c 100644 > --- a/drivers/gpu/drm/i915/i915_gpu_error.c > +++ b/drivers/gpu/drm/i915/i915_gpu_error.c > @@ -1277,16 +1277,18 @@ static void error_record_engine_registers(struct i915_gpu_state *error, > static void record_request(struct i915_request *request, > struct drm_i915_error_request *erq) > { > - erq->context = request->ctx->hw_id; > + struct i915_gem_context *ctx = request->gem_context; > + > + erq->context = ctx->hw_id; > erq->sched_attr = request->sched.attr; > - erq->ban_score = atomic_read(&request->ctx->ban_score); > + erq->ban_score = atomic_read(&ctx->ban_score); > erq->seqno = request->global_seqno; > erq->jiffies = request->emitted_jiffies; > erq->head = request->head; > erq->tail = request->tail; > > rcu_read_lock(); > - erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0; > + erq->pid = ctx->pid ? 
pid_nr(ctx->pid) : 0; > rcu_read_unlock(); > } > > @@ -1450,12 +1452,12 @@ static void gem_record_rings(struct i915_gpu_state *error) > > request = i915_gem_find_active_request(engine); > if (request) { > + struct i915_gem_context *ctx = request->gem_context; > struct intel_ring *ring; > > - ee->vm = request->ctx->ppgtt ? > - &request->ctx->ppgtt->base : &ggtt->base; > + ee->vm = ctx->ppgtt ? &ctx->ppgtt->base : &ggtt->base; > > - record_context(&ee->context, request->ctx); > + record_context(&ee->context, ctx); > > /* We need to copy these to an anonymous buffer > * as the simplest method to avoid being overwritten > @@ -1472,10 +1474,10 @@ static void gem_record_rings(struct i915_gpu_state *error) > > ee->ctx = > i915_error_object_create(i915, > - request->ctx->engine[i].state); > + ctx->engine[i].state); > > error->simulated |= > - i915_gem_context_no_error_capture(request->ctx); > + i915_gem_context_no_error_capture(ctx); > > ee->rq_head = request->head; > ee->rq_post = request->postfix; > diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c > index b692a9f7c357..f913e56604ea 100644 > --- a/drivers/gpu/drm/i915/i915_request.c > +++ b/drivers/gpu/drm/i915/i915_request.c > @@ -399,7 +399,7 @@ static void i915_request_retire(struct i915_request *request) > i915_request_remove_from_client(request); > > /* Retirement decays the ban score as it is a sign of ctx progress */ > - atomic_dec_if_positive(&request->ctx->ban_score); > + atomic_dec_if_positive(&request->gem_context->ban_score); > > /* > * The backing object for the context is done after switching to the > @@ -411,7 +411,7 @@ static void i915_request_retire(struct i915_request *request) > */ > if (engine->last_retired_context) > engine->context_unpin(engine, engine->last_retired_context); > - engine->last_retired_context = request->ctx; > + engine->last_retired_context = request->gem_context; > > spin_lock_irq(&request->lock); > if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 
&request->fence.flags)) > @@ -732,7 +732,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) > INIT_LIST_HEAD(&rq->active_list); > rq->i915 = i915; > rq->engine = engine; > - rq->ctx = ctx; > + rq->gem_context = ctx; > rq->ring = ring; > > /* No zalloc, must clear what we need by hand */ > @@ -1064,7 +1064,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches) > */ > rcu_read_lock(); > if (engine->schedule) > - engine->schedule(request, &request->ctx->sched); > + engine->schedule(request, &request->gem_context->sched); > rcu_read_unlock(); > > local_bh_disable(); > diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h > index 8f31ca8272f8..6a029b3f0a88 100644 > --- a/drivers/gpu/drm/i915/i915_request.h > +++ b/drivers/gpu/drm/i915/i915_request.h > @@ -92,7 +92,7 @@ struct i915_request { > * i915_request_free() will then decrement the refcount on the > * context. > */ > - struct i915_gem_context *ctx; > + struct i915_gem_context *gem_context; > struct intel_engine_cs *engine; > struct intel_ring *ring; > struct intel_timeline *timeline; > diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h > index 408827bf5d96..da38b295d69f 100644 > --- a/drivers/gpu/drm/i915/i915_trace.h > +++ b/drivers/gpu/drm/i915/i915_trace.h > @@ -624,7 +624,7 @@ TRACE_EVENT(i915_request_queue, > > TP_fast_assign( > __entry->dev = rq->i915->drm.primary->index; > - __entry->hw_id = rq->ctx->hw_id; > + __entry->hw_id = rq->gem_context->hw_id; > __entry->ring = rq->engine->id; > __entry->ctx = rq->fence.context; > __entry->seqno = rq->fence.seqno; > @@ -651,7 +651,7 @@ DECLARE_EVENT_CLASS(i915_request, > > TP_fast_assign( > __entry->dev = rq->i915->drm.primary->index; > - __entry->hw_id = rq->ctx->hw_id; > + __entry->hw_id = rq->gem_context->hw_id; > __entry->ring = rq->engine->id; > __entry->ctx = rq->fence.context; > __entry->seqno = rq->fence.seqno; > @@ -792,7 +792,7 @@ 
TRACE_EVENT(i915_request_wait_begin, > */ > TP_fast_assign( > __entry->dev = rq->i915->drm.primary->index; > - __entry->hw_id = rq->ctx->hw_id; > + __entry->hw_id = rq->gem_context->hw_id; > __entry->ring = rq->engine->id; > __entry->ctx = rq->fence.context; > __entry->seqno = rq->fence.seqno; > diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c > index be608f7111f5..0248d64c2a72 100644 > --- a/drivers/gpu/drm/i915/intel_engine_cs.c > +++ b/drivers/gpu/drm/i915/intel_engine_cs.c > @@ -1010,7 +1010,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine) > */ > rq = __i915_gem_active_peek(&engine->timeline->last_request); > if (rq) > - return rq->ctx == kernel_context; > + return rq->gem_context == kernel_context; > else > return engine->last_retired_context == kernel_context; > } > diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c > index 02da05875aa7..8527fa1f5c3e 100644 > --- a/drivers/gpu/drm/i915/intel_guc_submission.c > +++ b/drivers/gpu/drm/i915/intel_guc_submission.c > @@ -511,8 +511,9 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) > { > struct intel_guc_client *client = guc->execbuf_client; > struct intel_engine_cs *engine = rq->engine; > - u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx, > - engine)); > + u32 ctx_desc = > + lower_32_bits(intel_lr_context_descriptor(rq->gem_context, > + engine)); > u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); > > spin_lock(&client->wq_lock); > @@ -707,7 +708,7 @@ static void guc_dequeue(struct intel_engine_cs *engine) > struct i915_request *rq, *rn; > > list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { > - if (last && rq->ctx != last->ctx) { > + if (last && rq->gem_context != last->gem_context) { > if (port == last_port) { > __list_del_many(&p->requests, > &rq->sched.link); > diff --git a/drivers/gpu/drm/i915/intel_lrc.c 
b/drivers/gpu/drm/i915/intel_lrc.c > index 029901a8fa38..0777226e65a6 100644 > --- a/drivers/gpu/drm/i915/intel_lrc.c > +++ b/drivers/gpu/drm/i915/intel_lrc.c > @@ -414,9 +414,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) > > static u64 execlists_update_context(struct i915_request *rq) > { > - struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; > + struct intel_context *ce = &rq->gem_context->engine[rq->engine->id]; > struct i915_hw_ppgtt *ppgtt = > - rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; > + rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt; > u32 *reg_state = ce->lrc_reg_state; > > reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); > @@ -666,7 +666,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) > * second request, and so we never need to tell the > * hardware about the first. > */ > - if (last && !can_merge_ctx(rq->ctx, last->ctx)) { > + if (last && !can_merge_ctx(rq->gem_context, > + last->gem_context)) { > /* > * If we are on the second port and cannot > * combine this request with the last, then we > @@ -685,14 +686,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine) > * the same context (even though a different > * request) to the second port. 
> */ > - if (ctx_single_port_submission(last->ctx) || > - ctx_single_port_submission(rq->ctx)) { > + if (ctx_single_port_submission(last->gem_context) || > + ctx_single_port_submission(rq->gem_context)) { > __list_del_many(&p->requests, > &rq->sched.link); > goto done; > } > > - GEM_BUG_ON(last->ctx == rq->ctx); > + GEM_BUG_ON(last->gem_context == rq->gem_context); > > if (submit) > port_assign(port, last); > @@ -1376,7 +1377,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine, > static int execlists_request_alloc(struct i915_request *request) > { > struct intel_engine_cs *engine = request->engine; > - struct intel_context *ce = &request->ctx->engine[engine->id]; > + struct intel_context *ce = &request->gem_context->engine[engine->id]; > int ret; > > GEM_BUG_ON(!ce->pin_count); > @@ -1830,9 +1831,9 @@ static void reset_common_ring(struct intel_engine_cs *engine, > * future request will be after userspace has had the opportunity > * to recreate its own state. > */ > - ce = &request->ctx->engine[engine->id]; > + ce = &request->gem_context->engine[engine->id]; > execlists_init_reg_state(ce->lrc_reg_state, > - request->ctx, engine, ce->ring); > + request->gem_context, engine, ce->ring); > > /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */ > ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = > @@ -1848,7 +1849,7 @@ static void reset_common_ring(struct intel_engine_cs *engine, > > static int intel_logical_ring_emit_pdps(struct i915_request *rq) > { > - struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; > + struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; > struct intel_engine_cs *engine = rq->engine; > const int num_lri_cmds = GEN8_3LVL_PDPES * 2; > u32 *cs; > @@ -1887,15 +1888,15 @@ static int gen8_emit_bb_start(struct i915_request *rq, > * it is unsafe in case of lite-restore (because the ctx is > * not idle). 
PML4 is allocated during ppgtt init so this is > * not needed in 48-bit.*/ > - if (rq->ctx->ppgtt && > - (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) && > - !i915_vm_is_48bit(&rq->ctx->ppgtt->base) && > + if (rq->gem_context->ppgtt && > + (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) && > + !i915_vm_is_48bit(&rq->gem_context->ppgtt->base) && > !intel_vgpu_active(rq->i915)) { > ret = intel_logical_ring_emit_pdps(rq); > if (ret) > return ret; > > - rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine); > + rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine); > } > > cs = intel_ring_begin(rq, 4); > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c > index c68ac605b8a9..3ea8eb5d49f5 100644 > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c > @@ -558,7 +558,8 @@ static void reset_ring_common(struct intel_engine_cs *engine, > */ > if (request) { > struct drm_i915_private *dev_priv = request->i915; > - struct intel_context *ce = &request->ctx->engine[engine->id]; > + struct intel_context *ce = > + &request->gem_context->engine[engine->id]; > struct i915_hw_ppgtt *ppgtt; > > if (ce->state) { > @@ -570,7 +571,7 @@ static void reset_ring_common(struct intel_engine_cs *engine, > CCID_EN); > } > > - ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt; > + ppgtt = request->gem_context->ppgtt ?: engine->i915->mm.aliasing_ppgtt; > if (ppgtt) { > u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10; > > @@ -1427,7 +1428,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) > > *cs++ = MI_NOOP; > *cs++ = MI_SET_CONTEXT; > - *cs++ = i915_ggtt_offset(rq->ctx->engine[RCS].state) | flags; > + *cs++ = i915_ggtt_offset(rq->gem_context->engine[RCS].state) | flags; > /* > * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP > * WaMiSetContext_Hang:snb,ivb,vlv > @@ -1495,7 +1496,7 @@ static int 
remap_l3(struct i915_request *rq, int slice) > static int switch_context(struct i915_request *rq) > { > struct intel_engine_cs *engine = rq->engine; > - struct i915_gem_context *to_ctx = rq->ctx; > + struct i915_gem_context *to_ctx = rq->gem_context; > struct i915_hw_ppgtt *to_mm = > to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; > struct i915_gem_context *from_ctx = engine->legacy_active_context; > @@ -1566,7 +1567,7 @@ static int ring_request_alloc(struct i915_request *request) > { > int ret; > > - GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count); > + GEM_BUG_ON(!request->gem_context->engine[request->engine->id].pin_count); > > /* Flush enough space to reduce the likelihood of waiting after > * we start building the request - in which case we will just > diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c > index f7ee54e109ae..99679b5f97eb 100644 > --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c > +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c > @@ -102,7 +102,10 @@ static int emit_recurse_batch(struct hang *h, > struct i915_request *rq) > { > struct drm_i915_private *i915 = h->i915; > - struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base; > + struct i915_address_space *vm = > + rq->gem_context->ppgtt ? 
> + &rq->gem_context->ppgtt->base : > + &i915->ggtt.base; > struct i915_vma *hws, *vma; > unsigned int flags; > u32 *batch; > diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c > index ee7e22d18ff8..20279547cb05 100644 > --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c > +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c > @@ -82,7 +82,7 @@ static int emit_recurse_batch(struct spinner *spin, > struct i915_request *rq, > u32 arbitration_command) > { > - struct i915_address_space *vm = &rq->ctx->ppgtt->base; > + struct i915_address_space *vm = &rq->gem_context->ppgtt->base; > struct i915_vma *hws, *vma; > u32 *batch; > int err; > Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Regards, Tvrtko
Quoting Tvrtko Ursulin (2018-04-19 11:43:25) > > On 19/04/2018 08:17, Chris Wilson wrote: > > In the next patch, we want to store the intel_context pointer inside > > i915_request, as it is frequently accessed via a convoluted dance when > > submitting the request to hw. Having two context pointers inside > > i915_request leads to confusion so first rename the existing > > i915_gem_context pointer to i915_request.gem_context. > > Did you do this manually or with spatch? If spatch then please paste the > rule in commit message. Oh no, by hand. I still find it quicker to do search and replace in vim than find then decipher spatch documentation ;) -Chris
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index f3d21849b0cb..f64cccb2e793 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -205,7 +205,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) static inline bool is_gvt_request(struct i915_request *req) { - return i915_gem_context_force_single_submission(req->ctx); + return i915_gem_context_force_single_submission(req->gem_context); } static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) @@ -305,7 +305,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) struct i915_request *req = workload->req; if (IS_KABYLAKE(req->i915) && - is_inhibit_context(req->ctx, req->engine->id)) + is_inhibit_context(req->gem_context, req->engine->id)) intel_vgpu_restore_inhibit_context(vgpu, req); /* allocate shadow ring buffer */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e0274f41bc76..792f69e44ba5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -539,8 +539,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data) struct i915_request, client_link); rcu_read_lock(); - task = pid_task(request && request->ctx->pid ? - request->ctx->pid : file->pid, + task = pid_task(request && request->gem_context->pid ? + request->gem_context->pid : file->pid, PIDTYPE_PID); print_file_stats(m, task ? 
task->comm : "<unknown>", stats); rcu_read_unlock(); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 795ca83aed7a..4dba735505d4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3108,7 +3108,7 @@ static void skip_request(struct i915_request *request) static void engine_skip_context(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - struct i915_gem_context *hung_ctx = request->ctx; + struct i915_gem_context *hung_ctx = request->gem_context; struct intel_timeline *timeline; unsigned long flags; @@ -3118,7 +3118,7 @@ static void engine_skip_context(struct i915_request *request) spin_lock(&timeline->lock); list_for_each_entry_continue(request, &engine->timeline->requests, link) - if (request->ctx == hung_ctx) + if (request->gem_context == hung_ctx) skip_request(request); list_for_each_entry(request, &timeline->requests, link) @@ -3164,11 +3164,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine, } if (stalled) { - i915_gem_context_mark_guilty(request->ctx); + i915_gem_context_mark_guilty(request->gem_context); skip_request(request); /* If this context is now banned, skip all pending requests. 
*/ - if (i915_gem_context_is_banned(request->ctx)) + if (i915_gem_context_is_banned(request->gem_context)) engine_skip_context(request); } else { /* @@ -3178,7 +3178,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine, */ request = i915_gem_find_active_request(engine); if (request) { - i915_gem_context_mark_innocent(request->ctx); + i915_gem_context_mark_innocent(request->gem_context); dma_fence_set_error(&request->fence, -EAGAIN); /* Rewind the engine to replay the incomplete rq */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 671ffa37614e..269574b7254c 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1277,16 +1277,18 @@ static void error_record_engine_registers(struct i915_gpu_state *error, static void record_request(struct i915_request *request, struct drm_i915_error_request *erq) { - erq->context = request->ctx->hw_id; + struct i915_gem_context *ctx = request->gem_context; + + erq->context = ctx->hw_id; erq->sched_attr = request->sched.attr; - erq->ban_score = atomic_read(&request->ctx->ban_score); + erq->ban_score = atomic_read(&ctx->ban_score); erq->seqno = request->global_seqno; erq->jiffies = request->emitted_jiffies; erq->head = request->head; erq->tail = request->tail; rcu_read_lock(); - erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0; + erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0; rcu_read_unlock(); } @@ -1450,12 +1452,12 @@ static void gem_record_rings(struct i915_gpu_state *error) request = i915_gem_find_active_request(engine); if (request) { + struct i915_gem_context *ctx = request->gem_context; struct intel_ring *ring; - ee->vm = request->ctx->ppgtt ? - &request->ctx->ppgtt->base : &ggtt->base; + ee->vm = ctx->ppgtt ? 
&ctx->ppgtt->base : &ggtt->base; - record_context(&ee->context, request->ctx); + record_context(&ee->context, ctx); /* We need to copy these to an anonymous buffer * as the simplest method to avoid being overwritten @@ -1472,10 +1474,10 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->ctx = i915_error_object_create(i915, - request->ctx->engine[i].state); + ctx->engine[i].state); error->simulated |= - i915_gem_context_no_error_capture(request->ctx); + i915_gem_context_no_error_capture(ctx); ee->rq_head = request->head; ee->rq_post = request->postfix; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index b692a9f7c357..f913e56604ea 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -399,7 +399,7 @@ static void i915_request_retire(struct i915_request *request) i915_request_remove_from_client(request); /* Retirement decays the ban score as it is a sign of ctx progress */ - atomic_dec_if_positive(&request->ctx->ban_score); + atomic_dec_if_positive(&request->gem_context->ban_score); /* * The backing object for the context is done after switching to the @@ -411,7 +411,7 @@ static void i915_request_retire(struct i915_request *request) */ if (engine->last_retired_context) engine->context_unpin(engine, engine->last_retired_context); - engine->last_retired_context = request->ctx; + engine->last_retired_context = request->gem_context; spin_lock_irq(&request->lock); if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags)) @@ -732,7 +732,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) INIT_LIST_HEAD(&rq->active_list); rq->i915 = i915; rq->engine = engine; - rq->ctx = ctx; + rq->gem_context = ctx; rq->ring = ring; /* No zalloc, must clear what we need by hand */ @@ -1064,7 +1064,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches) */ rcu_read_lock(); if (engine->schedule) - engine->schedule(request, 
&request->ctx->sched); + engine->schedule(request, &request->gem_context->sched); rcu_read_unlock(); local_bh_disable(); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 8f31ca8272f8..6a029b3f0a88 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -92,7 +92,7 @@ struct i915_request { * i915_request_free() will then decrement the refcount on the * context. */ - struct i915_gem_context *ctx; + struct i915_gem_context *gem_context; struct intel_engine_cs *engine; struct intel_ring *ring; struct intel_timeline *timeline; diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 408827bf5d96..da38b295d69f 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -624,7 +624,7 @@ TRACE_EVENT(i915_request_queue, TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->ctx->hw_id; + __entry->hw_id = rq->gem_context->hw_id; __entry->ring = rq->engine->id; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; @@ -651,7 +651,7 @@ DECLARE_EVENT_CLASS(i915_request, TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->ctx->hw_id; + __entry->hw_id = rq->gem_context->hw_id; __entry->ring = rq->engine->id; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; @@ -792,7 +792,7 @@ TRACE_EVENT(i915_request_wait_begin, */ TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->ctx->hw_id; + __entry->hw_id = rq->gem_context->hw_id; __entry->ring = rq->engine->id; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index be608f7111f5..0248d64c2a72 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1010,7 +1010,7 @@ bool intel_engine_has_kernel_context(const struct 
intel_engine_cs *engine) */ rq = __i915_gem_active_peek(&engine->timeline->last_request); if (rq) - return rq->ctx == kernel_context; + return rq->gem_context == kernel_context; else return engine->last_retired_context == kernel_context; } diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 02da05875aa7..8527fa1f5c3e 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -511,8 +511,9 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) { struct intel_guc_client *client = guc->execbuf_client; struct intel_engine_cs *engine = rq->engine; - u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx, - engine)); + u32 ctx_desc = + lower_32_bits(intel_lr_context_descriptor(rq->gem_context, + engine)); u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); spin_lock(&client->wq_lock); @@ -707,7 +708,7 @@ static void guc_dequeue(struct intel_engine_cs *engine) struct i915_request *rq, *rn; list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { - if (last && rq->ctx != last->ctx) { + if (last && rq->gem_context != last->gem_context) { if (port == last_port) { __list_del_many(&p->requests, &rq->sched.link); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 029901a8fa38..0777226e65a6 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -414,9 +414,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) static u64 execlists_update_context(struct i915_request *rq) { - struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; + struct intel_context *ce = &rq->gem_context->engine[rq->engine->id]; struct i915_hw_ppgtt *ppgtt = - rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; + rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt; u32 *reg_state = ce->lrc_reg_state; reg_state[CTX_RING_TAIL+1] = 
intel_ring_set_tail(rq->ring, rq->tail); @@ -666,7 +666,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * second request, and so we never need to tell the * hardware about the first. */ - if (last && !can_merge_ctx(rq->ctx, last->ctx)) { + if (last && !can_merge_ctx(rq->gem_context, + last->gem_context)) { /* * If we are on the second port and cannot * combine this request with the last, then we @@ -685,14 +686,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * the same context (even though a different * request) to the second port. */ - if (ctx_single_port_submission(last->ctx) || - ctx_single_port_submission(rq->ctx)) { + if (ctx_single_port_submission(last->gem_context) || + ctx_single_port_submission(rq->gem_context)) { __list_del_many(&p->requests, &rq->sched.link); goto done; } - GEM_BUG_ON(last->ctx == rq->ctx); + GEM_BUG_ON(last->gem_context == rq->gem_context); if (submit) port_assign(port, last); @@ -1376,7 +1377,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine, static int execlists_request_alloc(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - struct intel_context *ce = &request->ctx->engine[engine->id]; + struct intel_context *ce = &request->gem_context->engine[engine->id]; int ret; GEM_BUG_ON(!ce->pin_count); @@ -1830,9 +1831,9 @@ static void reset_common_ring(struct intel_engine_cs *engine, * future request will be after userspace has had the opportunity * to recreate its own state. 
*/ - ce = &request->ctx->engine[engine->id]; + ce = &request->gem_context->engine[engine->id]; execlists_init_reg_state(ce->lrc_reg_state, - request->ctx, engine, ce->ring); + request->gem_context, engine, ce->ring); /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */ ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = @@ -1848,7 +1849,7 @@ static void reset_common_ring(struct intel_engine_cs *engine, static int intel_logical_ring_emit_pdps(struct i915_request *rq) { - struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; + struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; struct intel_engine_cs *engine = rq->engine; const int num_lri_cmds = GEN8_3LVL_PDPES * 2; u32 *cs; @@ -1887,15 +1888,15 @@ static int gen8_emit_bb_start(struct i915_request *rq, * it is unsafe in case of lite-restore (because the ctx is * not idle). PML4 is allocated during ppgtt init so this is * not needed in 48-bit.*/ - if (rq->ctx->ppgtt && - (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) && - !i915_vm_is_48bit(&rq->ctx->ppgtt->base) && + if (rq->gem_context->ppgtt && + (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) && + !i915_vm_is_48bit(&rq->gem_context->ppgtt->base) && !intel_vgpu_active(rq->i915)) { ret = intel_logical_ring_emit_pdps(rq); if (ret) return ret; - rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine); + rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine); } cs = intel_ring_begin(rq, 4); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c68ac605b8a9..3ea8eb5d49f5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -558,7 +558,8 @@ static void reset_ring_common(struct intel_engine_cs *engine, */ if (request) { struct drm_i915_private *dev_priv = request->i915; - struct intel_context *ce = &request->ctx->engine[engine->id]; + struct intel_context *ce = + &request->gem_context->engine[engine->id]; 
struct i915_hw_ppgtt *ppgtt; if (ce->state) { @@ -570,7 +571,7 @@ static void reset_ring_common(struct intel_engine_cs *engine, CCID_EN); } - ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt; + ppgtt = request->gem_context->ppgtt ?: engine->i915->mm.aliasing_ppgtt; if (ppgtt) { u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10; @@ -1427,7 +1428,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) *cs++ = MI_NOOP; *cs++ = MI_SET_CONTEXT; - *cs++ = i915_ggtt_offset(rq->ctx->engine[RCS].state) | flags; + *cs++ = i915_ggtt_offset(rq->gem_context->engine[RCS].state) | flags; /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP * WaMiSetContext_Hang:snb,ivb,vlv @@ -1495,7 +1496,7 @@ static int remap_l3(struct i915_request *rq, int slice) static int switch_context(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; - struct i915_gem_context *to_ctx = rq->ctx; + struct i915_gem_context *to_ctx = rq->gem_context; struct i915_hw_ppgtt *to_mm = to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; struct i915_gem_context *from_ctx = engine->legacy_active_context; @@ -1566,7 +1567,7 @@ static int ring_request_alloc(struct i915_request *request) { int ret; - GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count); + GEM_BUG_ON(!request->gem_context->engine[request->engine->id].pin_count); /* Flush enough space to reduce the likelihood of waiting after * we start building the request - in which case we will just diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index f7ee54e109ae..99679b5f97eb 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -102,7 +102,10 @@ static int emit_recurse_batch(struct hang *h, struct i915_request *rq) { struct drm_i915_private *i915 = h->i915; - struct i915_address_space *vm = rq->ctx->ppgtt ? 
&rq->ctx->ppgtt->base : &i915->ggtt.base; + struct i915_address_space *vm = + rq->gem_context->ppgtt ? + &rq->gem_context->ppgtt->base : + &i915->ggtt.base; struct i915_vma *hws, *vma; unsigned int flags; u32 *batch; diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index ee7e22d18ff8..20279547cb05 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -82,7 +82,7 @@ static int emit_recurse_batch(struct spinner *spin, struct i915_request *rq, u32 arbitration_command) { - struct i915_address_space *vm = &rq->ctx->ppgtt->base; + struct i915_address_space *vm = &rq->gem_context->ppgtt->base; struct i915_vma *hws, *vma; u32 *batch; int err;
In the next patch, we want to store the intel_context pointer inside i915_request, as it is frequently accessed via a convoluted dance when submitting the request to hw. Having two context pointers inside i915_request leads to confusion so first rename the existing i915_gem_context pointer to i915_request.gem_context. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> --- drivers/gpu/drm/i915/gvt/scheduler.c | 4 +-- drivers/gpu/drm/i915/i915_debugfs.c | 4 +-- drivers/gpu/drm/i915/i915_gem.c | 10 +++---- drivers/gpu/drm/i915/i915_gpu_error.c | 18 +++++++----- drivers/gpu/drm/i915/i915_request.c | 8 ++--- drivers/gpu/drm/i915/i915_request.h | 2 +- drivers/gpu/drm/i915/i915_trace.h | 6 ++-- drivers/gpu/drm/i915/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/intel_guc_submission.c | 7 +++-- drivers/gpu/drm/i915/intel_lrc.c | 29 ++++++++++--------- drivers/gpu/drm/i915/intel_ringbuffer.c | 11 +++---- .../gpu/drm/i915/selftests/intel_hangcheck.c | 5 +++- drivers/gpu/drm/i915/selftests/intel_lrc.c | 2 +- 13 files changed, 58 insertions(+), 50 deletions(-)