Message ID | 20191217012316.13271-4-daniele.ceraolospurio@intel.com (mailing list archive) |
---|---|
State | New, archived |
Series | [v2,1/7] drm/i915/guc: Merge communication_stop and communication_disable |
On Tue, 17 Dec 2019 02:23:13 +0100, Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> wrote:

> For better isolation of the request tracking from the rest of the
> CT-related data.
>
> v2: split to separate patch, move next_fence to substructure (Michal)
>
> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
> Cc: John Harrison <John.C.Harrison@Intel.com>
> Cc: Matthew Brost <matthew.brost@intel.com>
> ---

Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>

with some nits below (we may fix them later)

/snip/

> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
> b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
> index 6e3d789b9f01..29a026dc3a13 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
> @@ -48,12 +48,15 @@ struct intel_guc_ct {
>  	/* buffers for sending(0) and receiving(1) commands */
>  	struct intel_guc_ct_buffer ctbs[2];
>
> -	u32 next_fence; /* fence to be used with next send command */
> +	struct {
> +		u32 next_fence; /* fence to be used with next request to send */

nit: strictly speaking this is the "last" fence;
we just use it to generate the next one

> -	spinlock_t lock; /* protects pending requests list */
> -	struct list_head pending_requests; /* requests waiting for response */
> -	struct list_head incoming_requests; /* incoming requests */
> -	struct work_struct worker; /* handler for incoming requests */
> +		spinlock_t lock; /* protects pending requests list */

nit: do we want to use this lock to protect the "next/last" fence?
if yes, then maybe the lock should come first?

> +		struct list_head pending; /* requests waiting for response */
> +
> +		struct list_head incoming; /* incoming requests */
> +		struct work_struct worker; /* handler for incoming requests */
> +	} requests;
>  };
>
>  void intel_guc_ct_init_early(struct intel_guc_ct *ct);
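To make the "last fence" nit concrete: the field records the fence most recently handed out, and the pre-increment produces a fresh value for each new request. A minimal userspace sketch of just that semantic (get_next_fence() and struct fence_state are hypothetical stand-ins, not the driver code):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the requests substructure. */
struct fence_state {
	uint32_t next_fence;	/* strictly, the *last* fence handed out */
};

/* Mirrors ct_get_next_fence(): pre-increment yields a fresh fence. */
static uint32_t get_next_fence(struct fence_state *s)
{
	return ++s->next_fence;
}

int main(void)
{
	struct fence_state s = { .next_fence = 0 };
	uint32_t fence = get_next_fence(&s);

	/* Prints "fence=1 last=1": the stored value *is* the last fence
	 * issued, which is the reviewer's point. */
	printf("fence=%u last=%u\n", fence, s.next_fence);
	return 0;
}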
On 12/17/19 1:42 PM, Michal Wajdeczko wrote:
> On Tue, 17 Dec 2019 02:23:13 +0100, Daniele Ceraolo Spurio
> <daniele.ceraolospurio@intel.com> wrote:
>
>> For better isolation of the request tracking from the rest of the
>> CT-related data.
>>
>> v2: split to separate patch, move next_fence to substructure (Michal)
>>
>> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
>> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
>> Cc: John Harrison <John.C.Harrison@Intel.com>
>> Cc: Matthew Brost <matthew.brost@intel.com>
>> ---
>
> Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
>
> with some nits below (we may fix them later)
>
> /snip/
>
>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
>> b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
>> index 6e3d789b9f01..29a026dc3a13 100644
>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
>> @@ -48,12 +48,15 @@ struct intel_guc_ct {
>>  	/* buffers for sending(0) and receiving(1) commands */
>>  	struct intel_guc_ct_buffer ctbs[2];
>>
>> -	u32 next_fence; /* fence to be used with next send command */
>> +	struct {
>> +		u32 next_fence; /* fence to be used with next request to send */
>
> nit: strictly speaking this is the "last" fence;
> we just use it to generate the next one
>
>> -	spinlock_t lock; /* protects pending requests list */
>> -	struct list_head pending_requests; /* requests waiting for
>> response */
>> -	struct list_head incoming_requests; /* incoming requests */
>> -	struct work_struct worker; /* handler for incoming requests */
>> +		spinlock_t lock; /* protects pending requests list */
>
> nit: do we want to use this lock to protect the "next/last" fence?
> if yes, then maybe the lock should come first?

We currently only touch this while holding send_mutex, so we don't need
the spinlock as well. We can move it later if we ever re-organize the
locking structure.

Daniele

>
>> +		struct list_head pending; /* requests waiting for response */
>> +
>> +		struct list_head incoming; /* incoming requests */
>> +		struct work_struct worker; /* handler for incoming requests */
>> +	} requests;
>>  };
>>
>>  void intel_guc_ct_init_early(struct intel_guc_ct *ct);
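For context, a simplified model of the locking split described in the reply: a mutex that serializes senders (as the driver's send_mutex does) protects the fence counter, while a separate lock guards only the pending list. This is a hedged userspace sketch using pthread primitives, with illustrative names, not the actual driver code:

#include <pthread.h>
#include <stdint.h>

/* Simplified model of the CT locking: one mutex serializes senders,
 * one lock guards the pending-requests list. Names are illustrative. */
struct ct_model {
	pthread_mutex_t send_mutex;	/* serializes send() callers */
	pthread_mutex_t requests_lock;	/* guards the pending list only */
	uint32_t next_fence;		/* touched only under send_mutex */
};

static uint32_t model_send(struct ct_model *ct)
{
	uint32_t fence;

	pthread_mutex_lock(&ct->send_mutex);

	/* Fence generation needs no requests_lock: send_mutex already
	 * guarantees a single writer, which is the point made above. */
	fence = ++ct->next_fence;

	pthread_mutex_lock(&ct->requests_lock);
	/* ... add the request to the pending list here ... */
	pthread_mutex_unlock(&ct->requests_lock);

	pthread_mutex_unlock(&ct->send_mutex);
	return fence;
}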
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 4e20f6c48a4f..f22cd9b2311b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -37,10 +37,10 @@ static void ct_incoming_request_worker_func(struct work_struct *w);
  */
 void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 {
-	spin_lock_init(&ct->lock);
-	INIT_LIST_HEAD(&ct->pending_requests);
-	INIT_LIST_HEAD(&ct->incoming_requests);
-	INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
+	spin_lock_init(&ct->requests.lock);
+	INIT_LIST_HEAD(&ct->requests.pending);
+	INIT_LIST_HEAD(&ct->requests.incoming);
+	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
 }
 
 static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
@@ -267,7 +267,7 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
 static u32 ct_get_next_fence(struct intel_guc_ct *ct)
 {
 	/* For now it's trivial */
-	return ++ct->next_fence;
+	return ++ct->requests.next_fence;
 }
 
 /**
@@ -465,9 +465,9 @@ static int ct_send(struct intel_guc_ct *ct,
 	request.response_len = response_buf_size;
 	request.response_buf = response_buf;
 
-	spin_lock_irqsave(&ct->lock, flags);
-	list_add_tail(&request.link, &ct->pending_requests);
-	spin_unlock_irqrestore(&ct->lock, flags);
+	spin_lock_irqsave(&ct->requests.lock, flags);
+	list_add_tail(&request.link, &ct->requests.pending);
+	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
 	err = ctb_write(ctb, action, len, fence, !!response_buf);
 	if (unlikely(err))
@@ -500,9 +500,9 @@ static int ct_send(struct intel_guc_ct *ct,
 	}
 
 unlink:
-	spin_lock_irqsave(&ct->lock, flags);
+	spin_lock_irqsave(&ct->requests.lock, flags);
 	list_del(&request.link);
-	spin_unlock_irqrestore(&ct->lock, flags);
+	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
 	return err;
 }
@@ -650,8 +650,8 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 
 	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
 
-	spin_lock(&ct->lock);
-	list_for_each_entry(req, &ct->pending_requests, link) {
+	spin_lock(&ct->requests.lock);
+	list_for_each_entry(req, &ct->requests.pending, link) {
 		if (unlikely(fence != req->fence)) {
 			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
 					req->fence);
@@ -669,7 +669,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 		found = true;
 		break;
 	}
-	spin_unlock(&ct->lock);
+	spin_unlock(&ct->requests.lock);
 
 	if (!found)
 		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
@@ -707,13 +707,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
 	u32 *payload;
 	bool done;
 
-	spin_lock_irqsave(&ct->lock, flags);
-	request = list_first_entry_or_null(&ct->incoming_requests,
+	spin_lock_irqsave(&ct->requests.lock, flags);
+	request = list_first_entry_or_null(&ct->requests.incoming,
 					   struct ct_incoming_request, link);
 	if (request)
 		list_del(&request->link);
-	done = !!list_empty(&ct->incoming_requests);
-	spin_unlock_irqrestore(&ct->lock, flags);
+	done = !!list_empty(&ct->requests.incoming);
+	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
 	if (!request)
 		return true;
@@ -731,12 +731,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
 
 static void ct_incoming_request_worker_func(struct work_struct *w)
 {
-	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
+	struct intel_guc_ct *ct =
+		container_of(w, struct intel_guc_ct, requests.worker);
 	bool done;
 
 	done = ct_process_incoming_requests(ct);
 	if (!done)
-		queue_work(system_unbound_wq, &ct->worker);
+		queue_work(system_unbound_wq, &ct->requests.worker);
 }
 
 /**
@@ -774,11 +775,11 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
 	}
 	memcpy(request->msg, msg, 4 * msglen);
 
-	spin_lock_irqsave(&ct->lock, flags);
-	list_add_tail(&request->link, &ct->incoming_requests);
-	spin_unlock_irqrestore(&ct->lock, flags);
+	spin_lock_irqsave(&ct->requests.lock, flags);
+	list_add_tail(&request->link, &ct->requests.incoming);
+	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
-	queue_work(system_unbound_wq, &ct->worker);
+	queue_work(system_unbound_wq, &ct->requests.worker);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 6e3d789b9f01..29a026dc3a13 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -48,12 +48,15 @@ struct intel_guc_ct {
 	/* buffers for sending(0) and receiving(1) commands */
 	struct intel_guc_ct_buffer ctbs[2];
 
-	u32 next_fence; /* fence to be used with next send command */
+	struct {
+		u32 next_fence; /* fence to be used with next request to send */
 
-	spinlock_t lock; /* protects pending requests list */
-	struct list_head pending_requests; /* requests waiting for response */
-	struct list_head incoming_requests; /* incoming requests */
-	struct work_struct worker; /* handler for incoming requests */
+		spinlock_t lock; /* protects pending requests list */
+		struct list_head pending; /* requests waiting for response */
+
+		struct list_head incoming; /* incoming requests */
+		struct work_struct worker; /* handler for incoming requests */
+	} requests;
 };
 
 void intel_guc_ct_init_early(struct intel_guc_ct *ct);
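One pattern worth noting in the hunks above: ct_incoming_request_worker_func() handles a single request per invocation and re-queues itself while the list is non-empty, instead of draining the list in a loop, so a burst of incoming requests cannot monopolize the unbound workqueue. A kernel-style sketch of the same pattern in isolation (the demo_* names are hypothetical, not the driver code):

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_item {
	struct list_head link;
};

struct demo_state {
	spinlock_t lock;		/* guards @incoming */
	struct list_head incoming;
	struct work_struct worker;
};

/* Pop and handle one item; return true once the list is drained. */
static bool demo_process_one(struct demo_state *s)
{
	struct demo_item *item;
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&s->lock, flags);
	item = list_first_entry_or_null(&s->incoming, struct demo_item, link);
	if (item)
		list_del(&item->link);
	done = list_empty(&s->incoming);
	spin_unlock_irqrestore(&s->lock, flags);

	if (!item)
		return true;

	/* ... handle the item outside the lock ... */
	kfree(item);
	return done;
}

static void demo_worker_func(struct work_struct *w)
{
	struct demo_state *s = container_of(w, struct demo_state, worker);

	/* Re-queue instead of looping: yields the worker between items. */
	if (!demo_process_one(s))
		queue_work(system_unbound_wq, &s->worker);
}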
For better isolation of the request tracking from the rest of the
CT-related data.

v2: split to separate patch, move next_fence to substructure (Michal)

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 47 ++++++++++++-----------
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h | 13 ++++---
 2 files changed, 32 insertions(+), 28 deletions(-)