Message ID: 20180323144728.61548-9-michal.wajdeczko@intel.com (mailing list archive)
State: New, archived
On 3/23/2018 7:47 AM, Michal Wajdeczko wrote: > Instead of returning small data in response status dword, > GuC may append longer data as response message payload. > If caller provides response buffer, we will copy received > data and use number of received data dwords as new success > return value. We will WARN if response from GuC does not > match caller expectation. > > v2: fix timeout and checkpatch warnings (Michal) > > Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com> > Cc: Oscar Mateo <oscar.mateo@intel.com> > Cc: Michel Thierry <michel.thierry@intel.com> > Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> > --- > drivers/gpu/drm/i915/intel_guc_ct.c | 137 ++++++++++++++++++++++++++++++++---- > drivers/gpu/drm/i915/intel_guc_ct.h | 5 ++ > 2 files changed, 128 insertions(+), 14 deletions(-) > > diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c > index b1ab28f..6a9aad0 100644 > --- a/drivers/gpu/drm/i915/intel_guc_ct.c > +++ b/drivers/gpu/drm/i915/intel_guc_ct.c > @@ -24,6 +24,14 @@ > #include "i915_drv.h" > #include "intel_guc_ct.h" > > +struct ct_request { > + struct list_head link; > + u32 fence; > + u32 status; > + u32 response_len; > + u32 *response_buf; > +}; > + > enum { CTB_SEND = 0, CTB_RECV = 1 }; > > enum { CTB_OWNER_HOST = 0 }; > @@ -36,6 +44,9 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct) > { > /* we're using static channel owners */ > ct->host_channel.owner = CTB_OWNER_HOST; > + > + spin_lock_init(&ct->lock); > + INIT_LIST_HEAD(&ct->pending_requests); > } > > static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) > @@ -276,7 +287,8 @@ static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch) > static int ctb_write(struct intel_guc_ct_buffer *ctb, > const u32 *action, > u32 len /* in dwords */, > - u32 fence) > + u32 fence, > + bool want_response) > { > struct guc_ct_buffer_desc *desc = ctb->desc; > u32 head = desc->head / 4; /* in dwords */ > @@ -312,6 +324,7 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb, > */ > header = (len << GUC_CT_MSG_LEN_SHIFT) | > (GUC_CT_MSG_WRITE_FENCE_TO_DESC) | > + (want_response ? GUC_CT_MSG_SEND_STATUS : 0) | > (action[0] << GUC_CT_MSG_ACTION_SHIFT); > > cmds[tail] = header; > @@ -380,36 +393,104 @@ static int wait_for_desc_update(struct guc_ct_buffer_desc *desc, > return err; > } > > -static int ctch_send(struct intel_guc *guc, > +/** > + * wait_for_response_msg - Wait for the Guc response. > + * @req: pointer to pending request > + * @status: placeholder for status > + * > + * We will update request status from the response message handler. > + * Returns: > + * * 0 response received (status is valid) > + * * -ETIMEDOUT no response within hardcoded timeout > + */ > +static int wait_for_response_msg(struct ct_request *req, u32 *status) > +{ > + int err; > + > + /* > + * Fast commands should complete in less than 10us, so sample quickly > + * up to that length of time, then switch to a slower sleep-wait loop. > + * No GuC command should ever take longer than 10ms. 
> + */ > +#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status)) > + err = wait_for_us(done, 10); > + if (err) > + err = wait_for(done, 10); > +#undef done > + > + if (unlikely(err)) > + DRM_ERROR("CT: fence %u err %d\n", req->fence, err); > + > + *status = req->status; > + return err; > +} > + > +static int ctch_send(struct intel_guc_ct *ct, > struct intel_guc_ct_channel *ctch, > const u32 *action, > u32 len, > + u32 *response_buf, > + u32 response_buf_size, > u32 *status) > { > struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND]; > struct guc_ct_buffer_desc *desc = ctb->desc; > + struct ct_request request; > + unsigned long flags; > u32 fence; > int err; > > GEM_BUG_ON(!ctch_is_open(ctch)); > GEM_BUG_ON(!len); > GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); > + GEM_BUG_ON(!response_buf && response_buf_size); > > fence = ctch_get_next_fence(ctch); > - err = ctb_write(ctb, action, len, fence); > + request.fence = fence; > + request.status = 0; > + request.response_len = response_buf_size; > + request.response_buf = response_buf; > + > + spin_lock_irqsave(&ct->lock, flags); > + list_add_tail(&request.link, &ct->pending_requests); > + spin_unlock_irqrestore(&ct->lock, flags); > + > + err = ctb_write(ctb, action, len, fence, !!response_buf); > if (unlikely(err)) > - return err; > + goto unlink; > > - intel_guc_notify(guc); > + intel_guc_notify(ct_to_guc(ct)); > > - err = wait_for_desc_update(desc, fence, status); > + if (response_buf) > + err = wait_for_response_msg(&request, status); > + else > + err = wait_for_desc_update(desc, fence, status); > if (unlikely(err)) > - return err; > - if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) > - return -EIO; > + goto unlink; > + > + if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) { > + err = -EIO; > + goto unlink; > + } > + > + if (response_buf) { > + /* There shall be no data in the status */ > + WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status)); > + /* Return actual response len */ > + err = request.response_len; > + } else { > + /* There shall be no response payload */ > + WARN_ON(request.response_len); > + /* Return data decoded from the status dword */ > + err = INTEL_GUC_MSG_TO_DATA(*status); > + } > > - /* Use data from the GuC status as our return value */ > - return INTEL_GUC_MSG_TO_DATA(*status); > +unlink: > + spin_lock_irqsave(&ct->lock, flags); > + list_del(&request.link); > + spin_unlock_irqrestore(&ct->lock, flags); > + > + return err; > } > > /* > @@ -418,13 +499,15 @@ static int ctch_send(struct intel_guc *guc, > static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, > u32 *response_buf, u32 response_buf_size) > { > - struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; > + struct intel_guc_ct *ct = &guc->ct; > + struct intel_guc_ct_channel *ctch = &ct->host_channel; > u32 status = ~0; /* undefined */ > int ret; > > mutex_lock(&guc->send_mutex); > > - ret = ctch_send(guc, ctch, action, len, &status); > + ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size, > + &status); > if (unlikely(ret < 0)) { > DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", > action[0], ret, status); > @@ -503,8 +586,13 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) > static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) > { > u32 header = msg[0]; > + u32 fence = msg[1]; > u32 status = msg[2]; > u32 len = ct_header_get_len(header) + 1; /* total len with header */ > + u32 payload_len = len - 3; /* len<3 is treated as protocol error */ > + struct ct_request 
*req; > + bool found = false; > + unsigned long flags; > > GEM_BUG_ON(!ct_header_is_response(header)); > > @@ -518,8 +606,29 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) > DRM_ERROR("CT: corrupted response %*phn\n", 4*len, msg); > return -EPROTO; > } > + spin_lock_irqsave(&ct->lock, flags); > + list_for_each_entry(req, &ct->pending_requests, link) { > + if (req->fence != fence) { > + DRM_DEBUG_DRIVER("CT: request %u awaits response\n", > + req->fence); > + continue; > + } > + if (unlikely(payload_len > req->response_len)) { > + DRM_ERROR("CT: response %u too long %*phn\n", > + req->fence, 4*len, msg); > + payload_len = 0; > + } > + if (payload_len) > + memcpy(req->response_buf, msg + 3, 4*payload_len); > + req->response_len = payload_len; > + WRITE_ONCE(req->status, status); > + found = true; > + break; > + } > + spin_unlock_irqrestore(&ct->lock, flags); > > - /* XXX */ > + if (!found) > + DRM_ERROR("CT: unsolicited response %*phn\n", 4*len, msg); > return 0; > } > > diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h > index 595c8ad..905566b 100644 > --- a/drivers/gpu/drm/i915/intel_guc_ct.h > +++ b/drivers/gpu/drm/i915/intel_guc_ct.h > @@ -71,10 +71,15 @@ struct intel_guc_ct_channel { > /** Holds all command transport channels. > * > * @host_channel: main channel used by the host > + * @lock: spin lock for pending requests list > + * @pending_requests: list of pending requests > */ > struct intel_guc_ct { > struct intel_guc_ct_channel host_channel; > /* other channels are tbd */ > + > + spinlock_t lock; I know you documented what this lock is for in the header, but checkpatch.pl wants it on the line before the definition :| And there are a couple of 4*len and 4*payload_len that checkpatch flags as well. Reviewed-by: Michel Thierry <michel.thierry@intel.com> > + struct list_head pending_requests; > }; > > void intel_guc_ct_init_early(struct intel_guc_ct *ct); >
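For reference, the checkpatch-friendly shape being asked for above is presumably along these lines; a minimal sketch, assuming the checks firing are "spinlock_t definition without comment" and the spaces-preferred-around-'*' check, so the respin may well format it differently:

    struct intel_guc_ct {
            struct intel_guc_ct_channel host_channel;
            /* other channels are tbd */

            /* protects the list of pending requests; a comment placed
             * right before the definition keeps checkpatch quiet */
            spinlock_t lock;
            struct list_head pending_requests;
    };

The 4*len and 4*payload_len occurrences would then be written as 4 * len and 4 * payload_len to satisfy checkpatch --strict.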
On Fri, Mar 23, 2018 at 02:47:23PM +0000, Michal Wajdeczko wrote: > Instead of returning small data in response status dword, > GuC may append longer data as response message payload. > If caller provides response buffer, we will copy received > data and use number of received data dwords as new success > return value. We will WARN if response from GuC does not > match caller expectation. > > v2: fix timeout and checkpatch warnings (Michal) > > Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com> > Cc: Oscar Mateo <oscar.mateo@intel.com> > Cc: Michel Thierry <michel.thierry@intel.com> > Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> > --- > drivers/gpu/drm/i915/intel_guc_ct.c | 137 ++++++++++++++++++++++++++++++++---- > drivers/gpu/drm/i915/intel_guc_ct.h | 5 ++ > 2 files changed, 128 insertions(+), 14 deletions(-) > [SNIP] > @@ -418,13 +499,15 @@ static int ctch_send(struct intel_guc *guc, > static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, > u32 *response_buf, u32 response_buf_size) > { > - struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; > + struct intel_guc_ct *ct = &guc->ct; > + struct intel_guc_ct_channel *ctch = &ct->host_channel; > u32 status = ~0; /* undefined */ > int ret; > > mutex_lock(&guc->send_mutex); > > - ret = ctch_send(guc, ctch, action, len, &status); > + ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size, > + &status); > if (unlikely(ret < 0)) { > DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", > action[0], ret, status); > @@ -503,8 +586,13 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) > static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) > { > u32 header = msg[0]; > + u32 fence = msg[1]; > u32 status = msg[2]; > u32 len = ct_header_get_len(header) + 1; /* total len with header */ > + u32 payload_len = len - 3; /* len<3 is treated as protocol error */ Magic numbers, please ether define 3 as min payload length or hide this behind macro. > + struct ct_request *req; > + bool found = false; > + unsigned long flags; > > GEM_BUG_ON(!ct_header_is_response(header)); > > @@ -518,8 +606,29 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) > DRM_ERROR("CT: corrupted response %*phn\n", 4*len, msg); > return -EPROTO; > } > + spin_lock_irqsave(&ct->lock, flags); Isn't this called from the irq? We can use plain spin_lock here. > + list_for_each_entry(req, &ct->pending_requests, link) { > + if (req->fence != fence) { > + DRM_DEBUG_DRIVER("CT: request %u awaits response\n", > + req->fence); > + continue; Is this expected? In other words - do we expect out of order responses? Can we extract this into a helper (find request)? -Michał > + } > + if (unlikely(payload_len > req->response_len)) { > + DRM_ERROR("CT: response %u too long %*phn\n", > + req->fence, 4*len, msg); > + payload_len = 0; > + } > + if (payload_len) > + memcpy(req->response_buf, msg + 3, 4*payload_len); > + req->response_len = payload_len; > + WRITE_ONCE(req->status, status); > + found = true; > + break; > + } > + spin_unlock_irqrestore(&ct->lock, flags); > > - /* XXX */ > + if (!found) > + DRM_ERROR("CT: unsolicited response %*phn\n", 4*len, msg); > return 0; > } > > diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h > index 595c8ad..905566b 100644 > --- a/drivers/gpu/drm/i915/intel_guc_ct.h > +++ b/drivers/gpu/drm/i915/intel_guc_ct.h > @@ -71,10 +71,15 @@ struct intel_guc_ct_channel { > /** Holds all command transport channels. 
> * > * @host_channel: main channel used by the host > + * @lock: spin lock for pending requests list > + * @pending_requests: list of pending requests > */ > struct intel_guc_ct { > struct intel_guc_ct_channel host_channel; > /* other channels are tbd */ > + > + spinlock_t lock; > + struct list_head pending_requests; > }; > > void intel_guc_ct_init_early(struct intel_guc_ct *ct); > -- > 1.9.1 > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@lists.freedesktop.org > https://lists.freedesktop.org/mailman/listinfo/intel-gfx
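As a purely illustrative take on the magic-number remark, the minimum length could be named after the message layout; the macro name below is invented for this sketch and is not from the patch:

    /*
     * A CT response message is at least 3 dwords long:
     *   msg[0] = header, msg[1] = fence, msg[2] = status, msg[3..] = payload
     */
    #define GUC_CT_MSG_RESPONSE_MIN_LEN     3       /* hypothetical name */

ct_handle_response() would then test len against that constant and compute payload_len = len - GUC_CT_MSG_RESPONSE_MIN_LEN instead of using bare 3s.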
On Mon, 26 Mar 2018, Michał Winiarski <michal.winiarski@intel.com> wrote: > On Fri, Mar 23, 2018 at 02:47:23PM +0000, Michal Wajdeczko wrote: >> Instead of returning small data in response status dword, >> GuC may append longer data as response message payload. >> If caller provides response buffer, we will copy received >> data and use number of received data dwords as new success >> return value. We will WARN if response from GuC does not >> match caller expectation. >> >> v2: fix timeout and checkpatch warnings (Michal) >> >> Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com> >> Cc: Oscar Mateo <oscar.mateo@intel.com> >> Cc: Michel Thierry <michel.thierry@intel.com> >> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> >> --- >> drivers/gpu/drm/i915/intel_guc_ct.c | 137 ++++++++++++++++++++++++++++++++---- >> drivers/gpu/drm/i915/intel_guc_ct.h | 5 ++ >> 2 files changed, 128 insertions(+), 14 deletions(-) >> > > [SNIP] > >> @@ -418,13 +499,15 @@ static int ctch_send(struct intel_guc *guc, >> static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, >> u32 *response_buf, u32 response_buf_size) >> { >> - struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; >> + struct intel_guc_ct *ct = &guc->ct; >> + struct intel_guc_ct_channel *ctch = &ct->host_channel; >> u32 status = ~0; /* undefined */ >> int ret; >> >> mutex_lock(&guc->send_mutex); >> >> - ret = ctch_send(guc, ctch, action, len, &status); >> + ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size, >> + &status); >> if (unlikely(ret < 0)) { >> DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", >> action[0], ret, status); >> @@ -503,8 +586,13 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) >> static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) >> { >> u32 header = msg[0]; >> + u32 fence = msg[1]; >> u32 status = msg[2]; >> u32 len = ct_header_get_len(header) + 1; /* total len with header */ >> + u32 payload_len = len - 3; /* len<3 is treated as protocol error */ > > Magic numbers, please ether define 3 as min payload length or hide this behind > macro. And check len >= 3 before substracting 3. BR, Jani. > >> + struct ct_request *req; >> + bool found = false; >> + unsigned long flags; >> >> GEM_BUG_ON(!ct_header_is_response(header)); >> >> @@ -518,8 +606,29 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) >> DRM_ERROR("CT: corrupted response %*phn\n", 4*len, msg); >> return -EPROTO; >> } >> + spin_lock_irqsave(&ct->lock, flags); > > Isn't this called from the irq? We can use plain spin_lock here. > >> + list_for_each_entry(req, &ct->pending_requests, link) { >> + if (req->fence != fence) { >> + DRM_DEBUG_DRIVER("CT: request %u awaits response\n", >> + req->fence); >> + continue; > > Is this expected? > In other words - do we expect out of order responses? > Can we extract this into a helper (find request)? 
> > -Michał > >> + } >> + if (unlikely(payload_len > req->response_len)) { >> + DRM_ERROR("CT: response %u too long %*phn\n", >> + req->fence, 4*len, msg); >> + payload_len = 0; >> + } >> + if (payload_len) >> + memcpy(req->response_buf, msg + 3, 4*payload_len); >> + req->response_len = payload_len; >> + WRITE_ONCE(req->status, status); >> + found = true; >> + break; >> + } >> + spin_unlock_irqrestore(&ct->lock, flags); >> >> - /* XXX */ >> + if (!found) >> + DRM_ERROR("CT: unsolicited response %*phn\n", 4*len, msg); >> return 0; >> } >> >> diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h >> index 595c8ad..905566b 100644 >> --- a/drivers/gpu/drm/i915/intel_guc_ct.h >> +++ b/drivers/gpu/drm/i915/intel_guc_ct.h >> @@ -71,10 +71,15 @@ struct intel_guc_ct_channel { >> /** Holds all command transport channels. >> * >> * @host_channel: main channel used by the host >> + * @lock: spin lock for pending requests list >> + * @pending_requests: list of pending requests >> */ >> struct intel_guc_ct { >> struct intel_guc_ct_channel host_channel; >> /* other channels are tbd */ >> + >> + spinlock_t lock; >> + struct list_head pending_requests; >> }; >> >> void intel_guc_ct_init_early(struct intel_guc_ct *ct); >> -- >> 1.9.1 >> >> _______________________________________________ >> Intel-gfx mailing list >> Intel-gfx@lists.freedesktop.org >> https://lists.freedesktop.org/mailman/listinfo/intel-gfx > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@lists.freedesktop.org > https://lists.freedesktop.org/mailman/listinfo/intel-gfx
On Mon, 26 Mar 2018 17:29:32 +0200, Michał Winiarski <michal.winiarski@intel.com> wrote: > On Fri, Mar 23, 2018 at 02:47:23PM +0000, Michal Wajdeczko wrote: >> Instead of returning small data in response status dword, >> GuC may append longer data as response message payload. >> If caller provides response buffer, we will copy received >> data and use number of received data dwords as new success >> return value. We will WARN if response from GuC does not >> match caller expectation. >> >> v2: fix timeout and checkpatch warnings (Michal) >> >> Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com> >> Cc: Oscar Mateo <oscar.mateo@intel.com> >> Cc: Michel Thierry <michel.thierry@intel.com> >> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> >> --- >> drivers/gpu/drm/i915/intel_guc_ct.c | 137 >> ++++++++++++++++++++++++++++++++---- >> drivers/gpu/drm/i915/intel_guc_ct.h | 5 ++ >> 2 files changed, 128 insertions(+), 14 deletions(-) >> > > [SNIP] > >> @@ -418,13 +499,15 @@ static int ctch_send(struct intel_guc *guc, >> static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, >> u32 len, >> u32 *response_buf, u32 response_buf_size) >> { >> - struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; >> + struct intel_guc_ct *ct = &guc->ct; >> + struct intel_guc_ct_channel *ctch = &ct->host_channel; >> u32 status = ~0; /* undefined */ >> int ret; >> >> mutex_lock(&guc->send_mutex); >> >> - ret = ctch_send(guc, ctch, action, len, &status); >> + ret = ctch_send(ct, ctch, action, len, response_buf, >> response_buf_size, >> + &status); >> if (unlikely(ret < 0)) { >> DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", >> action[0], ret, status); >> @@ -503,8 +586,13 @@ static int ctb_read(struct intel_guc_ct_buffer >> *ctb, u32 *data) >> static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) >> { >> u32 header = msg[0]; >> + u32 fence = msg[1]; >> u32 status = msg[2]; >> u32 len = ct_header_get_len(header) + 1; /* total len with header */ >> + u32 payload_len = len - 3; /* len<3 is treated as protocol error */ > > Magic numbers, please ether define 3 as min payload length or hide this > behind > macro. Well, it looks that detailed CTB documentation can't wait any more... (simplified doc was already there: /* Response message shall at least include header, fence and status */ I'll try to include more docs in next series spin to make these numbers more clear for everyone > >> + struct ct_request *req; >> + bool found = false; >> + unsigned long flags; >> >> GEM_BUG_ON(!ct_header_is_response(header)); >> >> @@ -518,8 +606,29 @@ static int ct_handle_response(struct intel_guc_ct >> *ct, const u32 *msg) >> DRM_ERROR("CT: corrupted response %*phn\n", 4*len, msg); >> return -EPROTO; >> } >> + spin_lock_irqsave(&ct->lock, flags); > > Isn't this called from the irq? We can use plain spin_lock here. yes, will update, thanks > >> + list_for_each_entry(req, &ct->pending_requests, link) { >> + if (req->fence != fence) { >> + DRM_DEBUG_DRIVER("CT: request %u awaits response\n", >> + req->fence); >> + continue; > > Is this expected? rather not, tempting to mark that with 'unlikely' > In other words - do we expect out of order responses? not today, since we're using send_mutex to serialize all requests (serialization was required for MMIO based comm, but not for CT) > Can we extract this into a helper (find request)? will try, but can't promise thanks, /m
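Sketching what such a helper might look like once the plain spin_lock and the 'unlikely' are folded in; the helper name is made up here and this is not the final code:

    /*
     * Called from the CT event handler (irq context), hence the plain
     * spin_lock. Completing the request under ct->lock keeps it safe
     * against the waiter timing out and unlinking the stack-allocated
     * request concurrently.
     */
    static bool ct_complete_request(struct intel_guc_ct *ct, u32 fence,
                                    u32 status, const u32 *payload,
                                    u32 payload_len)
    {
            struct ct_request *req;
            bool found = false;

            spin_lock(&ct->lock);
            list_for_each_entry(req, &ct->pending_requests, link) {
                    if (unlikely(req->fence != fence)) {
                            DRM_DEBUG_DRIVER("CT: request %u awaits response\n",
                                             req->fence);
                            continue;
                    }
                    if (unlikely(payload_len > req->response_len)) {
                            DRM_ERROR("CT: response %u too long\n", req->fence);
                            payload_len = 0;
                    }
                    if (payload_len)
                            memcpy(req->response_buf, payload, 4 * payload_len);
                    req->response_len = payload_len;
                    WRITE_ONCE(req->status, status);
                    found = true;
                    break;
            }
            spin_unlock(&ct->lock);

            return found;
    }

ct_handle_response() would then shrink to parsing the message and calling this with msg + 3 and the payload length.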
On Mon, 26 Mar 2018 17:35:00 +0200, Jani Nikula <jani.nikula@linux.intel.com> wrote: > On Mon, 26 Mar 2018, Michał Winiarski <michal.winiarski@intel.com> wrote: >> On Fri, Mar 23, 2018 at 02:47:23PM +0000, Michal Wajdeczko wrote: >>> Instead of returning small data in response status dword, >>> GuC may append longer data as response message payload. >>> If caller provides response buffer, we will copy received >>> data and use number of received data dwords as new success >>> return value. We will WARN if response from GuC does not >>> match caller expectation. >>> >>> v2: fix timeout and checkpatch warnings (Michal) >>> >>> Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com> >>> Cc: Oscar Mateo <oscar.mateo@intel.com> >>> Cc: Michel Thierry <michel.thierry@intel.com> >>> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> >>> --- >>> drivers/gpu/drm/i915/intel_guc_ct.c | 137 >>> ++++++++++++++++++++++++++++++++---- >>> drivers/gpu/drm/i915/intel_guc_ct.h | 5 ++ >>> 2 files changed, 128 insertions(+), 14 deletions(-) >>> >> >> [SNIP] >> >>> @@ -418,13 +499,15 @@ static int ctch_send(struct intel_guc *guc, >>> static int intel_guc_send_ct(struct intel_guc *guc, const u32 >>> *action, u32 len, >>> u32 *response_buf, u32 response_buf_size) >>> { >>> - struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; >>> + struct intel_guc_ct *ct = &guc->ct; >>> + struct intel_guc_ct_channel *ctch = &ct->host_channel; >>> u32 status = ~0; /* undefined */ >>> int ret; >>> >>> mutex_lock(&guc->send_mutex); >>> >>> - ret = ctch_send(guc, ctch, action, len, &status); >>> + ret = ctch_send(ct, ctch, action, len, response_buf, >>> response_buf_size, >>> + &status); >>> if (unlikely(ret < 0)) { >>> DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", >>> action[0], ret, status); >>> @@ -503,8 +586,13 @@ static int ctb_read(struct intel_guc_ct_buffer >>> *ctb, u32 *data) >>> static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) >>> { >>> u32 header = msg[0]; >>> + u32 fence = msg[1]; >>> u32 status = msg[2]; >>> u32 len = ct_header_get_len(header) + 1; /* total len with header */ >>> + u32 payload_len = len - 3; /* len<3 is treated as protocol error */ >> >> Magic numbers, please ether define 3 as min payload length or hide this >> behind >> macro. > > And check len >= 3 before substracting 3. > This was speculative, same as reading 'fence' and 'status', hence comment about protocol error, but also note that all of them were used after proper check for unlikely len<3 condition later in the function. I will reorder existing code to avoid such tricks /m
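The promised reordering could plausibly end up looking like this; only a sketch, reusing the hypothetical ct_complete_request() helper from the previous sketch:

    static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
    {
            u32 header = msg[0];
            u32 len = ct_header_get_len(header) + 1; /* total len with header */
            u32 fence, status, payload_len;

            GEM_BUG_ON(!ct_header_is_response(header));

            /* Response message shall at least include header, fence and status */
            if (unlikely(len < 3)) {
                    DRM_ERROR("CT: corrupted response %*phn\n", 4 * len, msg);
                    return -EPROTO;
            }

            /* Read the remaining fields only once the length is known to be sane */
            fence = msg[1];
            status = msg[2];
            payload_len = len - 3;

            if (!ct_complete_request(ct, fence, status, msg + 3, payload_len))
                    DRM_ERROR("CT: unsolicited response %*phn\n", 4 * len, msg);

            return 0;
    }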
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c index b1ab28f..6a9aad0 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/intel_guc_ct.c @@ -24,6 +24,14 @@ #include "i915_drv.h" #include "intel_guc_ct.h" +struct ct_request { + struct list_head link; + u32 fence; + u32 status; + u32 response_len; + u32 *response_buf; +}; + enum { CTB_SEND = 0, CTB_RECV = 1 }; enum { CTB_OWNER_HOST = 0 }; @@ -36,6 +44,9 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct) { /* we're using static channel owners */ ct->host_channel.owner = CTB_OWNER_HOST; + + spin_lock_init(&ct->lock); + INIT_LIST_HEAD(&ct->pending_requests); } static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) @@ -276,7 +287,8 @@ static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch) static int ctb_write(struct intel_guc_ct_buffer *ctb, const u32 *action, u32 len /* in dwords */, - u32 fence) + u32 fence, + bool want_response) { struct guc_ct_buffer_desc *desc = ctb->desc; u32 head = desc->head / 4; /* in dwords */ @@ -312,6 +324,7 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb, */ header = (len << GUC_CT_MSG_LEN_SHIFT) | (GUC_CT_MSG_WRITE_FENCE_TO_DESC) | + (want_response ? GUC_CT_MSG_SEND_STATUS : 0) | (action[0] << GUC_CT_MSG_ACTION_SHIFT); cmds[tail] = header; @@ -380,36 +393,104 @@ static int wait_for_desc_update(struct guc_ct_buffer_desc *desc, return err; } -static int ctch_send(struct intel_guc *guc, +/** + * wait_for_response_msg - Wait for the Guc response. + * @req: pointer to pending request + * @status: placeholder for status + * + * We will update request status from the response message handler. + * Returns: + * * 0 response received (status is valid) + * * -ETIMEDOUT no response within hardcoded timeout + */ +static int wait_for_response_msg(struct ct_request *req, u32 *status) +{ + int err; + + /* + * Fast commands should complete in less than 10us, so sample quickly + * up to that length of time, then switch to a slower sleep-wait loop. + * No GuC command should ever take longer than 10ms. 
+ */ +#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status)) + err = wait_for_us(done, 10); + if (err) + err = wait_for(done, 10); +#undef done + + if (unlikely(err)) + DRM_ERROR("CT: fence %u err %d\n", req->fence, err); + + *status = req->status; + return err; +} + +static int ctch_send(struct intel_guc_ct *ct, struct intel_guc_ct_channel *ctch, const u32 *action, u32 len, + u32 *response_buf, + u32 response_buf_size, u32 *status) { struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND]; struct guc_ct_buffer_desc *desc = ctb->desc; + struct ct_request request; + unsigned long flags; u32 fence; int err; GEM_BUG_ON(!ctch_is_open(ctch)); GEM_BUG_ON(!len); GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); + GEM_BUG_ON(!response_buf && response_buf_size); fence = ctch_get_next_fence(ctch); - err = ctb_write(ctb, action, len, fence); + request.fence = fence; + request.status = 0; + request.response_len = response_buf_size; + request.response_buf = response_buf; + + spin_lock_irqsave(&ct->lock, flags); + list_add_tail(&request.link, &ct->pending_requests); + spin_unlock_irqrestore(&ct->lock, flags); + + err = ctb_write(ctb, action, len, fence, !!response_buf); if (unlikely(err)) - return err; + goto unlink; - intel_guc_notify(guc); + intel_guc_notify(ct_to_guc(ct)); - err = wait_for_desc_update(desc, fence, status); + if (response_buf) + err = wait_for_response_msg(&request, status); + else + err = wait_for_desc_update(desc, fence, status); if (unlikely(err)) - return err; - if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) - return -EIO; + goto unlink; + + if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) { + err = -EIO; + goto unlink; + } + + if (response_buf) { + /* There shall be no data in the status */ + WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status)); + /* Return actual response len */ + err = request.response_len; + } else { + /* There shall be no response payload */ + WARN_ON(request.response_len); + /* Return data decoded from the status dword */ + err = INTEL_GUC_MSG_TO_DATA(*status); + } - /* Use data from the GuC status as our return value */ - return INTEL_GUC_MSG_TO_DATA(*status); +unlink: + spin_lock_irqsave(&ct->lock, flags); + list_del(&request.link); + spin_unlock_irqrestore(&ct->lock, flags); + + return err; } /* @@ -418,13 +499,15 @@ static int ctch_send(struct intel_guc *guc, static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size) { - struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; + struct intel_guc_ct *ct = &guc->ct; + struct intel_guc_ct_channel *ctch = &ct->host_channel; u32 status = ~0; /* undefined */ int ret; mutex_lock(&guc->send_mutex); - ret = ctch_send(guc, ctch, action, len, &status); + ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size, + &status); if (unlikely(ret < 0)) { DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", action[0], ret, status); @@ -503,8 +586,13 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) { u32 header = msg[0]; + u32 fence = msg[1]; u32 status = msg[2]; u32 len = ct_header_get_len(header) + 1; /* total len with header */ + u32 payload_len = len - 3; /* len<3 is treated as protocol error */ + struct ct_request *req; + bool found = false; + unsigned long flags; GEM_BUG_ON(!ct_header_is_response(header)); @@ -518,8 +606,29 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) DRM_ERROR("CT: corrupted response %*phn\n", 4*len, 
msg); return -EPROTO; } + spin_lock_irqsave(&ct->lock, flags); + list_for_each_entry(req, &ct->pending_requests, link) { + if (req->fence != fence) { + DRM_DEBUG_DRIVER("CT: request %u awaits response\n", + req->fence); + continue; + } + if (unlikely(payload_len > req->response_len)) { + DRM_ERROR("CT: response %u too long %*phn\n", + req->fence, 4*len, msg); + payload_len = 0; + } + if (payload_len) + memcpy(req->response_buf, msg + 3, 4*payload_len); + req->response_len = payload_len; + WRITE_ONCE(req->status, status); + found = true; + break; + } + spin_unlock_irqrestore(&ct->lock, flags); - /* XXX */ + if (!found) + DRM_ERROR("CT: unsolicited response %*phn\n", 4*len, msg); return 0; } diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h index 595c8ad..905566b 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/intel_guc_ct.h @@ -71,10 +71,15 @@ struct intel_guc_ct_channel { /** Holds all command transport channels. * * @host_channel: main channel used by the host + * @lock: spin lock for pending requests list + * @pending_requests: list of pending requests */ struct intel_guc_ct { struct intel_guc_ct_channel host_channel; /* other channels are tbd */ + + spinlock_t lock; + struct list_head pending_requests; }; void intel_guc_ct_init_early(struct intel_guc_ct *ct);
Instead of returning small data in response status dword, GuC may append longer data as response message payload. If caller provides response buffer, we will copy received data and use number of received data dwords as new success return value. We will WARN if response from GuC does not match caller expectation. v2: fix timeout and checkpatch warnings (Michal) Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com> Cc: Oscar Mateo <oscar.mateo@intel.com> Cc: Michel Thierry <michel.thierry@intel.com> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> --- drivers/gpu/drm/i915/intel_guc_ct.c | 137 ++++++++++++++++++++++++++++++++---- drivers/gpu/drm/i915/intel_guc_ct.h | 5 ++ 2 files changed, 128 insertions(+), 14 deletions(-)
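To make the new calling contract concrete, a user of the long-form response might look like the fragment below. Everything in it is illustrative: the action value, the buffer size, the use_response() consumer and the guc_send_and_receive() wrapper (assumed to forward the extra arguments into the CT send path shown in the diff) are all made up for the example.

            u32 action[2] = { MY_GUC_ACTION_QUERY, query_param }; /* made-up action */
            u32 response[8];        /* expected payload size, in dwords */
            int ret;

            ret = guc_send_and_receive(guc, action, ARRAY_SIZE(action),
                                       response, ARRAY_SIZE(response));
            if (ret < 0)
                    return ret;     /* send failed, or GuC reported an error status */

            /*
             * On success ret is the number of response dwords copied into
             * 'response' (0 if the reply carried no payload beyond the status).
             */
            use_response(response, ret);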