| Message ID | 1406217891-8912-34-git-send-email-thomas.daniel@intel.com (mailing list archive) |
|---|---|
| State | New, archived |
On Thu, Jul 24, 2014 at 05:04:41PM +0100, Thomas Daniel wrote:
> From: Oscar Mateo <oscar.mateo@intel.com>
>
> If we receive a storm of requests for the same context (see gem_storedw_loop_*)
> we might end up iterating over too many elements in interrupt time, looking for
> contexts to squash together. Instead, share the burden by giving more
> intelligence to the queue function. At most, the interrupt will iterate over
> three elements.
>
> Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>

I'll continue merging after this patch tomorrow.
-Daniel

> ---
>  drivers/gpu/drm/i915/intel_lrc.c |   26 ++++++++++++++++++++++----
>  1 file changed, 22 insertions(+), 4 deletions(-)
```diff
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 895dbfc..829b15d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -384,9 +384,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 				   struct intel_context *to,
 				   u32 tail)
 {
-	struct intel_ctx_submit_request *req = NULL;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_ctx_submit_request *req = NULL, *cursor;
 	unsigned long flags;
-	bool was_empty;
+	int num_elements = 0;
 
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (req == NULL)
@@ -400,9 +401,26 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 
 	spin_lock_irqsave(&ring->execlist_lock, flags);
 
-	was_empty = list_empty(&ring->execlist_queue);
+	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+		if (++num_elements > 2)
+			break;
+
+	if (num_elements > 2) {
+		struct intel_ctx_submit_request *tail_req;
+
+		tail_req = list_last_entry(&ring->execlist_queue,
+					   struct intel_ctx_submit_request,
+					   execlist_link);
+		if (to == tail_req->ctx) {
+			WARN(tail_req->elsp_submitted != 0,
+			     "More than 2 already-submitted reqs queued\n");
+			list_del(&tail_req->execlist_link);
+			queue_work(dev_priv->wq, &tail_req->work);
+		}
+	}
+
 	list_add_tail(&req->execlist_link, &ring->execlist_queue);
-	if (was_empty)
+	if (num_elements == 0)
 		execlists_context_unqueue(ring);
 
 	spin_unlock_irqrestore(&ring->execlist_lock, flags);
```
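To make the coalescing described in the commit message easier to follow outside the driver, here is a minimal, self-contained userspace sketch of the same idea: the queue function counts at most three queued requests, and if the queue already holds more than two and the most recently queued request targets the same context as the new one, that older request is dropped before the new one is appended. The `struct request`, `struct queue`, and `queue_request()` names here are illustrative only, not the driver's types; the real patch also defers freeing the displaced request to a workqueue (`queue_work(dev_priv->wq, ...)`) and warns if it had already been submitted to the ELSP, while the sketch simply frees it.

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the driver's submission request and per-ring queue. */
struct request {
	int ctx_id;		/* which context this request belongs to */
	struct request *next;	/* singly linked for simplicity */
};

struct queue {
	struct request *head;
	struct request *tail;
};

/* Append a request, coalescing with the current tail when it is for the same context. */
static void queue_request(struct queue *q, int ctx_id)
{
	struct request *req = calloc(1, sizeof(*req));
	struct request *cursor;
	int num_elements = 0;

	if (!req)
		return;
	req->ctx_id = ctx_id;

	/* Count queued requests, but never walk more than three of them. */
	for (cursor = q->head; cursor; cursor = cursor->next)
		if (++num_elements > 2)
			break;

	/*
	 * More than two already queued and the tail is for the same context:
	 * drop the tail, the new request supersedes it.  (The driver defers
	 * the actual cleanup to a workqueue; here we just free it.)
	 */
	if (num_elements > 2 && q->tail->ctx_id == ctx_id) {
		struct request *prev = q->head;

		while (prev->next != q->tail)
			prev = prev->next;
		free(q->tail);
		prev->next = NULL;
		q->tail = prev;
	}

	/* Append the new request at the tail of the queue. */
	if (q->tail)
		q->tail->next = req;
	else
		q->head = req;
	q->tail = req;

	/* An empty queue meant nothing was in flight: kick submission now. */
	if (num_elements == 0)
		printf("submitting ctx %d immediately\n", ctx_id);
}

int main(void)
{
	struct queue q = { NULL, NULL };
	struct request *r, *tmp;
	int depth = 0;
	int i;

	/* A "storm" of requests for the same context stays bounded. */
	for (i = 0; i < 100; i++)
		queue_request(&q, 42);

	for (r = q.head; r; r = r->next)
		depth++;
	printf("queue depth after the storm: %d\n", depth);	/* prints 3 */

	for (r = q.head; r; r = tmp) {
		tmp = r->next;
		free(r);
	}
	return 0;
}
```

The property being illustrated is that both the queue side and the interrupt-time consumer only ever look at a bounded number of elements, so a userspace loop hammering a single context (the gem_storedw_loop_* case the commit message mentions) cannot force the interrupt handler to iterate over an unbounded backlog.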