@@ -1875,7 +1875,7 @@ static int i915_execlists(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
for_each_ring(ring, dev_priv, ring_id) {
- struct intel_ctx_submit_request *head_req = NULL;
+ struct drm_i915_gem_request *head_req = NULL;
int count = 0;
unsigned long flags;
@@ -1908,18 +1908,18 @@ static int i915_execlists(struct seq_file *m, void *data)
list_for_each(cursor, &ring->execlist_queue)
count++;
head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct intel_ctx_submit_request, execlist_link);
+ struct drm_i915_gem_request, execlist_link);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
struct drm_i915_gem_object *ctx_obj;
- ctx_obj = head_req->request->ctx->engine[ring_id].state;
+ ctx_obj = head_req->ctx->engine[ring_id].state;
seq_printf(m, "\tHead request id: %u\n",
intel_execlists_ctx_id(ctx_obj));
seq_printf(m, "\tHead request tail: %u\n",
- head_req->request->tail);
+ head_req->tail);
}
seq_putc(m, '\n');
@@ -2036,6 +2036,29 @@ struct drm_i915_gem_request {
struct list_head client_list;
uint32_t uniq;
+
+ /**
+ * The ELSP only accepts two elements at a time, so we queue
+ * context/tail pairs on a given queue (ring->execlist_queue) until the
+ * hardware is available. The queue serves a double purpose: we also use
+ * it to keep track of the up to two contexts currently in the hardware
+ * (usually one in execution and the other queued up by the GPU). We
+ * only remove elements from the head of the queue when the hardware
+ * informs us that an element has been completed.
+ *
+ * All accesses to the queue are mediated by a spinlock
+ * (ring->execlist_lock).
+ */
+
+ /** Execlist link in the submission queue. */
+ struct list_head execlist_link;
+
+ /** Execlists workqueue for processing this request in a bottom half */
+ struct work_struct work;
+
+ /** Execlists: number of times this request has been sent to the ELSP */
+ int elsp_submitted;
};
void i915_gem_request_free(struct kref *req_ref);
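
Not part of the patch: a minimal sketch of the access pattern the kerneldoc added above prescribes, assuming the new fields; the helper name is hypothetical.

/* Hypothetical helper: detach the head request, touching the queue
 * only under ring->execlist_lock, and only ever from the head, as the
 * comment block above requires. */
static struct drm_i915_gem_request *
execlist_queue_pop_sketch(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *head_req;
	unsigned long flags;

	spin_lock_irqsave(&ring->execlist_lock, flags);
	head_req = list_first_entry_or_null(&ring->execlist_queue,
					    struct drm_i915_gem_request,
					    execlist_link);
	if (head_req)
		list_del(&head_req->execlist_link);
	spin_unlock_irqrestore(&ring->execlist_lock, flags);

	return head_req;	/* caller inherits the queue's reference */
}
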
@@ -2658,14 +2658,14 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* pinned in place.
*/
while (!list_empty(&ring->execlist_queue)) {
- struct intel_ctx_submit_request *submit_req;
+ struct drm_i915_gem_request *submit_req;
submit_req = list_first_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
list_del(&submit_req->execlist_link);
intel_runtime_pm_put(dev_priv);
- i915_gem_context_unreference(submit_req->request->ctx);
+ i915_gem_context_unreference(submit_req->ctx);
- kfree(submit_req);
+ i915_gem_request_unreference(submit_req);
}
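
Not part of the patch, but worth spelling out: with the wrapper struct gone, each queue element is now a kref-counted request holding the references taken in execlists_context_queue() below, so removal has to drop them symmetrically rather than kfree() the object. A condensed view of the pairing, assembled from the hunks in this patch:

	/* enqueue side (execlists_context_queue) */
	i915_gem_request_reference(request);
	i915_gem_context_reference(request->ctx);
	list_add_tail(&request->execlist_link, &ring->execlist_queue);

	/* removal side (retire / reset cleanup) */
	list_del(&req->execlist_link);
	i915_gem_context_unreference(req->ctx);
	i915_gem_request_unreference(req);	/* may free via kref */
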
@@ -405,8 +405,8 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
- struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
- struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
+ struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
+ struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
assert_spin_locked(&ring->execlist_lock);
@@ -418,7 +418,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
execlist_link) {
if (!req0) {
req0 = cursor;
- } else if (req0->request->ctx == cursor->request->ctx) {
+ } else if (req0->ctx == cursor->ctx) {
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
@@ -434,9 +434,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
WARN_ON(req1 && req1->elsp_submitted);
- execlists_submit_contexts(ring, req0->request->ctx, req0->request->tail,
- req1 ? req1->request->ctx : NULL,
- req1 ? req1->request->tail : 0);
+ execlists_submit_contexts(ring, req0->ctx, req0->tail,
+ req1 ? req1->ctx : NULL,
+ req1 ? req1->tail : 0);
req0->elsp_submitted++;
if (req1)
@@ -446,17 +446,17 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
- struct intel_ctx_submit_request *head_req;
+ struct drm_i915_gem_request *head_req;
assert_spin_locked(&ring->execlist_lock);
head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
if (head_req != NULL) {
struct drm_i915_gem_object *ctx_obj =
- head_req->request->ctx->engine[ring->id].state;
+ head_req->ctx->engine[ring->id].state;
if (intel_execlists_ctx_id(ctx_obj) == request_id) {
WARN(head_req->elsp_submitted == 0,
"Never submitted head request\n");
@@ -538,17 +538,11 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
u32 tail,
struct drm_i915_gem_request *request)
{
- struct intel_ctx_submit_request *req = NULL, *cursor;
+ struct drm_i915_gem_request *cursor;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
int num_elements = 0;
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (req == NULL)
- return -ENOMEM;
- req->ctx = to;
- i915_gem_context_reference(req->ctx);
-
if (to != ring->default_context)
intel_lr_context_pin(ring, to);
@@ -566,9 +560,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
} else {
WARN_ON(to != request->ctx);
}
- req->request = request;
i915_gem_request_reference(request);
- i915_gem_context_reference(req->request->ctx);
+ i915_gem_context_reference(request->ctx);
intel_runtime_pm_get(dev_priv);
@@ -579,13 +572,13 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
break;
if (num_elements > 2) {
- struct intel_ctx_submit_request *tail_req;
+ struct drm_i915_gem_request *tail_req;
tail_req = list_last_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
+ struct drm_i915_gem_request,
execlist_link);
- if (to == tail_req->request->ctx) {
+ if (to == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
@@ -594,7 +587,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
}
}
- list_add_tail(&req->execlist_link, &ring->execlist_queue);
+ list_add_tail(&request->execlist_link, &ring->execlist_queue);
if (num_elements == 0)
execlists_context_unqueue(ring);
@@ -763,7 +756,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
void intel_execlists_retire_requests(struct intel_engine_cs *ring)
{
- struct intel_ctx_submit_request *req, *tmp;
+ struct drm_i915_gem_request *req, *tmp;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
struct list_head retired_list;
@@ -778,7 +771,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
spin_unlock_irqrestore(&ring->execlist_lock, flags);
list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
- struct intel_context *ctx = req->request->ctx;
+ struct intel_context *ctx = req->ctx;
struct drm_i915_gem_object *ctx_obj =
ctx->engine[ring->id].state;
@@ -786,9 +779,8 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
intel_lr_context_unpin(ring, ctx);
intel_runtime_pm_put(dev_priv);
i915_gem_context_unreference(ctx);
- i915_gem_request_unreference(req->request);
+ i915_gem_request_unreference(req);
list_del(&req->execlist_link);
- kfree(req);
}
}
Move all remaining elements that were unique to execlists queue items
into the associated request.

Issue: VIZ-4274

v2: Rebase. Fixed issue of overzealous freeing of request.

Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c |  8 +++----
 drivers/gpu/drm/i915/i915_drv.h     | 24 ++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem.c     |  6 ++---
 drivers/gpu/drm/i915/intel_lrc.c    | 44 +++++++++++++++----------------------
 4 files changed, 49 insertions(+), 33 deletions(-)
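
For context, and reconstructed only approximately (the old definition is not shown in this diff): before this change the queue element was a separate, kzalloc()ed wrapper along the lines of

	struct intel_ctx_submit_request {
		struct intel_context *ctx;
		u32 tail;
		struct list_head execlist_link;
		struct work_struct work;
		int elsp_submitted;
		struct drm_i915_gem_request *request;
	};

After the patch those fields live on drm_i915_gem_request itself, so every req->request->foo dereference above collapses to req->foo and the wrapper's separate kzalloc()/kfree() lifetime folds into the request's existing kref.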