
[RFC,09/11] HACK drm/i915/preempt: Actually send the preemption request

Message ID 20170223191421.4502-5-michal.winiarski@intel.com
State New, archived

Commit Message

Michał Winiarski Feb. 23, 2017, 7:14 p.m. UTC
Now that we're able to post-process the preemption event, let's actually
send the preemption request to GuC. To identify that preemption has
finished, we use a dummy request sent through the kernel_context.
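
For reference, the completion side of this handshake comes down to checking
the preempt slot in the hardware status page, which the dummy request below
writes via PIPE_CONTROL (RCS) or MI_FLUSH_DW (other engines). A minimal
sketch, assuming a hypothetical helper name (the real check lives in the
irq handling added earlier in this series):

static bool i915_guc_preempt_done(struct intel_engine_cs *engine)
{
	/* The dummy request stores 1 into this slot once GuC has
	 * preempted the engine and run the kernel_context workload. */
	if (!intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX))
		return false;

	/* Reset the slot so the next preemption cycle starts clean */
	intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
	engine->preempt_requested = false;
	return true;
}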

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
 drivers/gpu/drm/i915/i915_guc_submission.c | 90 ++++++++++++++++++++++++++++--
 1 file changed, 86 insertions(+), 4 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 2a658b9..7ccc5b4 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -491,14 +491,96 @@ static int guc_ring_doorbell(struct i915_guc_client *client)
 	return ret;
 }
 
-static int i915_guc_preempt_noop(struct intel_engine_cs *engine)
+static int add_preemption_workitem(struct intel_engine_cs *engine)
 {
-	engine->preempt_requested = false;
-	intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_guc *guc = &engine->i915->guc;
+	struct i915_gem_context *ctx = engine->i915->kernel_context;
+	struct i915_guc_client *client = guc->client[PREEMPT];
+	struct drm_i915_gem_request rq;
+	u32 *cs;
+
+	rq.i915 = engine->i915;
+	rq.engine = engine;
+	rq.ctx = ctx;
+	rq.global_seqno = 0;
+	rq.file_priv = NULL;
+	rq.batch = NULL;
+	rq.reserved_space = 32;
+	rq.ring = ctx->engine[engine->id].ring;
+
+	if (engine->id == RCS) {
+		cs = intel_ring_begin(&rq, 8);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB |
+			PIPE_CONTROL_CS_STALL |
+			PIPE_CONTROL_QW_WRITE;
+		*cs++ = engine->status_page.ggtt_offset +
+			I915_GEM_HWS_PREEMPT_ADDR;
+		*cs++ = 0;
+		*cs++ = 1;
+		*cs++ = 0;
+		*cs++ = MI_USER_INTERRUPT;
+		*cs++ = MI_NOOP;
+		intel_ring_advance(&rq, cs);
+	} else {
+		cs = intel_ring_begin(&rq, 6);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+		*cs++ = (engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR) |
+		        MI_FLUSH_DW_USE_GTT;
+		*cs++ = 0;
+		*cs++ = 1;
+		*cs++ = MI_USER_INTERRUPT;
+		*cs++ = MI_NOOP;
+		intel_ring_advance(&rq, cs);
+	}
+	rq.tail = intel_ring_offset(&rq, cs);
+
+	spin_lock_irq(&client->wq_lock);
+	client->wq_rsvd += sizeof(struct guc_wq_item);
+	guc_wq_item_append(client, &rq);
+	if (i915_vma_is_map_and_fenceable(rq.ring->vma))
+		POSTING_READ_FW(GUC_STATUS);
+	spin_unlock_irq(&client->wq_lock);
 
 	return 0;
 }
 
+static int i915_guc_preempt(struct intel_engine_cs *engine)
+{
+	struct intel_guc *guc = &engine->i915->guc;
+	struct i915_guc_client *client = guc->client[PREEMPT];
+	struct guc_process_desc *desc;
+	u32 data[7];
+	int ret;
+
+	ret = add_preemption_workitem(engine);
+	if (ret)
+		return ret;
+
+	desc = client->vaddr + client->proc_desc_offset;
+	desc->tail = client->wq_tail;
+
+	data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
+	data[1] = client->ctx_index;
+	data[2] = INTEL_GUC_PREEMPT_OPTION_IMMEDIATE |
+		  INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
+		  INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
+	data[3] = engine->guc_id;
+	data[4] = guc->client[NORMAL]->priority;
+	data[5] = guc->client[NORMAL]->ctx_index;
+	data[6] = engine->status_page.ggtt_offset +
+		  (LRC_GUCSHR_PN * PAGE_SIZE);
+
+	return intel_guc_send(guc, data, ARRAY_SIZE(data));
+}
+
 /**
  * __i915_guc_submit() - Submit commands through GuC
  * @rq:		request associated with the commands
@@ -1143,7 +1225,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 		engine->irq_tasklet.func = i915_guc_irq_handler;
 
 		if (i915.enable_preemption)
-			engine->preempt = i915_guc_preempt_noop;
+			engine->preempt = i915_guc_preempt;
 
 		/* Replay the current set of previously submitted requests */
 		spin_lock_irqsave(&engine->timeline->lock, flags);
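
For completeness, a rough illustration of how the engine->preempt hook is
expected to be driven. The actual trigger sits in an earlier patch of this
series, so the caller below is purely hypothetical:

/* Hypothetical call site, for illustration only: request preemption
 * once per cycle, re-arming the flag if the H2G send fails. */
static void maybe_request_preemption(struct intel_engine_cs *engine)
{
	if (!engine->preempt || engine->preempt_requested)
		return;

	engine->preempt_requested = true;
	if (engine->preempt(engine))		/* i915_guc_preempt() */
		engine->preempt_requested = false;
}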