
[37/62] drm/i915: Unify request submission

Message ID 1464971847-15809-38-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson June 3, 2016, 4:37 p.m. UTC
Move request submission out of emit_request and into its own common
vfunc, engine->submit_request(), which is called from i915_add_request().
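
In practice, __i915_add_request() now writes the request into the ring
via the backend-specific emit_request() hook and only then hands it to
the new submit_request() hook. A trimmed sketch of the resulting flow
(all names as in the patch; the full hunk is below):

	/* Write the breadcrumb commands into the ring; not allowed to fail */
	ret = engine->emit_request(request);
	WARN(ret, "emit|add_request failed: %d!\n", ret);

	...

	i915_gem_mark_busy(request->i915, engine);

	/* Hand the request to the backend for execution: i9xx_submit_request
	 * (legacy ring), execlists_context_queue or i915_guc_submit.
	 */
	engine->submit_request(request);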

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_request.c    |  8 +++---
 drivers/gpu/drm/i915/i915_guc_submission.c |  4 +--
 drivers/gpu/drm/i915/intel_guc.h           |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 13 +++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 39 ++++++++++++++----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    | 23 +++++++++---------
 6 files changed, 41 insertions(+), 48 deletions(-)
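
Each backend now installs the pair of hooks at engine setup; assembled
from the hunks below, the assignments look roughly like:

	/* Execlists (intel_lrc.c) */
	engine->emit_request = gen8_emit_request;
	if (i915.enable_guc_submission)
		engine->submit_request = i915_guc_submit;
	else
		engine->submit_request = execlists_context_queue;

	/* Legacy ringbuffer (intel_ringbuffer.c), gen6+ example */
	engine->emit_request = gen6_emit_request;
	engine->submit_request = i9xx_submit_request;	/* gen6_bsd_submit_request on gen6 VCS */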

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 5fef1c291b25..a55042ff7994 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -459,12 +459,10 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 	 */
 	request->postfix = intel_ring_get_tail(ring);
 
-	if (i915.enable_execlists)
-		ret = engine->emit_request(request);
-	else
-		ret = engine->add_request(request);
 	/* Not allowed to fail! */
+	ret = engine->emit_request(request);
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
+
 	/* Sanity check that the reserved size was large enough. */
 	ret = intel_ring_get_tail(ring) - request_start;
 	if (ret < 0)
@@ -475,6 +473,8 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 		  reserved_tail, ret);
 
 	i915_gem_mark_busy(request->i915, engine);
+
+	engine->submit_request(request);
 }
 
 static unsigned long local_clock_us(unsigned *cpu)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 8aa3cf8cac45..cc4792df249d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -562,7 +562,7 @@  static void guc_add_workqueue_item(struct i915_guc_client *gc,
  * The only error here arises if the doorbell hardware isn't functioning
  * as expected, which really shouln't happen.
  */
-int i915_guc_submit(struct drm_i915_gem_request *rq)
+void i915_guc_submit(struct drm_i915_gem_request *rq)
 {
 	unsigned int engine_id = rq->engine->guc_id;
 	struct intel_guc *guc = &rq->i915->guc;
@@ -579,8 +579,6 @@  int i915_guc_submit(struct drm_i915_gem_request *rq)
 
 	guc->submissions[engine_id] += 1;
 	guc->last_seqno[engine_id] = rq->fence.seqno;
-
-	return b_ret;
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 41601c71f529..7f9063385258 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -159,7 +159,7 @@  extern int intel_guc_resume(struct drm_device *dev);
 int i915_guc_submission_init(struct drm_device *dev);
 int i915_guc_submission_enable(struct drm_device *dev);
 int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
-int i915_guc_submit(struct drm_i915_gem_request *rq);
+void i915_guc_submit(struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_device *dev);
 void i915_guc_submission_fini(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 71960e47277c..eee9274f7516 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -751,12 +751,6 @@  intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	 */
 	request->previous_context = engine->last_context;
 	engine->last_context = request->ctx;
-
-	if (i915.enable_guc_submission)
-		i915_guc_submit(request);
-	else
-		execlists_context_queue(request);
-
 	return 0;
 }
 
@@ -1834,8 +1828,13 @@  logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
 	/* Default vfuncs which can be overriden by each engine. */
 	engine->init_hw = gen8_init_common_ring;
-	engine->emit_request = gen8_emit_request;
 	engine->emit_flush = gen8_emit_flush;
+	engine->emit_request = gen8_emit_request;
+	if (i915.enable_guc_submission)
+		engine->submit_request = i915_guc_submit;
+	else
+		engine->submit_request = execlists_context_queue;
+
 	engine->irq_enable = gen8_logical_ring_enable_irq;
 	engine->irq_disable = gen8_logical_ring_disable_irq;
 	engine->emit_bb_start = gen8_emit_bb_start;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index db38abddfec1..b7b5c2d94db5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1341,15 +1341,14 @@  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 }
 
 /**
- * gen6_add_request - Update the semaphore mailbox registers
+ * gen6_emit_request - Update the semaphore mailbox registers
  *
  * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static int
-gen6_add_request(struct drm_i915_gem_request *req)
+static int gen6_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -1370,13 +1369,10 @@  gen6_add_request(struct drm_i915_gem_request *req)
 
 	req->tail = intel_ring_get_tail(ring);
 
-	req->engine->submit_request(req);
-
 	return 0;
 }
 
-static int
-gen8_render_add_request(struct drm_i915_gem_request *req)
+static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
@@ -1400,8 +1396,9 @@  gen8_render_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
-	req->engine->submit_request(req);
+	req->tail = intel_ring_get_tail(ring);
 
 	return 0;
 }
@@ -1609,8 +1606,7 @@  bsd_ring_flush(struct drm_i915_gem_request *req,
 	return 0;
 }
 
-static int
-i9xx_add_request(struct drm_i915_gem_request *req)
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -1627,8 +1623,6 @@  i9xx_add_request(struct drm_i915_gem_request *req)
 
 	req->tail = intel_ring_get_tail(ring);
 
-	req->engine->submit_request(req);
-
 	return 0;
 }
 
@@ -2630,7 +2624,7 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 		}
 
 		engine->init_context = intel_rcs_ctx_init;
-		engine->add_request = gen8_render_add_request;
+		engine->emit_request = gen8_render_emit_request;
 		engine->emit_flush = gen8_render_ring_flush;
 		engine->irq_enable = gen8_ring_enable_irq;
 		engine->irq_disable = gen8_ring_disable_irq;
@@ -2643,7 +2637,7 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 		}
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->add_request = gen6_add_request;
+		engine->emit_request = gen6_emit_request;
 		engine->emit_flush = gen7_render_ring_flush;
 		if (IS_GEN6(dev_priv))
 			engine->emit_flush = gen6_render_ring_flush;
@@ -2673,14 +2667,14 @@  int intel_init_render_ring_buffer(struct drm_device *dev)
 			engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 		}
 	} else if (IS_GEN5(dev_priv)) {
-		engine->add_request = i9xx_add_request;
+		engine->emit_request = i9xx_emit_request;
 		engine->emit_flush = gen4_render_ring_flush;
 		engine->irq_enable = gen5_ring_enable_irq;
 		engine->irq_disable = gen5_ring_disable_irq;
 		engine->irq_seqno_barrier = gen5_seqno_barrier;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 	} else {
-		engine->add_request = i9xx_add_request;
+		engine->emit_request = i9xx_emit_request;
 		if (INTEL_GEN(dev_priv) < 4)
 			engine->emit_flush = gen2_render_ring_flush;
 		else
@@ -2745,7 +2739,7 @@  int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		if (IS_GEN6(dev_priv))
 			engine->submit_request = gen6_bsd_submit_request;
 		engine->emit_flush = gen6_bsd_ring_flush;
-		engine->add_request = gen6_add_request;
+		engine->emit_request = gen6_emit_request;
 		engine->irq_seqno_barrier = gen6_seqno_barrier;
 		if (INTEL_GEN(dev_priv) >= 8) {
 			engine->irq_enable_mask =
@@ -2781,7 +2775,7 @@  int intel_init_bsd_ring_buffer(struct drm_device *dev)
 	} else {
 		engine->mmio_base = BSD_RING_BASE;
 		engine->emit_flush = bsd_ring_flush;
-		engine->add_request = i9xx_add_request;
+		engine->emit_request = i9xx_emit_request;
 		if (IS_GEN5(dev_priv)) {
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
 			engine->irq_enable = gen5_ring_enable_irq;
@@ -2813,8 +2807,9 @@  int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	engine->hw_id = 4;
 
 	engine->mmio_base = GEN8_BSD2_RING_BASE;
+
 	engine->emit_flush = gen6_bsd_ring_flush;
-	engine->add_request = gen6_add_request;
+	engine->emit_request = gen6_emit_request;
 	engine->submit_request = i9xx_submit_request;
 
 	engine->irq_seqno_barrier = gen6_seqno_barrier;
@@ -2844,8 +2839,9 @@  int intel_init_blt_ring_buffer(struct drm_device *dev)
 	engine->hw_id = 2;
 
 	engine->mmio_base = BLT_RING_BASE;
+
 	engine->emit_flush = gen6_ring_flush;
-	engine->add_request = gen6_add_request;
+	engine->emit_request = gen6_emit_request;
 	engine->submit_request = i9xx_submit_request;
 
 	engine->irq_seqno_barrier = gen6_seqno_barrier;
@@ -2903,8 +2899,9 @@  int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	engine->hw_id = 3;
 
 	engine->mmio_base = VEBOX_RING_BASE;
+
 	engine->emit_flush = gen6_ring_flush;
-	engine->add_request = gen6_add_request;
+	engine->emit_request = gen6_emit_request;
 	engine->submit_request = i9xx_submit_request;
 
 	engine->irq_seqno_barrier = gen6_seqno_barrier;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 647cc51e6457..2eb12d92d112 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -212,7 +212,17 @@  struct intel_engine_cs {
 
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
-	int		(*add_request)(struct drm_i915_gem_request *req);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
+				      u32 invalidate_domains,
+				      u32 flush_domains);
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
+					 u64 offset, u32 length,
+					 unsigned dispatch_flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
+	int		(*emit_request)(struct drm_i915_gem_request *req);
+	void		(*submit_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
@@ -290,17 +300,6 @@  struct intel_engine_cs {
 	unsigned int idle_lite_restore_wa;
 	bool disable_lite_restore_wa;
 	u32 ctx_desc_template;
-	int		(*emit_request)(struct drm_i915_gem_request *request);
-	int		(*emit_flush)(struct drm_i915_gem_request *request,
-				      u32 invalidate_domains,
-				      u32 flush_domains);
-	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
-					 u64 offset, u32 length,
-					 unsigned dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
-	void		(*submit_request)(struct drm_i915_gem_request *req);
 
 	/**
 	 * List of objects currently involved in rendering from the