
[RFC,2/5] drm/i915: Move struct intel_virtual_engine to its own header

Message ID 20191211211244.7831-3-daniele.ceraolospurio@intel.com (mailing list archive)
State New, archived
Series Split up intel_lrc.c

Commit Message

Daniele Ceraolo Spurio Dec. 11, 2019, 9:12 p.m. UTC
From: Matthew Brost <matthew.brost@intel.com>

The upcoming GuC submission code will need to use the structure, so
split it out into its own file.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: John Harrison <john.c.harrison@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c           | 103 ++++++------------
 .../drm/i915/gt/intel_virtual_engine_types.h  |  57 ++++++++++
 2 files changed, 92 insertions(+), 68 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_virtual_engine_types.h
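
The practical effect of the split is that a second compilation unit can
now reach the struct definition without pulling in the whole of
intel_lrc.c. A minimal sketch of a consumer on the GuC side -- the file
name and helper below are hypothetical, not part of this series:

	/* hypothetical consumer, e.g. a future intel_guc_submission.c */
	#include "gt/intel_virtual_engine_types.h"

	/* Log the physical engines backing a virtual engine. */
	static void guc_note_siblings(const struct intel_virtual_engine *ve)
	{
		unsigned int n;

		for (n = 0; n < ve->num_siblings; n++)
			pr_debug("sibling %u: %s\n", n, ve->siblings[n]->name);
	}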

Comments

Chris Wilson Dec. 11, 2019, 9:22 p.m. UTC | #1
Quoting Daniele Ceraolo Spurio (2019-12-11 21:12:41)
> From: Matthew Brost <matthew.brost@intel.com>
> 
> The upcoming GuC submission code will need to use the structure, so
> split it out into its own file.

There is no way this struct belongs anywhere else.

You want to add a few vfuncs to the context_ops so we can
abstract creation and manipulation.
-Chris
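
The direction Chris points at is to keep the layout private to
intel_lrc.c and have submission backends hook in through struct
intel_context_ops, which this patch already populates for the virtual
engine (.pin/.enter/.exit/.destroy via virtual_context_ops). A
speculative sketch of that shape -- trimmed to the vfuncs this series
touches, with the create_virtual hook invented for illustration:

	/*
	 * Sketch only: struct intel_context_ops extended with a creation
	 * hook so callers never need the backend's struct definition.
	 * Existing vfuncs are trimmed to those this patch uses; the
	 * create_virtual member is hypothetical.
	 */
	struct intel_context_ops {
		int (*pin)(struct intel_context *ce);
		void (*unpin)(struct intel_context *ce);

		void (*enter)(struct intel_context *ce);
		void (*exit)(struct intel_context *ce);

		void (*destroy)(struct kref *kref);

		/* hypothetical: abstract virtual-engine creation */
		struct intel_context *
		(*create_virtual)(struct intel_engine_cs **siblings,
				  unsigned int count);
	};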

Patch

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 6d6148e11fd0..e6dea2d3a5c0 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -147,6 +147,7 @@ 
 #include "intel_mocs.h"
 #include "intel_reset.h"
 #include "intel_ring.h"
+#include "intel_virtual_engine_types.h"
 #include "intel_workarounds.h"
 
 #define RING_EXECLIST_QFULL		(1 << 0x2)
@@ -180,52 +181,11 @@ 
 #define WA_TAIL_DWORDS 2
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
-struct virtual_engine {
-	struct intel_engine_cs base;
-	struct intel_context context;
-
-	/*
-	 * We allow only a single request through the virtual engine at a time
-	 * (each request in the timeline waits for the completion fence of
-	 * the previous before being submitted). By restricting ourselves to
-	 * only submitting a single request, each request is placed on to a
-	 * physical engine to maximise load spreading (by virtue of the late greedy
-	 * scheduling -- each real engine takes the next available request
-	 * upon idling).
-	 */
-	struct i915_request *request;
-
-	/*
-	 * We keep an rbtree of available virtual engines inside each physical
-	 * engine, sorted by priority. Here we preallocate the nodes we need
-	 * for the virtual engine, indexed by physical_engine->id.
-	 */
-	struct ve_node {
-		struct rb_node rb;
-		int prio;
-	} nodes[I915_NUM_ENGINES];
-
-	/*
-	 * Keep track of bonded pairs -- restrictions upon our selection
-	 * of physical engines any particular request may be submitted to.
-	 * If we receive a submit-fence from a master engine, we will only
-	 * use one of sibling_mask physical engines.
-	 */
-	struct ve_bond {
-		const struct intel_engine_cs *master;
-		intel_engine_mask_t sibling_mask;
-	} *bonds;
-	unsigned int num_bonds;
-
-	/* And finally, which physical engines this virtual engine maps onto. */
-	unsigned int num_siblings;
-	struct intel_engine_cs *siblings[0];
-};
-
-static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
+static struct intel_virtual_engine *
+to_virtual_engine(struct intel_engine_cs *engine)
 {
 	GEM_BUG_ON(!intel_engine_is_virtual(engine));
-	return container_of(engine, struct virtual_engine, base);
+	return container_of(engine, struct intel_virtual_engine, base);
 }
 
 static int lr_context_alloc(struct intel_context *ce,
@@ -384,7 +344,7 @@  static inline bool need_preempt(const struct intel_engine_cs *engine,
 		return true;
 
 	if (rb) {
-		struct virtual_engine *ve =
+		struct intel_virtual_engine *ve =
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		bool preempt = false;
 
@@ -1144,7 +1104,8 @@  execlists_schedule_in(struct i915_request *rq, int idx)
 
 static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	struct intel_virtual_engine *ve =
+		container_of(ce, typeof(*ve), context);
 	struct i915_request *next = READ_ONCE(ve->request);
 
 	if (next && next->execution_mask & ~rq->execution_mask)
@@ -1448,7 +1409,7 @@  static void virtual_update_register_offsets(u32 *regs,
 	set_offsets(regs, reg_offsets(engine), engine);
 }
 
-static bool virtual_matches(const struct virtual_engine *ve,
+static bool virtual_matches(const struct intel_virtual_engine *ve,
 			    const struct i915_request *rq,
 			    const struct intel_engine_cs *engine)
 {
@@ -1473,7 +1434,7 @@  static bool virtual_matches(const struct virtual_engine *ve,
 	return true;
 }
 
-static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
+static void virtual_xfer_breadcrumbs(struct intel_virtual_engine *ve,
 				     struct intel_engine_cs *engine)
 {
 	struct intel_engine_cs *old = ve->siblings[0];
@@ -1670,7 +1631,7 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 	 */
 
 	for (rb = rb_first_cached(&execlists->virtual); rb; ) {
-		struct virtual_engine *ve =
+		struct intel_virtual_engine *ve =
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		struct i915_request *rq = READ_ONCE(ve->request);
 
@@ -1786,7 +1747,7 @@  static void execlists_dequeue(struct intel_engine_cs *engine)
 	}
 
 	while (rb) { /* XXX virtual is always taking precedence */
-		struct virtual_engine *ve =
+		struct intel_virtual_engine *ve =
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		struct i915_request *rq;
 
@@ -3237,7 +3198,7 @@  static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
 	/* Cancel all attached virtual engines */
 	while ((rb = rb_first_cached(&execlists->virtual))) {
-		struct virtual_engine *ve =
+		struct intel_virtual_engine *ve =
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 
 		rb_erase_cached(rb, &execlists->virtual);
@@ -4198,14 +4159,14 @@  static int lr_context_alloc(struct intel_context *ce,
 	return ret;
 }
 
-static struct list_head *virtual_queue(struct virtual_engine *ve)
+static struct list_head *virtual_queue(struct intel_virtual_engine *ve)
 {
 	return &ve->base.execlists.default_priolist.requests[0];
 }
 
 static void virtual_context_destroy(struct kref *kref)
 {
-	struct virtual_engine *ve =
+	struct intel_virtual_engine *ve =
 		container_of(kref, typeof(*ve), context.ref);
 	unsigned int n;
 
@@ -4239,7 +4200,7 @@  static void virtual_context_destroy(struct kref *kref)
 	kfree(ve);
 }
 
-static void virtual_engine_initial_hint(struct virtual_engine *ve)
+static void virtual_engine_initial_hint(struct intel_virtual_engine *ve)
 {
 	int swp;
 
@@ -4268,7 +4229,8 @@  static void virtual_engine_initial_hint(struct virtual_engine *ve)
 
 static int virtual_context_pin(struct intel_context *ce)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	struct intel_virtual_engine *ve =
+		container_of(ce, typeof(*ve), context);
 	int err;
 
 	/* Note: we must use a real engine class for setting up reg state */
@@ -4282,7 +4244,8 @@  static int virtual_context_pin(struct intel_context *ce)
 
 static void virtual_context_enter(struct intel_context *ce)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	struct intel_virtual_engine *ve =
+		container_of(ce, typeof(*ve), context);
 	unsigned int n;
 
 	for (n = 0; n < ve->num_siblings; n++)
@@ -4293,7 +4256,8 @@  static void virtual_context_enter(struct intel_context *ce)
 
 static void virtual_context_exit(struct intel_context *ce)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	struct intel_virtual_engine *ve =
+		container_of(ce, typeof(*ve), context);
 	unsigned int n;
 
 	intel_timeline_exit(ce->timeline);
@@ -4312,7 +4276,8 @@  static const struct intel_context_ops virtual_context_ops = {
 	.destroy = virtual_context_destroy,
 };
 
-static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+static intel_engine_mask_t
+virtual_submission_mask(struct intel_virtual_engine *ve)
 {
 	struct i915_request *rq;
 	intel_engine_mask_t mask;
@@ -4339,7 +4304,8 @@  static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
 
 static void virtual_submission_tasklet(unsigned long data)
 {
-	struct virtual_engine * const ve = (struct virtual_engine *)data;
+	struct intel_virtual_engine * const ve =
+		(struct intel_virtual_engine *)data;
 	const int prio = ve->base.execlists.queue_priority_hint;
 	intel_engine_mask_t mask;
 	unsigned int n;
@@ -4419,7 +4385,7 @@  static void virtual_submission_tasklet(unsigned long data)
 
 static void virtual_submit_request(struct i915_request *rq)
 {
-	struct virtual_engine *ve = to_virtual_engine(rq->engine);
+	struct intel_virtual_engine *ve = to_virtual_engine(rq->engine);
 	struct i915_request *old;
 	unsigned long flags;
 
@@ -4458,7 +4424,7 @@  static void virtual_submit_request(struct i915_request *rq)
 }
 
 static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
+virtual_find_bond(struct intel_virtual_engine *ve,
 		  const struct intel_engine_cs *master)
 {
 	int i;
@@ -4474,7 +4440,7 @@  virtual_find_bond(struct virtual_engine *ve,
 static void
 virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 {
-	struct virtual_engine *ve = to_virtual_engine(rq->engine);
+	struct intel_virtual_engine *ve = to_virtual_engine(rq->engine);
 	intel_engine_mask_t allowed, exec;
 	struct ve_bond *bond;
 
@@ -4498,7 +4464,7 @@  intel_execlists_create_virtual(struct i915_gem_context *ctx,
 			       struct intel_engine_cs **siblings,
 			       unsigned int count)
 {
-	struct virtual_engine *ve;
+	struct intel_virtual_engine *ve;
 	unsigned int n;
 	int err;
 
@@ -4639,7 +4605,7 @@  struct intel_context *
 intel_execlists_clone_virtual(struct i915_gem_context *ctx,
 			      struct intel_engine_cs *src)
 {
-	struct virtual_engine *se = to_virtual_engine(src);
+	struct intel_virtual_engine *se = to_virtual_engine(src);
 	struct intel_context *dst;
 
 	dst = intel_execlists_create_virtual(ctx,
@@ -4649,7 +4615,8 @@  intel_execlists_clone_virtual(struct i915_gem_context *ctx,
 		return dst;
 
 	if (se->num_bonds) {
-		struct virtual_engine *de = to_virtual_engine(dst->engine);
+		struct intel_virtual_engine *de =
+			to_virtual_engine(dst->engine);
 
 		de->bonds = kmemdup(se->bonds,
 				    sizeof(*se->bonds) * se->num_bonds,
@@ -4669,7 +4636,7 @@  int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 				     const struct intel_engine_cs *master,
 				     const struct intel_engine_cs *sibling)
 {
-	struct virtual_engine *ve = to_virtual_engine(engine);
+	struct intel_virtual_engine *ve = to_virtual_engine(engine);
 	struct ve_bond *bond;
 	int n;
 
@@ -4705,7 +4672,7 @@  struct intel_engine_cs *
 intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
 				 unsigned int sibling)
 {
-	struct virtual_engine *ve = to_virtual_engine(engine);
+	struct intel_virtual_engine *ve = to_virtual_engine(engine);
 
 	if (sibling >= ve->num_siblings)
 		return NULL;
@@ -4773,7 +4740,7 @@  void intel_execlists_show_requests(struct intel_engine_cs *engine,
 	last = NULL;
 	count = 0;
 	for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
-		struct virtual_engine *ve =
+		struct intel_virtual_engine *ve =
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		struct i915_request *rq = READ_ONCE(ve->request);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_virtual_engine_types.h b/drivers/gpu/drm/i915/gt/intel_virtual_engine_types.h
new file mode 100644
index 000000000000..9ba5f0e6e395
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_virtual_engine_types.h
@@ -0,0 +1,57 @@ 
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VIRTUAL_ENGINE_TYPES__
+#define __INTEL_VIRTUAL_ENGINE_TYPES__
+
+#include "intel_context_types.h"
+#include "intel_engine_types.h"
+
+struct i915_request;
+
+struct intel_virtual_engine {
+	struct intel_engine_cs base;
+	struct intel_context context;
+
+	/*
+	 * We allow only a single request through the virtual engine at a time
+	 * (each request in the timeline waits for the completion fence of
+	 * the previous before being submitted). By restricting ourselves to
+	 * only submitting a single request, each request is placed on to a
+	 * physical engine to maximise load spreading (by virtue of the late greedy
+	 * scheduling -- each real engine takes the next available request
+	 * upon idling).
+	 */
+	struct i915_request *request;
+
+	/*
+	 * We keep an rbtree of available virtual engines inside each physical
+	 * engine, sorted by priority. Here we preallocate the nodes we need
+	 * for the virtual engine, indexed by physical_engine->id.
+	 */
+	struct ve_node {
+		struct rb_node rb;
+		int prio;
+	} nodes[I915_NUM_ENGINES];
+
+	/*
+	 * Keep track of bonded pairs -- restrictions upon our selection
+	 * of physical engines any particular request may be submitted to.
+	 * If we receive a submit-fence from a master engine, we will only
+	 * use one of sibling_mask physical engines.
+	 */
+	struct ve_bond {
+		const struct intel_engine_cs *master;
+		intel_engine_mask_t sibling_mask;
+	} *bonds;
+	unsigned int num_bonds;
+
+	/* And finally, which physical engines this virtual engine maps onto. */
+	unsigned int num_siblings;
+	struct intel_engine_cs *siblings[0];
+};
+
+#endif /* __INTEL_VIRTUAL_ENGINE_TYPES__ */
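
A note on the nodes[] array the new header carries over: each virtual
engine preallocates one ve_node per physical engine, and the node at
nodes[engine->id] is what gets linked into that engine's
execlists->virtual rbtree, so no allocation is needed at submission
time. Recovering the owning virtual engine from a tree node is the
rb_entry() pattern repeated throughout the diff; distilled into a
helper (illustrative only, not part of the patch):

	/*
	 * Illustrative helper: engine->execlists.virtual holds ve_nodes,
	 * each preallocated by some virtual engine at nodes[engine->id],
	 * so rb_entry() recovers the owning intel_virtual_engine.
	 */
	static struct intel_virtual_engine *
	first_virtual_engine(struct intel_engine_cs *engine)
	{
		struct rb_node *rb =
			rb_first_cached(&engine->execlists.virtual);

		if (!rb)
			return NULL;

		return rb_entry(rb, struct intel_virtual_engine,
				nodes[engine->id].rb);
	}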