@@ -662,9 +662,8 @@ static void port_assign(struct execlist_port *port,
static bool i915_guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlist * const el = &engine->execlist;
- struct execlist_port *port = el->port;
- const struct execlist_port * const last_port =
- &el->port[el->port_mask];
+ struct execlist_port *port = execlist_port_head(el);
+ const struct execlist_port * const last_port = execlist_port_tail(el);
struct drm_i915_gem_request *last = port_request(port);
struct rb_node *rb;
bool submit = false;
@@ -686,7 +685,8 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
if (submit)
port_assign(port, last);
- port++;
+
+ port = execlist_port_next(el, port);
}
INIT_LIST_HEAD(&rq->priotree.link);
@@ -717,9 +717,8 @@ static void i915_guc_irq_handler(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlist * const el = &engine->execlist;
- struct execlist_port *port = el->port;
- const struct execlist_port * const last_port =
- &el->port[el->port_mask];
+ struct execlist_port *port = execlist_port_head(el);
+ const struct execlist_port * const last_port = execlist_port_tail(el);
struct drm_i915_gem_request *rq;
bool submit;
@@ -729,7 +728,7 @@ static void i915_guc_irq_handler(unsigned long data)
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
- execlist_port_complete(el, port);
+ port = execlist_port_complete(el, port);
rq = port_request(port);
}
@@ -1312,7 +1312,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
bool tasklet = false;
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
- if (port_count(&el->port[0])) {
+ if (port_count(execlist_port_head(el))) {
__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
tasklet = true;
}
@@ -1334,7 +1334,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return false;
/* Both ports drained, no more ELSP submission? */
- if (port_request(&engine->execlist.port[0]))
+ if (port_request(execlist_port_head(&engine->execlist)))
return false;
/* ELSP is empty, but there are ready requests? */
@@ -400,9 +400,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *last;
struct intel_engine_execlist * const el = &engine->execlist;
- struct execlist_port *port = el->port;
- const struct execlist_port * const last_port =
- &el->port[el->port_mask];
+ struct execlist_port *port = execlist_port_head(el);
+ const struct execlist_port * const last_port = execlist_port_tail(el);
struct rb_node *rb;
bool submit = false;
@@ -486,7 +485,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (submit)
port_assign(port, last);
- port++;
+
+ port = execlist_port_next(el, port);
GEM_BUG_ON(port_isset(port));
}
@@ -516,11 +516,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
execlists_submit_ports(engine);
}
-static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
+static bool execlists_elsp_ready(struct intel_engine_execlist * const el)
{
- const struct execlist_port *port = engine->execlist.port;
+ struct execlist_port * const port = execlist_port_head(el);
- return port_count(&port[0]) + port_count(&port[1]) < 2;
+ return port_count(port) + port_count(execlist_port_next(el, port)) < 2;
}
/*
@@ -531,7 +531,7 @@ static void intel_lrc_irq_handler(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlist * const el = &engine->execlist;
- struct execlist_port *port = el->port;
+ struct execlist_port *port = execlist_port_head(el);
struct drm_i915_private *dev_priv = engine->i915;
/* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -613,7 +613,7 @@ static void intel_lrc_irq_handler(unsigned long data)
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
- execlist_port_complete(el, port);
+ port = execlist_port_complete(el, port);
} else {
port_set(port, port_pack(rq, count));
}
@@ -627,7 +627,7 @@ static void intel_lrc_irq_handler(unsigned long data)
csb_mmio);
}
- if (execlists_elsp_ready(engine))
+ if (execlists_elsp_ready(el))
execlists_dequeue(engine);
intel_uncore_forcewake_put(dev_priv, el->fw_domains);
@@ -701,6 +701,7 @@ insert_request(struct intel_engine_cs *engine,
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
+ struct intel_engine_execlist * const el = &engine->execlist;
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
@@ -709,11 +710,11 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
if (insert_request(engine,
&request->priotree,
request->priotree.priority)) {
- if (execlists_elsp_ready(engine))
- tasklet_hi_schedule(&engine->execlist.irq_tasklet);
+ if (execlists_elsp_ready(el))
+ tasklet_hi_schedule(&el->irq_tasklet);
}
- GEM_BUG_ON(!engine->execlist.first);
+ GEM_BUG_ON(!el->first);
GEM_BUG_ON(list_empty(&request->priotree.link));
spin_unlock_irqrestore(&engine->timeline->lock, flags);
@@ -1333,7 +1334,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
struct intel_engine_execlist * const el = &engine->execlist;
- struct execlist_port *port = el->port;
+ struct execlist_port *port = execlist_port_head(el);
struct intel_context *ce;
/*
@@ -244,6 +244,11 @@ struct intel_engine_execlist {
unsigned int port_mask;
/**
+ * @port_head: first used execlist port
+ */
+ unsigned int port_head;
+
+ /**
* @queue: queue of requests, in priority lists
*/
struct rb_root queue;
@@ -505,23 +510,44 @@ struct intel_engine_cs {
#define for_each_execlist_port(el__, port__, n__) \
for ((n__) = 0; \
- (port__) = &(el__)->port[__port_idx(0, (n__), (el__)->port_mask)], (n__) < (el__)->port_mask + 1; \
+ (port__) = &(el__)->port[__port_idx((el__)->port_head, (n__), (el__)->port_mask)], (n__) < (el__)->port_mask + 1; \
(n__)++)
#define for_each_execlist_port_reverse(el__, port__, n__) \
for ((n__) = (el__)->port_mask + 1; \
- (port__) = &(el__)->port[__port_idx((el__)->port_mask, (n__), (el__)->port_mask)], (n__)--;)
+ (port__) = &(el__)->port[__port_idx((el__)->port_head - 1, (n__), (el__)->port_mask)], (n__)--;)
-static inline void
+static inline struct execlist_port *
+execlist_port_head(struct intel_engine_execlist * const el)
+{
+ return &el->port[el->port_head];
+}
+
+static inline struct execlist_port *
+execlist_port_tail(struct intel_engine_execlist * const el)
+{
+ return &el->port[__port_idx(el->port_head, el->port_mask, el->port_mask)];
+}
+
+static inline struct execlist_port *
+execlist_port_next(struct intel_engine_execlist * const el,
+ const struct execlist_port * const port)
+{
+ const unsigned int i = port_index(port, el);
+
+ return &el->port[__port_idx(i, 1, el->port_mask)];
+}
+
+static inline struct execlist_port *
execlist_port_complete(struct intel_engine_execlist * const el,
struct execlist_port * const port)
{
- const unsigned int m = el->port_mask;
+ GEM_DEBUG_BUG_ON(port_index(port, el) != el->port_head);
- GEM_DEBUG_BUG_ON(port_index(port, el) != 0);
+ memset(port, 0, sizeof(struct execlist_port));
+ el->port_head = __port_idx(el->port_head, 1, el->port_mask);
- memmove(port, port + 1, m * sizeof(struct execlist_port));
- memset(port + m, 0, sizeof(struct execlist_port));
+ return execlist_port_head(el);
}
void execlist_cancel_port_requests(struct intel_engine_execlist * const el);
Instead of trusting that the first available port is always at index 0, use an accessor to hide this detail. This allows us to simply advance the head on port completion instead of memmoving the array. Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com> --- drivers/gpu/drm/i915/i915_guc_submission.c | 15 ++++++----- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/intel_lrc.c | 29 +++++++++++----------- drivers/gpu/drm/i915/intel_ringbuffer.h | 40 ++++++++++++++++++++++++------ 5 files changed, 57 insertions(+), 31 deletions(-)