@@ -330,6 +330,7 @@ struct intel_engine_cs {
atomic_t fw_active;
unsigned long context_tag;
+ unsigned long context_last;
struct rb_node uabi_node;
@@ -1335,6 +1335,23 @@ static void intel_context_update_runtime(struct intel_context *ce)
 	ce->runtime.total += dt;
 }
 
+static unsigned int next_cyclic_tag(struct intel_engine_cs *engine)
+{
+	unsigned long tag, mask = ~0ul << engine->context_last;
+
+	/* Cyclically allocate unused ids, prevent immediate reuse of last */
+	tag = READ_ONCE(engine->context_tag);
+	tag = (tag & mask) ?: tag;
+	GEM_BUG_ON(tag == 0);
+
+	tag = __ffs(tag);
+	/* Keep context_last < BITS_PER_LONG so the mask shift stays defined */
+	GEM_BUG_ON(tag >= BITS_PER_LONG - 1);
+	clear_bit(tag, &engine->context_tag);
+
+	return engine->context_last = tag + 1;
+}
+
static inline struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
@@ -1355,12 +1370,9 @@ __execlists_schedule_in(struct i915_request *rq)
ce->lrc.ccid = ce->tag;
} else {
/* We don't need a strict matching tag, just different values */
- unsigned int tag = ffs(READ_ONCE(engine->context_tag));
+ unsigned int tag = next_cyclic_tag(engine);
- GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
- clear_bit(tag - 1, &engine->context_tag);
ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);
-
BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
}
While we only release the context tag after we have processed the context-switch event away from the context, be paranoid in case that value remains live in HW and so avoid reusing the last tag for the next context after a brief idle. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Ramalingam C <ramalingam.c@intel.com> --- drivers/gpu/drm/i915/gt/intel_engine_types.h | 1 + drivers/gpu/drm/i915/gt/intel_lrc.c | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-)