@@ -77,6 +77,28 @@
#define CTX_R_PWR_CLK_STATE 0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
+/*
+ * Gen8 execlist context-descriptor layout (64 bits).
+ * Low dword: flag bits below, the context/addressing mode at bits 3-4,
+ * the fault-handling mode at bits 6-7, and the context's GGTT address
+ * (LRCA) from bit 12 up.  High dword (bit 32 up): software-defined tag,
+ * see GEN8_CTX_UNUSED_SHIFT and get_descriptor().
+ */
+#define GEN8_CTX_VALID (1<<0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
+#define GEN8_CTX_FORCE_RESTORE (1<<2)
+#define GEN8_CTX_L3LLC_COHERENT (1<<5)
+#define GEN8_CTX_PRIVILEGE (1<<8)
+/* Context/addressing modes, placed at GEN8_CTX_MODE_SHIFT. */
+enum {
+	ADVANCED_CONTEXT=0,
+	LEGACY_CONTEXT,
+	ADVANCED_AD_CONTEXT,
+	LEGACY_64B_CONTEXT
+};
+#define GEN8_CTX_MODE_SHIFT 3
+/* GPU fault handling modes, placed at GEN8_CTX_FAULT_SHIFT. */
+enum {
+	FAULT_AND_HANG=0,
+	FAULT_AND_HALT, /* Debug only */
+	FAULT_AND_STREAM,
+	FAULT_AND_CONTINUE /* Unsupported */
+};
+#define GEN8_CTX_FAULT_SHIFT 6
+#define GEN8_CTX_LRCA_SHIFT 12
+#define GEN8_CTX_UNUSED_SHIFT 32
+
static inline u32 get_submission_id(struct i915_hw_context *ctx)
{
struct drm_i915_file_private *file_priv = ctx->file_priv;
@@ -95,6 +117,68 @@ static inline u32 get_submission_id(struct i915_hw_context *ctx)
return submission_id;
}
+/*
+ * Build the 64-bit execlist context descriptor for @ctx.
+ *
+ * Low dword: valid bit, legacy (ringbuffer) submission mode, L3/LLC
+ * coherency and privilege flags, OR'ed with the context object's GGTT
+ * offset (the LRCA).  High dword: the submission id obtained from
+ * get_submission_id(), stored in the hardware-unused upper bits so the
+ * driver can presumably identify this context later — TODO confirm
+ * against the interrupt/status-read path.
+ */
+static inline uint64_t get_descriptor(struct i915_hw_context *ctx)
+{
+	uint64_t desc;
+	u32 submission_id = get_submission_id(ctx);
+
+	/* The GGTT offset must fit entirely in the descriptor's low dword;
+	 * being page-aligned, it also leaves bits 0-11 clear for the flags
+	 * below. */
+	BUG_ON(i915_gem_obj_ggtt_offset(ctx->obj) & 0xFFFFFFFF00000000ULL);
+
+	desc = GEN8_CTX_VALID;
+	desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
+	desc |= i915_gem_obj_ggtt_offset(ctx->obj);
+	desc |= GEN8_CTX_L3LLC_COHERENT;
+	desc |= (u64)submission_id << GEN8_CTX_UNUSED_SHIFT;
+	desc |= GEN8_CTX_PRIVILEGE;
+
+	/* TODO: WaDisableLiteRestore when we start using semaphore
+	 * signalling between Command Streamers */
+	/* desc |= GEN8_CTX_FORCE_RESTORE; */
+
+	return desc;
+}
+
+/*
+ * Submit up to two contexts to the engine's ExecList Submit Port (ELSP).
+ *
+ * @ctx0 is the context the hardware executes first; @ctx1 (may be NULL)
+ * is queued behind it.  A NULL @ctx1 is encoded as an all-zero (invalid)
+ * descriptor.  The port is fed with four 32-bit MMIO writes: second
+ * context first, upper dword before lower dword in each pair.  The write
+ * order is mandated by the hardware, and the fourth write is what
+ * actually triggers execution.
+ */
+static void submit_execlist(struct intel_engine *ring,
+		struct i915_hw_context *ctx0,
+		struct i915_hw_context *ctx1)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	uint64_t temp = 0;
+	uint32_t desc[4];
+
+	/* XXX: You must always write both descriptors in the order below. */
+	if (ctx1)
+		temp = get_descriptor(ctx1);
+	else
+		temp = 0;
+	desc[1] = (u32)(temp >> 32);
+	desc[0] = (u32)temp;
+
+	temp = get_descriptor(ctx0);
+	desc[3] = (u32)(temp >> 32);
+	desc[2] = (u32)temp;
+
+	/* Second context's descriptor, upper dword first... */
+	I915_WRITE(RING_ELSP(ring), desc[1]);
+	I915_WRITE(RING_ELSP(ring), desc[0]);
+	/* ...then the first context's descriptor. */
+	I915_WRITE(RING_ELSP(ring), desc[3]);
+	/* The context is automatically loaded after the following */
+	I915_WRITE(RING_ELSP(ring), desc[2]);
+}
+
+/*
+ * Switch the engine to run @to0 (and optionally @to1 queued behind it)
+ * via execlist submission.  Both context objects must already be pinned
+ * in the GGTT, since their GGTT offsets go into the descriptors.
+ *
+ * NOTE(review): @tail0 and @tail1 are unused in this body — presumably
+ * consumed by a later change in the series (e.g. to set the ring tail in
+ * the context image); verify before relying on them.  Always returns 0.
+ */
+static int gen8_switch_context(struct intel_engine *ring,
+		struct i915_hw_context *to0, u32 tail0,
+		struct i915_hw_context *to1, u32 tail1)
+{
+	BUG_ON(!i915_gem_obj_is_pinned(to0->obj));
+
+	if (to1)
+		BUG_ON(!i915_gem_obj_is_pinned(to1->obj));
+
+	submit_execlist(ring, to0, to1);
+
+	return 0;
+}
+
void gen8_gem_context_free(struct i915_hw_context *ctx)
{
/* Global default contexts ringbuffers are take care of