@@ -170,6 +170,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32))
+#define I915_GEM_HWS_GGTT_BIND 0x46
+#define I915_GEM_HWS_GGTT_BIND_ADDR (I915_GEM_HWS_GGTT_BIND * sizeof(u32))
#define I915_GEM_HWS_PXP 0x60
#define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32))
#define I915_GEM_HWS_GSC 0x62
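
The two new defines reserve dword 0x46 of the per-engine hardware status page (byte offset 0x118) for the GGTT bind context's breadcrumbs. Purely as an illustration (not part of the patch), the last completed seqno of that timeline could be sampled with the existing status-page helper:

	/* illustrative sketch only -- not part of this patch */
	u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_GGTT_BIND);
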
@@ -1418,6 +1418,20 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce)
intel_context_put(ce);
}
+static struct intel_context *
+create_ggtt_bind_context(struct intel_engine_cs *engine)
+{
+ static struct lock_class_key kernel;
+
+ /*
+ * MI_UPDATE_GTT can insert up to 511 PTE entries, and there can be
+ * multiple bind requests in flight at a time, so use a larger ring.
+ */
+ return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
+ I915_GEM_HWS_GGTT_BIND_ADDR,
+ &kernel, "ggtt_bind_context");
+}
+
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
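
For orientation only: the 512K ring is sized for MI_UPDATE_GTT batches of up to 511 PTEs each, with several binds potentially in flight. The sketch below shows roughly how a bind could be submitted on this pinned context using the existing i915 request helpers; it is not part of the patch, and the helper name, the MI_UPDATE_GTT dword layout and the error handling are assumptions, not the driver's actual GGTT update path.

/*
 * Illustrative sketch only -- not part of this patch. The MI_UPDATE_GTT
 * length/offset encoding shown here is an assumption; consult the real
 * GGTT update code in the series for the authoritative layout.
 */
static int sketch_emit_ggtt_bind(struct intel_context *ce, u32 pte_offset,
				 const u64 *ptes, u32 count)
{
	struct i915_request *rq;
	u32 *cs;
	u32 i;

	/* ce == engine->bind_context; count <= 511 per the MI_UPDATE_GTT limit */
	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* one header dword, one offset dword, then one qword per PTE */
	cs = intel_ring_begin(rq, 2 + 2 * count);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_UPDATE_GTT | (2 * count);	/* assumed length encoding */
	*cs++ = pte_offset;			/* assumed: offset of the first PTE */
	for (i = 0; i < count; i++) {
		*cs++ = lower_32_bits(ptes[i]);
		*cs++ = upper_32_bits(ptes[i]);
	}

	intel_ring_advance(rq, cs);
	i915_request_add(rq);

	return 0;
}
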
@@ -1441,7 +1455,7 @@ create_kernel_context(struct intel_engine_cs *engine)
*/
static int engine_init_common(struct intel_engine_cs *engine)
{
- struct intel_context *ce;
+ struct intel_context *ce, *bce = NULL;
int ret;
engine->set_default_submission(engine);
@@ -1457,17 +1471,34 @@ static int engine_init_common(struct intel_engine_cs *engine)
ce = create_kernel_context(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ /*
+ * Create a separate pinned context for GGTT updates on the blitter
+ * engine if the platform requires such a service. MI_UPDATE_GTT works
+ * on other engines as well, but the BCS is expected to be the least
+ * busy engine, so pick it for GGTT updates.
+ */
+ if (engine->id == BCS0) {
+ bce = create_ggtt_bind_context(engine);
+ if (IS_ERR(bce)) {
+ ret = PTR_ERR(bce);
+ goto err_ce_context;
+ }
+ }
ret = measure_breadcrumb_dw(ce);
if (ret < 0)
- goto err_context;
+ goto err_bce_context;
engine->emit_fini_breadcrumb_dw = ret;
engine->kernel_context = ce;
+ engine->bind_context = bce;
return 0;
-err_context:
+err_bce_context:
+ if (bce)
+ intel_engine_destroy_pinned_context(bce);
+err_ce_context:
intel_engine_destroy_pinned_context(ce);
return ret;
}
@@ -1537,6 +1567,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->kernel_context)
intel_engine_destroy_pinned_context(engine->kernel_context);
+ if (engine->bind_context)
+ intel_engine_destroy_pinned_context(engine->bind_context);
+
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
cleanup_status_page(engine);
@@ -416,6 +416,9 @@ struct intel_engine_cs {
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
+ struct intel_context *bind_context; /* pinned, only for BCS0 */
+ /* marks the bind context's availability status */
+ bool bind_context_ready;
/**
* pinned_contexts_list: List of pinned contexts. This list is only
@@ -1024,3 +1024,52 @@ bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
{
return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
}
+
+static void __intel_gt_bind_context_set_ready(struct intel_gt *gt, bool ready)
+{
+ struct intel_engine_cs *engine = gt->engine[BCS0];
+
+ if (engine && engine->bind_context)
+ engine->bind_context_ready = ready;
+}
+
+/**
+ * intel_gt_bind_context_set_ready - Mark the GGTT bind context as ready
+ * @gt: GT structure
+ *
+ * This function marks the GGTT bind context as ready for use.
+ */
+void intel_gt_bind_context_set_ready(struct intel_gt *gt)
+{
+ __intel_gt_bind_context_set_ready(gt, true);
+}
+
+/**
+ * intel_gt_bind_context_set_unready - Mark the GGTT bind context as not ready
+ * @gt: GT structure
+ *
+ * This function marks the GGTT bind context as not ready for use.
+ */
+void intel_gt_bind_context_set_unready(struct intel_gt *gt)
+{
+ __intel_gt_bind_context_set_ready(gt, false);
+}
+
+/**
+ * intel_gt_is_bind_context_ready - Check whether the GGTT bind context is ready
+ * @gt: GT structure
+ *
+ * Return: true if the GGTT bind context is ready for use, false otherwise.
+ */
+bool intel_gt_is_bind_context_ready(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine = gt->engine[BCS0];
+
+ if (engine)
+ return engine->bind_context_ready;
+
+ return false;
+}
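
These three helpers give the rest of the driver a simple gate around the bind context: set_ready()/set_unready() are presumably meant to bracket the periods when request submission is safe (for example around resume and suspend), and GGTT update paths can check is_bind_context_ready() to choose between the GPU path and a CPU fallback. A hypothetical caller (the function name and the fallback are assumptions, not part of this patch) might look like:

/* Hypothetical caller -- illustrative only, not part of this patch. */
static void sketch_ggtt_update(struct intel_gt *gt)
{
	if (intel_gt_is_bind_context_ready(gt)) {
		/* queue an MI_UPDATE_GTT request on gt->engine[BCS0]->bind_context */
	} else {
		/* GPU not ready (e.g. probe/suspend): write the PTEs with the CPU */
	}
}
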
@@ -176,4 +176,7 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
struct drm_i915_gem_object *obj,
bool always_coherent);
+void intel_gt_bind_context_set_ready(struct intel_gt *gt);
+void intel_gt_bind_context_set_unready(struct intel_gt *gt);
+bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
#endif /* __INTEL_GT_H__ */