@@ -606,6 +606,8 @@ static void __i915_guc_submit(struct drm_i915_gem_request *rq)
unsigned long flags;
int b_ret;
+ intel_gvt_notify_context_status(rq, INTEL_CONTEXT_SCHEDULE_IN);
+
/* WA to flush out the pending GMADR writes to ring buffer. */
if (i915_vma_is_map_and_fenceable(rq->ring->vma))
POSTING_READ_FW(GUC_STATUS);
@@ -724,6 +726,8 @@ static void i915_guc_irq_handler(unsigned long data)
rq = port[0].request;
while (rq && i915_gem_request_completed(rq)) {
trace_i915_gem_request_out(rq);
+ intel_gvt_notify_context_status(rq,
+ INTEL_CONTEXT_SCHEDULE_OUT);
i915_gem_request_put(rq);
port[0].request = port[1].request;
port[1].request = NULL;
@@ -32,6 +32,14 @@ void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
int intel_gvt_init_device(struct drm_i915_private *dev_priv);
void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
+
+static inline void
+intel_gvt_notify_context_status(struct drm_i915_gem_request *rq,
+ unsigned long status)
+{
+ atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+ status, rq);
+}
#else
static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
@@ -40,6 +48,11 @@ static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
{
}
+static inline void
+intel_gvt_notify_context_status(struct drm_i915_gem_request *rq,
+ unsigned long status)
+{
+}
#endif
#endif /* _INTEL_GVT_H_ */
@@ -295,21 +295,6 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
return ctx->engine[engine->id].lrc_desc;
}
-static inline void
-execlists_context_status_change(struct drm_i915_gem_request *rq,
- unsigned long status)
-{
- /*
- * Only used when GVT-g is enabled now. When GVT-g is disabled,
- * The compiler should eliminate this function as dead-code.
- */
- if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
- return;
-
- atomic_notifier_call_chain(&rq->engine->context_status_notifier,
- status, rq);
-}
-
static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
@@ -350,7 +335,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
GEM_BUG_ON(port[0].count > 1);
if (!port[0].count)
- execlists_context_status_change(port[0].request,
+ intel_gvt_notify_context_status(port[0].request,
INTEL_CONTEXT_SCHEDULE_IN);
desc[0] = execlists_update_context(port[0].request);
GEM_DEBUG_EXEC(port[0].context_id = upper_32_bits(desc[0]));
@@ -358,7 +343,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
if (port[1].request) {
GEM_BUG_ON(port[1].count);
- execlists_context_status_change(port[1].request,
+ intel_gvt_notify_context_status(port[1].request,
INTEL_CONTEXT_SCHEDULE_IN);
desc[1] = execlists_update_context(port[1].request);
GEM_DEBUG_EXEC(port[1].context_id = upper_32_bits(desc[1]));
@@ -574,7 +559,7 @@ static void intel_lrc_irq_handler(unsigned long data)
if (--port[0].count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
GEM_BUG_ON(!i915_gem_request_completed(port[0].request));
- execlists_context_status_change(port[0].request,
+ intel_gvt_notify_context_status(port[0].request,
INTEL_CONTEXT_SCHEDULE_OUT);
trace_i915_gem_request_out(port[0].request);
A GVT request needs a manual mmio load/restore. Before GuC submits a
request, send a notification to gvt so the mmio state can be loaded;
after GuC finishes the GVT request, notify gvt again so the mmio state
can be restored. This follows the same usage as execlists submission.

Cc: xiao.zheng@intel.com
Cc: kevin.tian@intel.com
Cc: joonas.lahtinen@linux.intel.com
Cc: chris@chris-wilson.co.uk
Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
---
 drivers/gpu/drm/i915/i915_guc_submission.c |  4 ++++
 drivers/gpu/drm/i915/intel_gvt.h           | 13 +++++++++++++
 drivers/gpu/drm/i915/intel_lrc.c           | 21 +++------------------
 3 files changed, 20 insertions(+), 18 deletions(-)
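
For reference, the gvt side consumes these events by registering on the
engine's context_status_notifier chain. The sketch below is illustrative
only and is not part of this patch: the callback name and the
gvt_load_render_mmio()/gvt_restore_render_mmio() helpers are hypothetical
placeholders standing in for the real handling in the gvt scheduler.

#include <linux/notifier.h>

static int gvt_context_status_cb(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct drm_i915_gem_request *rq = data;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		/* hypothetical helper: load the vGPU render mmio for rq */
		gvt_load_render_mmio(rq->engine);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		/* hypothetical helper: restore the host render mmio */
		gvt_restore_render_mmio(rq->engine);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block gvt_status_nb = {
	.notifier_call = gvt_context_status_cb,
};

/* registered once per engine, e.g. during gvt scheduler init:
 *	atomic_notifier_chain_register(&engine->context_status_notifier,
 *				       &gvt_status_nb);
 */

Because context_status_notifier is an atomic notifier chain, the callback
runs in the GuC submission/tasklet path and must not sleep.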