@@ -2976,8 +2976,7 @@ void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_gem_request *req);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_engine_cs *ring,
- struct intel_context *to);
+int i915_switch_context(struct drm_i915_gem_request *req);
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
@@ -2469,8 +2469,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
WARN_ON(request->batch_obj && obj);
request->batch_obj = obj;
- WARN_ON(request->ctx != ring->last_context);
-
request->emitted_jiffies = jiffies;
list_add_tail(&request->list, &ring->request_list);
request->file_priv = NULL;
@@ -3109,7 +3107,7 @@ int i915_gpu_idle(struct drm_device *dev)
if (ret)
return ret;
- ret = i915_switch_context(req->ring, ring->default_context);
+ ret = i915_switch_context(req);
if (ret) {
i915_gem_request_unreference(req);
return ret;
@@ -414,7 +414,7 @@ int i915_gem_context_enable(struct drm_i915_gem_request *req)
ret = ring->init_context(req->ring, ring->default_context);
} else
- ret = i915_switch_context(req->ring, ring->default_context);
+ ret = i915_switch_context(req);
if (ret) {
DRM_ERROR("ring init context: %d\n", ret);
@@ -693,8 +693,7 @@ unpin_out:
/**
* i915_switch_context() - perform a GPU context switch.
- * @ring: ring for which we'll execute the context switch
- * @to: the context to switch to
+ * @req: request for which we'll execute the context switch
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -705,25 +704,25 @@ unpin_out:
* switched by writing to the ELSP and requests keep a reference to their
* context.
*/
-int i915_switch_context(struct intel_engine_cs *ring,
-			struct intel_context *to)
+int i915_switch_context(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;

 	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

-	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-		if (to != ring->last_context) {
-			i915_gem_context_reference(to);
+	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
+		if (req->ctx != ring->last_context) {
+			i915_gem_context_reference(req->ctx);
 			if (ring->last_context)
 				i915_gem_context_unreference(ring->last_context);
-			ring->last_context = to;
+			ring->last_context = req->ctx;
 		}

 		return 0;
 	}

-	return do_switch(ring, req->ctx);
+	return do_switch(ring, req->ctx);
 }
static bool contexts_enabled(struct drm_device *dev)
@@ -1190,7 +1190,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (ret)
goto error;
- ret = i915_switch_context(ring, params->ctx);
+ ret = i915_switch_context(params->request);
if (ret)
goto error;