@@ -1296,18 +1296,22 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
/* The mutex must be acquired before calling this function */
WARN_ON(!mutex_is_locked(&params->dev->struct_mutex));
+ ret = intel_ring_reserve_space(req);
+ if (ret)
+ goto error;
+
/*
* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
ret = intel_ring_invalidate_all_caches(req);
if (ret)
- return ret;
+ goto error;
/* Switch to the correct context for the batch */
ret = i915_switch_context(req);
if (ret)
- return ret;
+ goto error;
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
"%s didn't clear reload\n", engine->name);
@@ -1316,7 +1320,7 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
params->instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(req, 4);
if (ret)
- return ret;
+ goto error;
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
@@ -1330,7 +1334,7 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
if (params->args_flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(params->dev, req);
if (ret)
- return ret;
+ goto error;
}
exec_len = params->args_batch_len;
@@ -1344,13 +1348,17 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
exec_start, exec_len,
params->dispatch_flags);
if (ret)
- return ret;
+ goto error;
trace_i915_gem_ring_dispatch(req, params->dispatch_flags);
i915_gem_execbuffer_retire_commands(params);
- return 0;
+error:
+ if (ret)
+ intel_ring_reserved_space_cancel(req->ringbuf);
+
+ return ret;
}
/**
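
Taken together, this hunk brackets the whole legacy submission path with a reserve/cancel pair: ring space is reserved up front via intel_ring_reserve_space(), every failure branch is rerouted from "return ret" to "goto error", and the error label releases the reservation. A minimal, self-contained sketch of the pattern (reserve_space(), cancel_space() and setup_step() are hypothetical stand-ins, not the driver's API):

#include <stdio.h>

/* Hypothetical stand-ins for the reserve/cancel pair; 0 means success. */
static int  reserve_space(void)  { return 0; }
static void cancel_space(void)   { puts("reservation cancelled"); }
static int  setup_step(int fail) { return fail ? -5 : 0; }

static int submission_final(int failing_step)
{
	int ret;

	/* Reserve ring space before any other setup. */
	ret = reserve_space();
	if (ret)
		goto error;

	ret = setup_step(failing_step == 1);	/* e.g. cache invalidation */
	if (ret)
		goto error;

	ret = setup_step(failing_step == 2);	/* e.g. context switch */
	if (ret)
		goto error;

	/* Success: the reservation is consumed by the emitted commands. */
error:
	if (ret)
		cancel_space();	/* any failure releases the reservation */
	return ret;
}

int main(void)
{
	submission_final(0);	/* success path: reservation kept */
	submission_final(2);	/* failure path: prints the cancel */
	return 0;
}

Note that the success path falls through the error label here, which is why the cancel is guarded by "if (ret)".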
@@ -519,6 +519,8 @@ static int i915_scheduler_queue_execbuffer_bypass(struct i915_scheduler_queue_en
struct i915_scheduler *scheduler = dev_priv->scheduler;
int ret;
+ intel_ring_reserved_space_cancel(qe->params.request->ringbuf);
+
scheduler->flags[qe->params.engine->id] |= I915_SF_SUBMITTING;
ret = dev_priv->gt.execbuf_final(&qe->params);
scheduler->flags[qe->params.engine->id] &= ~I915_SF_SUBMITTING;
@@ -584,6 +586,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
node->stamp = jiffies;
i915_gem_request_reference(node->params.request);
+ intel_ring_reserved_space_cancel(node->params.request->ringbuf);
+
WARN_ON(node->params.request->scheduler_qe);
node->params.request->scheduler_qe = node;
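
Both scheduler paths cancel the reservation as soon as the batch is handed over: the bypass path right before calling execbuf_final(), and the deferred path when the node is queued. Actual submission may happen much later, so holding the reservation would pin ring space for an unbounded period; the *_submission_final() functions re-reserve it immediately before emitting commands. A toy model of that lifecycle (types and names are illustrative only, not driver structures):

#include <assert.h>
#include <stdbool.h>

/* Toy request: the only state modelled is whether ring space is held. */
struct toy_request {
	bool space_reserved;
};

static void toy_reserve(struct toy_request *req) { req->space_reserved = true; }
static void toy_cancel(struct toy_request *req)  { req->space_reserved = false; }

/* Mirrors the queueing hunks: submission is deferred, so drop the
 * reservation taken at request-creation time rather than pinning
 * ring space until the scheduler gets around to the batch. */
static void toy_queue(struct toy_request *req)
{
	toy_cancel(req);
}

/* Mirrors *_submission_final(): re-reserve immediately before
 * emitting commands, and cancel again if any setup step fails. */
static int toy_submission_final(struct toy_request *req, bool fail)
{
	toy_reserve(req);
	if (fail) {
		toy_cancel(req);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct toy_request req = { .space_reserved = true };	/* reserved at alloc */

	toy_queue(&req);			/* queued: reservation dropped */
	assert(!req.space_reserved);

	assert(toy_submission_final(&req, false) == 0);
	assert(req.space_reserved);		/* consumed by the successful submit */
	return 0;
}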
@@ -1010,13 +1010,17 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
/* The mutex must be acquired before calling this function */
WARN_ON(!mutex_is_locked(&params->dev->struct_mutex));
+ ret = intel_logical_ring_reserve_space(req);
+ if (ret)
+ goto err;
+
/*
* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
ret = logical_ring_invalidate_all_caches(req);
if (ret)
- return ret;
+ goto err;
if (engine == &dev_priv->engine[RCS] &&
params->instp_mode != dev_priv->relative_constants_mode) {
@@ -1038,13 +1042,18 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
ret = engine->emit_bb_start(req, exec_start, params->dispatch_flags);
if (ret)
- return ret;
+ goto err;
trace_i915_gem_ring_dispatch(req, params->dispatch_flags);
i915_gem_execbuffer_retire_commands(params);
return 0;
+
+err:
+ intel_ring_reserved_space_cancel(params->request->ringbuf);
+
+ return ret;
}
void intel_execlists_retire_requests(struct intel_engine_cs *engine)
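
The execlists hunk mirrors the legacy one, with one stylistic difference: the legacy version lets the success path fall through its error label (hence the "if (ret)" guard before the cancel), whereas the execlists version returns 0 before reaching err, so the cancel runs unconditionally. A hedged side-by-side sketch of the two label styles (do_work() and undo() are hypothetical):

static int  do_work(int fail) { return fail ? -1 : 0; }
static void undo(void)        { }

/* Ringbuffer style: the success path falls through the label,
 * so the cleanup must be guarded by "if (ret)". */
static int style_fallthrough(int fail)
{
	int ret;

	ret = do_work(fail);
	if (ret)
		goto error;
	/* ... more work; ret stays 0 on success ... */
error:
	if (ret)
		undo();
	return ret;
}

/* Execlists style: success returns before the label, so only
 * failures reach the cleanup and no guard is needed. */
static int style_early_return(int fail)
{
	int ret;

	ret = do_work(fail);
	if (ret)
		goto err;
	return 0;
err:
	undo();
	return ret;
}

int main(void)
{
	style_fallthrough(1);	/* exercises the guarded cleanup */
	style_early_return(1);	/* exercises the unguarded cleanup */
	return 0;
}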