@@ -2201,6 +2201,7 @@ struct drm_i915_gem_request {
/** GEM sequence number associated with this request. */
uint32_t seqno;
+ uint32_t reserved_seqno;
/* Unique identifier which can be used for trace points & debug */
uint32_t uniq;
@@ -2524,6 +2524,9 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
+ /* Why is the full re-initialisation required? Is it only for
+ * hardware semaphores? If so, could skip it in the case where
+ * semaphores are disabled? */
int ret = i915_gem_init_seqno(dev, 0);
if (ret)
return ret;
@@ -2581,6 +2584,12 @@ void __i915_add_request(struct drm_i915_gem_request *request,
WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
}
+ /* Make the request's seqno 'live': */
+ if(!request->seqno) {
+ request->seqno = request->reserved_seqno;
+ WARN_ON(request->seqno != dev_priv->last_seqno);
+ }
+
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
@@ -2821,6 +2830,9 @@ void i915_gem_request_notify(struct intel_engine_cs *ring)
if (!complete)
continue;
} else {
+ /* How can this happen? */
+ WARN_ON(req->seqno == 0);
+
if (!i915_seqno_passed(seqno, req->seqno))
continue;
}
@@ -3009,7 +3021,14 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
if (req == NULL)
return -ENOMEM;
- ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+ /*
+ * Assign an identifier to track this request through the hardware
+ * but don't make it live yet. It could change in the future if this
+ * request gets overtaken. However, it still needs to be allocated
+ * in advance because the point of submission must not fail and seqno
+ * allocation can fail.
+ */
+ ret = i915_gem_get_seqno(ring->dev, &req->reserved_seqno);
if (ret)
goto err;
@@ -1317,6 +1317,19 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
/* The mutex must be acquired before calling this function */
BUG_ON(!mutex_is_locked(&params->dev->struct_mutex));
+ /* Make sure the request's seqno is the latest and greatest: */
+ if(params->request->reserved_seqno != dev_priv->last_seqno) {
+ ret = i915_gem_get_seqno(ring->dev, &params->request->reserved_seqno);
+ if (ret)
+ return ret;
+ }
+ /*
+ * And make it live because some of the execbuff submission code
+ * requires the seqno to be available up front. */
+ WARN_ON(params->request->seqno);
+ params->request->seqno = params->request->reserved_seqno;
+ WARN_ON(params->request->seqno != dev_priv->last_seqno);
+
ret = intel_ring_reserve_space(params->request);
if (ret)
return ret;
@@ -908,6 +908,19 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
/* The mutex must be acquired before calling this function */
BUG_ON(!mutex_is_locked(&params->dev->struct_mutex));
+ /* Make sure the request's seqno is the latest and greatest: */
+ if(params->request->reserved_seqno != dev_priv->last_seqno) {
+ ret = i915_gem_get_seqno(ring->dev, &params->request->reserved_seqno);
+ if (ret)
+ return ret;
+ }
+ /*
+ * And make it live because some of the execbuff submission code
+ * requires the seqno to be available up front. */
+ WARN_ON(params->request->seqno);
+ params->request->seqno = params->request->reserved_seqno;
+ WARN_ON(params->request->seqno != dev_priv->last_seqno);
+
ret = intel_logical_ring_reserve_space(params->request);
if (ret)
return ret;