@@ -509,6 +509,15 @@ i915_request_alloc_slow(struct intel_context *ce)
return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
}
+static int add_timeline_barrier(struct i915_request *rq)
+{
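+	/*
+	 * The timeline barrier is protected by struct_mutex; peek at the
+	 * most recently installed barrier request, if any, so that this
+	 * request can be made to wait for its completion.
+	 */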
+ struct i915_request *barrier =
+ i915_gem_active_raw(&rq->timeline->barrier,
+ &rq->i915->drm.struct_mutex);
+
+ return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
+}
+
/**
* i915_request_alloc - allocate a request structure
*
@@ -652,6 +661,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
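+	/* Serialize after the timeline barrier, if one has been installed */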
+ ret = add_timeline_barrier(rq);
+ if (ret)
+ goto err_unwind;
+
ret = engine->request_alloc(rq);
if (ret)
goto err_unwind;
@@ -37,6 +37,8 @@ void i915_timeline_init(struct drm_i915_private *i915,
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
+
+ init_request_active(&timeline->barrier, NULL);
}
/**
@@ -69,6 +71,7 @@ void i915_timelines_park(struct drm_i915_private *i915)
void i915_timeline_fini(struct i915_timeline *timeline)
{
GEM_BUG_ON(!list_empty(&timeline->requests));
+ GEM_BUG_ON(i915_gem_active_isset(&timeline->barrier));
i915_syncmap_free(&timeline->sync);
@@ -64,6 +64,16 @@ struct i915_timeline {
*/
struct i915_syncmap *sync;
+ /**
+ * Barrier provides the ability to serialize ordering between different
+ * timelines.
+ *
+	 * Users can call i915_timeline_set_barrier(), which will make all
+	 * subsequent submissions on this timeline execute only after the
+	 * barrier request has completed.
+ */
+ struct i915_gem_active barrier;
+
struct list_head link;
const char *name;
@@ -136,4 +146,21 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
void i915_timelines_park(struct drm_i915_private *i915);
+/**
+ * i915_timeline_set_barrier - orders submissions between different timelines
+ * @timeline: timeline to set the barrier on
+ * @rq: request after which new submissions can proceed
+ *
+ * Sets the passed-in request as the serialization point for all subsequent
+ * submissions on @timeline. Subsequent requests will not be submitted to the
+ * GPU until the barrier has been completed.
+ */
+static inline void
+i915_timeline_set_barrier(struct i915_timeline *timeline,
+ struct i915_request *rq)
+{
+ GEM_BUG_ON(timeline->fence_context == rq->timeline->fence_context);
+ i915_gem_active_set(&timeline->barrier, rq);
+}
+
#endif
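
As a usage sketch (not part of the patch; the helper name is hypothetical):
with struct_mutex held, a caller can pin all future submissions on one
timeline behind a request belonging to a different timeline. Every request
subsequently allocated on that timeline then awaits the barrier via
add_timeline_barrier() above.

	/* Hypothetical example: order all future work on @other after @rq */
	static void example_order_behind(struct i915_timeline *other,
					 struct i915_request *rq)
	{
		lockdep_assert_held(&rq->i915->drm.struct_mutex);

		/* @rq must come from a different timeline than @other */
		i915_timeline_set_barrier(other, rq);
	}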
@@ -19,6 +19,8 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
i915_syncmap_init(&timeline->sync);
+ init_request_active(&timeline->barrier, NULL);
+
INIT_LIST_HEAD(&timeline->link);
}