@@ -644,6 +644,15 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
	return NOTIFY_DONE;
}
+static int add_timeline_barrier(struct i915_request *rq)
+{
+	struct i915_request *barrier =
+		i915_gem_active_raw(&rq->timeline->barrier,
+				    &rq->i915->drm.struct_mutex);
+
+	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
+}
+
/**
 * i915_request_alloc - allocate a request structure
 *
@@ -808,6 +817,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
	 */
	rq->head = rq->ring->emit;
+	ret = add_timeline_barrier(rq);
+	if (ret)
+		goto err_unwind;
+
	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
	if (ret)
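A note on locking, with a minimal sketch (not part of the patch; peek_timeline_barrier is an illustrative name): i915_gem_active_raw() may only be called with struct_mutex held, which callers of i915_request_alloc() already guarantee, so add_timeline_barrier() can read the barrier without extra synchronisation.

static struct i915_request *
peek_timeline_barrier(struct i915_timeline *tl, struct drm_i915_private *i915)
{
	/* i915_gem_active_raw() requires struct_mutex to be held */
	lockdep_assert_held(&i915->drm.struct_mutex);

	/* Returns NULL once the barrier request has retired */
	return i915_gem_active_raw(&tl->barrier, &i915->drm.struct_mutex);
}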
@@ -37,6 +37,8 @@ void i915_timeline_init(struct drm_i915_private *i915,
	INIT_LIST_HEAD(&timeline->requests);
	i915_syncmap_init(&timeline->sync);
+
+	init_request_active(&timeline->barrier, NULL);
}
/**
@@ -69,6 +71,7 @@ void i915_timelines_park(struct drm_i915_private *i915)
void i915_timeline_fini(struct i915_timeline *timeline)
{
	GEM_BUG_ON(!list_empty(&timeline->requests));
+	GEM_BUG_ON(i915_gem_active_isset(&timeline->barrier));
	i915_syncmap_free(&timeline->sync);
@@ -72,6 +72,16 @@ struct i915_timeline {
	 */
	u32 global_sync[I915_NUM_ENGINES];
+	/**
+	 * Barrier provides the ability to serialize ordering between
+	 * different timelines.
+	 *
+	 * Users can call i915_timeline_set_barrier which will make all
+	 * subsequent submissions on this timeline be executed only after
+	 * the barrier has completed.
+	 */
+	struct i915_gem_active barrier;
+
	struct list_head link;
	const char *name;
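Because i915_gem_active tracks its request only until that request retires, the barrier is self-clearing: once the barrier request completes, i915_gem_active_raw() returns NULL and add_timeline_barrier() degenerates to a no-op. A hypothetical helper to illustrate (timeline_barrier_pending is not in the patch):

static bool timeline_barrier_pending(const struct i915_timeline *tl)
{
	/*
	 * True only while the last barrier request is still in flight;
	 * a retired barrier no longer gates new submissions.
	 */
	return i915_gem_active_isset(&tl->barrier);
}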
@@ -125,4 +135,21 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
void i915_timelines_park(struct drm_i915_private *i915);
+/**
+ * i915_timeline_set_barrier - orders submission between different timelines
+ * @timeline: timeline to set the barrier on
+ * @rq: request after which new submissions can proceed
+ *
+ * Sets the passed-in request as the serialization point for all subsequent
+ * submissions on @timeline. Subsequent requests will not be submitted to the
+ * GPU until the barrier has been completed.
+ */
+static inline void
+i915_timeline_set_barrier(struct i915_timeline *timeline,
+			  struct i915_request *rq)
+{
+	GEM_BUG_ON(timeline->fence_context == rq->timeline->fence_context);
+	i915_gem_active_set(&timeline->barrier, rq);
+}
+
#endif
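A hypothetical usage sketch (the helper name and call site are illustrative, not from this patch): with struct_mutex held, a request from one timeline is installed as the barrier on another, after which every new request on that timeline awaits it via add_timeline_barrier().

static void serialize_timeline_after(struct i915_timeline *tl,
				     struct i915_request *rq)
{
	lockdep_assert_held(&rq->i915->drm.struct_mutex);

	/* @rq must belong to a different timeline than @tl */
	i915_timeline_set_barrier(tl, rq);

	/*
	 * From now on, each request created on @tl awaits &rq->fence
	 * (via i915_request_await_dma_fence() in add_timeline_barrier())
	 * and so executes only after @rq has completed.
	 */
}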
@@ -19,6 +19,8 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
	i915_syncmap_init(&timeline->sync);
+	init_request_active(&timeline->barrier, NULL);
+
	INIT_LIST_HEAD(&timeline->link);
}