@@ -1612,7 +1612,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/*
* Without a GPU scheduler, any fence waits must be done up front.
*/
- if (args->flags & I915_EXEC_WAIT_FENCE) {
+ if ((args->flags & I915_EXEC_WAIT_FENCE) &&
+     (i915.scheduler_override & i915_so_direct_submit)) {
ret = i915_early_fence_wait(ring, fd_fence_wait);
if (ret < 0)
return ret;
@@ -1799,6 +1801,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->ctx = ctx;
#ifdef CONFIG_SYNC
+ if (args->flags & I915_EXEC_WAIT_FENCE) {
+ if (fd_fence_wait < 0) {
+ DRM_ERROR("Wait fence for ring %d has invalid id %d\n",
+ (int) ring->id, fd_fence_wait);
+ } else {
+ params->fence_wait = sync_fence_fdget(fd_fence_wait);
+ if (params->fence_wait == NULL)
+ DRM_ERROR("Invalid wait fence %d\n",
+ fd_fence_wait);
+ }
+ }
+
if (args->flags & I915_EXEC_CREATE_FENCE) {
/*
* Caller has requested a sync fence.
@@ -1865,6 +1879,11 @@ err:
i915_gem_context_unreference(params->ctx);
}
+#ifdef CONFIG_SYNC
+ if (params->fence_wait)
+ sync_fence_put(params->fence_wait);
+#endif
+
/*
* If the request was created but not successfully submitted then it
* must be freed again. If it was submitted then it is being tracked
@@ -977,6 +977,9 @@ static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
signalled = atomic_read(&node->params.fence_wait->status) != 0;
else
signalled = true;
+
+ if (!signalled)
+ signalled = i915_safe_to_ignore_fence(ring, node->params.fence_wait);
#endif // CONFIG_SYNC
has_local = false;
@@ -110,6 +110,11 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *q
int i915_scheduler_handle_irq(struct intel_engine_cs *ring);
void i915_scheduler_kill_all(struct drm_device *dev);
void i915_gem_scheduler_work_handler(struct work_struct *work);
+#ifdef CONFIG_SYNC
+struct drm_i915_gem_request *i915_scheduler_find_by_sync_value(struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ uint32_t sync_value);
+#endif
int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked);
int i915_scheduler_flush_request(struct drm_i915_gem_request *req,
bool is_locked);