--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -597,7 +597,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
+bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring);
int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
struct fence *fence);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -166,7 +166,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
}
	job = to_amdgpu_job(sched_job);

-	BUG_ON(!amdgpu_sync_is_idle(&job->sync));
+	BUG_ON(!amdgpu_sync_is_idle(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
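
Passing NULL here keeps the strict semantics: with no ring to relax against, the helper only reports idle once every dependency is fully signaled, which is what the scheduler expects by the time it runs a job. A standalone sketch of that invariant follows; the types are simplified stand-ins, not the real amdgpu structures:

```c
/* Standalone model of the invariant the BUG_ON above asserts: by the
 * time the scheduler calls the run callback, every dependency collected
 * in the job's sync object must already be resolved. Stand-in types. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct model_fence {
	bool signaled;
};

struct model_sync {
	struct model_fence *deps;
	size_t count;
};

/* Strict check, i.e. amdgpu_sync_is_idle(sync, NULL): no ring to relax
 * against, so every fence has to be fully signaled. */
static bool model_sync_is_idle(const struct model_sync *sync)
{
	for (size_t i = 0; i < sync->count; i++)
		if (!sync->deps[i].signaled)
			return false;
	return true;
}

static void model_job_run(const struct model_sync *sync)
{
	assert(model_sync_is_idle(sync)); /* mirrors the BUG_ON above */
	/* ... hand the command buffers to the hardware ring ... */
}

int main(void)
{
	struct model_fence deps[] = { { true }, { true } };
	struct model_sync sync = { deps, 2 };

	model_job_run(&sync); /* all dependencies signaled: no trip */
	return 0;
}
```
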
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -226,10 +226,13 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
* amdgpu_sync_is_idle - test if all fences are signaled
*
* @sync: the sync object
+ * @ring: optional ring to check the fences against
*
- * Returns true if all fences in the sync object are signaled.
+ * Returns true if all fences in the sync object are signaled. Fences
+ * belonging to @ring (if provided) only need to be scheduled.
*/
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
+bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
@@ -237,6 +240,16 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct fence *f = e->fence;
+		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+		if (ring && s_fence) {
+			/* Fences from the same ring only need to be
+			 * scheduled; the ring executes in order.
+			 */
+			if (s_fence->sched == &ring->sched &&
+			    fence_is_signaled(&s_fence->scheduled))
+				continue;
+		}

		if (fence_is_signaled(f)) {
			hash_del(&e->node);
			fence_put(f);
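
The relaxed test above is the core of the change: a fence emitted by @ring's own scheduler no longer has to be fully signaled; reaching the scheduled state (the s_fence->scheduled fence) is enough, because a single ring executes its jobs in submission order. A standalone model of the predicate, again with simplified stand-ins rather than the driver types:

```c
/* Standalone model of the relaxed idle test: a scheduler fence from the
 * same ring counts as idle once it is merely scheduled, since the ring
 * runs its jobs in submission order. Stand-in types, not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct model_ring { int id; };

struct model_sched_fence {
	const struct model_ring *ring; /* ring the job was queued to */
	bool scheduled;                /* picked up by the scheduler */
	bool signaled;                 /* execution finished */
};

static bool model_fence_idle(const struct model_sched_fence *f,
			     const struct model_ring *ring)
{
	/* Same-ring fences only need to be scheduled; everything else
	 * must have fully finished. */
	if (ring && f->ring == ring && f->scheduled)
		return true;
	return f->signaled;
}

int main(void)
{
	struct model_ring gfx = { 0 };
	struct model_sched_fence pending = { &gfx, true, false };

	printf("idle for NULL ring: %d\n", model_fence_idle(&pending, NULL)); /* 0 */
	printf("idle for same ring: %d\n", model_fence_idle(&pending, &gfx)); /* 1 */
	return 0;
}
```
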
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -240,13 +240,13 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
			      struct amdgpu_vm_id,
			      list);

-	if (!amdgpu_sync_is_idle(&id->active)) {
+	if (!amdgpu_sync_is_idle(&id->active, NULL)) {
		struct list_head *head = &adev->vm_manager.ids_lru;
		struct amdgpu_vm_id *tmp;

		list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
list) {
- if (amdgpu_sync_is_idle(&id->active)) {
+ if (amdgpu_sync_is_idle(&id->active, NULL)) {
list_move(&id->list, head);
head = &id->list;
}
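
The LRU walk keeps its original trick: every idle VMID is moved to an insertion point that advances one node per move (head = &id->list), so idle IDs bubble to the front of the LRU in their original relative order. A standalone model of that reshuffle, using a minimal hand-rolled list instead of the kernel's list.h helpers:

```c
/* Standalone model of the LRU reshuffle above: idle entries move to an
 * advancing insertion point at the front, preserving relative order.
 * Hand-rolled list, not the kernel's list.h. */
#include <stdbool.h>
#include <stdio.h>

struct node {
	int vmid;
	bool idle;
	struct node *prev, *next;
};

static void list_del_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_after(struct node *n, struct node *head)
{
	n->prev = head;
	n->next = head->next;
	head->next->prev = n;
	head->next = n;
}

int main(void)
{
	struct node list = { 0 };          /* sentinel head */
	struct node ids[4] = {
		{ 1, false }, { 2, true }, { 3, false }, { 4, true },
	};

	list.prev = list.next = &list;
	for (int i = 0; i < 4; i++)
		list_add_after(&ids[i], list.prev); /* append in order */

	/* Mirror of the loop in amdgpu_vm_grab_id(): move idle entries
	 * to an insertion point that advances after every move. */
	struct node *head = &list;
	struct node *id = list.next, *tmp;
	for (; id != &list; id = tmp) {
		tmp = id->next;
		if (id->idle) {
			list_del_node(id);
			list_add_after(id, head);
			head = id;           /* like head = &id->list */
		}
	}

	for (id = list.next; id != &list; id = id->next)
		printf("vmid %d%s\n", id->vmid, id->idle ? " (idle)" : "");
	/* Prints 2, 4 first (idle, original order), then 1, 3. */
	return 0;
}
```
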