[2/6] drm/radeon: add fence owners

Message ID 1418055073-2211-2-git-send-email-deathsimple@vodafone.de (mailing list archive)
State New, archived

Commit Message

Christian König Dec. 8, 2014, 4:11 p.m. UTC
From: Christian König <christian.koenig@amd.com>

This way we can track who created the fence and then only wait
on fences that userspace doesn't know about.
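
In essence, every fence now records its creator in an owner field:
either one of the special RADEON_FENCE_OWNER_* values or the
submitting process' filp pointer cast to long. When syncing to a
reservation object, shared fences created by the same owner can be
skipped, since userspace already knows about them. A minimal sketch of
the resulting rule (the helper name is made up for illustration; the
logic mirrors the radeon_sync_resv() hunk below):

/* Illustrative helper, not part of the patch: decide whether a shared
 * fence from the same device must be waited on.  'owner' is a
 * RADEON_FENCE_OWNER_* value or a filp cast to long.
 */
static bool radeon_sync_needs_wait(struct radeon_fence *fence, long owner)
{
	/* Fences with an undefined owner are always waited on; fences
	 * created by the same owner are already known to userspace and
	 * can be skipped.
	 */
	return fence->owner == RADEON_FENCE_OWNER_UNDEFINED ||
	       fence->owner != owner;
}

Reusing the filp pointer as the owner is safe here because the special
values (0, 1 and 2) can never collide with a valid kernel pointer.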

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/cik.c           |  8 +++++---
 drivers/gpu/drm/radeon/cik_sdma.c      |  8 +++++---
 drivers/gpu/drm/radeon/evergreen_dma.c |  5 +++--
 drivers/gpu/drm/radeon/r100.c          |  6 ++++--
 drivers/gpu/drm/radeon/r200.c          |  3 ++-
 drivers/gpu/drm/radeon/r600.c          |  8 +++++---
 drivers/gpu/drm/radeon/r600_dma.c      |  8 +++++---
 drivers/gpu/drm/radeon/radeon.h        | 15 +++++++++++----
 drivers/gpu/drm/radeon/radeon_cs.c     | 14 +++++++++-----
 drivers/gpu/drm/radeon/radeon_fence.c  |  4 ++--
 drivers/gpu/drm/radeon/radeon_ib.c     |  5 +++--
 drivers/gpu/drm/radeon/radeon_sync.c   | 19 +++++++++++--------
 drivers/gpu/drm/radeon/radeon_test.c   |  3 ++-
 drivers/gpu/drm/radeon/radeon_uvd.c    |  3 ++-
 drivers/gpu/drm/radeon/radeon_vce.c    |  6 ++++--
 drivers/gpu/drm/radeon/radeon_vm.c     | 18 ++++++++++--------
 drivers/gpu/drm/radeon/rv770_dma.c     |  5 +++--
 drivers/gpu/drm/radeon/si_dma.c        |  5 +++--
 18 files changed, 89 insertions(+), 54 deletions(-)

Patch

diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dcde37..7f15ec5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4013,7 +4013,7 @@  struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
 		return ERR_PTR(r);
 	}
 
-	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_resv(rdev, &sync, resv, RADEON_FENCE_OWNER_UNDEFINED);
 	radeon_sync_rings(rdev, &sync, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
@@ -4035,7 +4035,8 @@  struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
 		dst_offset += cur_size_in_bytes;
 	}
 
-	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx,
+			      RADEON_FENCE_OWNER_MOVE);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		radeon_sync_free(rdev, &sync, NULL);
@@ -4141,7 +4142,8 @@  int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false,
+			       RADEON_FENCE_OWNER_UNDEFINED);
 	if (r) {
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index dde5c7e..2261a88 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -560,7 +560,7 @@  struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
 		return ERR_PTR(r);
 	}
 
-	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_resv(rdev, &sync, resv, RADEON_FENCE_OWNER_UNDEFINED);
 	radeon_sync_rings(rdev, &sync, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
@@ -579,7 +579,8 @@  struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
 		dst_offset += cur_size_in_bytes;
 	}
 
-	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx,
+			      RADEON_FENCE_OWNER_MOVE);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		radeon_sync_free(rdev, &sync, NULL);
@@ -691,7 +692,8 @@  int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false,
+			       RADEON_FENCE_OWNER_UNDEFINED);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 96535aa..094df95 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -129,7 +129,7 @@  struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
 		return ERR_PTR(r);
 	}
 
-	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_resv(rdev, &sync, resv, RADEON_FENCE_OWNER_UNDEFINED);
 	radeon_sync_rings(rdev, &sync, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
@@ -146,7 +146,8 @@  struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
 		dst_offset += cur_size_in_dw * 4;
 	}
 
-	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx,
+			      RADEON_FENCE_OWNER_MOVE);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		radeon_sync_free(rdev, &sync, NULL);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 74f06d5..81388d9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -937,7 +937,8 @@  struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
 			  RADEON_WAIT_2D_IDLECLEAN |
 			  RADEON_WAIT_HOST_IDLECLEAN |
 			  RADEON_WAIT_DMA_GUI_IDLE);
-	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX,
+			      RADEON_FENCE_OWNER_UNDEFINED);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		return ERR_PTR(r);
@@ -3706,7 +3707,8 @@  int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[6] = PACKET2(0);
 	ib.ptr[7] = PACKET2(0);
 	ib.length_dw = 8;
-	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false,
+			       RADEON_FENCE_OWNER_UNDEFINED);
 	if (r) {
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		goto free_ib;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index c70e6d5..d09fb3f 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -119,7 +119,8 @@  struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
 	}
 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
 	radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
-	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX,
+			      RADEON_FENCE_OWNER_UNDEFINED);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		return ERR_PTR(r);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ef5d606..462cc36 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2908,7 +2908,7 @@  struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
 		return ERR_PTR(r);
 	}
 
-	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_resv(rdev, &sync, resv, RADEON_FENCE_OWNER_UNDEFINED);
 	radeon_sync_rings(rdev, &sync, ring->idx);
 
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
@@ -2935,7 +2935,8 @@  struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
 	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
 	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
 
-	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx,
+			      RADEON_FENCE_OWNER_MOVE);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		radeon_sync_free(rdev, &sync, NULL);
@@ -3302,7 +3303,8 @@  int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false,
+			       RADEON_FENCE_OWNER_UNDEFINED);
 	if (r) {
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		goto free_ib;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index d2dd29a..013f939 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -362,7 +362,8 @@  int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[3] = 0xDEADBEEF;
 	ib.length_dw = 4;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false,
+			       RADEON_FENCE_OWNER_UNDEFINED);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
@@ -460,7 +461,7 @@  struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
 		return ERR_PTR(r);
 	}
 
-	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_resv(rdev, &sync, resv, RADEON_FENCE_OWNER_UNDEFINED);
 	radeon_sync_rings(rdev, &sync, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
@@ -477,7 +478,8 @@  struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
 		dst_offset += cur_size_in_dw * 4;
 	}
 
-	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx,
+			      RADEON_FENCE_OWNER_MOVE);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		radeon_sync_free(rdev, &sync, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 54529b8..3968f91 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -359,14 +359,20 @@  struct radeon_fence_driver {
 	struct delayed_work		lockup_work;
 };
 
+/* some special values for the owner field */
+#define RADEON_FENCE_OWNER_UNDEFINED	(0ul)
+#define RADEON_FENCE_OWNER_VM		(1ul)
+#define RADEON_FENCE_OWNER_MOVE		(2ul)
+
 struct radeon_fence {
 	struct fence		base;
 
 	struct radeon_device	*rdev;
 	uint64_t		seq;
+	/* filp or special value for fence creator */
+	long			owner;
 	/* RB, DMA, etc. */
 	unsigned		ring;
-	bool			is_vm_update;
 
 	wait_queue_t		fence_wake;
 };
@@ -375,7 +381,8 @@  int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
 int radeon_fence_driver_init(struct radeon_device *rdev);
 void radeon_fence_driver_fini(struct radeon_device *rdev);
 void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
-int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence,
+		      int ring, long owner);
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
@@ -614,7 +621,7 @@  void radeon_sync_fence(struct radeon_sync *sync,
 int radeon_sync_resv(struct radeon_device *rdev,
 		     struct radeon_sync *sync,
 		     struct reservation_object *resv,
-		     bool shared);
+		     long owner);
 int radeon_sync_rings(struct radeon_device *rdev,
 		      struct radeon_sync *sync,
 		      int waiting_ring);
@@ -1015,7 +1022,7 @@  int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-		       struct radeon_ib *const_ib, bool hdp_flush);
+		       struct radeon_ib *const_ib, bool hdp_flush, long owner);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_ring_tests(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 9648e28..3c3b7d9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -237,10 +237,11 @@  static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 
 	list_for_each_entry(reloc, &p->validated, tv.head) {
 		struct reservation_object *resv;
+		long owner = reloc->tv.shared ? (long)p->filp :
+			RADEON_FENCE_OWNER_UNDEFINED;
 
 		resv = reloc->robj->tbo.resv;
-		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
-				     reloc->tv.shared);
+		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, owner);
 
 		if (r)
 			return r;
@@ -467,7 +468,8 @@  static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
 		radeon_vce_note_usage(rdev);
 
-	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
+	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true,
+			       (long)parser->filp);
 	if (r) {
 		DRM_ERROR("Failed to schedule IB !\n");
 	}
@@ -561,9 +563,11 @@  static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib != NULL)) {
-		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
+		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib,
+				       true, (long)parser->filp);
 	} else {
-		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
+		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true,
+				       (long)parser->filp);
 	}
 
 out:
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d13d1b5..89e8c5f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -128,7 +128,7 @@  static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
  */
 int radeon_fence_emit(struct radeon_device *rdev,
 		      struct radeon_fence **fence,
-		      int ring)
+		      int ring, long owner)
 {
 	u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];
 
@@ -138,9 +138,9 @@  int radeon_fence_emit(struct radeon_device *rdev,
 		return -ENOMEM;
 	}
 	(*fence)->rdev = rdev;
+	(*fence)->owner = owner;
 	(*fence)->seq = seq;
 	(*fence)->ring = ring;
-	(*fence)->is_vm_update = false;
 	fence_init(&(*fence)->base, &radeon_fence_ops,
 		   &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
 	radeon_fence_ring_emit(rdev, ring, *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index c39ce1f..525416a 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -105,6 +105,7 @@  void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
  * @ib: IB object to schedule
  * @const_ib: Const IB to schedule (SI only)
  * @hdp_flush: Whether or not to perform an HDP cache flush
+ * @owner: owner for creating the fence
  *
  * Schedule an IB on the associated ring (all asics).
  * Returns 0 on success, error on failure.
@@ -120,7 +121,7 @@  void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
  * to SI there was just a DE IB.
  */
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-		       struct radeon_ib *const_ib, bool hdp_flush)
+		       struct radeon_ib *const_ib, bool hdp_flush, long owner)
 {
 	struct radeon_ring *ring = &rdev->ring[ib->ring];
 	int r = 0;
@@ -162,7 +163,7 @@  int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 		radeon_sync_free(rdev, &const_ib->sync, NULL);
 	}
 	radeon_ring_ib_execute(rdev, ib->ring, ib);
-	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+	r = radeon_fence_emit(rdev, &ib->fence, ib->ring, owner);
 	if (r) {
 		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
 		radeon_ring_unlock_undo(rdev, ring);
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 6fccaaf..ca98d4b 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -73,7 +73,7 @@  void radeon_sync_fence(struct radeon_sync *sync,
 		radeon_fence_later(fence, other));
 	radeon_fence_unref(&other);
 
-	if (fence->is_vm_update) {
+	if (fence->owner == RADEON_FENCE_OWNER_VM) {
 		other = sync->last_vm_update;
 		sync->last_vm_update = radeon_fence_ref(
 			radeon_fence_later(fence, other));
@@ -93,7 +93,7 @@  void radeon_sync_fence(struct radeon_sync *sync,
 int radeon_sync_resv(struct radeon_device *rdev,
 		     struct radeon_sync *sync,
 		     struct reservation_object *resv,
-		     bool shared)
+		     long owner)
 {
 	struct reservation_object_list *flist;
 	struct fence *f;
@@ -110,20 +110,23 @@  int radeon_sync_resv(struct radeon_device *rdev,
 		r = fence_wait(f, true);
 
 	flist = reservation_object_get_list(resv);
-	if (shared || !flist || r)
+	if (!flist || r)
 		return r;
 
 	for (i = 0; i < flist->shared_count; ++i) {
 		f = rcu_dereference_protected(flist->shared[i],
 					      reservation_object_held(resv));
 		fence = to_radeon_fence(f);
-		if (fence && fence->rdev == rdev)
-			radeon_sync_fence(sync, fence);
-		else
+		if (fence && fence->rdev == rdev) {
+			if (fence->owner != owner ||
+			    fence->owner == RADEON_FENCE_OWNER_UNDEFINED)
+				radeon_sync_fence(sync, fence);
+		} else {
 			r = fence_wait(f, true);
 
-		if (r)
-			break;
+			if (r)
+				break;
+		}
 	}
 	return r;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 07b506b..5e38b95 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -298,7 +298,8 @@  static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
 			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
 			return r;
 		}
-		radeon_fence_emit(rdev, fence, ring->idx);
+		radeon_fence_emit(rdev, fence, ring->idx,
+				  RADEON_FENCE_OWNER_UNDEFINED);
 		radeon_ring_unlock_commit(rdev, ring, false);
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index c10b2ae..1ee9ac3 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -671,7 +671,8 @@  static int radeon_uvd_send_msg(struct radeon_device *rdev,
 		ib.ptr[i] = PACKET2(0);
 	ib.length_dw = 16;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false,
+			       RADEON_FENCE_OWNER_UNDEFINED);
 
 	if (fence)
 		*fence = radeon_fence_ref(ib.fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 976fe43..e64bbcb 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -369,7 +369,8 @@  int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
 	for (i = ib.length_dw; i < ib_size_dw; ++i)
 		ib.ptr[i] = 0x0;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false,
+			       RADEON_FENCE_OWNER_UNDEFINED);
 	if (r) {
 	        DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 	}
@@ -426,7 +427,8 @@  int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
 	for (i = ib.length_dw; i < ib_size_dw; ++i)
 		ib.ptr[i] = 0x0;
 
-	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false,
+			       RADEON_FENCE_OWNER_UNDEFINED);
 	if (r) {
 	        DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index cde48c4..d9074bb 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -414,11 +414,11 @@  static int radeon_vm_clear_bo(struct radeon_device *rdev,
 	radeon_asic_vm_pad_ib(rdev, &ib);
 	WARN_ON(ib.length_dw > 64);
 
-	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false,
+			       RADEON_FENCE_OWNER_VM);
 	if (r)
 		goto error_free;
 
-	ib.fence->is_vm_update = true;
 	radeon_bo_fence(bo, ib.fence, false);
 
 error_free:
@@ -693,14 +693,15 @@  int radeon_vm_update_page_directory(struct radeon_device *rdev,
 	if (ib.length_dw != 0) {
 		radeon_asic_vm_pad_ib(rdev, &ib);
 
-		radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
+		radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv,
+				 RADEON_FENCE_OWNER_VM);
 		WARN_ON(ib.length_dw > ndw);
-		r = radeon_ib_schedule(rdev, &ib, NULL, false);
+		r = radeon_ib_schedule(rdev, &ib, NULL, false,
+				       RADEON_FENCE_OWNER_VM);
 		if (r) {
 			radeon_ib_free(rdev, &ib);
 			return r;
 		}
-		ib.fence->is_vm_update = true;
 		radeon_bo_fence(pd, ib.fence, false);
 	}
 	radeon_ib_free(rdev, &ib);
@@ -819,7 +820,8 @@  static int radeon_vm_update_ptes(struct radeon_device *rdev,
 		uint64_t pte;
 		int r;
 
-		radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
+		radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv,
+				 RADEON_FENCE_OWNER_VM);
 		r = reservation_object_reserve_shared(pt->tbo.resv);
 		if (r)
 			return r;
@@ -1004,12 +1006,12 @@  int radeon_vm_bo_update(struct radeon_device *rdev,
 	radeon_asic_vm_pad_ib(rdev, &ib);
 	WARN_ON(ib.length_dw > ndw);
 
-	r = radeon_ib_schedule(rdev, &ib, NULL, false);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false,
+			       RADEON_FENCE_OWNER_VM);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
 		return r;
 	}
-	ib.fence->is_vm_update = true;
 	radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
 	radeon_fence_unref(&bo_va->last_pt_update);
 	bo_va->last_pt_update = radeon_fence_ref(ib.fence);
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index acff6e0..fd274d1 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -63,7 +63,7 @@  struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
 		return ERR_PTR(r);
 	}
 
-	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_resv(rdev, &sync, resv, RADEON_FENCE_OWNER_UNDEFINED);
 	radeon_sync_rings(rdev, &sync, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
@@ -80,7 +80,8 @@  struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
 		dst_offset += cur_size_in_dw * 4;
 	}
 
-	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx,
+			      RADEON_FENCE_OWNER_MOVE);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		radeon_sync_free(rdev, &sync, NULL);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index f5cc777..6420a19 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -245,7 +245,7 @@  struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
 		return ERR_PTR(r);
 	}
 
-	radeon_sync_resv(rdev, &sync, resv, false);
+	radeon_sync_resv(rdev, &sync, resv, RADEON_FENCE_OWNER_UNDEFINED);
 	radeon_sync_rings(rdev, &sync, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
@@ -262,7 +262,8 @@  struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
 		dst_offset += cur_size_in_bytes;
 	}
 
-	r = radeon_fence_emit(rdev, &fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx,
+			      RADEON_FENCE_OWNER_MOVE);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		radeon_sync_free(rdev, &sync, NULL);