@@ -474,6 +474,9 @@ struct radeon_bo_va {
struct radeon_bo *bo;
};
+#define RADEON_BO_OWNER_IMPLICIT_SYNC (0L)
+#define RADEON_BO_OWNER_FIRST_CS (~0L)
+
struct radeon_bo {
/* Protected by gem.mutex */
struct list_head list;
@@ -489,6 +492,7 @@ struct radeon_bo {
u32 tiling_flags;
u32 pitch;
int surface_reg;
+ long owner;
/* list of all virtual address to which this bo
* is associated to
*/
@@ -1084,6 +1088,7 @@ struct radeon_cs_parser {
struct radeon_cs_chunk *chunk_relocs;
struct radeon_cs_chunk *chunk_flags;
struct radeon_cs_chunk *chunk_const_ib;
+ struct radeon_cs_chunk *chunk_wait_for;
struct radeon_ib ib;
struct radeon_ib const_ib;
void *track;
@@ -183,7 +183,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].tv.shared = !r->write_domain;
+ p->relocs[i].tv.shared = !r->write_domain ||
+ !!p->chunk_wait_for;
p->relocs[i].handle = r->handle;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
@@ -251,16 +252,40 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
+ long owner = p->chunk_wait_for ? (long)p->filp :
+ RADEON_BO_OWNER_IMPLICIT_SYNC;
int i;
+	if (p->chunk_wait_for) {
+		struct radeon_fpriv *fpriv = p->filp->driver_priv;
+		/* ids are 64bit, i.e. two dwords each; don't read a partial one */
+		for (i = 0; i + 1 < p->chunk_wait_for->length_dw; i += 2) {
+			struct radeon_fence *fence;
+			uint64_t *id;
+
+			id = (uint64_t *)&p->chunk_wait_for->kdata[i];
+
+			mutex_lock(&fpriv->seq_lock);
+			fence = radeon_seq_query(fpriv->seq, *id);
+			mutex_unlock(&fpriv->seq_lock);
+
+			radeon_sync_fence(&p->ib.sync, fence);
+		}
+	}
+
for (i = 0; i < p->nrelocs; i++) {
+ struct radeon_cs_reloc *reloc = &p->relocs[i];
struct reservation_object *resv;
- if (!p->relocs[i].robj)
+ if (!reloc->robj)
continue;
- resv = p->relocs[i].robj->tbo.resv;
- radeon_sync_resv(&p->ib.sync, resv, p->relocs[i].tv.shared);
+ if (reloc->robj->owner != owner &&
+ reloc->robj->owner != RADEON_BO_OWNER_FIRST_CS)
+ reloc->tv.shared = false;
+
+ resv = reloc->robj->tbo.resv;
+ radeon_sync_resv(&p->ib.sync, resv, reloc->tv.shared);
}
}
@@ -332,6 +357,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_WAIT_FOR) {
+ p->chunk_wait_for = &p->chunks[i];
+			/* a zero-length wait-for list is valid and useful */
+ }
size = p->chunks[i].length_dw;
cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -413,6 +442,18 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
unsigned i;
if (!error) {
+ long owner = parser->chunk_wait_for ? (long)parser->filp :
+ RADEON_BO_OWNER_IMPLICIT_SYNC;
+
+ for (i = 0; i < parser->nrelocs; i++) {
+ struct radeon_cs_reloc *reloc = &parser->relocs[i];
+
+ if (!reloc->robj)
+ continue;
+
+ reloc->robj->owner = owner;
+ }
+
if (parser->chunk_flags &&
parser->chunk_flags->length_dw > 4) {
struct radeon_fpriv *fpriv = parser->filp->driver_priv;
@@ -81,6 +81,7 @@ retry:
}
*obj = &robj->gem_base;
robj->pid = task_pid_nr(current);
+ robj->owner = RADEON_BO_OWNER_FIRST_CS;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&robj->list, &rdev->gem.objects);
@@ -230,6 +230,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
{
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
@@ -275,6 +276,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
if (IS_ERR(fence))
return PTR_ERR(fence);
+ rbo->owner = RADEON_BO_OWNER_IMPLICIT_SYNC;
r = ttm_bo_move_accel_cleanup(bo, &fence->base,
evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
@@ -942,10 +942,11 @@ struct drm_radeon_gem_va {
uint64_t offset;
};
-#define RADEON_CHUNK_ID_RELOCS 0x01
-#define RADEON_CHUNK_ID_IB 0x02
-#define RADEON_CHUNK_ID_FLAGS 0x03
+#define RADEON_CHUNK_ID_RELOCS 0x01
+#define RADEON_CHUNK_ID_IB 0x02
+#define RADEON_CHUNK_ID_FLAGS 0x03
#define RADEON_CHUNK_ID_CONST_IB 0x04
+#define RADEON_CHUNK_ID_WAIT_FOR 0x05
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01