--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -49,11 +49,12 @@ enum {
 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
 };
 
-#define __EXEC_OBJECT_HAS_PIN           BIT(31)
-#define __EXEC_OBJECT_HAS_FENCE         BIT(30)
-#define __EXEC_OBJECT_NEEDS_MAP         BIT(29)
-#define __EXEC_OBJECT_NEEDS_BIAS        BIT(28)
-#define __EXEC_OBJECT_INTERNAL_FLAGS    (~0u << 28) /* all of the above */
+/* __EXEC_OBJECT_NO_RESERVE is BIT(31), defined in i915_vma.h */
+#define __EXEC_OBJECT_HAS_PIN           BIT(30)
+#define __EXEC_OBJECT_HAS_FENCE         BIT(29)
+#define __EXEC_OBJECT_NEEDS_MAP         BIT(28)
+#define __EXEC_OBJECT_NEEDS_BIAS        BIT(27)
+#define __EXEC_OBJECT_INTERNAL_FLAGS    (~0u << 27) /* all of the above + __EXEC_OBJECT_NO_RESERVE */
 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
 
 #define __EXEC_HAS_RELOC        BIT(31)
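
The repacking above is forced by the new flag: BIT(31) now belongs to
__EXEC_OBJECT_NO_RESERVE, which is defined in i915_vma.h (last hunk below) so
that i915_vma_move_to_active() can test it as well. The execbuffer-private
flags each move down one bit and the __EXEC_OBJECT_INTERNAL_FLAGS mask widens
to start at bit 27. A compile-time sanity check of the layout, illustrative
only and not part of the patch, could read:

/*
 * Illustrative only, not patch code: the private flags must stay clear of
 * the bit owned by __EXEC_OBJECT_NO_RESERVE, while the internal-flags mask
 * must still cover it (hence "all of the above + __EXEC_OBJECT_NO_RESERVE").
 */
static_assert(!(__EXEC_OBJECT_NO_RESERVE &
                (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE |
                 __EXEC_OBJECT_NEEDS_MAP | __EXEC_OBJECT_NEEDS_BIAS)));
static_assert(__EXEC_OBJECT_INTERNAL_FLAGS & __EXEC_OBJECT_NO_RESERVE);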
@@ -929,6 +930,12 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
                         }
                 }
 
+                if (!(ev->flags & EXEC_OBJECT_WRITE)) {
+                        err = dma_resv_reserve_shared(vma->resv, 1);
+                        if (err)
+                                return err;
+                }
+
                 GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
                            eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
         }
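
This hunk is the heart of the change: while eb_validate_vmas() still holds
the object locks and is still allowed to fail, it reserves the shared-fence
slot that the request's read fence will occupy at submission. The dma-resv
rule it leans on, as a sketch (add_read_fence is a made-up name; the caller
holds dma_resv_lock() on resv throughout):

#include <linux/dma-resv.h>

/*
 * Sketch, not patch code: dma_resv_reserve_shared() is the step that may
 * allocate and therefore fail, so it must run while unwinding is still
 * possible; dma_resv_add_shared_fence() into a reserved slot cannot fail.
 */
static int add_read_fence(struct dma_resv *resv, struct dma_fence *fence)
{
        int err;

        err = dma_resv_reserve_shared(resv, 1); /* may return -ENOMEM */
        if (err)
                return err;                     /* safe to unwind here */

        dma_resv_add_shared_fence(resv, fence); /* void: no failure path */
        return 0;
}

Written objects are skipped because a write takes the exclusive fence slot,
which needs no reservation; hence the !(ev->flags & EXEC_OBJECT_WRITE) guard.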
@@ -2194,7 +2201,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
                 }
 
                 if (err == 0)
-                        err = i915_vma_move_to_active(vma, eb->request, flags);
+                        err = i915_vma_move_to_active(vma, eb->request,
+                                                      flags | __EXEC_OBJECT_NO_RESERVE);
         }
 
         if (unlikely(err))
@@ -2446,6 +2454,10 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
         if (err)
                 goto err_commit;
 
+        err = dma_resv_reserve_shared(shadow->resv, 1);
+        if (err)
+                goto err_commit;
+
         /* Wait for all writes (and relocs) into the batch to complete */
         err = i915_sw_fence_await_reservation(&pw->base.chain,
                                               pw->batch->resv, NULL, false,
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -296,18 +296,13 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
 static struct i915_active_fence *
 active_instance(struct i915_active *ref, u64 idx)
 {
-        struct active_node *node, *prealloc;
+        struct active_node *node;
         struct rb_node **p, *parent;
 
         node = __active_lookup(ref, idx);
         if (likely(node))
                 return &node->base;
 
-        /* Preallocate a replacement, just in case */
-        prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
-        if (!prealloc)
-                return NULL;
-
         spin_lock_irq(&ref->tree_lock);
         GEM_BUG_ON(i915_active_is_idle(ref));
 
@@ -317,10 +312,8 @@ active_instance(struct i915_active *ref, u64 idx)
                 parent = *p;
 
                 node = rb_entry(parent, struct active_node, node);
-                if (node->timeline == idx) {
-                        kmem_cache_free(global.slab_cache, prealloc);
+                if (node->timeline == idx)
                         goto out;
-                }
 
                 if (node->timeline < idx)
                         p = &parent->rb_right;
@@ -328,7 +321,14 @@ active_instance(struct i915_active *ref, u64 idx)
                         p = &parent->rb_left;
         }
 
-        node = prealloc;
+        /*
+         * XXX: We should preallocate this before i915_active_ref() is ever
+         * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
+         */
+        node = kmem_cache_alloc(global.slab_cache, GFP_ATOMIC);
+        if (!node)
+                goto out;
+
         __i915_active_fence_init(&node->base, NULL, node_retire);
         node->ref = ref;
         node->timeline = idx;
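
With the GFP_KERNEL preallocation gone, the node is now allocated with
ref->tree_lock held, so the flags must become GFP_ATOMIC: GFP_KERNEL may
sleep and enter direct reclaim, which is the fs_reclaim the XXX comment says
this path can no longer tolerate (direct reclaim could recurse into the
driver's own shrinker). The price is that the allocation may now fail, which
the rbtree path handles by falling out through the out label. In sketch form
(node_alloc_demo is a made-up name, illustrative only):

/* Illustrative only, not patch code: the lock dictates the GFP flags. */
static struct active_node *node_alloc_demo(struct i915_active *ref)
{
        struct active_node *node;

        /* outside the lock, a sleeping GFP_KERNEL allocation would be legal */
        spin_lock_irq(&ref->tree_lock);
        /* in here it is not: GFP_ATOMIC never sleeps, but may return NULL */
        node = kmem_cache_alloc(global.slab_cache, GFP_ATOMIC);
        spin_unlock_irq(&ref->tree_lock);

        return node;    /* every caller must tolerate NULL */
}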
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1281,9 +1281,11 @@ int i915_vma_move_to_active(struct i915_vma *vma,
                 obj->write_domain = I915_GEM_DOMAIN_RENDER;
                 obj->read_domains = 0;
         } else {
-                err = dma_resv_reserve_shared(vma->resv, 1);
-                if (unlikely(err))
-                        return err;
+                if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+                        err = dma_resv_reserve_shared(vma->resv, 1);
+                        if (unlikely(err))
+                                return err;
+                }
 
                 dma_resv_add_shared_fence(vma->resv, &rq->fence);
                 obj->write_domain = 0;
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -52,6 +52,9 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
         return !i915_active_is_idle(&vma->active);
 }
 
+/* do not reserve memory to prevent deadlocks */
+#define __EXEC_OBJECT_NO_RESERVE BIT(31)
+
 int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
                                            struct i915_request *rq);
 int __must_check i915_vma_move_to_active(struct i915_vma *vma,
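
Taken together, the header now spells out the contract the earlier hunks
implement: reserve the fence slot while failure can still be unwound, then
pass __EXEC_OBJECT_NO_RESERVE so that i915_vma_move_to_active() skips its
own allocating, failable reserve step at submission time. A condensed
caller-side sketch (illustrative; locking and unwinding elided):

/* Illustrative only, not patch code: the two halves of the new contract. */

/* eb_validate_vmas(): errors are still survivable, so reserve the slot */
err = dma_resv_reserve_shared(vma->resv, 1);
if (err)
        return err;

/* eb_move_to_gpu(): past the point of no return, so allocate nothing */
err = i915_vma_move_to_active(vma, eb->request,
                              flags | __EXEC_OBJECT_NO_RESERVE);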