drm/i915: Allow dead vm to unbind vma's without lock.

Message ID: 20220128085739.1464568-1-maarten.lankhorst@linux.intel.com
State: New, archived
Series: drm/i915: Allow dead vm to unbind vma's without lock.

Commit Message

Maarten Lankhorst Jan. 28, 2022, 8:57 a.m. UTC
i915_gem_vm_close may take the lock, and we currently have no better way
of handling this. At least for now, allow a path in which holding vm->mutex
is sufficient. This works because the object destroy path now forcefully
takes vm->mutex.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_vma.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
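
For reference, the locking rule the patch encodes is: the object lock is only
mandatory for eviction while the vm is still open (vm->open > 0); once the vm
is dead, holding vm->mutex alone is sufficient. The snippet below is a minimal,
self-contained userspace model of that rule, not i915 code; the "toy_" types
and the lock-tracking booleans are stand-ins for the real kernel structures
and lockdep annotations.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for struct i915_address_space / drm_i915_gem_object / i915_vma. */
struct toy_vm {
	atomic_int open;	/* models vm->open: > 0 while the vm is still usable */
	bool mutex_held;	/* models lockdep tracking of vm->mutex */
};

struct toy_obj {
	bool lock_held;		/* models the object (dma-resv) lock */
};

struct toy_vma {
	struct toy_vm *vm;
	struct toy_obj *obj;
};

/*
 * Mirrors the idea of assert_vma_held_evict(): the object lock is only
 * required while the vm is open.  vm->mutex is checked separately, as the
 * real code does with lockdep_assert_held(&vma->vm->mutex).
 */
static void toy_assert_vma_held_evict(const struct toy_vma *vma)
{
	if (atomic_load(&vma->vm->open))
		assert(vma->obj->lock_held);
}

int main(void)
{
	struct toy_vm vm = { .open = 1, .mutex_held = true };
	struct toy_obj obj = { .lock_held = true };
	struct toy_vma vma = { .vm = &vm, .obj = &obj };

	assert(vm.mutex_held);			/* vm->mutex is always required */
	toy_assert_vma_held_evict(&vma);	/* live vm: object lock held, ok */

	atomic_store(&vm.open, 0);		/* vm torn down */
	obj.lock_held = false;
	toy_assert_vma_held_evict(&vma);	/* dead vm: vm->mutex alone is ok */

	printf("locking model holds\n");
	return 0;
}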

Comments

Thomas Hellström (Intel) Jan. 28, 2022, 11:02 a.m. UTC | #1
On 1/28/22 09:57, Maarten Lankhorst wrote:
> i915_gem_vm_close may take the lock, and we currently have no better way
> of handling this. At least for now, allow a path in which holding vm->mutex
> is sufficient. This works because the object destroy path now forcefully
> takes vm->mutex.
>
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>

Reviewed-by: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>

Maarten Lankhorst Jan. 28, 2022, 11:25 a.m. UTC | #2
On 28-01-2022 12:02, Thomas Hellström (Intel) wrote:
>
> On 1/28/22 09:57, Maarten Lankhorst wrote:
>> i915_gem_vm_close may take the lock, and we currently have no better way
>> of handling this. At least for now, allow a path in which holding vm->mutex
>> is sufficient. This works because the object destroy path now forcefully
>> takes vm->mutex.
>>
>> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
>
> Reviewed-by: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>

Thanks, pushed. Should fix the Alder Lake DPT issue.

Patch

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b959e904c4d3..14a301c4069f 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -40,6 +40,17 @@ 
 #include "i915_vma.h"
 #include "i915_vma_resource.h"
 
+static inline void assert_vma_held_evict(const struct i915_vma *vma)
+{
+	/*
+	 * We may be forced to unbind when the vm is dead, to clean it up.
+	 * This is the only exception to the requirement of the object lock
+	 * being held.
+	 */
+	if (atomic_read(&vma->vm->open))
+		assert_object_held_shared(vma->obj);
+}
+
 static struct kmem_cache *slab_vmas;
 
 static struct i915_vma *i915_vma_alloc(void)
@@ -1779,7 +1790,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
 	struct dma_fence *unbind_fence;
 
 	GEM_BUG_ON(i915_vma_is_pinned(vma));
-	assert_object_held_shared(vma->obj);
+	assert_vma_held_evict(vma);
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/* Force a pagefault for domain tracking on next user access */
@@ -1846,7 +1857,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
 	int ret;
 
 	lockdep_assert_held(&vma->vm->mutex);
-	assert_object_held_shared(vma->obj);
+	assert_vma_held_evict(vma);
 
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
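
For context on why the relaxation is needed, the dead-vm cleanup path ends up
unbinding the remaining vmas while holding only vm->mutex. The sketch below
illustrates that calling pattern; it is not the actual i915_gem_vm_close() or
vm teardown code, and the list and member names (bound_list, vm_link) are
assumptions made for illustration.

/*
 * Illustrative sketch only -- not the real i915 teardown code.  The list
 * and member names (vm->bound_list, vma->vm_link) are assumptions; the
 * point is the locking contract: with vm->open already at zero,
 * assert_vma_held_evict() no longer requires the object lock, so holding
 * vm->mutex is enough to unbind here.
 */
static void example_dead_vm_unbind_all(struct i915_address_space *vm)
{
	struct i915_vma *vma, *next;

	GEM_BUG_ON(atomic_read(&vm->open));	/* the vm must already be dead */

	mutex_lock(&vm->mutex);
	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link)
		__i915_vma_unbind(vma);	/* no object lock: allowed only because vm->open == 0 */
	mutex_unlock(&vm->mutex);
}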