drm/i915/gem: Make i915_gem_shrinker multi-gt aware

Message ID 20230922123541.23822-1-nirmoy.das@intel.com (mailing list archive)
State New, archived
Series drm/i915/gem: Make i915_gem_shrinker multi-gt aware

Commit Message

Nirmoy Das Sept. 22, 2023, 12:35 p.m. UTC
From: Jonathan Cavitt <jonathan.cavitt@intel.com>

Where applicable, use for_each_gt instead of to_gt in the
i915_gem_shrinker functions to make them apply to more than just the
primary GT.  Specifically, this ensures i915_gem_shrink_all retires all
requests across all GTs, and this makes i915_gem_shrinker_vmap unmap
VMAs from all GTs.

Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 44 ++++++++++++--------
 1 file changed, 26 insertions(+), 18 deletions(-)
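
The heart of the change is replacing the single-GT accessor to_gt(i915),
which returns only the primary GT, with the for_each_gt() iterator from
gt/intel_gt.h (hence the new include in the patch below). A minimal
sketch of the intended pattern (the helper name retire_all_gts is
illustrative and not part of the patch):

#include "gt/intel_gt.h"		/* for_each_gt(), struct intel_gt */
#include "gt/intel_gt_requests.h"	/* intel_gt_retire_requests() */

/* Illustrative helper; the patch open-codes this in i915_gem_shrink(). */
static void retire_all_gts(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	int i;

	/* Old pattern: only the primary GT gets kicked. */
	/* intel_gt_retire_requests(to_gt(i915)); */

	/* New pattern: retire requests on every GT of the device. */
	for_each_gt(gt, i915, i)
		intel_gt_retire_requests(gt);
}

On single-GT hardware for_each_gt() visits only the primary GT, so the
existing behaviour is preserved there.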

Comments

Andrzej Hajda Sept. 25, 2023, 1:23 p.m. UTC | #1
On 22.09.2023 14:35, Nirmoy Das wrote:
> From: Jonathan Cavitt <jonathan.cavitt@intel.com>
> 
> Where applicable, use for_each_gt instead of to_gt in the
> i915_gem_shrinker functions to make them apply to more than just the
> primary GT.  Specifically, this ensures i915_gem_shrink_all retires all
> requests across all GTs, and this makes i915_gem_shrinker_vmap unmap
> VMAs from all GTs.
> 
> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
> ---
>   drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 44 ++++++++++++--------
>   1 file changed, 26 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> index 214763942aa2..3ef1fd32f80a 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> @@ -14,6 +14,7 @@
>   #include <linux/vmalloc.h>
>   
>   #include "gt/intel_gt_requests.h"
> +#include "gt/intel_gt.h"
>   
>   #include "i915_trace.h"
>   
> @@ -119,7 +120,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
>   	intel_wakeref_t wakeref = 0;
>   	unsigned long count = 0;
>   	unsigned long scanned = 0;
> -	int err = 0;
> +	int err = 0, i = 0;
> +	struct intel_gt *gt;
>   
>   	/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
>   	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
> @@ -147,9 +149,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
>   	 * what we can do is give them a kick so that we do not keep idle
>   	 * contexts around longer than is necessary.
>   	 */
> -	if (shrink & I915_SHRINK_ACTIVE)
> -		/* Retire requests to unpin all idle contexts */
> -		intel_gt_retire_requests(to_gt(i915));
> +	if (shrink & I915_SHRINK_ACTIVE) {
> +		for_each_gt(gt, i915, i)
> +			/* Retire requests to unpin all idle contexts */
> +			intel_gt_retire_requests(to_gt(i915));


to_gt(...) -> gt ?


> +	}
>   
>   	/*
>   	 * As we may completely rewrite the (un)bound list whilst unbinding
> @@ -389,6 +393,8 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
>   	struct i915_vma *vma, *next;
>   	unsigned long freed_pages = 0;
>   	intel_wakeref_t wakeref;
> +	struct intel_gt *gt;
> +	int i;
>   
>   	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
>   		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
> @@ -397,24 +403,26 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
>   					       I915_SHRINK_VMAPS);
>   
>   	/* We also want to clear any cached iomaps as they wrap vmap */
> -	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
> -	list_for_each_entry_safe(vma, next,
> -				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
> -		unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
> -		struct drm_i915_gem_object *obj = vma->obj;
> -
> -		if (!vma->iomap || i915_vma_is_active(vma))
> -			continue;
> +	for_each_gt(gt, i915, i) {
> +		mutex_lock(&gt->ggtt->vm.mutex);
> +		list_for_each_entry_safe(vma, next,
> +					 &gt->ggtt->vm.bound_list, vm_link) {
> +			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
> +			struct drm_i915_gem_object *obj = vma->obj;
> +
> +			if (!vma->iomap || i915_vma_is_active(vma))
> +				continue;
>   
> -		if (!i915_gem_object_trylock(obj, NULL))
> -			continue;
> +			if (!i915_gem_object_trylock(obj, NULL))
> +				continue;
>   
> -		if (__i915_vma_unbind(vma) == 0)
> -			freed_pages += count;
> +			if (__i915_vma_unbind(vma) == 0)
> +				freed_pages += count;
>   
> -		i915_gem_object_unlock(obj);
> +			i915_gem_object_unlock(obj);
> +		}
> +		mutex_unlock(&gt->ggtt->vm.mutex);
>   	}
> -	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);


This seems correct.

With 1st stanza fixed:
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>

Regards
Andrzej


>   
>   	*(unsigned long *)ptr += freed_pages;
>   	return NOTIFY_DONE;
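
With the to_gt(...) -> gt suggestion above applied, the first stanza
would presumably read as follows in the resend (a sketch of the expected
fix, not posted code):

	if (shrink & I915_SHRINK_ACTIVE) {
		for_each_gt(gt, i915, i)
			/* Retire requests to unpin all idle contexts */
			intel_gt_retire_requests(gt);
	}
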
Nirmoy Das Sept. 25, 2023, 1:32 p.m. UTC | #2
On 9/25/2023 3:23 PM, Andrzej Hajda wrote:
> On 22.09.2023 14:35, Nirmoy Das wrote:
>> From: Jonathan Cavitt <jonathan.cavitt@intel.com>
>>
>> Where applicable, use for_each_gt instead of to_gt in the
>> i915_gem_shrinker functions to make them apply to more than just the
>> primary GT.  Specifically, this ensures i915_gem_shrink_all retires all
>> requests across all GTs, and this makes i915_gem_shrinker_vmap unmap
>> VMAs from all GTs.
>>
>> Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
>> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
>> ---
>>   drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 44 ++++++++++++--------
>>   1 file changed, 26 insertions(+), 18 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
>> index 214763942aa2..3ef1fd32f80a 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
>> @@ -14,6 +14,7 @@
>>   #include <linux/vmalloc.h>
>>   
>>   #include "gt/intel_gt_requests.h"
>> +#include "gt/intel_gt.h"
>>   
>>   #include "i915_trace.h"
>>   
>> @@ -119,7 +120,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
>>   	intel_wakeref_t wakeref = 0;
>>   	unsigned long count = 0;
>>   	unsigned long scanned = 0;
>> -	int err = 0;
>> +	int err = 0, i = 0;
>> +	struct intel_gt *gt;
>>   
>>   	/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
>>   	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
>> @@ -147,9 +149,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
>>   	 * what we can do is give them a kick so that we do not keep idle
>>   	 * contexts around longer than is necessary.
>>   	 */
>> -	if (shrink & I915_SHRINK_ACTIVE)
>> -		/* Retire requests to unpin all idle contexts */
>> -		intel_gt_retire_requests(to_gt(i915));
>> +	if (shrink & I915_SHRINK_ACTIVE) {
>> +		for_each_gt(gt, i915, i)
>> +			/* Retire requests to unpin all idle contexts */
>> +			intel_gt_retire_requests(to_gt(i915));
>
>
> to_gt(...) -> gt ?


Wow, a huge miss. Thanks, will resend!

>
>
>> +	}
>>   
>>   	/*
>>   	 * As we may completely rewrite the (un)bound list whilst unbinding
>> @@ -389,6 +393,8 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
>>   	struct i915_vma *vma, *next;
>>   	unsigned long freed_pages = 0;
>>   	intel_wakeref_t wakeref;
>> +	struct intel_gt *gt;
>> +	int i;
>>   
>>   	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
>>   		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
>> @@ -397,24 +403,26 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
>>   					       I915_SHRINK_VMAPS);
>>   
>>   	/* We also want to clear any cached iomaps as they wrap vmap */
>> -	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
>> -	list_for_each_entry_safe(vma, next,
>> -				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
>> -		unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
>> -		struct drm_i915_gem_object *obj = vma->obj;
>> -
>> -		if (!vma->iomap || i915_vma_is_active(vma))
>> -			continue;
>> +	for_each_gt(gt, i915, i) {
>> +		mutex_lock(&gt->ggtt->vm.mutex);
>> +		list_for_each_entry_safe(vma, next,
>> +					 &gt->ggtt->vm.bound_list, vm_link) {
>> +			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
>> +			struct drm_i915_gem_object *obj = vma->obj;
>> +
>> +			if (!vma->iomap || i915_vma_is_active(vma))
>> +				continue;
>>   
>> -		if (!i915_gem_object_trylock(obj, NULL))
>> -			continue;
>> +			if (!i915_gem_object_trylock(obj, NULL))
>> +				continue;
>>   
>> -		if (__i915_vma_unbind(vma) == 0)
>> -			freed_pages += count;
>> +			if (__i915_vma_unbind(vma) == 0)
>> +				freed_pages += count;
>>   
>> -		i915_gem_object_unlock(obj);
>> +			i915_gem_object_unlock(obj);
>> +		}
>> +		mutex_unlock(&gt->ggtt->vm.mutex);
>>   	}
>> -	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
>
>
> This seems correct.
>
> With 1st stanza fixed:
> Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>

Thanks,

Nirmoy

>
> Regards
> Andrzej
>
>
>>   
>>   	*(unsigned long *)ptr += freed_pages;
>>   	return NOTIFY_DONE;
>

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 214763942aa2..3ef1fd32f80a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -14,6 +14,7 @@
 #include <linux/vmalloc.h>
 
 #include "gt/intel_gt_requests.h"
+#include "gt/intel_gt.h"
 
 #include "i915_trace.h"
 
@@ -119,7 +120,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	intel_wakeref_t wakeref = 0;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
-	int err = 0;
+	int err = 0, i = 0;
+	struct intel_gt *gt;
 
 	/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
 	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -147,9 +149,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	 * what we can do is give them a kick so that we do not keep idle
 	 * contexts around longer than is necessary.
 	 */
-	if (shrink & I915_SHRINK_ACTIVE)
-		/* Retire requests to unpin all idle contexts */
-		intel_gt_retire_requests(to_gt(i915));
+	if (shrink & I915_SHRINK_ACTIVE) {
+		for_each_gt(gt, i915, i)
+			/* Retire requests to unpin all idle contexts */
+			intel_gt_retire_requests(to_gt(i915));
+	}
 
 	/*
 	 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -389,6 +393,8 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
 	intel_wakeref_t wakeref;
+	struct intel_gt *gt;
+	int i;
 
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
@@ -397,24 +403,26 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 					       I915_SHRINK_VMAPS);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
-	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
-	list_for_each_entry_safe(vma, next,
-				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
-		unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		if (!vma->iomap || i915_vma_is_active(vma))
-			continue;
+	for_each_gt(gt, i915, i) {
+		mutex_lock(&gt->ggtt->vm.mutex);
+		list_for_each_entry_safe(vma, next,
+					 &gt->ggtt->vm.bound_list, vm_link) {
+			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
+			struct drm_i915_gem_object *obj = vma->obj;
+
+			if (!vma->iomap || i915_vma_is_active(vma))
+				continue;
 
-		if (!i915_gem_object_trylock(obj, NULL))
-			continue;
+			if (!i915_gem_object_trylock(obj, NULL))
+				continue;
 
-		if (__i915_vma_unbind(vma) == 0)
-			freed_pages += count;
+			if (__i915_vma_unbind(vma) == 0)
+				freed_pages += count;
 
-		i915_gem_object_unlock(obj);
+			i915_gem_object_unlock(obj);
+		}
+		mutex_unlock(&gt->ggtt->vm.mutex);
 	}
-	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
 
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;
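
Structurally, the reworked i915_gem_shrinker_vmap() takes each
gt->ggtt->vm.mutex only around the walk of that GGTT's bound_list,
rather than holding one lock/unlock pair across the whole body. A
condensed sketch of the resulting shape (the helper name
flush_ggtt_iomaps is illustrative; the real code stays inline in
i915_gem_shrinker_vmap()):

/* Illustrative condensation of the loop added by this patch. */
static unsigned long flush_ggtt_iomaps(struct drm_i915_private *i915)
{
	unsigned long freed_pages = 0;
	struct i915_vma *vma, *next;
	struct intel_gt *gt;
	int i;

	for_each_gt(gt, i915, i) {
		/* Serialise against binds/unbinds on this GGTT only. */
		mutex_lock(&gt->ggtt->vm.mutex);
		list_for_each_entry_safe(vma, next,
					 &gt->ggtt->vm.bound_list, vm_link) {
			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
			struct drm_i915_gem_object *obj = vma->obj;

			/* Only idle VMAs with a cached iomap matter here. */
			if (!vma->iomap || i915_vma_is_active(vma))
				continue;

			if (!i915_gem_object_trylock(obj, NULL))
				continue;

			if (__i915_vma_unbind(vma) == 0)
				freed_pages += count;

			i915_gem_object_unlock(obj);
		}
		mutex_unlock(&gt->ggtt->vm.mutex);
	}

	return freed_pages;
}

Scoping the lock per GGTT keeps the critical sections short and avoids
nesting one GGTT's vm.mutex inside another's.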