[05/13] drm/amdgpu: drop amdgpu_sync from amdgpu_vmid_grab

Message ID 20221014084641.128280-6-christian.koenig@amd.com (mailing list archive)
State New, archived
Series [01/13] drm/scheduler: fix fence ref counting

Commit Message

Christian König Oct. 14, 2022, 8:46 a.m. UTC
Instead of adding the fence to an amdgpu_sync object, return it directly.
This avoids the memory allocation needed to store the fence.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 42 +++++++++++++------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 12 +++----
 3 files changed, 28 insertions(+), 28 deletions(-)
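
To make the calling-convention change concrete, here is a minimal, self-contained sketch of why dropping the sync object saves an allocation. All types and names below (struct fence, struct sync, sync_add_fence, grab_idle_new) are toy stand-ins for illustration, not the real dma_fence/amdgpu_sync API: the old pattern has to heap-allocate a container node per tracked fence, while the new pattern just takes a reference through an out-parameter.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct dma_fence: just a refcount. */
struct fence { int refcount; };

static struct fence *fence_get(struct fence *f) { f->refcount++; return f; }
static void fence_put(struct fence *f) { f->refcount--; }

/* Old pattern: a sync container needs one heap allocation per fence. */
struct sync_node { struct fence *fence; struct sync_node *next; };
struct sync { struct sync_node *head; };

static int sync_add_fence(struct sync *s, struct fence *f)
{
	struct sync_node *n = malloc(sizeof(*n));	/* extra allocation */
	if (!n)
		return -1;				/* -ENOMEM in the kernel */
	n->fence = fence_get(f);
	n->next = s->head;
	s->head = n;
	return 0;
}

/* New pattern: no allocation, just hand back a referenced fence, as the
 * patched amdgpu_vmid_grab_idle() does with dma_fence_get(). */
static int grab_idle_new(struct fence *vmid_wait, struct fence **out)
{
	*out = fence_get(vmid_wait);
	return 0;
}

int main(void)
{
	struct fence wait = { .refcount = 1 };
	struct sync s = { .head = NULL };
	struct fence *dep = NULL;

	sync_add_fence(&s, &wait);	/* old way: malloc + reference */
	grab_idle_new(&wait, &dep);	/* new way: reference only */

	printf("refcount: %d\n", wait.refcount);	/* 3: owner + two users */

	fence_put(dep);
	fence_put(s.head->fence);
	free(s.head);
	return 0;
}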

Comments

Luben Tuikov Oct. 23, 2022, 1:25 a.m. UTC | #1
On 2022-10-14 04:46, Christian König wrote:
> Instead of adding the fence to an amdgpu_sync object, return it directly.
> This avoids the memory allocation needed to store the fence.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> [SNIP]
> @@ -254,12 +254,10 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
>  			DRM_ERROR("Error adding fence (%d)\n", r);
>  	}
>  
> -	while (fence == NULL && vm && !job->vmid) {
> -		r = amdgpu_vmid_grab(vm, ring, &job->sync, job);
> +	while (fence == NULL && job->vm && !job->vmid) {

In a preliminary application of the patch series, checkpatch.pl complains about the comparison
to NULL and wants !fence instead:

	while (!fence && job->vm && !job->vmid) {

I can see that it had been like this before... and I know it's out of the scope of this series,
but we should fix this at some point in time.

Regards,
Luben
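
For reference, here is the same hunk with the style fix applied (a restatement for illustration only, not part of the patch as posted):

	while (!fence && job->vm && !job->vmid) {
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);
	}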
Christian König Oct. 24, 2022, 10:54 a.m. UTC | #2
On 2022-10-23 03:25, Luben Tuikov wrote:
> On 2022-10-14 04:46, Christian König wrote:
>> [SNIP]
>> @@ -254,12 +254,10 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
>>   			DRM_ERROR("Error adding fence (%d)\n", r);
>>   	}
>>   
>> -	while (fence == NULL && vm && !job->vmid) {
>> -		r = amdgpu_vmid_grab(vm, ring, &job->sync, job);
>> +	while (fence == NULL && job->vm && !job->vmid) {
> In a preliminary application of the patch series, checkpatch.pl complains about the comparison
> to NULL and wants !fence instead:
>
> 	while (!fence && job->vm && !job->vmid) {
>
> I can see that it had been like this before... and I know it's out of the scope of this series,
> but we should fix this at some point in time.

Thanks for pointing that out. I try to fix it whenever I encounter
something like this, but sometimes I just forget to double-check.

Thanks,
Christian.

>
> Regards,
> Luben
>
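
One consequence of the new interface worth spelling out: amdgpu_vmid_grab() now hands back its dependency with a reference already taken via dma_fence_get(), so whoever receives *fence owns that reference. Below is a minimal sketch of the caller side, simplified from the patched amdgpu_job_dependency() and assuming the usual drm_sched convention that the scheduler consumes the reference of a returned dependency fence:

	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
	if (r)
		DRM_ERROR("Error getting VM ID (%d)\n", r);

	/* The reference taken inside amdgpu_vmid_grab() now belongs to the
	 * caller; returning the fence to the scheduler transfers ownership,
	 * any other caller would have to dma_fence_put() it when done. */
	return fence;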

Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b76294d4275b..2a9a2593dc18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -170,26 +170,27 @@  bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @idle: resulting idle VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to find an idle VMID, if none is idle add a fence to wait to the sync
  * object. Returns -ENOMEM when we are out of memory.
  */
 static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 				 struct amdgpu_ring *ring,
-				 struct amdgpu_sync *sync,
-				 struct amdgpu_vmid **idle)
+				 struct amdgpu_vmid **idle,
+				 struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct dma_fence **fences;
 	unsigned i;
-	int r;
 
-	if (!dma_fence_is_signaled(ring->vmid_wait))
-		return amdgpu_sync_fence(sync, ring->vmid_wait);
+	if (!dma_fence_is_signaled(ring->vmid_wait)) {
+		*fence = dma_fence_get(ring->vmid_wait);
+		return 0;
+	}
 
 	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
 	if (!fences)
@@ -228,10 +229,10 @@  static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 			return -ENOMEM;
 		}
 
-		r = amdgpu_sync_fence(sync, &array->base);
+		*fence = dma_fence_get(&array->base);
 		dma_fence_put(ring->vmid_wait);
 		ring->vmid_wait = &array->base;
-		return r;
+		return 0;
 	}
 	kfree(fences);
 
@@ -243,17 +244,17 @@  static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @job: job who wants to use the VMID
  * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to assign a reserved VMID.
  */
 static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 				     struct amdgpu_ring *ring,
-				     struct amdgpu_sync *sync,
 				     struct amdgpu_job *job,
-				     struct amdgpu_vmid **id)
+				     struct amdgpu_vmid **id,
+				     struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -280,7 +281,8 @@  static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
 		if (tmp) {
 			*id = NULL;
-			return amdgpu_sync_fence(sync, tmp);
+			*fence = dma_fence_get(tmp);
+			return 0;
 		}
 		needs_flush = true;
 	}
@@ -302,17 +304,17 @@  static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @job: job who wants to use the VMID
  * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to reuse a VMID for this submission.
  */
 static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 				 struct amdgpu_ring *ring,
-				 struct amdgpu_sync *sync,
 				 struct amdgpu_job *job,
-				 struct amdgpu_vmid **id)
+				 struct amdgpu_vmid **id,
+				 struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -367,13 +369,13 @@  static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @job: job who wants to use the VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		     struct amdgpu_sync *sync, struct amdgpu_job *job)
+		     struct amdgpu_job *job, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -383,16 +385,16 @@  int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	int r = 0;
 
 	mutex_lock(&id_mgr->lock);
-	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
+	r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
 	if (r || !idle)
 		goto error;
 
 	if (vm->reserved_vmid[vmhub]) {
-		r = amdgpu_vmid_grab_reserved(vm, ring, sync, job, &id);
+		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
 		if (r || !id)
 			goto error;
 	} else {
-		r = amdgpu_vmid_grab_used(vm, ring, sync, job, &id);
+		r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
 		if (r)
 			goto error;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 1b1e7d04655c..57efe61dceed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -84,7 +84,7 @@  void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
 			       struct amdgpu_vm *vm,
 			       unsigned vmhub);
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		     struct amdgpu_sync *sync, struct amdgpu_job *job);
+		     struct amdgpu_job *job, struct dma_fence **fence);
 void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
 		       unsigned vmid);
 void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 13b752687b30..e187dc0ab898 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -238,12 +238,12 @@  int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 	return 0;
 }
 
-static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
-					       struct drm_sched_entity *s_entity)
+static struct dma_fence *
+amdgpu_job_dependency(struct drm_sched_job *sched_job,
+		      struct drm_sched_entity *s_entity)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
-	struct amdgpu_vm *vm = job->vm;
 	struct dma_fence *fence;
 	int r;
 
@@ -254,12 +254,10 @@  static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 			DRM_ERROR("Error adding fence (%d)\n", r);
 	}
 
-	while (fence == NULL && vm && !job->vmid) {
-		r = amdgpu_vmid_grab(vm, ring, &job->sync, job);
+	while (fence == NULL && job->vm && !job->vmid) {
+		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
-
-		fence = amdgpu_sync_get_fence(&job->sync);
 	}
 
 	if (!fence && job->gang_submit)