
[1/3] drm/scheduler: track GPU active time per entity

Message ID 20220908181013.3214205-1-l.stach@pengutronix.de (mailing list archive)
State New, archived
Series: [1/3] drm/scheduler: track GPU active time per entity

Commit Message

Lucas Stach Sept. 8, 2022, 6:10 p.m. UTC
Track the accumulated time that jobs from this entity were active
on the GPU. This allows drivers using the scheduler to trivially
implement DRM fdinfo when the hardware doesn't provide more
specific information than signalling job completion anyway.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
 drivers/gpu/drm/scheduler/sched_main.c | 6 ++++++
 include/drm/gpu_scheduler.h            | 7 +++++++
 2 files changed, 13 insertions(+)
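
As an illustration only (not part of this patch): a driver could expose the
new counter through its fdinfo hook roughly as sketched below. The
my_driver_show_fdinfo name, struct my_file_priv and its gpu_entity member
are hypothetical placeholders for driver-private state, and the read of
elapsed_ns is left unsynchronized here, just like the update in the
scheduler.

#include <linux/seq_file.h>
#include <drm/drm_file.h>
#include <drm/gpu_scheduler.h>

/* Wired up via the driver's struct file_operations .show_fdinfo callback. */
static void my_driver_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct drm_file *file = f->private_data;
	struct my_file_priv *ctx = file->driver_priv;

	/* elapsed_ns is accumulated in drm_sched_get_cleanup_job() */
	seq_printf(m, "drm-engine-gpu:\t%llu ns\n",
		   (unsigned long long)ctx->gpu_entity.elapsed_ns);
}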

Comments

Andrey Grodzovsky Sept. 8, 2022, 6:33 p.m. UTC | #1
On 2022-09-08 14:10, Lucas Stach wrote:
> Track the accumulated time that jobs from this entity were active
> on the GPU. This allows drivers using the scheduler to trivially
> implement DRM fdinfo when the hardware doesn't provide more
> specific information than signalling job completion anyway.
>
> Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
> ---
>   drivers/gpu/drm/scheduler/sched_main.c | 6 ++++++
>   include/drm/gpu_scheduler.h            | 7 +++++++
>   2 files changed, 13 insertions(+)
>
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 76fd2904c7c6..24c77a6a157f 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -847,6 +847,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
>   
>   	spin_unlock(&sched->job_list_lock);
>   
> +	if (job) {
> +		job->entity->elapsed_ns += ktime_to_ns(
> +			ktime_sub(job->s_fence->finished.timestamp,
> +				  job->s_fence->scheduled.timestamp));
> +	}
> +
>   	return job;


Looks like you're making the assumption that drm_sched_entity will always be
allocated using kzalloc? Isn't that a bit of a dangerous assumption?

Andrey


>   }
>   
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index addb135eeea6..573bef640664 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -196,6 +196,13 @@ struct drm_sched_entity {
>   	 * drm_sched_entity_fini().
>   	 */
>   	struct completion		entity_idle;
> +	/**
> +	 * @elapsed_ns
> +	 *
> +	 * Records the amount of time where jobs from this entity were active
> +	 * on the GPU.
> +	 */
> +	uint64_t elapsed_ns;
>   };
>   
>   /**
Lucas Stach Sept. 16, 2022, 9:12 a.m. UTC | #2
Am Donnerstag, dem 08.09.2022 um 14:33 -0400 schrieb Andrey Grodzovsky:
> On 2022-09-08 14:10, Lucas Stach wrote:
> > Track the accumulated time that jobs from this entity were active
> > on the GPU. This allows drivers using the scheduler to trivially
> > implement DRM fdinfo when the hardware doesn't provide more
> > specific information than signalling job completion anyway.
> > 
> > Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
> > ---
> >   drivers/gpu/drm/scheduler/sched_main.c | 6 ++++++
> >   include/drm/gpu_scheduler.h            | 7 +++++++
> >   2 files changed, 13 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > index 76fd2904c7c6..24c77a6a157f 100644
> > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > @@ -847,6 +847,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
> >   
> >   	spin_unlock(&sched->job_list_lock);
> >   
> > +	if (job) {
> > +		job->entity->elapsed_ns += ktime_to_ns(
> > +			ktime_sub(job->s_fence->finished.timestamp,
> > +				  job->s_fence->scheduled.timestamp));
> > +	}
> > +
> >   	return job;
> 
> 
> Looks like you're making the assumption that drm_sched_entity will always be
> allocated using kzalloc? Isn't that a bit of a dangerous assumption?
> 
No, drm_sched_entity_init() memsets the whole struct to 0 before
initializing any members that need more specific init values.
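
(For reference, drm_sched_entity_init() in
drivers/gpu/drm/scheduler/sched_entity.c starts with, lightly abridged:

	memset(entity, 0, sizeof(struct drm_sched_entity));

so elapsed_ns starts out as zero regardless of how the containing structure
was allocated.)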

Regards,
Lucas

> Andrey
> 
> 
> >   }
> >   
> > diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> > index addb135eeea6..573bef640664 100644
> > --- a/include/drm/gpu_scheduler.h
> > +++ b/include/drm/gpu_scheduler.h
> > @@ -196,6 +196,13 @@ struct drm_sched_entity {
> >   	 * drm_sched_entity_fini().
> >   	 */
> >   	struct completion		entity_idle;
> > +	/**
> > +	 * @elapsed_ns
> > +	 *
> > +	 * Records the amount of time where jobs from this entity were active
> > +	 * on the GPU.
> > +	 */
> > +	uint64_t elapsed_ns;
> >   };
> >   
> >   /**
Andrey Grodzovsky Sept. 16, 2022, 1:37 p.m. UTC | #3
On 2022-09-16 05:12, Lucas Stach wrote:
> Am Donnerstag, dem 08.09.2022 um 14:33 -0400 schrieb Andrey Grodzovsky:
>> On 2022-09-08 14:10, Lucas Stach wrote:
>>> Track the accumulated time that jobs from this entity were active
>>> on the GPU. This allows drivers using the scheduler to trivially
>>> implement DRM fdinfo when the hardware doesn't provide more
>>> specific information than signalling job completion anyway.
>>>
>>> Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
>>> ---
>>>    drivers/gpu/drm/scheduler/sched_main.c | 6 ++++++
>>>    include/drm/gpu_scheduler.h            | 7 +++++++
>>>    2 files changed, 13 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
>>> index 76fd2904c7c6..24c77a6a157f 100644
>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>> @@ -847,6 +847,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
>>>    
>>>    	spin_unlock(&sched->job_list_lock);
>>>    
>>> +	if (job) {
>>> +		job->entity->elapsed_ns += ktime_to_ns(
>>> +			ktime_sub(job->s_fence->finished.timestamp,
>>> +				  job->s_fence->scheduled.timestamp));
>>> +	}
>>> +
>>>    	return job;
>>
>> Looks like you're making the assumption that drm_sched_entity will always be
>> allocated using kzalloc? Isn't that a bit of a dangerous assumption?
>>
> No, drm_sched_entity_init() memsets the whole struct to 0 before
> initializing any members that need more specific init values.
>
> Regards,
> Lucas


Missed the memset; in that case, Reviewed-by: Andrey Grodzovsky
<andrey.grodzovsky@amd.com>

I assume you can push that change yourself with the rest of your patchset?

Andrey

>
>> Andrey
>>
>>
>>>    }
>>>    
>>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>>> index addb135eeea6..573bef640664 100644
>>> --- a/include/drm/gpu_scheduler.h
>>> +++ b/include/drm/gpu_scheduler.h
>>> @@ -196,6 +196,13 @@ struct drm_sched_entity {
>>>    	 * drm_sched_entity_fini().
>>>    	 */
>>>    	struct completion		entity_idle;
>>> +	/**
>>> +	 * @elapsed_ns
>>> +	 *
>>> +	 * Records the amount of time where jobs from this entity were active
>>> +	 * on the GPU.
>>> +	 */
>>> +	uint64_t elapsed_ns;
>>>    };
>>>    
>>>    /**
>

Patch

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 76fd2904c7c6..24c77a6a157f 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -847,6 +847,12 @@  drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
 	spin_unlock(&sched->job_list_lock);
 
+	if (job) {
+		job->entity->elapsed_ns += ktime_to_ns(
+			ktime_sub(job->s_fence->finished.timestamp,
+				  job->s_fence->scheduled.timestamp));
+	}
+
 	return job;
 }
 
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index addb135eeea6..573bef640664 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -196,6 +196,13 @@  struct drm_sched_entity {
 	 * drm_sched_entity_fini().
 	 */
 	struct completion		entity_idle;
+	/**
+	 * @elapsed_ns
+	 *
+	 * Records the amount of time where jobs from this entity were active
+	 * on the GPU.
+	 */
+	uint64_t elapsed_ns;
 };
 
 /**
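
For illustration, a driver that wires elapsed_ns into its fdinfo callback
(as sketched after the diffstat above) would emit output following the
format in Documentation/gpu/drm-usage-stats.rst, along these lines (values
made up):

	drm-driver:	example
	drm-engine-gpu:	1234567890 ns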