diff mbox

drm/scheduler: fix param documentation

Message ID CADnq5_MrOzV3J3sBc=CAR0G3etMuo6+MDXhDr=oZVPt3C+-SXA@mail.gmail.com (mailing list archive)
State New, archived
Headers show

Commit Message

Alex Deucher March 29, 2018, 3:26 p.m. UTC
On Tue, Mar 27, 2018 at 1:29 PM, Nayan Deshmukh
<nayan26deshmukh@gmail.com> wrote:
> On Tue, Mar 27, 2018 at 1:47 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
>> On Mon, Mar 26, 2018 at 08:51:14PM +0530, Nayan Deshmukh wrote:
>>> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
>>
>> You might want to add a kerneldoc page in Documentation/gpu/scheduler.rst,
>> which pulls in all the nice kerneldoc you have here + has a short intro
>> text what this is all about.
>>
> Yeah Sure. I'll send a patch for this in a while.

FWIW, I started improving the documentation in the GPU scheduler last
week, but ran out of time to finish it.  Here's the WIP patch. Feel
free to run with it.

Alex

>
> Cheers,
> Nayan
>> Cheers, Daniel
>>
>>> ---
>>>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 ++-
>>>  1 file changed, 2 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>>> index 0d95888ccc3e..1d368bc66ac2 100644
>>> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
>>> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>>> @@ -117,8 +117,9 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
>>>   * @sched    The pointer to the scheduler
>>>   * @entity   The pointer to a valid drm_sched_entity
>>>   * @rq               The run queue this entity belongs
>>> - * @kernel   If this is an entity for the kernel
>>>   * @jobs     The max number of jobs in the job queue
>>> + * @guilty      atomic_t set to 1 when a job on this queue
>>> + *              is found to be guilty causing a timeout
>>>   *
>>>   * return 0 if succeed. negative error code on failure
>>>  */
>>> --
>>> 2.14.3
>>>
>>> _______________________________________________
>>> dri-devel mailing list
>>> dri-devel@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/dri-devel
>>
>> --
>> Daniel Vetter
>> Software Engineer, Intel Corporation
>> http://blog.ffwll.ch
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel

Comments

Nayan Deshmukh March 29, 2018, 5:08 p.m. UTC | #1
On Thu, Mar 29, 2018 at 8:56 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
> On Tue, Mar 27, 2018 at 1:29 PM, Nayan Deshmukh
> <nayan26deshmukh@gmail.com> wrote:
>> On Tue, Mar 27, 2018 at 1:47 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
>>> On Mon, Mar 26, 2018 at 08:51:14PM +0530, Nayan Deshmukh wrote:
>>>> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
>>>
>>> You might want to add a kerneldoc page in Documentation/gpu/scheduler.rst,
>>> which pulls in all the nice kerneldoc you have here + has a short intro
>>> text what this is all about.
>>>
>> Yeah Sure. I'll send a patch for this in a while.
>
> FWIW, I started improving the documentation in the GPU scheduler last
> week, but ran out of time to finish it.  Here's the WIP patch. Feel
> free to run with it.
>
This will be useful. I'll go over it during the weekend and probably
send a patch early next week.

Thanks
> Alex
>
>>
>> Cheers,
>> Nayan
>>> Cheers, Daniel
>>>
>>>> ---
>>>>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 ++-
>>>>  1 file changed, 2 insertions(+), 1 deletion(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>>>> index 0d95888ccc3e..1d368bc66ac2 100644
>>>> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
>>>> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>>>> @@ -117,8 +117,9 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
>>>>   * @sched    The pointer to the scheduler
>>>>   * @entity   The pointer to a valid drm_sched_entity
>>>>   * @rq               The run queue this entity belongs
>>>> - * @kernel   If this is an entity for the kernel
>>>>   * @jobs     The max number of jobs in the job queue
>>>> + * @guilty      atomic_t set to 1 when a job on this queue
>>>> + *              is found to be guilty causing a timeout
>>>>   *
>>>>   * return 0 if succeed. negative error code on failure
>>>>  */
>>>> --
>>>> 2.14.3
>>>>
>>>> _______________________________________________
>>>> dri-devel mailing list
>>>> dri-devel@lists.freedesktop.org
>>>> https://lists.freedesktop.org/mailman/listinfo/dri-devel
>>>
>>> --
>>> Daniel Vetter
>>> Software Engineer, Intel Corporation
>>> http://blog.ffwll.ch
>> _______________________________________________
>> dri-devel mailing list
>> dri-devel@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/dri-devel
diff mbox

Patch

From 1b98337276bb86e0e6aeacffc4cf7ccfa06d7b55 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 20 Mar 2018 23:03:48 -0500
Subject: [PATCH] XXX: wip gpu scheduler documentation

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 160 ++++++++++++++++++++++--------
 1 file changed, 121 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 0d95888ccc3e..635aec73d5f0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -39,7 +39,13 @@  static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
 static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
-/* Initialize a given run queue struct */
+/**
+ * drm_sched_rq_init - initialize a given run queue struct
+ *
+ * @rq: scheduler run queue
+ *
+ * This function initializes a scheduler runqueue.
+ */
 static void drm_sched_rq_init(struct drm_sched_rq *rq)
 {
 	spin_lock_init(&rq->lock);
@@ -47,6 +53,14 @@  static void drm_sched_rq_init(struct drm_sched_rq *rq)
 	rq->current_entity = NULL;
 }
 
+/**
+ * drm_sched_rq_add_entity - add an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * This function adds a scheduling entity to the run queue.
+ */
 static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
 				    struct drm_sched_entity *entity)
 {
@@ -57,6 +71,14 @@  static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
 	spin_unlock(&rq->lock);
 }
 
+/**
+ * drm_sched_rq_remove_entity - remove an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * This function removes a scheduling entity from the run queue.
+ */
 static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 				       struct drm_sched_entity *entity)
 {
@@ -70,9 +92,9 @@  static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 }
 
 /**
- * Select an entity which could provide a job to run
+ * drm_sched_rq_select_entity - Select an entity which could provide a job to run
  *
- * @rq		The run queue to check.
+ * @rq: scheduler run queue to check.
  *
  * Try to find a ready entity, returns NULL if none found.
  */
@@ -112,16 +134,17 @@  drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 }
 
 /**
- * Init a context entity used by scheduler when submit to HW ring.
+ * drm_sched_entity_init - init a context entity used by scheduler when submit to HW queue
  *
- * @sched	The pointer to the scheduler
- * @entity	The pointer to a valid drm_sched_entity
- * @rq		The run queue this entity belongs
- * @kernel	If this is an entity for the kernel
- * @jobs	The max number of jobs in the job queue
+ * @sched: scheduler instance
+ * @entity: scheduler entity
+ * @rq: scheduler run queue to which this entity belongs
+ * @kernel: if this is an entity for the kernel
+ * @jobs: max number of jobs in the job queue
+ * @guilty: the guilty marker for the context
  *
- * return 0 if succeed. negative error code on failure
-*/
+ * Return 0 on success, negative error code on failure.
+ */
 int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 			  struct drm_sched_entity *entity,
 			  struct drm_sched_rq *rq,
@@ -148,10 +171,10 @@  int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 EXPORT_SYMBOL(drm_sched_entity_init);
 
 /**
- * Query if entity is initialized
+ * drm_sched_entity_is_initialized - Query if entity is initialized
  *
- * @sched       Pointer to scheduler instance
- * @entity	The pointer to a valid scheduler entity
+ * @sched: scheduler instance
+ * @entity: scheduler entity
  *
  * return true if entity is initialized, false otherwise
 */
@@ -163,11 +186,11 @@  static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
 }
 
 /**
- * Check if entity is idle
+ * drm_sched_entity_is_idle - Check if entity is idle
  *
- * @entity	The pointer to a valid scheduler entity
+ * @entity: scheduler entity
  *
- * Return true if entity don't has any unscheduled jobs.
+ * Return true if the entity does not have any unscheduled jobs.
  */
 static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 {
@@ -179,9 +202,9 @@  static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 }
 
 /**
- * Check if entity is ready
+ * drm_sched_entity_is_ready - Check if entity is ready
  *
- * @entity	The pointer to a valid scheduler entity
+ * @entity: scheduler entity
  *
  * Return true if entity could provide a job.
  */
@@ -197,12 +220,12 @@  static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
 }
 
 /**
- * Destroy a context entity
+ * drm_sched_entity_fini - Destroy a context entity
  *
- * @sched       Pointer to scheduler instance
- * @entity	The pointer to a valid scheduler entity
+ * @sched: scheduler instance
+ * @entity: scheduler entity
  *
- * Cleanup and free the allocated resources.
+ * Cleanup and free the allocated entity resources.
  */
 void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity)
@@ -457,7 +480,15 @@  static void drm_sched_job_timedout(struct work_struct *work)
 	job->sched->ops->timedout_job(job);
 }
 
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
+/**
+ * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ *
+ * @sched: scheduler instance
+ * @bad: bad scheduler job
+ *
+ */
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
+			    struct drm_sched_job *bad)
 {
 	struct drm_sched_job *s_job;
 	struct drm_sched_entity *entity, *tmp;
@@ -478,8 +509,8 @@  void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
 		atomic_inc(&bad->karma);
 		/* don't increase @bad's karma if it's from KERNEL RQ,
-		 * becuase sometimes GPU hang would cause kernel jobs (like VM updating jobs)
-		 * corrupt but keep in mind that kernel jobs always considered good.
+		 * because sometimes GPU hang would cause kernel jobs (like VM updating jobs)
+		 * to break but keep in mind that kernel jobs are always considered good.
 		 */
 		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++ ) {
 			struct drm_sched_rq *rq = &sched->sched_rq[i];
@@ -501,6 +532,12 @@  void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 }
 EXPORT_SYMBOL(drm_sched_hw_job_reset);
 
+/**
+ * drm_sched_job_recovery - recover jobs after a reset
+ *
+ * @sched: scheduler instance
+ *
+ */
 void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 {
 	struct drm_sched_job *s_job, *tmp;
@@ -548,7 +585,16 @@  void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 }
 EXPORT_SYMBOL(drm_sched_job_recovery);
 
-/* init a sched_job with basic field */
+/**
+ * drm_sched_job_init - init a scheduler job
+ *
+ * @job: scheduler job to init
+ * @sched: scheduler instance
+ * @entity: scheduler entity to use
+ * @owner: job owner for debugging
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
@@ -570,7 +616,11 @@  int drm_sched_job_init(struct drm_sched_job *job,
 EXPORT_SYMBOL(drm_sched_job_init);
 
 /**
- * Return ture if we can push more jobs to the hw.
+ * drm_sched_ready - is the scheduler ready
+ *
+ * @sched: scheduler instance
+ *
+ * Return true if we can push more jobs to the hw, otherwise false.
  */
 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
 {
@@ -579,7 +629,10 @@  static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
 }
 
 /**
- * Wake up the scheduler when it is ready
+ * drm_sched_wakeup - Wake up the scheduler when it is ready
+ *
+ * @sched: scheduler instance
+ *
  */
 static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 {
@@ -588,8 +641,12 @@  static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 }
 
 /**
- * Select next entity to process
-*/
+ * drm_sched_select_entity - Select next entity to process
+ *
+ * @sched: scheduler instance
+ *
+ * Returns the entity to process or NULL if none are found.
+ */
 static struct drm_sched_entity *
 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 {
@@ -609,6 +666,13 @@  drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 	return entity;
 }
 
+/**
+ * drm_sched_process_job - process a job
+ *
+ * @f: fence
+ * @cb: fence callbacks
+ *
+ */
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct drm_sched_fence *s_fence =
@@ -624,6 +688,13 @@  static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
+/**
+ * drm_sched_blocked - check if the scheduler is blocked
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if blocked, otherwise false.
+ */
 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
 {
 	if (kthread_should_park()) {
@@ -634,6 +705,13 @@  static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
 	return false;
 }
 
+/**
+ * drm_sched_main - main scheduler thread
+ *
+ * @param: scheduler instance
+ *
+ * Returns 0.
+ */
 static int drm_sched_main(void *param)
 {
 	struct sched_param sparam = {.sched_priority = 1};
@@ -688,15 +766,17 @@  static int drm_sched_main(void *param)
 }
 
 /**
- * Init a gpu scheduler instance
+ * drm_sched_init - Init a gpu scheduler instance
  *
- * @sched		The pointer to the scheduler
- * @ops			The backend operations for this scheduler.
- * @hw_submissions	Number of hw submissions to do.
- * @name		Name used for debugging
+ * @sched: scheduler instance
+ * @ops: backend operations for this scheduler
+ * @hw_submission: number of hw submissions that can be in flight
+ * @hang_limit: number of times to allow a job to hang before dropping it
+ * @timeout: timeout value in jiffies for the scheduler
+ * @name: name used for debugging
  *
  * Return 0 on success, otherwise error code.
-*/
+ */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
 		   unsigned hw_submission,
@@ -732,9 +812,11 @@  int drm_sched_init(struct drm_gpu_scheduler *sched,
 EXPORT_SYMBOL(drm_sched_init);
 
 /**
- * Destroy a gpu scheduler
+ * drm_sched_fini - Destroy a gpu scheduler
+ *
+ * @sched: scheduler instance
  *
- * @sched	The pointer to the scheduler
+ * Tears down and cleans up the scheduler.
  */
 void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
-- 
2.13.6