Message ID | 20230919050155.2647172-2-matthew.brost@intel.com (mailing list archive) |
---|---|
State | New, archived |
Series | DRM scheduler changes for Xe |
On 19.09.23 at 07:01, Matthew Brost wrote:
> Add scheduler submit ready, stop, and start helpers to hide the
> implementation details of the scheduler from the drivers.
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>

Reviewed-by: Christian König <christian.koenig@amd.com> for this one.

No idea when I have time to look into the rest :( But Luben should take a look.

Regards,
Christian
On 2023-09-19 01:58, Christian König wrote:
> On 19.09.23 at 07:01, Matthew Brost wrote:
>> Add scheduler submit ready, stop, and start helpers to hide the
>> implementation details of the scheduler from the drivers.
>>
>> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
>
> Reviewed-by: Christian König <christian.koenig@amd.com> for this one.
>
> No idea when I have time to look into the rest :( But Luben should take
> a look.

Hi Christian,

Yes, I'll finish up with v3 and v4 tomorrow morning and afternoon.

Regards,
Luben
On 2023-09-19 01:01, Matthew Brost wrote:
> Add scheduler submit ready, stop, and start helpers to hide the
> implementation details of the scheduler from the drivers.
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>

[snip]

> +/**
> + * drm_sched_submit_ready - scheduler ready for submission

"Is the scheduler ready for submission" is so much more clear and approachable.
Let's have that go in the kernel, yes?

> + *
> + * @sched: scheduler instance
> + *
> + * Returns true if submission is ready
> + */
> +bool drm_sched_submit_ready(struct drm_gpu_scheduler *sched)
> +{
> +        return !!sched->thread;
> +
> +}

Remove the extra white line after the return-statement.
(Please run your patches through checkpatch.pl to catch those.)

With these two changes this patch is:

Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
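For reference, a sketch of what drm_sched_submit_ready() would look like with both requested changes applied (illustrative only, not the actual resend):

/**
 * drm_sched_submit_ready - Is the scheduler ready for submission
 *
 * @sched: scheduler instance
 *
 * Returns true if submission is ready
 */
bool drm_sched_submit_ready(struct drm_gpu_scheduler *sched)
{
        return !!sched->thread;
}
EXPORT_SYMBOL(drm_sched_submit_ready);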
Add scheduler submit ready, stop, and start helpers to hide the
implementation details of the scheduler from the drivers.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 15 +++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  | 12 +++---
 drivers/gpu/drm/msm/adreno/adreno_device.c  |  6 ++-
 drivers/gpu/drm/scheduler/sched_main.c      | 40 ++++++++++++++++++-
 include/drm/gpu_scheduler.h                 |  3 ++
 6 files changed, 60 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 625db444df1c..36a1accbc846 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

-                if (!(ring && ring->sched.thread))
+                if (!(ring && drm_sched_submit_ready(&ring->sched)))
                         continue;

                 /* stop secheduler and drain ring. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a4faea4fa0b5..fb5dad687168 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1659,9 +1659,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
         for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                 struct amdgpu_ring *ring = adev->rings[i];

-                if (!ring || !ring->sched.thread)
+                if (!ring || !drm_sched_submit_ready(&ring->sched))
                         continue;
-                kthread_park(ring->sched.thread);
+                drm_sched_submit_stop(&ring->sched);
         }

         seq_puts(m, "run ib test:\n");
@@ -1675,9 +1675,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
         for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                 struct amdgpu_ring *ring = adev->rings[i];

-                if (!ring || !ring->sched.thread)
+                if (!ring || !drm_sched_submit_ready(&ring->sched))
                         continue;
-                kthread_unpark(ring->sched.thread);
+                drm_sched_submit_start(&ring->sched);
         }

         up_write(&adev->reset_domain->sem);
@@ -1897,7 +1897,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)

         ring = adev->rings[val];

-        if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
+        if (!ring || !ring->funcs->preempt_ib ||
+            !drm_sched_submit_ready(&ring->sched))
                 return -EINVAL;

         /* the last preemption failed */
@@ -1915,7 +1916,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
                 goto pro_end;

         /* stop the scheduler */
-        kthread_park(ring->sched.thread);
+        drm_sched_submit_stop(&ring->sched);

         /* preempt the IB */
         r = amdgpu_ring_preempt_ib(ring);
@@ -1949,7 +1950,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)

 failure:
         /* restart the scheduler */
-        kthread_unpark(ring->sched.thread);
+        drm_sched_submit_start(&ring->sched);

         up_read(&adev->reset_domain->sem);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 30c4f5cca02c..e366f61c3aed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4588,7 +4588,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                 struct amdgpu_ring *ring = adev->rings[i];

-                if (!ring || !ring->sched.thread)
+                if (!ring || !drm_sched_submit_ready(&ring->sched))
                         continue;

                 spin_lock(&ring->sched.job_list_lock);
@@ -4727,7 +4727,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                 struct amdgpu_ring *ring = adev->rings[i];

-                if (!ring || !ring->sched.thread)
+                if (!ring || !drm_sched_submit_ready(&ring->sched))
                         continue;

                 /* Clear job fence from fence drv to avoid force_completion
@@ -5266,7 +5266,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                 struct amdgpu_ring *ring = tmp_adev->rings[i];

-                if (!ring || !ring->sched.thread)
+                if (!ring || !drm_sched_submit_ready(&ring->sched))
                         continue;

                 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@@ -5341,7 +5341,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                 struct amdgpu_ring *ring = tmp_adev->rings[i];

-                if (!ring || !ring->sched.thread)
+                if (!ring || !drm_sched_submit_ready(&ring->sched))
                         continue;

                 drm_sched_start(&ring->sched, true);
@@ -5667,7 +5667,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                 struct amdgpu_ring *ring = adev->rings[i];

-                if (!ring || !ring->sched.thread)
+                if (!ring || !drm_sched_submit_ready(&ring->sched))
                         continue;

                 drm_sched_stop(&ring->sched, NULL);
@@ -5795,7 +5795,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                 struct amdgpu_ring *ring = adev->rings[i];

-                if (!ring || !ring->sched.thread)
+                if (!ring || !drm_sched_submit_ready(&ring->sched))
                         continue;

                 drm_sched_start(&ring->sched, true);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index fa527935ffd4..e046dc5ff72a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -809,7 +809,8 @@ static void suspend_scheduler(struct msm_gpu *gpu)
          */
         for (i = 0; i < gpu->nr_rings; i++) {
                 struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
-                kthread_park(sched->thread);
+
+                drm_sched_submit_stop(sched);
         }
 }

@@ -819,7 +820,8 @@ static void resume_scheduler(struct msm_gpu *gpu)

         for (i = 0; i < gpu->nr_rings; i++) {
                 struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
-                kthread_unpark(sched->thread);
+
+                drm_sched_submit_start(sched);
         }
 }

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 506371c42745..e4fa62abca41 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -439,7 +439,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 {
         struct drm_sched_job *s_job, *tmp;

-        kthread_park(sched->thread);
+        drm_sched_submit_stop(sched);

         /*
          * Reinsert back the bad job here - now it's safe as
@@ -552,7 +552,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
                 spin_unlock(&sched->job_list_lock);
         }

-        kthread_unpark(sched->thread);
+        drm_sched_submit_start(sched);
 }
 EXPORT_SYMBOL(drm_sched_start);

@@ -1206,3 +1206,39 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
         }
 }
 EXPORT_SYMBOL(drm_sched_increase_karma);
+
+/**
+ * drm_sched_submit_ready - scheduler ready for submission
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if submission is ready
+ */
+bool drm_sched_submit_ready(struct drm_gpu_scheduler *sched)
+{
+        return !!sched->thread;
+
+}
+EXPORT_SYMBOL(drm_sched_submit_ready);
+
+/**
+ * drm_sched_submit_stop - stop scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_submit_stop(struct drm_gpu_scheduler *sched)
+{
+        kthread_park(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_submit_stop);
+
+/**
+ * drm_sched_submit_start - start scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_submit_start(struct drm_gpu_scheduler *sched)
+{
+        kthread_unpark(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_submit_start);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index f9544d9b670d..f12c5aea5294 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -550,6 +550,9 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,

 void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
+bool drm_sched_submit_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_submit_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_submit_start(struct drm_gpu_scheduler *sched);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
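For illustration, a minimal sketch of the driver-side pattern these helpers enable, modeled on the amdgpu and msm hunks above. The struct my_ring and the two function names are hypothetical; only the drm_sched_submit_*() calls and <drm/gpu_scheduler.h> come from this patch:

#include <drm/gpu_scheduler.h>

/* Hypothetical driver ring; only the embedded scheduler matters here. */
struct my_ring {
        struct drm_gpu_scheduler sched;
};

/*
 * Pause job submission on every initialized ring, e.g. before a reset,
 * without the driver touching sched->thread directly.
 */
static void my_driver_pause_submission(struct my_ring *rings, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (!drm_sched_submit_ready(&rings[i].sched))
                        continue;
                drm_sched_submit_stop(&rings[i].sched);
        }
}

/* Resume job submission once the reset has completed. */
static void my_driver_resume_submission(struct my_ring *rings, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (!drm_sched_submit_ready(&rings[i].sched))
                        continue;
                drm_sched_submit_start(&rings[i].sched);
        }
}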