
[RFC,v2,4/8] drm/amdgpu: Serialize non TDR gpu recovery with TDRs

Message ID 20211222220506.789133-5-andrey.grodzovsky@amd.com (mailing list archive)
State New, archived
Series Define and use reset domain for GPU recovery in amdgpu

Commit Message

Andrey Grodzovsky Dec. 22, 2021, 10:05 p.m. UTC
Use reset domain wq also for non-TDR gpu recovery triggers
such as sysfs and RAS. We must serialize all possible
GPU recoveries to guarantee no concurrency there.
For TDR, call the original recovery function directly since
it's already executed from within the wq. For others, just
use a wrapper to queue work and wait on it to finish.

v2: Rename to amdgpu_recover_work_struct

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 +++++++++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  2 +-
 3 files changed, 35 insertions(+), 2 deletions(-)
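
The single threaded wq referenced here (adev->reset_domain.wq) is introduced by an earlier patch in this series. As a rough sketch of the mechanism (the struct layout and init function below are assumed for illustration, not taken from that patch), a strictly ordered workqueue created with alloc_ordered_workqueue() executes at most one work item at a time, in queueing order, which is what serializes every recovery funneled through it:

#include <linux/workqueue.h>

/* Illustrative sketch only; the real reset domain is defined elsewhere
 * in this series and the names below are placeholders. */
struct amdgpu_reset_domain {
	struct workqueue_struct *wq;
};

static int amdgpu_reset_domain_init_sketch(struct amdgpu_reset_domain *domain)
{
	/* An ordered workqueue runs one item at a time, in submission
	 * order, so all recoveries queued on it execute back to back. */
	domain->wq = alloc_ordered_workqueue("amdgpu-reset-dom", 0);

	return domain->wq ? 0 : -ENOMEM;
}

Because the wrapper below queues an on-stack work item and then calls flush_work(), a non-TDR caller blocks until its recovery has actually run on that queue, so the stack-allocated work_struct remains valid for the whole execution.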

Comments

Christian König Dec. 23, 2021, 8:41 a.m. UTC | #1
Am 22.12.21 um 23:05 schrieb Andrey Grodzovsky:
> Use reset domain wq also for non TDR gpu recovery trigers
> such as sysfs and RAS. We must serialize all possible
> GPU recoveries to gurantee no concurrency there.
> For TDR call the original recovery function directly since
> it's already executed from within the wq. For others just
> use a wrapper to qeueue work and wait on it to finish.
>
> v2: Rename to amdgpu_recover_work_struct
>
> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com>

> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 ++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 +++++++++++++++++++++-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  2 +-
>   3 files changed, 35 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index b5ff76aae7e0..8e96b9a14452 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1296,6 +1296,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
>   bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
>   int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>   			      struct amdgpu_job* job);
> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
> +			      struct amdgpu_job *job);
>   void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
>   int amdgpu_device_pci_reset(struct amdgpu_device *adev);
>   bool amdgpu_device_need_post(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 7c063fd37389..258ec3c0b2af 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -4979,7 +4979,7 @@ static void amdgpu_device_recheck_guilty_jobs(
>    * Returns 0 for success or an error on failure.
>    */
>   
> -int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>   			      struct amdgpu_job *job)
>   {
>   	struct list_head device_list, *device_list_handle =  NULL;
> @@ -5237,6 +5237,37 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>   	return r;
>   }
>   
> +struct amdgpu_recover_work_struct {
> +	struct work_struct base;
> +	struct amdgpu_device *adev;
> +	struct amdgpu_job *job;
> +	int ret;
> +};
> +
> +static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work)
> +{
> +	struct amdgpu_recover_work_struct *recover_work = container_of(work, struct amdgpu_recover_work_struct, base);
> +
> +	recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
> +}
> +/*
> + * Serialize gpu recover into reset domain single threaded wq
> + */
> +int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
> +				    struct amdgpu_job *job)
> +{
> +	struct amdgpu_recover_work_struct work = {.adev = adev, .job = job};
> +
> +	INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
> +
> +	if (!queue_work(adev->reset_domain.wq, &work.base))
> +		return -EAGAIN;
> +
> +	flush_work(&work.base);
> +
> +	return work.ret;
> +}
> +
>   /**
>    * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
>    *
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index bfc47bea23db..38c9fd7b7ad4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -63,7 +63,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
>   		  ti.process_name, ti.tgid, ti.task_name, ti.pid);
>   
>   	if (amdgpu_device_should_recover_gpu(ring->adev)) {
> -		amdgpu_device_gpu_recover(ring->adev, job);
> +		amdgpu_device_gpu_recover_imp(ring->adev, job);
>   	} else {
>   		drm_sched_suspend_timeout(&ring->sched);
>   		if (amdgpu_sriov_vf(adev))
Lazar, Lijo Jan. 5, 2022, 9:54 a.m. UTC | #2
On 12/23/2021 3:35 AM, Andrey Grodzovsky wrote:
> Use reset domain wq also for non TDR gpu recovery trigers
> such as sysfs and RAS. We must serialize all possible
> GPU recoveries to gurantee no concurrency there.
> For TDR call the original recovery function directly since
> it's already executed from within the wq. For others just
> use a wrapper to qeueue work and wait on it to finish.
> 
> v2: Rename to amdgpu_recover_work_struct
> 
> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 ++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 +++++++++++++++++++++-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  2 +-
>   3 files changed, 35 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index b5ff76aae7e0..8e96b9a14452 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1296,6 +1296,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
>   bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
>   int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>   			      struct amdgpu_job* job);
> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
> +			      struct amdgpu_job *job);
>   void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
>   int amdgpu_device_pci_reset(struct amdgpu_device *adev);
>   bool amdgpu_device_need_post(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 7c063fd37389..258ec3c0b2af 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -4979,7 +4979,7 @@ static void amdgpu_device_recheck_guilty_jobs(
>    * Returns 0 for success or an error on failure.
>    */
>   
> -int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>   			      struct amdgpu_job *job)
>   {
>   	struct list_head device_list, *device_list_handle =  NULL;
> @@ -5237,6 +5237,37 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>   	return r;
>   }
>   
> +struct amdgpu_recover_work_struct {
> +	struct work_struct base;
> +	struct amdgpu_device *adev;
> +	struct amdgpu_job *job;
> +	int ret;
> +};
> +
> +static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work)
> +{
> +	struct amdgpu_recover_work_struct *recover_work = container_of(work, struct amdgpu_recover_work_struct, base);
> +
> +	recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
> +}
> +/*
> + * Serialize gpu recover into reset domain single threaded wq
> + */
> +int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
> +				    struct amdgpu_job *job)
> +{
> +	struct amdgpu_recover_work_struct work = {.adev = adev, .job = job};
> +
> +	INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
> +
> +	if (!queue_work(adev->reset_domain.wq, &work.base))
> +		return -EAGAIN;
> +

The decision to schedule a reset is made at this point. Subsequent 
accesses to hardware may not be reliable. So should the flag in_reset be 
set here itself rather than waiting for the work to start execution?

Also, what about having the reset_active or in_reset flag in the 
reset_domain itself?

Thanks,
Lijo

> +	flush_work(&work.base);
> +
> +	return work.ret;
> +}
> +
>   /**
>    * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
>    *
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index bfc47bea23db..38c9fd7b7ad4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -63,7 +63,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
>   		  ti.process_name, ti.tgid, ti.task_name, ti.pid);
>   
>   	if (amdgpu_device_should_recover_gpu(ring->adev)) {
> -		amdgpu_device_gpu_recover(ring->adev, job);
> +		amdgpu_device_gpu_recover_imp(ring->adev, job);
>   	} else {
>   		drm_sched_suspend_timeout(&ring->sched);
>   		if (amdgpu_sriov_vf(adev))
>
Christian König Jan. 5, 2022, 12:31 p.m. UTC | #3
Am 05.01.22 um 10:54 schrieb Lazar, Lijo:
> On 12/23/2021 3:35 AM, Andrey Grodzovsky wrote:
>> Use reset domain wq also for non TDR gpu recovery trigers
>> such as sysfs and RAS. We must serialize all possible
>> GPU recoveries to gurantee no concurrency there.
>> For TDR call the original recovery function directly since
>> it's already executed from within the wq. For others just
>> use a wrapper to qeueue work and wait on it to finish.
>>
>> v2: Rename to amdgpu_recover_work_struct
>>
>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 ++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 +++++++++++++++++++++-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  2 +-
>>   3 files changed, 35 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index b5ff76aae7e0..8e96b9a14452 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -1296,6 +1296,8 @@ bool amdgpu_device_has_job_running(struct 
>> amdgpu_device *adev);
>>   bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
>>   int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>                     struct amdgpu_job* job);
>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>> +                  struct amdgpu_job *job);
>>   void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
>>   int amdgpu_device_pci_reset(struct amdgpu_device *adev);
>>   bool amdgpu_device_need_post(struct amdgpu_device *adev);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 7c063fd37389..258ec3c0b2af 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -4979,7 +4979,7 @@ static void amdgpu_device_recheck_guilty_jobs(
>>    * Returns 0 for success or an error on failure.
>>    */
>>   -int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>                     struct amdgpu_job *job)
>>   {
>>       struct list_head device_list, *device_list_handle =  NULL;
>> @@ -5237,6 +5237,37 @@ int amdgpu_device_gpu_recover(struct 
>> amdgpu_device *adev,
>>       return r;
>>   }
>>   +struct amdgpu_recover_work_struct {
>> +    struct work_struct base;
>> +    struct amdgpu_device *adev;
>> +    struct amdgpu_job *job;
>> +    int ret;
>> +};
>> +
>> +static void amdgpu_device_queue_gpu_recover_work(struct work_struct 
>> *work)
>> +{
>> +    struct amdgpu_recover_work_struct *recover_work = 
>> container_of(work, struct amdgpu_recover_work_struct, base);
>> +
>> +    recover_work->ret = 
>> amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
>> +}
>> +/*
>> + * Serialize gpu recover into reset domain single threaded wq
>> + */
>> +int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>> +                    struct amdgpu_job *job)
>> +{
>> +    struct amdgpu_recover_work_struct work = {.adev = adev, .job = 
>> job};
>> +
>> +    INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
>> +
>> +    if (!queue_work(adev->reset_domain.wq, &work.base))
>> +        return -EAGAIN;
>> +
>
> The decision to schedule a reset is made at this point. Subsequent 
> accesses to hardware may not be reliable. So should the flag in_reset 
> be set here itself rather than waiting for the work to start execution?

No, when we race and lose the VM is completely lost and probably 
restarted by the hypervisor.

And when we race and win we properly set the flag before signaling the 
hypervisor that it can continue with the reset.

> Also, what about having the reset_active or in_reset flag in the 
> reset_domain itself?

Of hand that sounds like a good idea.

Regards,
Christian.

>
> Thanks,
> Lijo
>
>> +    flush_work(&work.base);
>> +
>> +    return work.ret;
>> +}
>> +
>>   /**
>>    * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
>>    *
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> index bfc47bea23db..38c9fd7b7ad4 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> @@ -63,7 +63,7 @@ static enum drm_gpu_sched_stat 
>> amdgpu_job_timedout(struct drm_sched_job *s_job)
>>             ti.process_name, ti.tgid, ti.task_name, ti.pid);
>>         if (amdgpu_device_should_recover_gpu(ring->adev)) {
>> -        amdgpu_device_gpu_recover(ring->adev, job);
>> +        amdgpu_device_gpu_recover_imp(ring->adev, job);
>>       } else {
>>           drm_sched_suspend_timeout(&ring->sched);
>>           if (amdgpu_sriov_vf(adev))
>>
Lazar, Lijo Jan. 5, 2022, 1:11 p.m. UTC | #4
On 1/5/2022 6:01 PM, Christian König wrote:
> Am 05.01.22 um 10:54 schrieb Lazar, Lijo:
>> On 12/23/2021 3:35 AM, Andrey Grodzovsky wrote:
>>> Use reset domain wq also for non TDR gpu recovery trigers
>>> such as sysfs and RAS. We must serialize all possible
>>> GPU recoveries to gurantee no concurrency there.
>>> For TDR call the original recovery function directly since
>>> it's already executed from within the wq. For others just
>>> use a wrapper to qeueue work and wait on it to finish.
>>>
>>> v2: Rename to amdgpu_recover_work_struct
>>>
>>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>>> ---
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 ++
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 +++++++++++++++++++++-
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  2 +-
>>>   3 files changed, 35 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> index b5ff76aae7e0..8e96b9a14452 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> @@ -1296,6 +1296,8 @@ bool amdgpu_device_has_job_running(struct 
>>> amdgpu_device *adev);
>>>   bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
>>>   int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>                     struct amdgpu_job* job);
>>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>> +                  struct amdgpu_job *job);
>>>   void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
>>>   int amdgpu_device_pci_reset(struct amdgpu_device *adev);
>>>   bool amdgpu_device_need_post(struct amdgpu_device *adev);
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> index 7c063fd37389..258ec3c0b2af 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> @@ -4979,7 +4979,7 @@ static void amdgpu_device_recheck_guilty_jobs(
>>>    * Returns 0 for success or an error on failure.
>>>    */
>>>   -int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>>                     struct amdgpu_job *job)
>>>   {
>>>       struct list_head device_list, *device_list_handle =  NULL;
>>> @@ -5237,6 +5237,37 @@ int amdgpu_device_gpu_recover(struct 
>>> amdgpu_device *adev,
>>>       return r;
>>>   }
>>>   +struct amdgpu_recover_work_struct {
>>> +    struct work_struct base;
>>> +    struct amdgpu_device *adev;
>>> +    struct amdgpu_job *job;
>>> +    int ret;
>>> +};
>>> +
>>> +static void amdgpu_device_queue_gpu_recover_work(struct work_struct 
>>> *work)
>>> +{
>>> +    struct amdgpu_recover_work_struct *recover_work = 
>>> container_of(work, struct amdgpu_recover_work_struct, base);
>>> +
>>> +    recover_work->ret = 
>>> amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
>>> +}
>>> +/*
>>> + * Serialize gpu recover into reset domain single threaded wq
>>> + */
>>> +int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>> +                    struct amdgpu_job *job)
>>> +{
>>> +    struct amdgpu_recover_work_struct work = {.adev = adev, .job = 
>>> job};
>>> +
>>> +    INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
>>> +
>>> +    if (!queue_work(adev->reset_domain.wq, &work.base))
>>> +        return -EAGAIN;
>>> +
>>
>> The decision to schedule a reset is made at this point. Subsequent 
>> accesses to hardware may not be reliable. So should the flag in_reset 
>> be set here itself rather than waiting for the work to start execution?
> 
> No, when we race and lose the VM is completely lost and probably 
> restarted by the hypervisor.
> 
> And when we race and win we properly set the flag before signaling the 
> hypervisor that it can continue with the reset.
> 

I was talking about the baremetal case. When this was synchronous, the 
in_reset flag was set as one of the first things and amdgpu_in_reset was 
checked to prevent further hardware accesses. This design only changes the 
recover part and doesn't change the hardware perspective. Potential accesses 
from other processes need to be blocked as soon as we determine a reset 
is required. Are we expecting the work to be executed immediately and to 
set the flags?

Thanks,
Lijo

>> Also, what about having the reset_active or in_reset flag in the 
>> reset_domain itself?
> 
> Of hand that sounds like a good idea.
> 
> Regards,
> Christian.
> 
>>
>> Thanks,
>> Lijo
>>
>>> +    flush_work(&work.base);
>>> +
>>> +    return work.ret;
>>> +}
>>> +
>>>   /**
>>>    * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
>>>    *
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> index bfc47bea23db..38c9fd7b7ad4 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> @@ -63,7 +63,7 @@ static enum drm_gpu_sched_stat 
>>> amdgpu_job_timedout(struct drm_sched_job *s_job)
>>>             ti.process_name, ti.tgid, ti.task_name, ti.pid);
>>>         if (amdgpu_device_should_recover_gpu(ring->adev)) {
>>> -        amdgpu_device_gpu_recover(ring->adev, job);
>>> +        amdgpu_device_gpu_recover_imp(ring->adev, job);
>>>       } else {
>>>           drm_sched_suspend_timeout(&ring->sched);
>>>           if (amdgpu_sriov_vf(adev))
>>>
>
Christian König Jan. 5, 2022, 1:15 p.m. UTC | #5
Am 05.01.22 um 14:11 schrieb Lazar, Lijo:
> On 1/5/2022 6:01 PM, Christian König wrote:
>> Am 05.01.22 um 10:54 schrieb Lazar, Lijo:
>>> On 12/23/2021 3:35 AM, Andrey Grodzovsky wrote:
>>>> Use reset domain wq also for non TDR gpu recovery trigers
>>>> such as sysfs and RAS. We must serialize all possible
>>>> GPU recoveries to gurantee no concurrency there.
>>>> For TDR call the original recovery function directly since
>>>> it's already executed from within the wq. For others just
>>>> use a wrapper to qeueue work and wait on it to finish.
>>>>
>>>> v2: Rename to amdgpu_recover_work_struct
>>>>
>>>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>>>> ---
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 ++
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 
>>>> +++++++++++++++++++++-
>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  2 +-
>>>>   3 files changed, 35 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>> index b5ff76aae7e0..8e96b9a14452 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>> @@ -1296,6 +1296,8 @@ bool amdgpu_device_has_job_running(struct 
>>>> amdgpu_device *adev);
>>>>   bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
>>>>   int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>>                     struct amdgpu_job* job);
>>>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>>> +                  struct amdgpu_job *job);
>>>>   void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
>>>>   int amdgpu_device_pci_reset(struct amdgpu_device *adev);
>>>>   bool amdgpu_device_need_post(struct amdgpu_device *adev);
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>> index 7c063fd37389..258ec3c0b2af 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>> @@ -4979,7 +4979,7 @@ static void amdgpu_device_recheck_guilty_jobs(
>>>>    * Returns 0 for success or an error on failure.
>>>>    */
>>>>   -int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>>>                     struct amdgpu_job *job)
>>>>   {
>>>>       struct list_head device_list, *device_list_handle = NULL;
>>>> @@ -5237,6 +5237,37 @@ int amdgpu_device_gpu_recover(struct 
>>>> amdgpu_device *adev,
>>>>       return r;
>>>>   }
>>>>   +struct amdgpu_recover_work_struct {
>>>> +    struct work_struct base;
>>>> +    struct amdgpu_device *adev;
>>>> +    struct amdgpu_job *job;
>>>> +    int ret;
>>>> +};
>>>> +
>>>> +static void amdgpu_device_queue_gpu_recover_work(struct 
>>>> work_struct *work)
>>>> +{
>>>> +    struct amdgpu_recover_work_struct *recover_work = 
>>>> container_of(work, struct amdgpu_recover_work_struct, base);
>>>> +
>>>> +    recover_work->ret = 
>>>> amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
>>>> +}
>>>> +/*
>>>> + * Serialize gpu recover into reset domain single threaded wq
>>>> + */
>>>> +int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>> +                    struct amdgpu_job *job)
>>>> +{
>>>> +    struct amdgpu_recover_work_struct work = {.adev = adev, .job = 
>>>> job};
>>>> +
>>>> +    INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
>>>> +
>>>> +    if (!queue_work(adev->reset_domain.wq, &work.base))
>>>> +        return -EAGAIN;
>>>> +
>>>
>>> The decision to schedule a reset is made at this point. Subsequent 
>>> accesses to hardware may not be reliable. So should the flag 
>>> in_reset be set here itself rather than waiting for the work to 
>>> start execution?
>>
>> No, when we race and lose the VM is completely lost and probably 
>> restarted by the hypervisor.
>>
>> And when we race and win we properly set the flag before signaling 
>> the hypervisor that it can continue with the reset.
>>
>
> I was talking about baremetal case. When this was synchronous, 
> in_reset flag is set as one of the first things and amdgpu_in_reset is 
> checked to prevent further hardware accesses. This design only changes 
> the recover part and doesn't change the hardware perspective. 

> Potential accesses from other processes need to be blocked as soon as 
> we determine a reset is required.

That's an incorrect assumption.

Accessing the hardware is perfectly ok as long as the reset hasn't 
started yet. In other words even when the hardware is locked up you can 
still happily read/write registers or access the VRAM BAR.

Only when the hardware is currently performing a reset, then we can't 
touch it or there might be unfortunate consequences (usually complete 
system lockup).

Regards,
Christian.

> Are we expecting the work to be immediately executed and set the flags?
>
> Thanks,
> Lijo
>
>>> Also, what about having the reset_active or in_reset flag in the 
>>> reset_domain itself?
>>
>> Of hand that sounds like a good idea.
>>
>> Regards,
>> Christian.
>>
>>>
>>> Thanks,
>>> Lijo
>>>
>>>> +    flush_work(&work.base);
>>>> +
>>>> +    return work.ret;
>>>> +}
>>>> +
>>>>   /**
>>>>    * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
>>>>    *
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>> index bfc47bea23db..38c9fd7b7ad4 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>> @@ -63,7 +63,7 @@ static enum drm_gpu_sched_stat 
>>>> amdgpu_job_timedout(struct drm_sched_job *s_job)
>>>>             ti.process_name, ti.tgid, ti.task_name, ti.pid);
>>>>         if (amdgpu_device_should_recover_gpu(ring->adev)) {
>>>> -        amdgpu_device_gpu_recover(ring->adev, job);
>>>> +        amdgpu_device_gpu_recover_imp(ring->adev, job);
>>>>       } else {
>>>>           drm_sched_suspend_timeout(&ring->sched);
>>>>           if (amdgpu_sriov_vf(adev))
>>>>
>>
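
For reference, the bare-metal guard Lijo describes is the existing pattern where hardware access paths check the reset state before touching MMIO. A minimal sketch of that pattern follows; amdgpu_in_reset() and adev->reset_sem are existing driver facilities mentioned in this thread, but the helper itself is illustrative, not driver code:

/* Illustrative sketch, not driver code: bail out of a hardware access once
 * a reset is active, otherwise hold reset_sem so a reset cannot start
 * in the middle of the access. */
static int example_hw_access(struct amdgpu_device *adev)
{
	if (amdgpu_in_reset(adev))
		return -EBUSY;		/* reset already in flight */

	if (!down_read_trylock(&adev->reset_sem))
		return -EBUSY;		/* reset has just begun */

	/* ... MMIO register or VRAM BAR access is safe here ... */

	up_read(&adev->reset_sem);
	return 0;
}

Christian's point above is that this window only has to be closed once the reset actually starts executing, not already at the moment the recovery work is queued.
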
Lazar, Lijo Jan. 5, 2022, 1:26 p.m. UTC | #6
On 1/5/2022 6:45 PM, Christian König wrote:
> Am 05.01.22 um 14:11 schrieb Lazar, Lijo:
>> On 1/5/2022 6:01 PM, Christian König wrote:
>>> Am 05.01.22 um 10:54 schrieb Lazar, Lijo:
>>>> On 12/23/2021 3:35 AM, Andrey Grodzovsky wrote:
>>>>> Use reset domain wq also for non TDR gpu recovery trigers
>>>>> such as sysfs and RAS. We must serialize all possible
>>>>> GPU recoveries to gurantee no concurrency there.
>>>>> For TDR call the original recovery function directly since
>>>>> it's already executed from within the wq. For others just
>>>>> use a wrapper to qeueue work and wait on it to finish.
>>>>>
>>>>> v2: Rename to amdgpu_recover_work_struct
>>>>>
>>>>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>>>>> ---
>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 ++
>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 
>>>>> +++++++++++++++++++++-
>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  2 +-
>>>>>   3 files changed, 35 insertions(+), 2 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>> index b5ff76aae7e0..8e96b9a14452 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>> @@ -1296,6 +1296,8 @@ bool amdgpu_device_has_job_running(struct 
>>>>> amdgpu_device *adev);
>>>>>   bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
>>>>>   int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>>>                     struct amdgpu_job* job);
>>>>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>>>> +                  struct amdgpu_job *job);
>>>>>   void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
>>>>>   int amdgpu_device_pci_reset(struct amdgpu_device *adev);
>>>>>   bool amdgpu_device_need_post(struct amdgpu_device *adev);
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>>> index 7c063fd37389..258ec3c0b2af 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>>> @@ -4979,7 +4979,7 @@ static void amdgpu_device_recheck_guilty_jobs(
>>>>>    * Returns 0 for success or an error on failure.
>>>>>    */
>>>>>   -int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>>>>                     struct amdgpu_job *job)
>>>>>   {
>>>>>       struct list_head device_list, *device_list_handle = NULL;
>>>>> @@ -5237,6 +5237,37 @@ int amdgpu_device_gpu_recover(struct 
>>>>> amdgpu_device *adev,
>>>>>       return r;
>>>>>   }
>>>>>   +struct amdgpu_recover_work_struct {
>>>>> +    struct work_struct base;
>>>>> +    struct amdgpu_device *adev;
>>>>> +    struct amdgpu_job *job;
>>>>> +    int ret;
>>>>> +};
>>>>> +
>>>>> +static void amdgpu_device_queue_gpu_recover_work(struct 
>>>>> work_struct *work)
>>>>> +{
>>>>> +    struct amdgpu_recover_work_struct *recover_work = 
>>>>> container_of(work, struct amdgpu_recover_work_struct, base);
>>>>> +
>>>>> +    recover_work->ret = 
>>>>> amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
>>>>> +}
>>>>> +/*
>>>>> + * Serialize gpu recover into reset domain single threaded wq
>>>>> + */
>>>>> +int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>>> +                    struct amdgpu_job *job)
>>>>> +{
>>>>> +    struct amdgpu_recover_work_struct work = {.adev = adev, .job = 
>>>>> job};
>>>>> +
>>>>> +    INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
>>>>> +
>>>>> +    if (!queue_work(adev->reset_domain.wq, &work.base))
>>>>> +        return -EAGAIN;
>>>>> +
>>>>
>>>> The decision to schedule a reset is made at this point. Subsequent 
>>>> accesses to hardware may not be reliable. So should the flag 
>>>> in_reset be set here itself rather than waiting for the work to 
>>>> start execution?
>>>
>>> No, when we race and lose the VM is completely lost and probably 
>>> restarted by the hypervisor.
>>>
>>> And when we race and win we properly set the flag before signaling 
>>> the hypervisor that it can continue with the reset.
>>>
>>
>> I was talking about baremetal case. When this was synchronous, 
>> in_reset flag is set as one of the first things and amdgpu_in_reset is 
>> checked to prevent further hardware accesses. This design only changes 
>> the recover part and doesn't change the hardware perspective. 
> 
>> Potential accesses from other processes need to be blocked as soon as 
>> we determine a reset is required.
> 
> That's an incorrect assumption.
> 
> Accessing the hardware is perfectly ok as long as the reset hasn't 
> started yet. In other words even when the hardware is locked up you can 
> still happily read/write registers or access the VRAM BAR.
> 

Not sure if that is 100% correct - e.g. for a recovery triggered by a RAS 
error (it depends on the access done).

Thanks,
Lijo

> Only when the hardware is currently performing a reset, then we can't 
> touch it or there might be unfortunate consequences (usually complete 
> system lockup).
> 
> Regards,
> Christian.
> 
>> Are we expecting the work to be immediately executed and set the flags?
>>
>> Thanks,
>> Lijo
>>
>>>> Also, what about having the reset_active or in_reset flag in the 
>>>> reset_domain itself?
>>>
>>> Of hand that sounds like a good idea.
>>>
>>> Regards,
>>> Christian.
>>>
>>>>
>>>> Thanks,
>>>> Lijo
>>>>
>>>>> +    flush_work(&work.base);
>>>>> +
>>>>> +    return work.ret;
>>>>> +}
>>>>> +
>>>>>   /**
>>>>>    * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
>>>>>    *
>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>>> index bfc47bea23db..38c9fd7b7ad4 100644
>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>>> @@ -63,7 +63,7 @@ static enum drm_gpu_sched_stat 
>>>>> amdgpu_job_timedout(struct drm_sched_job *s_job)
>>>>>             ti.process_name, ti.tgid, ti.task_name, ti.pid);
>>>>>         if (amdgpu_device_should_recover_gpu(ring->adev)) {
>>>>> -        amdgpu_device_gpu_recover(ring->adev, job);
>>>>> +        amdgpu_device_gpu_recover_imp(ring->adev, job);
>>>>>       } else {
>>>>>           drm_sched_suspend_timeout(&ring->sched);
>>>>>           if (amdgpu_sriov_vf(adev))
>>>>>
>>>
>
Christian König Jan. 5, 2022, 1:41 p.m. UTC | #7
Am 05.01.22 um 14:26 schrieb Lazar, Lijo:
> On 1/5/2022 6:45 PM, Christian König wrote:
>> Am 05.01.22 um 14:11 schrieb Lazar, Lijo:
>>> On 1/5/2022 6:01 PM, Christian König wrote:
>>>> Am 05.01.22 um 10:54 schrieb Lazar, Lijo:
>>>>> On 12/23/2021 3:35 AM, Andrey Grodzovsky wrote:
>>>>>> Use reset domain wq also for non TDR gpu recovery trigers
>>>>>> such as sysfs and RAS. We must serialize all possible
>>>>>> GPU recoveries to gurantee no concurrency there.
>>>>>> For TDR call the original recovery function directly since
>>>>>> it's already executed from within the wq. For others just
>>>>>> use a wrapper to qeueue work and wait on it to finish.
>>>>>>
>>>>>> v2: Rename to amdgpu_recover_work_struct
>>>>>>
>>>>>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>>>>>> ---
>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 ++
>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 
>>>>>> +++++++++++++++++++++-
>>>>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  2 +-
>>>>>>   3 files changed, 35 insertions(+), 2 deletions(-)
>>>>>>
>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>> index b5ff76aae7e0..8e96b9a14452 100644
>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>>>>> @@ -1296,6 +1296,8 @@ bool amdgpu_device_has_job_running(struct 
>>>>>> amdgpu_device *adev);
>>>>>>   bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
>>>>>>   int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>>>>                     struct amdgpu_job* job);
>>>>>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>>>>> +                  struct amdgpu_job *job);
>>>>>>   void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
>>>>>>   int amdgpu_device_pci_reset(struct amdgpu_device *adev);
>>>>>>   bool amdgpu_device_need_post(struct amdgpu_device *adev);
>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>>>> index 7c063fd37389..258ec3c0b2af 100644
>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>>>>> @@ -4979,7 +4979,7 @@ static void amdgpu_device_recheck_guilty_jobs(
>>>>>>    * Returns 0 for success or an error on failure.
>>>>>>    */
>>>>>>   -int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>>>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>>>>>                     struct amdgpu_job *job)
>>>>>>   {
>>>>>>       struct list_head device_list, *device_list_handle = NULL;
>>>>>> @@ -5237,6 +5237,37 @@ int amdgpu_device_gpu_recover(struct 
>>>>>> amdgpu_device *adev,
>>>>>>       return r;
>>>>>>   }
>>>>>>   +struct amdgpu_recover_work_struct {
>>>>>> +    struct work_struct base;
>>>>>> +    struct amdgpu_device *adev;
>>>>>> +    struct amdgpu_job *job;
>>>>>> +    int ret;
>>>>>> +};
>>>>>> +
>>>>>> +static void amdgpu_device_queue_gpu_recover_work(struct 
>>>>>> work_struct *work)
>>>>>> +{
>>>>>> +    struct amdgpu_recover_work_struct *recover_work = 
>>>>>> container_of(work, struct amdgpu_recover_work_struct, base);
>>>>>> +
>>>>>> +    recover_work->ret = 
>>>>>> amdgpu_device_gpu_recover_imp(recover_work->adev, 
>>>>>> recover_work->job);
>>>>>> +}
>>>>>> +/*
>>>>>> + * Serialize gpu recover into reset domain single threaded wq
>>>>>> + */
>>>>>> +int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>>>> +                    struct amdgpu_job *job)
>>>>>> +{
>>>>>> +    struct amdgpu_recover_work_struct work = {.adev = adev, .job 
>>>>>> = job};
>>>>>> +
>>>>>> +    INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
>>>>>> +
>>>>>> +    if (!queue_work(adev->reset_domain.wq, &work.base))
>>>>>> +        return -EAGAIN;
>>>>>> +
>>>>>
>>>>> The decision to schedule a reset is made at this point. Subsequent 
>>>>> accesses to hardware may not be reliable. So should the flag 
>>>>> in_reset be set here itself rather than waiting for the work to 
>>>>> start execution?
>>>>
>>>> No, when we race and lose the VM is completely lost and probably 
>>>> restarted by the hypervisor.
>>>>
>>>> And when we race and win we properly set the flag before signaling 
>>>> the hypervisor that it can continue with the reset.
>>>>
>>>
>>> I was talking about baremetal case. When this was synchronous, 
>>> in_reset flag is set as one of the first things and amdgpu_in_reset 
>>> is checked to prevent further hardware accesses. This design only 
>>> changes the recover part and doesn't change the hardware perspective. 
>>
>>> Potential accesses from other processes need to be blocked as soon 
>>> as we determine a reset is required.
>>
>> That's an incorrect assumption.
>>
>> Accessing the hardware is perfectly ok as long as the reset hasn't 
>> started yet. In other words even when the hardware is locked up you 
>> can still happily read/write registers or access the VRAM BAR.
>>
>
> Not sure if that is 100% correct like a recovery triggered by RAS 
> error (depends on the access done).

Yeah, for RAS there should just be one error triggered as far as I know. 
Otherwise we have a problem because there can be any number of hardware 
accesses between RAS interrupt and setting the in_reset flag anyway.

There are some cases where we shouldn't access the hardware any more. 
E.g. we had cases of static discharge with external mining cases for 
example.

But in those case the hardware is so severely gone that the user should 
either replace it completely or at least power cycle the system.

Regards,
Christian.

>
> Thanks,
> Lijo
>
>> Only when the hardware is currently performing a reset, then we can't 
>> touch it or there might be unfortunate consequences (usually complete 
>> system lockup).
>>
>> Regards,
>> Christian.
>>
>>> Are we expecting the work to be immediately executed and set the flags?
>>>
>>> Thanks,
>>> Lijo
>>>
>>>>> Also, what about having the reset_active or in_reset flag in the 
>>>>> reset_domain itself?
>>>>
>>>> Of hand that sounds like a good idea.
>>>>
>>>> Regards,
>>>> Christian.
>>>>
>>>>>
>>>>> Thanks,
>>>>> Lijo
>>>>>
>>>>>> +    flush_work(&work.base);
>>>>>> +
>>>>>> +    return work.ret;
>>>>>> +}
>>>>>> +
>>>>>>   /**
>>>>>>    * amdgpu_device_get_pcie_info - fence pcie info about the PCIE 
>>>>>> slot
>>>>>>    *
>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
>>>>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>>>> index bfc47bea23db..38c9fd7b7ad4 100644
>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>>>>> @@ -63,7 +63,7 @@ static enum drm_gpu_sched_stat 
>>>>>> amdgpu_job_timedout(struct drm_sched_job *s_job)
>>>>>>             ti.process_name, ti.tgid, ti.task_name, ti.pid);
>>>>>>         if (amdgpu_device_should_recover_gpu(ring->adev)) {
>>>>>> -        amdgpu_device_gpu_recover(ring->adev, job);
>>>>>> +        amdgpu_device_gpu_recover_imp(ring->adev, job);
>>>>>>       } else {
>>>>>> drm_sched_suspend_timeout(&ring->sched);
>>>>>>           if (amdgpu_sriov_vf(adev))
>>>>>>
>>>>
>>
Andrey Grodzovsky Jan. 5, 2022, 6:11 p.m. UTC | #8
On 2022-01-05 7:31 a.m., Christian König wrote:

> Am 05.01.22 um 10:54 schrieb Lazar, Lijo:
>> On 12/23/2021 3:35 AM, Andrey Grodzovsky wrote:
>>> Use reset domain wq also for non TDR gpu recovery trigers
>>> such as sysfs and RAS. We must serialize all possible
>>> GPU recoveries to gurantee no concurrency there.
>>> For TDR call the original recovery function directly since
>>> it's already executed from within the wq. For others just
>>> use a wrapper to qeueue work and wait on it to finish.
>>>
>>> v2: Rename to amdgpu_recover_work_struct
>>>
>>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
>>> ---
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 ++
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 
>>> +++++++++++++++++++++-
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  2 +-
>>>   3 files changed, 35 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> index b5ff76aae7e0..8e96b9a14452 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>>> @@ -1296,6 +1296,8 @@ bool amdgpu_device_has_job_running(struct 
>>> amdgpu_device *adev);
>>>   bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
>>>   int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>>                     struct amdgpu_job* job);
>>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>> +                  struct amdgpu_job *job);
>>>   void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
>>>   int amdgpu_device_pci_reset(struct amdgpu_device *adev);
>>>   bool amdgpu_device_need_post(struct amdgpu_device *adev);
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> index 7c063fd37389..258ec3c0b2af 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> @@ -4979,7 +4979,7 @@ static void amdgpu_device_recheck_guilty_jobs(
>>>    * Returns 0 for success or an error on failure.
>>>    */
>>>   -int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>> +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
>>>                     struct amdgpu_job *job)
>>>   {
>>>       struct list_head device_list, *device_list_handle = NULL;
>>> @@ -5237,6 +5237,37 @@ int amdgpu_device_gpu_recover(struct 
>>> amdgpu_device *adev,
>>>       return r;
>>>   }
>>>   +struct amdgpu_recover_work_struct {
>>> +    struct work_struct base;
>>> +    struct amdgpu_device *adev;
>>> +    struct amdgpu_job *job;
>>> +    int ret;
>>> +};
>>> +
>>> +static void amdgpu_device_queue_gpu_recover_work(struct work_struct 
>>> *work)
>>> +{
>>> +    struct amdgpu_recover_work_struct *recover_work = 
>>> container_of(work, struct amdgpu_recover_work_struct, base);
>>> +
>>> +    recover_work->ret = 
>>> amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
>>> +}
>>> +/*
>>> + * Serialize gpu recover into reset domain single threaded wq
>>> + */
>>> +int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>>> +                    struct amdgpu_job *job)
>>> +{
>>> +    struct amdgpu_recover_work_struct work = {.adev = adev, .job = 
>>> job};
>>> +
>>> +    INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
>>> +
>>> +    if (!queue_work(adev->reset_domain.wq, &work.base))
>>> +        return -EAGAIN;
>>> +
>>
>> The decision to schedule a reset is made at this point. Subsequent 
>> accesses to hardware may not be reliable. So should the flag in_reset 
>> be set here itself rather than waiting for the work to start execution?
>
> No, when we race and lose the VM is completely lost and probably 
> restarted by the hypervisor.
>
> And when we race and win we properly set the flag before signaling the 
> hypervisor that it can continue with the reset.
>
>> Also, what about having the reset_active or in_reset flag in the 
>> reset_domain itself?
>
> Of hand that sounds like a good idea.


What then about the adev->reset_sem semaphore? Should we also move this 
to reset_domain? Both of the moves have functional
implications only for the XGMI case, because there will be contention over 
accessing those single-instance variables from multiple devices,
whereas now each device has its own copy.

What benefit does the centralization into reset_domain give - is it, for 
example, to prevent one device in a hive from accessing another one's
VRAM (shared FB memory) through MMIO while the other one goes through reset?

Andrey


>
> Regards,
> Christian.
>
>>
>> Thanks,
>> Lijo
>>
>>> +    flush_work(&work.base);
>>> +
>>> +    return work.ret;
>>> +}
>>> +
>>>   /**
>>>    * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
>>>    *
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> index bfc47bea23db..38c9fd7b7ad4 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>>> @@ -63,7 +63,7 @@ static enum drm_gpu_sched_stat 
>>> amdgpu_job_timedout(struct drm_sched_job *s_job)
>>>             ti.process_name, ti.tgid, ti.task_name, ti.pid);
>>>         if (amdgpu_device_should_recover_gpu(ring->adev)) {
>>> -        amdgpu_device_gpu_recover(ring->adev, job);
>>> +        amdgpu_device_gpu_recover_imp(ring->adev, job);
>>>       } else {
>>>           drm_sched_suspend_timeout(&ring->sched);
>>>           if (amdgpu_sriov_vf(adev))
>>>
>
Andrey Grodzovsky Jan. 17, 2022, 7:14 p.m. UTC | #9
Ping on the question

Andrey

On 2022-01-05 1:11 p.m., Andrey Grodzovsky wrote:
>>> Also, what about having the reset_active or in_reset flag in the 
>>> reset_domain itself?
>>
>> Of hand that sounds like a good idea.
>
>
> What then about the adev->reset_sem semaphore ? Should we also move 
> this to reset_domain ?  Both of the moves have functional
> implications only for XGMI case because there will be contention over 
> accessing those single instance variables from multiple devices
> while now each device has it's own copy.
>
> What benefit the centralization into reset_domain gives - is it for 
> example to prevent one device in a hive trying to access through MMIO 
> another one's
> VRAM (shared FB memory) while the other one goes through reset ?
>
> Andrey
Christian König Jan. 17, 2022, 7:17 p.m. UTC | #10
Am 17.01.22 um 20:14 schrieb Andrey Grodzovsky:
>
> Ping on the question
>

Oh, my! That was already more than a week ago and is completely swapped 
out of my head again.

> Andrey
>
> On 2022-01-05 1:11 p.m., Andrey Grodzovsky wrote:
>>>> Also, what about having the reset_active or in_reset flag in the 
>>>> reset_domain itself?
>>>
>>> Of hand that sounds like a good idea.
>>
>>
>> What then about the adev->reset_sem semaphore ? Should we also move 
>> this to reset_domain ?  Both of the moves have functional
>> implications only for XGMI case because there will be contention over 
>> accessing those single instance variables from multiple devices
>> while now each device has it's own copy.

Since this is a rw semaphore that should be unproblematic I think. It 
could just be that the cache line of the lock then plays ping/pong 
between the CPU cores.

>>
>> What benefit the centralization into reset_domain gives - is it for 
>> example to prevent one device in a hive trying to access through MMIO 
>> another one's
>> VRAM (shared FB memory) while the other one goes through reset ?

I think that this is the killer argument for a centralized lock, yes.

Christian.

>>
>> Andrey
Andrey Grodzovsky Jan. 17, 2022, 7:21 p.m. UTC | #11
On 2022-01-17 2:17 p.m., Christian König wrote:
> Am 17.01.22 um 20:14 schrieb Andrey Grodzovsky:
>>
>> Ping on the question
>>
>
> Oh, my! That was already more than a week ago and is completely 
> swapped out of my head again.
>
>> Andrey
>>
>> On 2022-01-05 1:11 p.m., Andrey Grodzovsky wrote:
>>>>> Also, what about having the reset_active or in_reset flag in the 
>>>>> reset_domain itself?
>>>>
>>>> Of hand that sounds like a good idea.
>>>
>>>
>>> What then about the adev->reset_sem semaphore ? Should we also move 
>>> this to reset_domain ?  Both of the moves have functional
>>> implications only for XGMI case because there will be contention 
>>> over accessing those single instance variables from multiple devices
>>> while now each device has it's own copy.
>
> Since this is a rw semaphore that should be unproblematic I think. It 
> could just be that the cache line of the lock then plays ping/pong 
> between the CPU cores.
>
>>>
>>> What benefit the centralization into reset_domain gives - is it for 
>>> example to prevent one device in a hive trying to access through 
>>> MMIO another one's
>>> VRAM (shared FB memory) while the other one goes through reset ?
>
> I think that this is the killer argument for a centralized lock, yes.


np, i will add a patch with centralizing both flag into reset domain and 
resend.

Andrey


>
> Christian.
>
>>>
>>> Andrey 
>
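
A rough sketch of the centralization agreed on above - moving both the reset flag and the reset semaphore from the device into the shared reset domain - could look like the following; the field names are assumed here, the real layout comes with the follow-up patches Andrey mentions:

/* Sketch only: placeholder layout for a shared, per-hive reset domain. */
struct amdgpu_reset_domain {
	struct workqueue_struct	*wq;		/* serializes all recoveries */
	atomic_t		in_gpu_reset;	/* one "reset active" flag   */
	struct rw_semaphore	sem;		/* one reset_sem per hive    */
};

With one such domain shared by every device in an XGMI hive, a reset on any node makes the in-reset checks and reset_sem readers on all nodes back off, which addresses the shared-VRAM access concern raised above.
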
Andrey Grodzovsky Jan. 26, 2022, 3:52 p.m. UTC | #12
JingWen - could you maybe give those patches a try on an SRIOV XGMI system? 
If you see issues, maybe you could let me connect and debug. My SRIOV 
XGMI system, which Shayun kindly arranged for me, is not loading the 
driver with my drm-misc-next branch even without my patches.

Andrey

On 2022-01-17 14:21, Andrey Grodzovsky wrote:
>
>
> On 2022-01-17 2:17 p.m., Christian König wrote:
>> Am 17.01.22 um 20:14 schrieb Andrey Grodzovsky:
>>>
>>> Ping on the question
>>>
>>
>> Oh, my! That was already more than a week ago and is completely 
>> swapped out of my head again.
>>
>>> Andrey
>>>
>>> On 2022-01-05 1:11 p.m., Andrey Grodzovsky wrote:
>>>>>> Also, what about having the reset_active or in_reset flag in the 
>>>>>> reset_domain itself?
>>>>>
>>>>> Of hand that sounds like a good idea.
>>>>
>>>>
>>>> What then about the adev->reset_sem semaphore ? Should we also move 
>>>> this to reset_domain ?  Both of the moves have functional
>>>> implications only for XGMI case because there will be contention 
>>>> over accessing those single instance variables from multiple devices
>>>> while now each device has it's own copy.
>>
>> Since this is a rw semaphore that should be unproblematic I think. It 
>> could just be that the cache line of the lock then plays ping/pong 
>> between the CPU cores.
>>
>>>>
>>>> What benefit the centralization into reset_domain gives - is it for 
>>>> example to prevent one device in a hive trying to access through 
>>>> MMIO another one's
>>>> VRAM (shared FB memory) while the other one goes through reset ?
>>
>> I think that this is the killer argument for a centralized lock, yes.
>
>
> np, i will add a patch with centralizing both flag into reset domain 
> and resend.
>
> Andrey
>
>
>>
>> Christian.
>>
>>>>
>>>> Andrey 
>>
Andrey Grodzovsky Jan. 28, 2022, 4:57 p.m. UTC | #13
Just a gentle ping.

Andrey
JingWen Chen Feb. 7, 2022, 2:41 a.m. UTC | #14
Hi Andrey,

I don't have any XGMI machines here; maybe you can reach out to Shaoyun for help.

On 2022/1/29 上午12:57, Grodzovsky, Andrey wrote:
> Just a gentle ping.
>
> Andrey
> ------------------------------------------------------------------------
> *From:* Grodzovsky, Andrey
> *Sent:* 26 January 2022 10:52
> *To:* Christian König <ckoenig.leichtzumerken@gmail.com>; Koenig, Christian <Christian.Koenig@amd.com>; Lazar, Lijo <Lijo.Lazar@amd.com>; dri-devel@lists.freedesktop.org <dri-devel@lists.freedesktop.org>; amd-gfx@lists.freedesktop.org <amd-gfx@lists.freedesktop.org>; Chen, JingWen <JingWen.Chen2@amd.com>
> *Cc:* Chen, Horace <Horace.Chen@amd.com>; Liu, Monk <Monk.Liu@amd.com>
> *Subject:* Re: [RFC v2 4/8] drm/amdgpu: Serialize non TDR gpu recovery with TDRs
>  
>
> JingWen - could you maybe give those patches a try on SRIOV XGMI system ? If you see issues maybe you could let me connect and debug. My SRIOV XGMI system which Shayun kindly arranged for me is not loading the driver with my drm-misc-next branch even without my patches.
>
> Andrey
>
> On 2022-01-17 14:21, Andrey Grodzovsky wrote:
>>
>>
>> On 2022-01-17 2:17 p.m., Christian König wrote:
>>> Am 17.01.22 um 20:14 schrieb Andrey Grodzovsky:
>>>>
>>>> Ping on the question
>>>>
>>>
>>> Oh, my! That was already more than a week ago and is completely swapped out of my head again.
>>>
>>>> Andrey
>>>>
>>>> On 2022-01-05 1:11 p.m., Andrey Grodzovsky wrote:
>>>>>>> Also, what about having the reset_active or in_reset flag in the reset_domain itself?
>>>>>>
>>>>>> Of hand that sounds like a good idea.
>>>>>
>>>>>
>>>>> What then about the adev->reset_sem semaphore ? Should we also move this to reset_domain ?  Both of the moves have functional
>>>>> implications only for XGMI case because there will be contention over accessing those single instance variables from multiple devices
>>>>> while now each device has it's own copy.
>>>
>>> Since this is a rw semaphore that should be unproblematic I think. It could just be that the cache line of the lock then plays ping/pong between the CPU cores.
>>>
>>>>>
>>>>> What benefit the centralization into reset_domain gives - is it for example to prevent one device in a hive trying to access through MMIO another one's
>>>>> VRAM (shared FB memory) while the other one goes through reset ?
>>>
>>> I think that this is the killer argument for a centralized lock, yes.
>>
>>
>> np, i will add a patch with centralizing both flag into reset domain and resend.
>>
>> Andrey
>>
>>
>>>
>>> Christian.
>>>
>>>>>
>>>>> Andrey 
>>>
Andrey Grodzovsky Feb. 7, 2022, 3:08 a.m. UTC | #15
I already did - thanks to Shayun, I tested on XGMI SRIOV and it looks OK. What I need now is code review, mostly of the new patches (8-12). I hope you, Monk, Shayun, Lijo and Christian can help with that.

Andrey

Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b5ff76aae7e0..8e96b9a14452 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1296,6 +1296,8 @@  bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job* job);
+int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+			      struct amdgpu_job *job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7c063fd37389..258ec3c0b2af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4979,7 +4979,7 @@  static void amdgpu_device_recheck_guilty_jobs(
  * Returns 0 for success or an error on failure.
  */
 
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
 			      struct amdgpu_job *job)
 {
 	struct list_head device_list, *device_list_handle =  NULL;
@@ -5237,6 +5237,37 @@  int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	return r;
 }
 
+struct amdgpu_recover_work_struct {
+	struct work_struct base;
+	struct amdgpu_device *adev;
+	struct amdgpu_job *job;
+	int ret;
+};
+
+static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work)
+{
+	struct amdgpu_recover_work_struct *recover_work = container_of(work, struct amdgpu_recover_work_struct, base);
+
+	recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
+}
+/*
+ * Serialize gpu recover into reset domain single threaded wq
+ */
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+				    struct amdgpu_job *job)
+{
+	struct amdgpu_recover_work_struct work = {.adev = adev, .job = job};
+
+	INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
+
+	if (!queue_work(adev->reset_domain.wq, &work.base))
+		return -EAGAIN;
+
+	flush_work(&work.base);
+
+	return work.ret;
+}
+
 /**
  * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index bfc47bea23db..38c9fd7b7ad4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -63,7 +63,7 @@  static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 		  ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
 	if (amdgpu_device_should_recover_gpu(ring->adev)) {
-		amdgpu_device_gpu_recover(ring->adev, job);
+		amdgpu_device_gpu_recover_imp(ring->adev, job);
 	} else {
 		drm_sched_suspend_timeout(&ring->sched);
 		if (amdgpu_sriov_vf(adev))
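
With the wrapper in place, the TDR path keeps calling amdgpu_device_gpu_recover_imp() directly (it already runs on the reset domain queue), while every other trigger goes through amdgpu_device_gpu_recover() and blocks until its turn. A hypothetical non-TDR caller - for example a sysfs-style hook, which is not part of this patch - would look roughly like this:

/* Hypothetical example only; this attribute callback is not part of the
 * patch, it just shows how a non-TDR trigger would use the wrapper. */
static ssize_t example_gpu_recover_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r;

	/* Queued on adev->reset_domain.wq and flushed, so this call is
	 * serialized against TDR, RAS and any other concurrent recovery. */
	r = amdgpu_device_gpu_recover(adev, NULL);

	return r ? r : count;
}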