diff mbox

[RFC,05/24] Revert "drm: Nerf the preclose callback for modern drivers"

Message ID 20180518092815.25280-6-yuq825@gmail.com (mailing list archive)
State New, archived
Headers show

Commit Message

Qiang Yu May 18, 2018, 9:27 a.m. UTC
This reverts commit 45c3d213a400c952ab7119f394c5293bb6877e6b.

lima driver needs preclose to wait for all tasks in the context
created within the closing file to finish before freeing all the
buffer objects. Otherwise pending tasks may fail and produce
noisy MMU fault messages.

Moving this wait to each buffer object's free function can
achieve the same result, but some buffer objects are shared
with other file contexts, and we only want to wait for the
closing file context's tasks. So that implementation is
not as straightforward as the preclose one.

Signed-off-by: Qiang Yu <yuq825@gmail.com>
---
 drivers/gpu/drm/drm_file.c |  8 ++++----
 include/drm/drm_drv.h      | 23 +++++++++++++++++++++--
 2 files changed, 25 insertions(+), 6 deletions(-)

Comments

Christian König May 23, 2018, 9:35 a.m. UTC | #1
Well NAK, that brings back a callback we worked quite hard on getting 
rid of.

It looks like the problem isn't that you need the preclose callback, but 
you rather seem to misunderstood how TTM works.

All you need to do is to cleanup your command submission path so that 
the caller of lima_sched_context_queue_task() adds the resulting 
scheduler fence to TTMs buffer objects.

Regards,
Christian.

Am 18.05.2018 um 11:27 schrieb Qiang Yu:
> This reverts commit 45c3d213a400c952ab7119f394c5293bb6877e6b.
>
> lima driver need preclose to wait all task in the context
> created within closing file to finish before free all the
> buffer object. Otherwise pending tesk may fail and get
> noisy MMU fault message.
>
> Move this wait to each buffer object free function can
> achieve the same result but some buffer object is shared
> with other file context, but we only want to wait the
> closing file context's tasks. So the implementation is
> not that straight forword compared to the preclose one.
>
> Signed-off-by: Qiang Yu <yuq825@gmail.com>
> ---
>   drivers/gpu/drm/drm_file.c |  8 ++++----
>   include/drm/drm_drv.h      | 23 +++++++++++++++++++++--
>   2 files changed, 25 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
> index e394799979a6..0a43107396b9 100644
> --- a/drivers/gpu/drm/drm_file.c
> +++ b/drivers/gpu/drm/drm_file.c
> @@ -361,8 +361,9 @@ void drm_lastclose(struct drm_device * dev)
>    *
>    * This function must be used by drivers as their &file_operations.release
>    * method. It frees any resources associated with the open file, and calls the
> - * &drm_driver.postclose driver callback. If this is the last open file for the
> - * DRM device also proceeds to call the &drm_driver.lastclose driver callback.
> + * &drm_driver.preclose and &drm_driver.lastclose driver callbacks. If this is
> + * the last open file for the DRM device also proceeds to call the
> + * &drm_driver.lastclose driver callback.
>    *
>    * RETURNS:
>    *
> @@ -382,8 +383,7 @@ int drm_release(struct inode *inode, struct file *filp)
>   	list_del(&file_priv->lhead);
>   	mutex_unlock(&dev->filelist_mutex);
>   
> -	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
> -	    dev->driver->preclose)
> +	if (dev->driver->preclose)
>   		dev->driver->preclose(dev, file_priv);
>   
>   	/* ========================================================
> diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
> index d23dcdd1bd95..8d6080f97ed4 100644
> --- a/include/drm/drm_drv.h
> +++ b/include/drm/drm_drv.h
> @@ -107,6 +107,23 @@ struct drm_driver {
>   	 */
>   	int (*open) (struct drm_device *, struct drm_file *);
>   
> +	/**
> +	 * @preclose:
> +	 *
> +	 * One of the driver callbacks when a new &struct drm_file is closed.
> +	 * Useful for tearing down driver-private data structures allocated in
> +	 * @open like buffer allocators, execution contexts or similar things.
> +	 *
> +	 * Since the display/modeset side of DRM can only be owned by exactly
> +	 * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
> +	 * there should never be a need to tear down any modeset related
> +	 * resources in this callback. Doing so would be a driver design bug.
> +	 *
> +	 * FIXME: It is not really clear why there's both @preclose and
> +	 * @postclose. Without a really good reason, use @postclose only.
> +	 */
> +	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
> +
>   	/**
>   	 * @postclose:
>   	 *
> @@ -118,6 +135,9 @@ struct drm_driver {
>   	 * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
>   	 * there should never be a need to tear down any modeset related
>   	 * resources in this callback. Doing so would be a driver design bug.
> +	 *
> +	 * FIXME: It is not really clear why there's both @preclose and
> +	 * @postclose. Without a really good reason, use @postclose only.
>   	 */
>   	void (*postclose) (struct drm_device *, struct drm_file *);
>   
> @@ -134,7 +154,7 @@ struct drm_driver {
>   	 * state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
>   	 * infrastructure.
>   	 *
> -	 * This is called after @postclose hook has been called.
> +	 * This is called after @preclose and @postclose have been called.
>   	 *
>   	 * NOTE:
>   	 *
> @@ -601,7 +621,6 @@ struct drm_driver {
>   	/* List of devices hanging off this driver with stealth attach. */
>   	struct list_head legacy_dev_list;
>   	int (*firstopen) (struct drm_device *);
> -	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
>   	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
>   	int (*dma_quiescent) (struct drm_device *);
>   	int (*context_dtor) (struct drm_device *dev, int context);
Qiang Yu May 23, 2018, 1:13 p.m. UTC | #2
On Wed, May 23, 2018 at 5:35 PM, Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
> Well NAK, that brings back a callback we worked quite hard on getting rid
> of.
>
> It looks like the problem isn't that you need the preclose callback, but you
> rather seem to misunderstood how TTM works.
>
> All you need to do is to cleanup your command submission path so that the
> caller of lima_sched_context_queue_task() adds the resulting scheduler fence
> to TTMs buffer objects.

You mean adding the finished dma fence to the buffer's reservation object then
waiting it before unmap the buffer from GPU VM in the drm_release()'s buffer
close callback?

Adding fence is done already, and I did wait it before unmap. But then
I see when
the buffer is shared between processes, the "perfect wait" is just
wait the fence
from this process's task, so it's better to also distinguish fences.
If so, I just think
why we don't just wait tasks from this process in the preclose before unmap/free
buffer in the drm_release()?

Regards,
Qiang

>
>
> Am 18.05.2018 um 11:27 schrieb Qiang Yu:
>>
>> This reverts commit 45c3d213a400c952ab7119f394c5293bb6877e6b.
>>
>> lima driver need preclose to wait all task in the context
>> created within closing file to finish before free all the
>> buffer object. Otherwise pending tesk may fail and get
>> noisy MMU fault message.
>>
>> Move this wait to each buffer object free function can
>> achieve the same result but some buffer object is shared
>> with other file context, but we only want to wait the
>> closing file context's tasks. So the implementation is
>> not that straight forword compared to the preclose one.
>>
>> Signed-off-by: Qiang Yu <yuq825@gmail.com>
>> ---
>>   drivers/gpu/drm/drm_file.c |  8 ++++----
>>   include/drm/drm_drv.h      | 23 +++++++++++++++++++++--
>>   2 files changed, 25 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
>> index e394799979a6..0a43107396b9 100644
>> --- a/drivers/gpu/drm/drm_file.c
>> +++ b/drivers/gpu/drm/drm_file.c
>> @@ -361,8 +361,9 @@ void drm_lastclose(struct drm_device * dev)
>>    *
>>    * This function must be used by drivers as their
>> &file_operations.release
>>    * method. It frees any resources associated with the open file, and
>> calls the
>> - * &drm_driver.postclose driver callback. If this is the last open file
>> for the
>> - * DRM device also proceeds to call the &drm_driver.lastclose driver
>> callback.
>> + * &drm_driver.preclose and &drm_driver.lastclose driver callbacks. If
>> this is
>> + * the last open file for the DRM device also proceeds to call the
>> + * &drm_driver.lastclose driver callback.
>>    *
>>    * RETURNS:
>>    *
>> @@ -382,8 +383,7 @@ int drm_release(struct inode *inode, struct file
>> *filp)
>>         list_del(&file_priv->lhead);
>>         mutex_unlock(&dev->filelist_mutex);
>>   -     if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
>> -           dev->driver->preclose)
>> +       if (dev->driver->preclose)
>>                 dev->driver->preclose(dev, file_priv);
>>         /* ========================================================
>> diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
>> index d23dcdd1bd95..8d6080f97ed4 100644
>> --- a/include/drm/drm_drv.h
>> +++ b/include/drm/drm_drv.h
>> @@ -107,6 +107,23 @@ struct drm_driver {
>>          */
>>         int (*open) (struct drm_device *, struct drm_file *);
>>   +     /**
>> +        * @preclose:
>> +        *
>> +        * One of the driver callbacks when a new &struct drm_file is
>> closed.
>> +        * Useful for tearing down driver-private data structures
>> allocated in
>> +        * @open like buffer allocators, execution contexts or similar
>> things.
>> +        *
>> +        * Since the display/modeset side of DRM can only be owned by
>> exactly
>> +        * one &struct drm_file (see &drm_file.is_master and
>> &drm_device.master)
>> +        * there should never be a need to tear down any modeset related
>> +        * resources in this callback. Doing so would be a driver design
>> bug.
>> +        *
>> +        * FIXME: It is not really clear why there's both @preclose and
>> +        * @postclose. Without a really good reason, use @postclose only.
>> +        */
>> +       void (*preclose) (struct drm_device *, struct drm_file
>> *file_priv);
>> +
>>         /**
>>          * @postclose:
>>          *
>> @@ -118,6 +135,9 @@ struct drm_driver {
>>          * one &struct drm_file (see &drm_file.is_master and
>> &drm_device.master)
>>          * there should never be a need to tear down any modeset related
>>          * resources in this callback. Doing so would be a driver design
>> bug.
>> +        *
>> +        * FIXME: It is not really clear why there's both @preclose and
>> +        * @postclose. Without a really good reason, use @postclose only.
>>          */
>>         void (*postclose) (struct drm_device *, struct drm_file *);
>>   @@ -134,7 +154,7 @@ struct drm_driver {
>>          * state changes, e.g. in conjunction with the
>> :ref:`vga_switcheroo`
>>          * infrastructure.
>>          *
>> -        * This is called after @postclose hook has been called.
>> +        * This is called after @preclose and @postclose have been called.
>>          *
>>          * NOTE:
>>          *
>> @@ -601,7 +621,6 @@ struct drm_driver {
>>         /* List of devices hanging off this driver with stealth attach. */
>>         struct list_head legacy_dev_list;
>>         int (*firstopen) (struct drm_device *);
>> -       void (*preclose) (struct drm_device *, struct drm_file
>> *file_priv);
>>         int (*dma_ioctl) (struct drm_device *dev, void *data, struct
>> drm_file *file_priv);
>>         int (*dma_quiescent) (struct drm_device *);
>>         int (*context_dtor) (struct drm_device *dev, int context);
>
>
Christian König May 23, 2018, 1:41 p.m. UTC | #3
Am 23.05.2018 um 15:13 schrieb Qiang Yu:
> On Wed, May 23, 2018 at 5:35 PM, Christian König
> <ckoenig.leichtzumerken@gmail.com> wrote:
>> Well NAK, that brings back a callback we worked quite hard on getting rid
>> of.
>>
>> It looks like the problem isn't that you need the preclose callback, but you
>> rather seem to misunderstood how TTM works.
>>
>> All you need to do is to cleanup your command submission path so that the
>> caller of lima_sched_context_queue_task() adds the resulting scheduler fence
>> to TTMs buffer objects.
> You mean adding the finished dma fence to the buffer's reservation object then
> waiting it before unmap the buffer from GPU VM in the drm_release()'s buffer
> close callback?

That is one possibility, but also not necessary.

TTM has a destroy callback which is called from a workqueue when all 
fences on that BOs have signaled.

Depending on your VM management you can use it to delay unmapping the 
buffer until it is actually not used any more.

> Adding fence is done already, and I did wait it before unmap. But then
> I see when
> the buffer is shared between processes, the "perfect wait" is just
> wait the fence
> from this process's task, so it's better to also distinguish fences.
> If so, I just think
> why we don't just wait tasks from this process in the preclose before unmap/free
> buffer in the drm_release()?

Well it depends on your VM management. When userspace expects that the 
VM space the BO used is reusable immediately than the TTM callback won't 
work.

On the other hand you can just grab the list of fences on a BO and 
filter out the ones from your current process and wait for those. See 
amdgpu_sync_resv() as an example how to do that.

Christian.

>
> Regards,
> Qiang
>
>>
>> Am 18.05.2018 um 11:27 schrieb Qiang Yu:
>>> This reverts commit 45c3d213a400c952ab7119f394c5293bb6877e6b.
>>>
>>> lima driver need preclose to wait all task in the context
>>> created within closing file to finish before free all the
>>> buffer object. Otherwise pending tesk may fail and get
>>> noisy MMU fault message.
>>>
>>> Move this wait to each buffer object free function can
>>> achieve the same result but some buffer object is shared
>>> with other file context, but we only want to wait the
>>> closing file context's tasks. So the implementation is
>>> not that straight forword compared to the preclose one.
>>>
>>> Signed-off-by: Qiang Yu <yuq825@gmail.com>
>>> ---
>>>    drivers/gpu/drm/drm_file.c |  8 ++++----
>>>    include/drm/drm_drv.h      | 23 +++++++++++++++++++++--
>>>    2 files changed, 25 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
>>> index e394799979a6..0a43107396b9 100644
>>> --- a/drivers/gpu/drm/drm_file.c
>>> +++ b/drivers/gpu/drm/drm_file.c
>>> @@ -361,8 +361,9 @@ void drm_lastclose(struct drm_device * dev)
>>>     *
>>>     * This function must be used by drivers as their
>>> &file_operations.release
>>>     * method. It frees any resources associated with the open file, and
>>> calls the
>>> - * &drm_driver.postclose driver callback. If this is the last open file
>>> for the
>>> - * DRM device also proceeds to call the &drm_driver.lastclose driver
>>> callback.
>>> + * &drm_driver.preclose and &drm_driver.lastclose driver callbacks. If
>>> this is
>>> + * the last open file for the DRM device also proceeds to call the
>>> + * &drm_driver.lastclose driver callback.
>>>     *
>>>     * RETURNS:
>>>     *
>>> @@ -382,8 +383,7 @@ int drm_release(struct inode *inode, struct file
>>> *filp)
>>>          list_del(&file_priv->lhead);
>>>          mutex_unlock(&dev->filelist_mutex);
>>>    -     if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
>>> -           dev->driver->preclose)
>>> +       if (dev->driver->preclose)
>>>                  dev->driver->preclose(dev, file_priv);
>>>          /* ========================================================
>>> diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
>>> index d23dcdd1bd95..8d6080f97ed4 100644
>>> --- a/include/drm/drm_drv.h
>>> +++ b/include/drm/drm_drv.h
>>> @@ -107,6 +107,23 @@ struct drm_driver {
>>>           */
>>>          int (*open) (struct drm_device *, struct drm_file *);
>>>    +     /**
>>> +        * @preclose:
>>> +        *
>>> +        * One of the driver callbacks when a new &struct drm_file is
>>> closed.
>>> +        * Useful for tearing down driver-private data structures
>>> allocated in
>>> +        * @open like buffer allocators, execution contexts or similar
>>> things.
>>> +        *
>>> +        * Since the display/modeset side of DRM can only be owned by
>>> exactly
>>> +        * one &struct drm_file (see &drm_file.is_master and
>>> &drm_device.master)
>>> +        * there should never be a need to tear down any modeset related
>>> +        * resources in this callback. Doing so would be a driver design
>>> bug.
>>> +        *
>>> +        * FIXME: It is not really clear why there's both @preclose and
>>> +        * @postclose. Without a really good reason, use @postclose only.
>>> +        */
>>> +       void (*preclose) (struct drm_device *, struct drm_file
>>> *file_priv);
>>> +
>>>          /**
>>>           * @postclose:
>>>           *
>>> @@ -118,6 +135,9 @@ struct drm_driver {
>>>           * one &struct drm_file (see &drm_file.is_master and
>>> &drm_device.master)
>>>           * there should never be a need to tear down any modeset related
>>>           * resources in this callback. Doing so would be a driver design
>>> bug.
>>> +        *
>>> +        * FIXME: It is not really clear why there's both @preclose and
>>> +        * @postclose. Without a really good reason, use @postclose only.
>>>           */
>>>          void (*postclose) (struct drm_device *, struct drm_file *);
>>>    @@ -134,7 +154,7 @@ struct drm_driver {
>>>           * state changes, e.g. in conjunction with the
>>> :ref:`vga_switcheroo`
>>>           * infrastructure.
>>>           *
>>> -        * This is called after @postclose hook has been called.
>>> +        * This is called after @preclose and @postclose have been called.
>>>           *
>>>           * NOTE:
>>>           *
>>> @@ -601,7 +621,6 @@ struct drm_driver {
>>>          /* List of devices hanging off this driver with stealth attach. */
>>>          struct list_head legacy_dev_list;
>>>          int (*firstopen) (struct drm_device *);
>>> -       void (*preclose) (struct drm_device *, struct drm_file
>>> *file_priv);
>>>          int (*dma_ioctl) (struct drm_device *dev, void *data, struct
>>> drm_file *file_priv);
>>>          int (*dma_quiescent) (struct drm_device *);
>>>          int (*context_dtor) (struct drm_device *dev, int context);
>>
Qiang Yu May 24, 2018, 1:38 a.m. UTC | #4
On Wed, May 23, 2018 at 9:41 PM, Christian König
<christian.koenig@amd.com> wrote:
> Am 23.05.2018 um 15:13 schrieb Qiang Yu:
>>
>> On Wed, May 23, 2018 at 5:35 PM, Christian König
>> <ckoenig.leichtzumerken@gmail.com> wrote:
>>>
>>> Well NAK, that brings back a callback we worked quite hard on getting rid
>>> of.
>>>
>>> It looks like the problem isn't that you need the preclose callback, but
>>> you
>>> rather seem to misunderstood how TTM works.
>>>
>>> All you need to do is to cleanup your command submission path so that the
>>> caller of lima_sched_context_queue_task() adds the resulting scheduler
>>> fence
>>> to TTMs buffer objects.
>>
>> You mean adding the finished dma fence to the buffer's reservation object
>> then
>> waiting it before unmap the buffer from GPU VM in the drm_release()'s
>> buffer
>> close callback?
>
>
> That is one possibility, but also not necessary.
>
> TTM has a destroy callback which is called from a workqueue when all fences
> on that BOs have signaled.
>
> Depending on your VM management you can use it to delay unmapping the buffer
> until it is actually not used any more.
>
>> Adding fence is done already, and I did wait it before unmap. But then
>> I see when
>> the buffer is shared between processes, the "perfect wait" is just
>> wait the fence
>> from this process's task, so it's better to also distinguish fences.
>> If so, I just think
>> why we don't just wait tasks from this process in the preclose before
>> unmap/free
>> buffer in the drm_release()?
>
>
> Well it depends on your VM management. When userspace expects that the VM
> space the BO used is reusable immediately than the TTM callback won't work.
>
> On the other hand you can just grab the list of fences on a BO and filter
> out the ones from your current process and wait for those. See
> amdgpu_sync_resv() as an example how to do that.

In the current lima implementation, the user space driver is responsible for not
unmapping/freeing buffers before tasks are complete. And VM map/unmap is not deferred.

This works simple and fine except the case that user press Ctrl+C to terminate
the application which will force to close drm fd.

I'd prefer to wait for the buffer fence before vm unmap and to filter like
amdgpu_sync_resv(), compared to implementing refcounting in the kernel task.
But these two ways are both not as simple as preclose.

So I still don't understand why you don't want to get preclose back even
have to introduce other complicated mechanism to cover the case free/unmap
buffer before this process's task is done?

Regards,
Qiang

>
> Christian.
>
>
>>
>> Regards,
>> Qiang
>>
>>>
>>> Am 18.05.2018 um 11:27 schrieb Qiang Yu:
>>>>
>>>> This reverts commit 45c3d213a400c952ab7119f394c5293bb6877e6b.
>>>>
>>>> lima driver need preclose to wait all task in the context
>>>> created within closing file to finish before free all the
>>>> buffer object. Otherwise pending tesk may fail and get
>>>> noisy MMU fault message.
>>>>
>>>> Move this wait to each buffer object free function can
>>>> achieve the same result but some buffer object is shared
>>>> with other file context, but we only want to wait the
>>>> closing file context's tasks. So the implementation is
>>>> not that straight forword compared to the preclose one.
>>>>
>>>> Signed-off-by: Qiang Yu <yuq825@gmail.com>
>>>> ---
>>>>    drivers/gpu/drm/drm_file.c |  8 ++++----
>>>>    include/drm/drm_drv.h      | 23 +++++++++++++++++++++--
>>>>    2 files changed, 25 insertions(+), 6 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
>>>> index e394799979a6..0a43107396b9 100644
>>>> --- a/drivers/gpu/drm/drm_file.c
>>>> +++ b/drivers/gpu/drm/drm_file.c
>>>> @@ -361,8 +361,9 @@ void drm_lastclose(struct drm_device * dev)
>>>>     *
>>>>     * This function must be used by drivers as their
>>>> &file_operations.release
>>>>     * method. It frees any resources associated with the open file, and
>>>> calls the
>>>> - * &drm_driver.postclose driver callback. If this is the last open file
>>>> for the
>>>> - * DRM device also proceeds to call the &drm_driver.lastclose driver
>>>> callback.
>>>> + * &drm_driver.preclose and &drm_driver.lastclose driver callbacks. If
>>>> this is
>>>> + * the last open file for the DRM device also proceeds to call the
>>>> + * &drm_driver.lastclose driver callback.
>>>>     *
>>>>     * RETURNS:
>>>>     *
>>>> @@ -382,8 +383,7 @@ int drm_release(struct inode *inode, struct file
>>>> *filp)
>>>>          list_del(&file_priv->lhead);
>>>>          mutex_unlock(&dev->filelist_mutex);
>>>>    -     if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
>>>> -           dev->driver->preclose)
>>>> +       if (dev->driver->preclose)
>>>>                  dev->driver->preclose(dev, file_priv);
>>>>          /* ========================================================
>>>> diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
>>>> index d23dcdd1bd95..8d6080f97ed4 100644
>>>> --- a/include/drm/drm_drv.h
>>>> +++ b/include/drm/drm_drv.h
>>>> @@ -107,6 +107,23 @@ struct drm_driver {
>>>>           */
>>>>          int (*open) (struct drm_device *, struct drm_file *);
>>>>    +     /**
>>>> +        * @preclose:
>>>> +        *
>>>> +        * One of the driver callbacks when a new &struct drm_file is
>>>> closed.
>>>> +        * Useful for tearing down driver-private data structures
>>>> allocated in
>>>> +        * @open like buffer allocators, execution contexts or similar
>>>> things.
>>>> +        *
>>>> +        * Since the display/modeset side of DRM can only be owned by
>>>> exactly
>>>> +        * one &struct drm_file (see &drm_file.is_master and
>>>> &drm_device.master)
>>>> +        * there should never be a need to tear down any modeset related
>>>> +        * resources in this callback. Doing so would be a driver design
>>>> bug.
>>>> +        *
>>>> +        * FIXME: It is not really clear why there's both @preclose and
>>>> +        * @postclose. Without a really good reason, use @postclose
>>>> only.
>>>> +        */
>>>> +       void (*preclose) (struct drm_device *, struct drm_file
>>>> *file_priv);
>>>> +
>>>>          /**
>>>>           * @postclose:
>>>>           *
>>>> @@ -118,6 +135,9 @@ struct drm_driver {
>>>>           * one &struct drm_file (see &drm_file.is_master and
>>>> &drm_device.master)
>>>>           * there should never be a need to tear down any modeset
>>>> related
>>>>           * resources in this callback. Doing so would be a driver
>>>> design
>>>> bug.
>>>> +        *
>>>> +        * FIXME: It is not really clear why there's both @preclose and
>>>> +        * @postclose. Without a really good reason, use @postclose
>>>> only.
>>>>           */
>>>>          void (*postclose) (struct drm_device *, struct drm_file *);
>>>>    @@ -134,7 +154,7 @@ struct drm_driver {
>>>>           * state changes, e.g. in conjunction with the
>>>> :ref:`vga_switcheroo`
>>>>           * infrastructure.
>>>>           *
>>>> -        * This is called after @postclose hook has been called.
>>>> +        * This is called after @preclose and @postclose have been
>>>> called.
>>>>           *
>>>>           * NOTE:
>>>>           *
>>>> @@ -601,7 +621,6 @@ struct drm_driver {
>>>>          /* List of devices hanging off this driver with stealth attach.
>>>> */
>>>>          struct list_head legacy_dev_list;
>>>>          int (*firstopen) (struct drm_device *);
>>>> -       void (*preclose) (struct drm_device *, struct drm_file
>>>> *file_priv);
>>>>          int (*dma_ioctl) (struct drm_device *dev, void *data, struct
>>>> drm_file *file_priv);
>>>>          int (*dma_quiescent) (struct drm_device *);
>>>>          int (*context_dtor) (struct drm_device *dev, int context);
>>>
>>>
>
Christian König May 24, 2018, 6:46 a.m. UTC | #5
Am 24.05.2018 um 03:38 schrieb Qiang Yu:
> [SNIP]
>>> Adding fence is done already, and I did wait it before unmap. But then
>>> I see when
>>> the buffer is shared between processes, the "perfect wait" is just
>>> wait the fence
>>> from this process's task, so it's better to also distinguish fences.
>>> If so, I just think
>>> why we don't just wait tasks from this process in the preclose before
>>> unmap/free
>>> buffer in the drm_release()?
>>
>> Well it depends on your VM management. When userspace expects that the VM
>> space the BO used is reusable immediately than the TTM callback won't work.
>>
>> On the other hand you can just grab the list of fences on a BO and filter
>> out the ones from your current process and wait for those. See
>> amdgpu_sync_resv() as an example how to do that.
> In current lima implementation, user space driver is responsible not unmap/free
> buffer before task is complete. And VM map/unmap is not differed.

Well it's up to you how to design userspace, but in the past doing it 
like that turned out to be a rather bad design decision.

Keep in mind that the kernel driver must guarantee that a shaders can 
never access freed up memory.

Otherwise taking over the system from an unprivileged processes becomes 
just a typing exercise when you manage to access freed memory which is 
now used for a page table.

Because of this we have a separate tracking in amdgpu so that we not 
only know who is using which BO, who is using which VM.

> This works simple and fine except the case that user press Ctrl+C to terminate
> the application which will force to close drm fd.

I'm not sure if that actually works as fine as you think.

For an example of what we had to add to prevent security breaches, take 
a look at amdgpu_gem_object_close().

> I'd more prefer to wait buffer fence before vm unmap and filter like
> amdgpu_sync_resv() compared to implement refcount in kernel task.
> But these two ways are all not as simple as preclose.

Well, I would rather say you should either delay VM unmap operations 
until all users of the VM are done with their work using the 
ttm_bo_destroy callback.

Or you block in the gem_close_object callback until all tasks using the 
BO are done with it.

> So I still don't understand why you don't want to get preclose back even
> have to introduce other complicated mechanism to cover the case free/unmap
> buffer before this process's task is done?

We intentionally removed the preclose callback to prevent certain use 
cases, bringing it back to allow your use case looks rather fishy to me.

BTW: What exactly is the issue with using the postclose callback?

Regards,
Christian.

>
> Regards,
> Qiang
>
Qiang Yu May 24, 2018, 9:24 a.m. UTC | #6
On Thu, May 24, 2018 at 2:46 PM, Christian König
<christian.koenig@amd.com> wrote:
> Am 24.05.2018 um 03:38 schrieb Qiang Yu:
>
> [SNIP]
>
> Adding fence is done already, and I did wait it before unmap. But then
> I see when
> the buffer is shared between processes, the "perfect wait" is just
> wait the fence
> from this process's task, so it's better to also distinguish fences.
> If so, I just think
> why we don't just wait tasks from this process in the preclose before
> unmap/free
> buffer in the drm_release()?
>
> Well it depends on your VM management. When userspace expects that the VM
> space the BO used is reusable immediately than the TTM callback won't work.
>
> On the other hand you can just grab the list of fences on a BO and filter
> out the ones from your current process and wait for those. See
> amdgpu_sync_resv() as an example how to do that.
>
> In current lima implementation, user space driver is responsible not
> unmap/free
> buffer before task is complete. And VM map/unmap is not differed.
>
>
> Well it's up to you how to design userspace, but in the past doing it like
> that turned out to be a rather bad design decision.
>
> Keep in mind that the kernel driver must guarantee that a shaders can never
> access freed up memory.
>
> Otherwise taking over the system from an unprivileged processes becomes just
> a typing exercise when you manage to access freed memory which is now used
> for a page table.

Right, I know this has to be avoided.

>
> Because of this we have a separate tracking in amdgpu so that we not only
> know who is using which BO, who is using which VM.

amdgpu's VM implementation seems too complicated for this simple mali GPU,
but I may investigate it more to see if I can make it better.

>
> This works simple and fine except the case that user press Ctrl+C to
> terminate
> the application which will force to close drm fd.
>
>
> I'm not sure if that actually works as fine as you think.
>
> For an example of what we had to add to prevent security breaches, take a
> look at amdgpu_gem_object_close().
>
> I'd more prefer to wait buffer fence before vm unmap and filter like
> amdgpu_sync_resv() compared to implement refcount in kernel task.
> But these two ways are all not as simple as preclose.
>
>
> Well, I would rather say you should either delay VM unmap operations until
> all users of the VM are done with their work using the ttm_bo_destroy
> callback.
>
> Or you block in the gem_close_object callback until all tasks using the BO
> are done with it.
>
> So I still don't understand why you don't want to get preclose back even
> have to introduce other complicated mechanism to cover the case free/unmap
> buffer before this process's task is done?
>
>
> We intentionally removed the preclose callback to prevent certain use cases,
> bringing it back to allow your use case looks rather fishy to me.

It seems other drivers do either the defer or the wait approach to adapt to the drop
of preclose. I can do the same as you suggested, but I just don't understand why
we make our life harder. Can I know what's the case you want to prevent?

>
> BTW: What exactly is the issue with using the postclose callback?

The issue is, when Ctrl+C to terminate an application, if no wait or defer
unmap, buffer just gets unmapped before task is done, so kernel driver
gets MMU fault and HW reset to recover the GPU.

Regards,
Qiang

>
> Regards,
> Christian.
>
>
> Regards,
> Qiang
>
>
Christian König May 24, 2018, 9:41 a.m. UTC | #7
Am 24.05.2018 um 11:24 schrieb Qiang Yu:
> On Thu, May 24, 2018 at 2:46 PM, Christian König
> <christian.koenig@amd.com> wrote:
> [SNIP]
>> Because of this we have a separate tracking in amdgpu so that we not only
>> know who is using which BO, who is using which VM.
> amdgpu's VM implementation seems too complicated for this simple mali GPU,
> but I may investigate it more to see if I can make it better.

Yeah, completely agree.

The VM handling in amdgpu is really complicated because we had to tune 
it for multiple use cases. E.g. partial resident textures, delayed 
updates etc etc....

But you should at least be able to take the lessons learned we had with 
that VM code and not make the same mistakes again.

>> We intentionally removed the preclose callback to prevent certain use cases,
>> bringing it back to allow your use case looks rather fishy to me.
> Seems other drivers do either the defer or wait way to adopt the drop
> of preclose. I can do the same as you suggested, but just not understand why
> we make our life harder. Can I know what's the case you want to prevent?

I think what matters most for your case is the issue is that drivers 
should handle closing a BO because userspace said so in the same way it 
handles closing a BO because of a process termination, but see below.

>> BTW: What exactly is the issue with using the postclose callback?
> The issue is, when Ctrl+C to terminate an application, if no wait or defer
> unmap, buffer just gets unmapped before task is done, so kernel driver
> gets MMU fault and HW reset to recover the GPU.

Yeah, that sounds like exactly one of the reasons we had the callback in 
the first place and worked on to removing it.

See the intention is to have reliable handling, e.g. use the same code 
path for closing a BO because of an IOCTL and closing a BO because of 
process termination.

In other words what happens when userspace closes a BO while the GPU is 
still using it? Would you then run into a GPU reset as well?

I mean it's your driver stack, so I'm not against it as long as you can 
live with it. But it's exactly the thing we wanted to avoid here.

Regards,
Christian.
Qiang Yu May 24, 2018, 12:54 p.m. UTC | #8
On Thu, May 24, 2018 at 5:41 PM, Christian König
<christian.koenig@amd.com> wrote:
> Am 24.05.2018 um 11:24 schrieb Qiang Yu:
>>
>> On Thu, May 24, 2018 at 2:46 PM, Christian König
>> <christian.koenig@amd.com> wrote:
>> [SNIP]
>>>
>>> Because of this we have a separate tracking in amdgpu so that we not only
>>> know who is using which BO, who is using which VM.
>>
>> amdgpu's VM implementation seems too complicated for this simple mali GPU,
>> but I may investigate it more to see if I can make it better.
>
>
> Yeah, completely agree.
>
> The VM handling in amdgpu is really complicated because we had to tune it
> for multiple use cases. E.g. partial resident textures, delayed updates etc
> etc....
>
> But you should at least be able to take the lessons learned we had with that
> VM code and not make the same mistakes again.
>
>>> We intentionally removed the preclose callback to prevent certain use
>>> cases,
>>> bringing it back to allow your use case looks rather fishy to me.
>>
>> Seems other drivers do either the defer or wait way to adopt the drop
>> of preclose. I can do the same as you suggested, but just not understand
>> why
>> we make our life harder. Can I know what's the case you want to prevent?
>
>
> I think what matters most for your case is the issue is that drivers should
> handle closing a BO because userspace said so in the same way it handles
> closing a BO because of a process termination, but see below.
>
>>> BTW: What exactly is the issue with using the postclose callback?
>>
>> The issue is, when Ctrl+C to terminate an application, if no wait or
>> defer
>> unmap, buffer just gets unmapped before task is done, so kernel driver
>> gets MMU fault and HW reset to recover the GPU.
>
>
> Yeah, that sounds like exactly one of the reasons we had the callback in the
> first place and worked on to removing it.
>
> See the intention is to have reliable handling, e.g. use the same code path
> for closing a BO because of an IOCTL and closing a BO because of process
> termination.
>
> In other words what happens when userspace closes a BO while the GPU is
> still using it? Would you then run into a GPU reset as well?

Yes, there is also an MMU fault and a GPU reset when the user space driver
misuses a buffer like this. I think I don't need to avoid this case, because
it is a user error which deserves a GPU reset, while process termination is
not. But you remind me that they would indeed share the same code path if
preclose were removed now.

Regards,
Qiang

>
> I mean it's your driver stack, so I'm not against it as long as you can live
> with it. But it's exactly the thing we wanted to avoid here.

Seems

>
> Regards,
> Christian.
diff mbox

Patch

diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index e394799979a6..0a43107396b9 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -361,8 +361,9 @@  void drm_lastclose(struct drm_device * dev)
  *
  * This function must be used by drivers as their &file_operations.release
  * method. It frees any resources associated with the open file, and calls the
- * &drm_driver.postclose driver callback. If this is the last open file for the
- * DRM device also proceeds to call the &drm_driver.lastclose driver callback.
+ * &drm_driver.preclose and &drm_driver.postclose driver callbacks. If this is
+ * the last open file for the DRM device also proceeds to call the
+ * &drm_driver.lastclose driver callback.
  *
  * RETURNS:
  *
@@ -382,8 +383,7 @@  int drm_release(struct inode *inode, struct file *filp)
 	list_del(&file_priv->lhead);
 	mutex_unlock(&dev->filelist_mutex);
 
-	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
-	    dev->driver->preclose)
+	if (dev->driver->preclose)
 		dev->driver->preclose(dev, file_priv);
 
 	/* ========================================================
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index d23dcdd1bd95..8d6080f97ed4 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -107,6 +107,23 @@  struct drm_driver {
 	 */
 	int (*open) (struct drm_device *, struct drm_file *);
 
+	/**
+	 * @preclose:
+	 *
+	 * One of the driver callbacks when a new &struct drm_file is closed.
+	 * Useful for tearing down driver-private data structures allocated in
+	 * @open like buffer allocators, execution contexts or similar things.
+	 *
+	 * Since the display/modeset side of DRM can only be owned by exactly
+	 * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
+	 * there should never be a need to tear down any modeset related
+	 * resources in this callback. Doing so would be a driver design bug.
+	 *
+	 * FIXME: It is not really clear why there's both @preclose and
+	 * @postclose. Without a really good reason, use @postclose only.
+	 */
+	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+
 	/**
 	 * @postclose:
 	 *
@@ -118,6 +135,9 @@  struct drm_driver {
 	 * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
 	 * there should never be a need to tear down any modeset related
 	 * resources in this callback. Doing so would be a driver design bug.
+	 *
+	 * FIXME: It is not really clear why there's both @preclose and
+	 * @postclose. Without a really good reason, use @postclose only.
 	 */
 	void (*postclose) (struct drm_device *, struct drm_file *);
 
@@ -134,7 +154,7 @@  struct drm_driver {
 	 * state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
 	 * infrastructure.
 	 *
-	 * This is called after @postclose hook has been called.
+	 * This is called after @preclose and @postclose have been called.
 	 *
 	 * NOTE:
 	 *
@@ -601,7 +621,6 @@  struct drm_driver {
 	/* List of devices hanging off this driver with stealth attach. */
 	struct list_head legacy_dev_list;
 	int (*firstopen) (struct drm_device *);
-	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
 	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
 	int (*dma_quiescent) (struct drm_device *);
 	int (*context_dtor) (struct drm_device *dev, int context);