diff mbox

[1/5] dma-buf: remove fallback for !CONFIG_DMA_SHARED_BUFFER

Message ID 20120928124148.14366.21063.stgit@patser.local (mailing list archive)
State New, archived
Headers show

Commit Message

Maarten Lankhorst Sept. 28, 2012, 12:41 p.m. UTC
Documentation says that code requiring dma-buf should add it to
select, so inline fallbacks are not going to be used. A link error
will make it obvious what went wrong, instead of silently doing
nothing at runtime.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
 include/linux/dma-buf.h |   99 -----------------------------------------------
 1 file changed, 99 deletions(-)

Comments

Daniel Vetter Sept. 28, 2012, 1:20 p.m. UTC | #1
On Fri, Sep 28, 2012 at 02:41:48PM +0200, Maarten Lankhorst wrote:
> Documentation says that code requiring dma-buf should add it to
> select, so inline fallbacks are not going to be used. A link error
> will make it obvious what went wrong, instead of silently doing
> nothing at runtime.
> 
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

I think it'd be good if we could merge this for 3.7 ...
-Daniel

> ---
>  include/linux/dma-buf.h |   99 -----------------------------------------------
>  1 file changed, 99 deletions(-)
> 
> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> index eb48f38..bd2e52c 100644
> --- a/include/linux/dma-buf.h
> +++ b/include/linux/dma-buf.h
> @@ -156,7 +156,6 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
>  	get_file(dmabuf->file);
>  }
>  
> -#ifdef CONFIG_DMA_SHARED_BUFFER
>  struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
>  							struct device *dev);
>  void dma_buf_detach(struct dma_buf *dmabuf,
> @@ -184,103 +183,5 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
>  		 unsigned long);
>  void *dma_buf_vmap(struct dma_buf *);
>  void dma_buf_vunmap(struct dma_buf *, void *vaddr);
> -#else
> -
> -static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
> -							struct device *dev)
> -{
> -	return ERR_PTR(-ENODEV);
> -}
> -
> -static inline void dma_buf_detach(struct dma_buf *dmabuf,
> -				  struct dma_buf_attachment *dmabuf_attach)
> -{
> -	return;
> -}
> -
> -static inline struct dma_buf *dma_buf_export(void *priv,
> -					     const struct dma_buf_ops *ops,
> -					     size_t size, int flags)
> -{
> -	return ERR_PTR(-ENODEV);
> -}
> -
> -static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
> -{
> -	return -ENODEV;
> -}
> -
> -static inline struct dma_buf *dma_buf_get(int fd)
> -{
> -	return ERR_PTR(-ENODEV);
> -}
> -
> -static inline void dma_buf_put(struct dma_buf *dmabuf)
> -{
> -	return;
> -}
> -
> -static inline struct sg_table *dma_buf_map_attachment(
> -	struct dma_buf_attachment *attach, enum dma_data_direction write)
> -{
> -	return ERR_PTR(-ENODEV);
> -}
> -
> -static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
> -			struct sg_table *sg, enum dma_data_direction dir)
> -{
> -	return;
> -}
> -
> -static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
> -					   size_t start, size_t len,
> -					   enum dma_data_direction dir)
> -{
> -	return -ENODEV;
> -}
> -
> -static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
> -					  size_t start, size_t len,
> -					  enum dma_data_direction dir)
> -{
> -}
> -
> -static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
> -					unsigned long pnum)
> -{
> -	return NULL;
> -}
> -
> -static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
> -					 unsigned long pnum, void *vaddr)
> -{
> -}
> -
> -static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
> -{
> -	return NULL;
> -}
> -
> -static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
> -				  unsigned long pnum, void *vaddr)
> -{
> -}
> -
> -static inline int dma_buf_mmap(struct dma_buf *dmabuf,
> -			       struct vm_area_struct *vma,
> -			       unsigned long pgoff)
> -{
> -	return -ENODEV;
> -}
> -
> -static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
> -{
> -	return NULL;
> -}
> -
> -static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
> -{
> -}
> -#endif /* CONFIG_DMA_SHARED_BUFFER */
>  
>  #endif /* __DMA_BUF_H__ */
> 
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
Maarten Lankhorst Sept. 28, 2012, 2:14 p.m. UTC | #2
Hey,

Op 28-09-12 14:41, Maarten Lankhorst schreef:
> Documentation says that code requiring dma-buf should add it to
> select, so inline fallbacks are not going to be used. A link error
> will make it obvious what went wrong, instead of silently doing
> nothing at runtime.
>
  


The whole patch series is in my tree, I use stg so things might
move around, do not use for merging currently:

http://cgit.freedesktop.org/~mlankhorst/linux/log/?h=v10-wip

It contains everything in here plus the patches for ttm to make
it work, I use an old snapshot of drm-next + merge of nouveau as
base. Description of what the parts do:

Series to fix small api issues when moving over:

drm/ttm: Remove cpu_writers related code
drm/ttm: Add ttm_bo_is_reserved function
drm/radeon: Use ttm_bo_is_reserved
drm/vmwgfx: use ttm_bo_is_reserved

drm/vmwgfx: remove use of fence_obj_args
drm/ttm: remove sync_obj_arg
drm/ttm: remove sync_obj_arg from ttm_bo_move_accel_cleanup
drm/ttm: remove sync_arg entirely

drm/nouveau: unpin buffers before releasing to prevent lockdep warnings
drm/nouveau: add reservation to nouveau_bo_vma_del
drm/nouveau: add reservation to nouveau_gem_ioctl_cpu_prep

Hey great, now we only have one user left for fence waiting before reserving,
lets fix that and remove fence lock:
ttm_bo_cleanup_refs_or_queue and ttm_bo_cleanup_refs have to reserve before
waiting, lets do it in the squash commit so we don't have to throw lock order
around everywhere:

drm/ttm: remove fence_lock

-- Up to this point should be mergeable now

Then we start working on lru_lock removal slightly, this means the lru
list no longer is empty but can contain only reserved buffers:

drm/ttm: do not check if list is empty in ttm_bo_force_list_clean
drm/ttm: move reservations for ttm_bo_cleanup_refs

-- Still mergeable up to this point, just fixes

Patch series from this email:
dma-buf: remove fallback for !CONFIG_DMA_SHARED_BUFFER
fence: dma-buf cross-device synchronization (v9)
seqno-fence: Hardware dma-buf implementation of fencing (v3)
reservation: cross-device reservation support
reservation: Add lockdep annotation and selftests

Now hook it up to drm/ttm in a few steps:
usage around reservations:
drm/ttm: make ttm reservation calls behave like reservation calls
drm/ttm: use dma_reservation api
dma-buf: use reservations
drm/ttm: allow drivers to pass custom dma_reservation_objects for a bo

then kill off the lru lock around reservation:
drm/ttm: remove lru_lock around ttm_bo_reserve
drm/ttm: simplify ttm_eu_*

The lru_lock removal patch removes the lock around lru_lock around the
reservation, this will break the assumption that items on the lru list
and swap list can always be reserved, and this gets patched up too.
Is there any part in ttm you disagree with? I believe that this 
is all mergeable, the lru_lock removal patch could be moved to before
the reservation parts, this might make merging easier, but I don't
think there is any ttm part of the series that are wrong on a conceptual
level.

~Maarten
Thomas Hellstrom Sept. 28, 2012, 7:42 p.m. UTC | #3
On 09/28/2012 04:14 PM, Maarten Lankhorst wrote:
> Hey,
>
> Op 28-09-12 14:41, Maarten Lankhorst schreef:
>> Documentation says that code requiring dma-buf should add it to
>> select, so inline fallbacks are not going to be used. A link error
>> will make it obvious what went wrong, instead of silently doing
>> nothing at runtime.
>>
>    
>
>
> The whole patch series is in my tree, I use stg so things might
> move around, do not use for merging currently:
>
> http://cgit.freedesktop.org/~mlankhorst/linux/log/?h=v10-wip
>
> It contains everything in here plus the patches for ttm to make
> it work, I use a old snapshot of drm-next + merge of nouveau as
> base. Description of what the parts do:
>
> Series to fix small api issues when moving over:
>
> drm/ttm: Remove cpu_writers related code
> drm/ttm: Add ttm_bo_is_reserved function
> drm/radeon: Use ttm_bo_is_reserved
> drm/vmwgfx: use ttm_bo_is_reserved
>
> drm/vmwgfx: remove use of fence_obj_args
> drm/ttm: remove sync_obj_arg
> drm/ttm: remove sync_obj_arg from ttm_bo_move_accel_cleanup
> drm/ttm: remove sync_arg entirely
>
> drm/nouveau: unpin buffers before releasing to prevent lockdep warnings
> drm/nouveau: add reservation to nouveau_bo_vma_del
> drm/nouveau: add reservation to nouveau_gem_ioctl_cpu_prep
>
> Hey great, now we only have one user left for fence waiting before reserving,
> lets fix that and remove fence lock:
> ttm_bo_cleanup_refs_or_queue and ttm_bo_cleanup_refs have to reserve before
> waiting, lets do it in the squash commit so we don't have to throw lock order
> around everywhere:
>
> drm/ttm: remove fence_lock
>
> -- Up to this point should be mergeable now
>
> Then we start working on lru_lock removal slightly, this means the lru
> list no longer is empty but can contain only reserved buffers:
>
> drm/ttm: do not check if list is empty in ttm_bo_force_list_clean
> drm/ttm: move reservations for ttm_bo_cleanup_refs
>
> -- Still mergeable up to this point, just fixes
>
> Patch series from this email:
> dma-buf: remove fallback for !CONFIG_DMA_SHARED_BUFFER
> fence: dma-buf cross-device synchronization (v9)
> seqno-fence: Hardware dma-buf implementation of fencing (v3)
> reservation: cross-device reservation support
> reservation: Add lockdep annotation and selftests
>
> Now hook it up to drm/ttm in a few steps:
> usage around reservations:
> drm/ttm: make ttm reservation calls behave like reservation calls
> drm/ttm: use dma_reservation api
> dma-buf: use reservations
> drm/ttm: allow drivers to pass custom dma_reservation_objects for a bo
>
> then kill off the lru lock around reservation:
> drm/ttm: remove lru_lock around ttm_bo_reserve
> drm/ttm: simplify ttm_eu_*
>
> The lru_lock removal patch removes the lock around lru_lock around the
> reservation, this will break the assumption that items on the lru list
> and swap list can always be reserved, and this gets patched up too.
> Is there any part in ttm you disagree with? I believe that this
> is all mergeable, the lru_lock removal patch could be moved to before
> the reservation parts, this might make merging easier, but I don't
> think there is any ttm part of the series that are wrong on a conceptual
> level.
>
> ~Maarten
>
....From another email

>> As previously discussed, I'm unfortunately not prepared to accept removal of the reserve-lru atomicity
>>   into the TTM code at this point.
>> The current code is based on this assumption and removing it will end up with
>> inefficiencies, breaking the delayed delete code and probably a locking nightmare when trying to write
>> new TTM code.
> The lru lock removal patch fixed the delayed delete code, it really is not different from the current
> situation. In fact it is more clear without the guarantee what various parts are trying to protect.
>
> Nothing prevents you from holding the lru_lock while trylocking,
[1]
While this would not cause any deadlocks, any decent lockdep code would 
establish lru->reserve as the locking
order once an lru->reserve trylock succeeds, but the locking order is 
really reserve->lru for obvious reasons, which
means we will get a lot of lockdep errors? Yes, there are two 
reversals like these already in the TTM code, and I'm
not very proud of them.

leaving that guarantee intact for that part. Can you really just review
the patch and tell me where it breaks and/or makes the code unreadable?

OK. Now I'm looking at
http://cgit.freedesktop.org/~mlankhorst/linux/tree/drivers/gpu/drm/ttm/ttm_bo.c?h=v10-wip&id=1436e2e64697c744d6e186618448e1e6354846bb

And let's start with a function that's seen some change, 
ttm_mem_evict_first:

*) Line 715: You're traversing a list using list_for_each() calling a 
function that may remove the list entry
*) Line 722: You're unlocking the lock protecting the list in the middle 
of list traversal
*) Line 507: WARN_ON_ONCE in a code path quite likely to get called?
*) Line 512: sleep while atomic
*) Line 729: Forgot to unreserve
*) Line 730: Forgot to lock lru
*) Line 735: Now you're restarting with the first item on the LRU list. 
Why the loop at line 715?
*) Line 740: Deadlocking reserve
*) Line 757: Calling TTM Bo evict, but there might have been another 
process already evicting the buffer while
you released the lru_lock in line 739, before reserving the buffer.

And this is even before it starts to get interesting, like how you 
guarantee that when you release a buffer from
the delayed delete list, you're the only process having a reference?

Now, it's probably possible to achieve what you're trying to do, if we 
accept the lock reversal in
[1], but since I have newborn twins and I have about one hour of spare 
time a week with I now spent on this
review and I guess there are countless more hours before this can work. 
(These code paths were never tested, right?)
One of the biggest TTM reworks was to introduce the atomicity assumption 
and remove a lot of code that was
prone to deadlocks, races and buffer leaks. I'm not prepared to revert 
that work without an extremely
good reason, and "It can be done" is not such a reason.

We *need* to carefully weigh it against any benefits you have in your 
work, and you need to test these codepaths
in parallell cases subject to heavy aperture / vram thrashing and 
frequent signals causing interrupted waits.

And I think you need to present the gains in your work that can motivate 
the testing-and review time for this.

Thanks,
Thomas
Thomas Hellstrom Sept. 28, 2012, 8:10 p.m. UTC | #4
On 09/28/2012 09:42 PM, Thomas Hellstrom wrote:
> On 09/28/2012 04:14 PM, Maarten Lankhorst wrote:
>> Hey,
>>
>> Op 28-09-12 14:41, Maarten Lankhorst schreef:
>>> Documentation says that code requiring dma-buf should add it to
>>> select, so inline fallbacks are not going to be used. A link error
>>> will make it obvious what went wrong, instead of silently doing
>>> nothing at runtime.
>>>
>>
>>
>> The whole patch series is in my tree, I use stg so things might
>> move around, do not use for merging currently:
>>
>> http://cgit.freedesktop.org/~mlankhorst/linux/log/?h=v10-wip
>>
>> It contains everything in here plus the patches for ttm to make
>> it work, I use a old snapshot of drm-next + merge of nouveau as
>> base. Description of what the parts do:
>>
>> Series to fix small api issues when moving over:
>>
>> drm/ttm: Remove cpu_writers related code
>> drm/ttm: Add ttm_bo_is_reserved function
>> drm/radeon: Use ttm_bo_is_reserved
>> drm/vmwgfx: use ttm_bo_is_reserved
>>
>> drm/vmwgfx: remove use of fence_obj_args
>> drm/ttm: remove sync_obj_arg
>> drm/ttm: remove sync_obj_arg from ttm_bo_move_accel_cleanup
>> drm/ttm: remove sync_arg entirely
>>
>> drm/nouveau: unpin buffers before releasing to prevent lockdep warnings
>> drm/nouveau: add reservation to nouveau_bo_vma_del
>> drm/nouveau: add reservation to nouveau_gem_ioctl_cpu_prep
>>
>> Hey great, now we only have one user left for fence waiting before 
>> reserving,
>> lets fix that and remove fence lock:
>> ttm_bo_cleanup_refs_or_queue and ttm_bo_cleanup_refs have to reserve 
>> before
>> waiting, lets do it in the squash commit so we don't have to throw 
>> lock order
>> around everywhere:
>>
>> drm/ttm: remove fence_lock
>>
>> -- Up to this point should be mergeable now
>>
>> Then we start working on lru_lock removal slightly, this means the lru
>> list no longer is empty but can contain only reserved buffers:
>>
>> drm/ttm: do not check if list is empty in ttm_bo_force_list_clean
>> drm/ttm: move reservations for ttm_bo_cleanup_refs
>>
>> -- Still mergeable up to this point, just fixes
>>
>> Patch series from this email:
>> dma-buf: remove fallback for !CONFIG_DMA_SHARED_BUFFER
>> fence: dma-buf cross-device synchronization (v9)
>> seqno-fence: Hardware dma-buf implementation of fencing (v3)
>> reservation: cross-device reservation support
>> reservation: Add lockdep annotation and selftests
>>
>> Now hook it up to drm/ttm in a few steps:
>> usage around reservations:
>> drm/ttm: make ttm reservation calls behave like reservation calls
>> drm/ttm: use dma_reservation api
>> dma-buf: use reservations
>> drm/ttm: allow drivers to pass custom dma_reservation_objects for a bo
>>
>> then kill off the lru lock around reservation:
>> drm/ttm: remove lru_lock around ttm_bo_reserve
>> drm/ttm: simplify ttm_eu_*
>>
>> The lru_lock removal patch removes the lock around lru_lock around the
>> reservation, this will break the assumption that items on the lru list
>> and swap list can always be reserved, and this gets patched up too.
>> Is there any part in ttm you disagree with? I believe that this
>> is all mergeable, the lru_lock removal patch could be moved to before
>> the reservation parts, this might make merging easier, but I don't
>> think there is any ttm part of the series that are wrong on a conceptual
>> level.
>>
>> ~Maarten
>>
> ....From another email
>
>>> As previously discussed, I'm unfortunately not prepared to accept 
>>> removal of the reserve-lru atomicity
>>>   into the TTM code at this point.
>>> The current code is based on this assumption and removing it will 
>>> end up with
>>> efficiencies, breaking the delayed delete code and probably a 
>>> locking nightmare when trying to write
>>> new TTM code.
>> The lru lock removal patch fixed the delayed delete code, it really 
>> is not different from the current
>> situation. In fact it is more clear without the guarantee what 
>> various parts are trying to protect.
>>
>> Nothing prevents you from holding the lru_lock while trylocking,
> [1]
> While this would not cause any deadlocks, Any decent lockdep code 
> would establish lru->reserve as the locking
> order once a lru- reserve trylock succeeds, but the locking order is 
> really reserve->lru for obvious reasons, which
> means we will get a lot of lockdep errors? Yes, there are a two 
> reversals like these already in the TTM code, and I'm
> not very proud of them.
>
> leaving that guarantee intact for that part. Can you really just review
> the patch and tell me where it breaks and/or makes the code unreadable?
>
> OK. Now I'm looking at
> http://cgit.freedesktop.org/~mlankhorst/linux/tree/drivers/gpu/drm/ttm/ttm_bo.c?h=v10-wip&id=1436e2e64697c744d6e186618448e1e6354846bb 
>
>
> And let's start with a function that's seen some change, 
> ttm_mem_evict_first:
>
> *) Line 715: You're traversing a list using list_for_each() calling a 
> function that may remove the list entry
> *) Line 722: You're unlocking the lock protecting the list in the 
> middle of list traversal
> *) Line 507: WARN_ON_ONCE in a code path quite likely to get called?
> *) Line 512: sleep while atomic
> *) Line 729: Forgot to unreserve
> *) Line 730: Forgot to lock lru
> *) Line 735: Now you're restarting with the first item on the LRU 
> list. Why the loop at line 715?
> *) Line 740: Deadlocking reserve
> *) Line 757: Calling TTM Bo evict, but there might have been another 
> process already evicting the buffer while
> you released the lru_lock in line 739, before reserving the buffer.

Actually, Lines 715, 722, 730 and 735 are OK, sorry about that, but the 
others should be valid.
/Thomas
Maarten Lankhorst Sept. 29, 2012, 3:16 p.m. UTC | #5
Op 28-09-12 22:10, Thomas Hellstrom schreef:
> On 09/28/2012 09:42 PM, Thomas Hellstrom wrote:
>> On 09/28/2012 04:14 PM, Maarten Lankhorst wrote:
>>> Hey,
>>>
>>> Op 28-09-12 14:41, Maarten Lankhorst schreef:
>>>> Documentation says that code requiring dma-buf should add it to
>>>> select, so inline fallbacks are not going to be used. A link error
>>>> will make it obvious what went wrong, instead of silently doing
>>>> nothing at runtime.
>>>>
>>>
>>>
>>> The whole patch series is in my tree, I use stg so things might
>>> move around, do not use for merging currently:
>>>
>>> http://cgit.freedesktop.org/~mlankhorst/linux/log/?h=v10-wip
>>>
>>> It contains everything in here plus the patches for ttm to make
>>> it work, I use a old snapshot of drm-next + merge of nouveau as
>>> base. Description of what the parts do:
>>>
>>> Series to fix small api issues when moving over:
>>>
>>> drm/ttm: Remove cpu_writers related code
>>> drm/ttm: Add ttm_bo_is_reserved function
>>> drm/radeon: Use ttm_bo_is_reserved
>>> drm/vmwgfx: use ttm_bo_is_reserved
>>>
>>> drm/vmwgfx: remove use of fence_obj_args
>>> drm/ttm: remove sync_obj_arg
>>> drm/ttm: remove sync_obj_arg from ttm_bo_move_accel_cleanup
>>> drm/ttm: remove sync_arg entirely
>>>
>>> drm/nouveau: unpin buffers before releasing to prevent lockdep warnings
>>> drm/nouveau: add reservation to nouveau_bo_vma_del
>>> drm/nouveau: add reservation to nouveau_gem_ioctl_cpu_prep
>>>
>>> Hey great, now we only have one user left for fence waiting before reserving,
>>> lets fix that and remove fence lock:
>>> ttm_bo_cleanup_refs_or_queue and ttm_bo_cleanup_refs have to reserve before
>>> waiting, lets do it in the squash commit so we don't have to throw lock order
>>> around everywhere:
>>>
>>> drm/ttm: remove fence_lock
>>>
>>> -- Up to this point should be mergeable now
>>>
>>> Then we start working on lru_lock removal slightly, this means the lru
>>> list no longer is empty but can contain only reserved buffers:
>>>
>>> drm/ttm: do not check if list is empty in ttm_bo_force_list_clean
>>> drm/ttm: move reservations for ttm_bo_cleanup_refs
>>>
>>> -- Still mergeable up to this point, just fixes
>>>
>>> Patch series from this email:
>>> dma-buf: remove fallback for !CONFIG_DMA_SHARED_BUFFER
>>> fence: dma-buf cross-device synchronization (v9)
>>> seqno-fence: Hardware dma-buf implementation of fencing (v3)
>>> reservation: cross-device reservation support
>>> reservation: Add lockdep annotation and selftests
>>>
>>> Now hook it up to drm/ttm in a few steps:
>>> usage around reservations:
>>> drm/ttm: make ttm reservation calls behave like reservation calls
>>> drm/ttm: use dma_reservation api
>>> dma-buf: use reservations
>>> drm/ttm: allow drivers to pass custom dma_reservation_objects for a bo
>>>
>>> then kill off the lru lock around reservation:
>>> drm/ttm: remove lru_lock around ttm_bo_reserve
>>> drm/ttm: simplify ttm_eu_*
>>>
>>> The lru_lock removal patch removes the lock around lru_lock around the
>>> reservation, this will break the assumption that items on the lru list
>>> and swap list can always be reserved, and this gets patched up too.
>>> Is there any part in ttm you disagree with? I believe that this
>>> is all mergeable, the lru_lock removal patch could be moved to before
>>> the reservation parts, this might make merging easier, but I don't
>>> think there is any ttm part of the series that are wrong on a conceptual
>>> level.
>>>
>>> ~Maarten
>>>
>> ....From another email
>>
>>>> As previously discussed, I'm unfortunately not prepared to accept removal of the reserve-lru atomicity
>>>>   into the TTM code at this point.
>>>> The current code is based on this assumption and removing it will end up with
>>>> efficiencies, breaking the delayed delete code and probably a locking nightmare when trying to write
>>>> new TTM code.
>>> The lru lock removal patch fixed the delayed delete code, it really is not different from the current
>>> situation. In fact it is more clear without the guarantee what various parts are trying to protect.
>>>
>>> Nothing prevents you from holding the lru_lock while trylocking,
>> [1]
>> While this would not cause any deadlocks, Any decent lockdep code would establish lru->reserve as the locking
>> order once a lru- reserve trylock succeeds, but the locking order is really reserve->lru for obvious reasons, which
>> means we will get a lot of lockdep errors? Yes, there are a two reversals like these already in the TTM code, and I'm
>> not very proud of them.
>>
>> leaving that guarantee intact for that part. Can you really just review
>> the patch and tell me where it breaks and/or makes the code unreadable?
>>
>> OK. Now I'm looking at
>> http://cgit.freedesktop.org/~mlankhorst/linux/tree/drivers/gpu/drm/ttm/ttm_bo.c?h=v10-wip&id=1436e2e64697c744d6e186618448e1e6354846bb
>>
>> And let's start with a function that's seen some change, ttm_mem_evict_first:
>>
>> *) Line 715: You're traversing a list using list_for_each() calling a function that may remove the list entry
>> *) Line 722: You're unlocking the lock protecting the list in the middle of list traversal

>> *) Line 507: WARN_ON_ONCE in a code path quite likely to get called?
When will it get called?
ttm_bo_delayed_delete calls it if it's already on delayed destroy list.
ttm_mem_evict_first only calls it if on that list too.
ttm_bo_swapout won't call it either if not on the list.

>> *) Line 512: sleep while atomic
Oops. Order is wrong, ttm_bo_wait has to be called first before anything else. Thanks for catching it.

>> *) Line 729: Forgot to unreserve
Thanks for catching it.
>> *) Line 740: Deadlocking reserve
Agreed, I'll remove it and just give up if no buffer could be reserved from the lru list.
I was just confused since that function passed on no_wait_reserve which doesn't
make sense there.

>> *) Line 757: Calling TTM Bo evict, but there might have been another process already evicting the buffer while
>> you released the lru_lock in line 739, before reserving the buffer.
Yeah I think this part of the code needs to be taken out, which would
be more similar to the old behavior since before nothing on the lru list
was going to be contended anyway. Removing the offending code and
just returning -EBUSY regardless should be the correct fix, maintaining
the current behavior.

I'll remove the no_wait_reserve argument too, since it was unused before
and would just be confusing to keep.

> Actually, Lines 715, 722, 730 and 735 are OK, sorry about that, but the others should be valid.
> /Thomas
Is there any point in keeping ttm_bo_move_ttm's no_wait_reserve and no_wait_gpu argument?
Nothing seems to use it and it would allow removal of 1 or more of those parameters for a whole
lot of functions:

ttm_bo_move*, ttm_bo_handle_move_mem, ttm_bo_evict (though keeping no_wait_gpu),
ttm_mem_evict_first, ttm_bo_mem_force_space, ttm_bo_mem_space, ttm_bo_move_buffer,
ttm_bo_validate.

This would be a separate patch prerequisite patch though, but it should make it more
clear that there's no blocking on reservation in eviction.

~Maarten
Thomas Hellstrom Oct. 1, 2012, 8:49 a.m. UTC | #6
On 09/29/2012 05:16 PM, Maarten Lankhorst wrote:
> Op 28-09-12 22:10, Thomas Hellstrom schreef:
>> On 09/28/2012 09:42 PM, Thomas Hellstrom wrote:
>>> On 09/28/2012 04:14 PM, Maarten Lankhorst wrote:
>>>> Hey,
>>>>
>>>> Op 28-09-12 14:41, Maarten Lankhorst schreef:
>>>>> Documentation says that code requiring dma-buf should add it to
>>>>> select, so inline fallbacks are not going to be used. A link error
>>>>> will make it obvious what went wrong, instead of silently doing
>>>>> nothing at runtime.
>>>>>
>>>>
>>>> The whole patch series is in my tree, I use stg so things might
>>>> move around, do not use for merging currently:
>>>>
>>>> http://cgit.freedesktop.org/~mlankhorst/linux/log/?h=v10-wip
>>>>
>>>> It contains everything in here plus the patches for ttm to make
>>>> it work, I use a old snapshot of drm-next + merge of nouveau as
>>>> base. Description of what the parts do:
>>>>
>>>> Series to fix small api issues when moving over:
>>>>
>>>> drm/ttm: Remove cpu_writers related code
>>>> drm/ttm: Add ttm_bo_is_reserved function
>>>> drm/radeon: Use ttm_bo_is_reserved
>>>> drm/vmwgfx: use ttm_bo_is_reserved
>>>>
>>>> drm/vmwgfx: remove use of fence_obj_args
>>>> drm/ttm: remove sync_obj_arg
>>>> drm/ttm: remove sync_obj_arg from ttm_bo_move_accel_cleanup
>>>> drm/ttm: remove sync_arg entirely
>>>>
>>>> drm/nouveau: unpin buffers before releasing to prevent lockdep warnings
>>>> drm/nouveau: add reservation to nouveau_bo_vma_del
>>>> drm/nouveau: add reservation to nouveau_gem_ioctl_cpu_prep
>>>>
>>>> Hey great, now we only have one user left for fence waiting before reserving,
>>>> lets fix that and remove fence lock:
>>>> ttm_bo_cleanup_refs_or_queue and ttm_bo_cleanup_refs have to reserve before
>>>> waiting, lets do it in the squash commit so we don't have to throw lock order
>>>> around everywhere:
>>>>
>>>> drm/ttm: remove fence_lock
>>>>
>>>> -- Up to this point should be mergeable now
>>>>
>>>> Then we start working on lru_lock removal slightly, this means the lru
>>>> list no longer is empty but can contain only reserved buffers:
>>>>
>>>> drm/ttm: do not check if list is empty in ttm_bo_force_list_clean
>>>> drm/ttm: move reservations for ttm_bo_cleanup_refs
>>>>
>>>> -- Still mergeable up to this point, just fixes
>>>>
>>>> Patch series from this email:
>>>> dma-buf: remove fallback for !CONFIG_DMA_SHARED_BUFFER
>>>> fence: dma-buf cross-device synchronization (v9)
>>>> seqno-fence: Hardware dma-buf implementation of fencing (v3)
>>>> reservation: cross-device reservation support
>>>> reservation: Add lockdep annotation and selftests
>>>>
>>>> Now hook it up to drm/ttm in a few steps:
>>>> usage around reservations:
>>>> drm/ttm: make ttm reservation calls behave like reservation calls
>>>> drm/ttm: use dma_reservation api
>>>> dma-buf: use reservations
>>>> drm/ttm: allow drivers to pass custom dma_reservation_objects for a bo
>>>>
>>>> then kill off the lru lock around reservation:
>>>> drm/ttm: remove lru_lock around ttm_bo_reserve
>>>> drm/ttm: simplify ttm_eu_*
>>>>
>>>> The lru_lock removal patch removes the lock around lru_lock around the
>>>> reservation, this will break the assumption that items on the lru list
>>>> and swap list can always be reserved, and this gets patched up too.
>>>> Is there any part in ttm you disagree with? I believe that this
>>>> is all mergeable, the lru_lock removal patch could be moved to before
>>>> the reservation parts, this might make merging easier, but I don't
>>>> think there is any ttm part of the series that are wrong on a conceptual
>>>> level.
>>>>
>>>> ~Maarten
>>>>
>>> ....From another email
>>>
>>>>> As previously discussed, I'm unfortunately not prepared to accept removal of the reserve-lru atomicity
>>>>>    into the TTM code at this point.
>>>>> The current code is based on this assumption and removing it will end up with
>>>>> efficiencies, breaking the delayed delete code and probably a locking nightmare when trying to write
>>>>> new TTM code.
>>>> The lru lock removal patch fixed the delayed delete code, it really is not different from the current
>>>> situation. In fact it is more clear without the guarantee what various parts are trying to protect.
>>>>
>>>> Nothing prevents you from holding the lru_lock while trylocking,
>>> [1]
>>> While this would not cause any deadlocks, Any decent lockdep code would establish lru->reserve as the locking
>>> order once a lru- reserve trylock succeeds, but the locking order is really reserve->lru for obvious reasons, which
>>> means we will get a lot of lockdep errors? Yes, there are a two reversals like these already in the TTM code, and I'm
>>> not very proud of them.
>>>
>>> leaving that guarantee intact for that part. Can you really just review
>>> the patch and tell me where it breaks and/or makes the code unreadable?
>>>
>>> OK. Now I'm looking at
>>> http://cgit.freedesktop.org/~mlankhorst/linux/tree/drivers/gpu/drm/ttm/ttm_bo.c?h=v10-wip&id=1436e2e64697c744d6e186618448e1e6354846bb
>>>
>>> And let's start with a function that's seen some change, ttm_mem_evict_first:
>>>
>>> *) Line 715: You're traversing a list using list_for_each() calling a function that may remove the list entry
>>> *) Line 722: You're unlocking the lock protecting the list in the middle of list traversal
>>> *) Line 507: WARN_ON_ONCE in a code path quite likely to get called?
> When will it get called?
> ttm_bo_delayed_delete calls it if it's already on delayed destroy list.
> ttm_mem_evict_first only calls it if on that list too.
> ttm_bo_swapout won't call it either if not on the list.
>
Two threads calling ttm_bo_delayed_delete at the same time?

Anyway, in the swapout code there is an additional deadlock and no loop 
on reserved buffers

And this is really my point, by removing the atomicity you get into this 
kind of deadlock- and locking mess,
and TTM transits from hard to maintain to unmaintainable.

Furthermore, I've requested four times now that we bring this up on the 
design level, but you consequently
refuse to answer my questions asking me to review the code instead. With 
that kind of argumentation,
I could easily write a patch that changes your reservation objects to 
take a spinlock before reserving and a list
of functions to be called with that block held just after reserving, and 
ask you to review that, Then we could
compare the amount of stable and tested code needing a thorough 
retesting our patches touch.

So, until we've agreed on a design, This is a firm NAK from my side when 
it comes to the TTM part.

Thanks,
Thomas
Maarten Lankhorst Oct. 1, 2012, 9:47 a.m. UTC | #7
Op 28-09-12 21:42, Thomas Hellstrom schreef:
> On 09/28/2012 04:14 PM, Maarten Lankhorst wrote:
>> Hey,
>>
>> Op 28-09-12 14:41, Maarten Lankhorst schreef:
>>> Documentation says that code requiring dma-buf should add it to
>>> select, so inline fallbacks are not going to be used. A link error
>>> will make it obvious what went wrong, instead of silently doing
>>> nothing at runtime.
>>>
>>   
>>
>> The whole patch series is in my tree, I use stg so things might
>> move around, do not use for merging currently:
>>
>> http://cgit.freedesktop.org/~mlankhorst/linux/log/?h=v10-wip
>>
>> It contains everything in here plus the patches for ttm to make
>> it work, I use a old snapshot of drm-next + merge of nouveau as
>> base. Description of what the parts do:
>>
>> Series to fix small api issues when moving over:
>>
>> drm/ttm: Remove cpu_writers related code
>> drm/ttm: Add ttm_bo_is_reserved function
>> drm/radeon: Use ttm_bo_is_reserved
>> drm/vmwgfx: use ttm_bo_is_reserved
>>
>> drm/vmwgfx: remove use of fence_obj_args
>> drm/ttm: remove sync_obj_arg
>> drm/ttm: remove sync_obj_arg from ttm_bo_move_accel_cleanup
>> drm/ttm: remove sync_arg entirely
>>
>> drm/nouveau: unpin buffers before releasing to prevent lockdep warnings
>> drm/nouveau: add reservation to nouveau_bo_vma_del
>> drm/nouveau: add reservation to nouveau_gem_ioctl_cpu_prep
>>
>> Hey great, now we only have one user left for fence waiting before reserving,
>> lets fix that and remove fence lock:
>> ttm_bo_cleanup_refs_or_queue and ttm_bo_cleanup_refs have to reserve before
>> waiting, lets do it in the squash commit so we don't have to throw lock order
>> around everywhere:
>>
>> drm/ttm: remove fence_lock
>>
>> -- Up to this point should be mergeable now
>>
>> Then we start working on lru_lock removal slightly, this means the lru
>> list no longer is empty but can contain only reserved buffers:
>>
>> drm/ttm: do not check if list is empty in ttm_bo_force_list_clean
>> drm/ttm: move reservations for ttm_bo_cleanup_refs
>>
>> -- Still mergeable up to this point, just fixes
>>
>> Patch series from this email:
>> dma-buf: remove fallback for !CONFIG_DMA_SHARED_BUFFER
>> fence: dma-buf cross-device synchronization (v9)
>> seqno-fence: Hardware dma-buf implementation of fencing (v3)
>> reservation: cross-device reservation support
>> reservation: Add lockdep annotation and selftests
>>
>> Now hook it up to drm/ttm in a few steps:
>> usage around reservations:
>> drm/ttm: make ttm reservation calls behave like reservation calls
>> drm/ttm: use dma_reservation api
>> dma-buf: use reservations
>> drm/ttm: allow drivers to pass custom dma_reservation_objects for a bo
>>
>> then kill off the lru lock around reservation:
>> drm/ttm: remove lru_lock around ttm_bo_reserve
>> drm/ttm: simplify ttm_eu_*
>>
>> The lru_lock removal patch removes the lock around lru_lock around the
>> reservation, this will break the assumption that items on the lru list
>> and swap list can always be reserved, and this gets patched up too.
>> Is there any part in ttm you disagree with? I believe that this
>> is all mergeable, the lru_lock removal patch could be moved to before
>> the reservation parts, this might make merging easier, but I don't
>> think there is any ttm part of the series that are wrong on a conceptual
>> level.
>>
>> ~Maarten
>>
> ....From another email
>
>>> As previously discussed, I'm unfortunately not prepared to accept removal of the reserve-lru atomicity
>>>   into the TTM code at this point.
>>> The current code is based on this assumption and removing it will end up with
>>> efficiencies, breaking the delayed delete code and probably a locking nightmare when trying to write
>>> new TTM code.
>> The lru lock removal patch fixed the delayed delete code, it really is not different from the current
>> situation. In fact it is more clear without the guarantee what various parts are trying to protect.
>>
>> Nothing prevents you from holding the lru_lock while trylocking,
> [1]
> While this would not cause any deadlocks, Any decent lockdep code would establish lru->reserve as the locking
> order once a lru- reserve trylock succeeds, but the locking order is really reserve->lru for obvious reasons, which
> means we will get a lot of lockdep errors? Yes, there are a two reversals like these already in the TTM code, and I'm
> not very proud of them.
I was doing an evil hack where I 'released' lru_lock to lockdep before doing the annotation
for a blocking acquire, and left trylock annotations as they were. This made lockdep do the
right thing.
> And this is even before it starts to get interesting, like how you guarantee that when you release a buffer from
> the delayed delete list, you're the only process having a reference?
I thought list_kref made sure of that? Even if not the only one with a reference, the list_empty check would
make sure it would only run once. I'll fix it up again so it doesn't become a WARN_ON_ONCE, I didn't know
it could run multiple times at the time, so I'll change it back to unlikely.

> Now, it's probably possible to achieve what you're trying to do, if we accept the lock reversal in
> [1], but since I have newborn twins and I have about one hour of spare time a week which I now spend on this
> review and I guess there are countless more hours before this can work. (These code paths were never tested, right?)
> One of the biggest TTM reworks was to introduce the atomicity assumption and remove a lot of code that was
> prone to deadlocks, races and buffer leaks. I'm not prepared to revert that work without an extremely
> good reason, and "It can be done" is not such a reason.
Deepest apologies, I only did a quick glance at the code part of this email, overlooked this part since
I was swamped with other things and meant to do a full reply on monday. I didn't mean to make it sound
like I only cared blindly about merging my code, just wanted to find a good solution.
> We *need* to carefully weigh it against any benefits you have in your work, and you need to test these codepaths
> in parallel cases subject to heavy aperture / vram thrashing and frequent signals causing interrupted waits.
Agreed, is there already a tester for this or should I write my own?
> And I think you need to present the gains in your work that can motivate the testing-and review time for this.
Agreed.

~Maarten
Thomas Hellstrom Oct. 2, 2012, 6:46 a.m. UTC | #8
On 10/01/2012 11:47 AM, Maarten Lankhorst wrote:
> Op 28-09-12 21:42, Thomas Hellstrom schreef:
>> On 09/28/2012 04:14 PM, Maarten Lankhorst wrote:
>>> Hey,
>>>
>>> Op 28-09-12 14:41, Maarten Lankhorst schreef:
>>>> Documentation says that code requiring dma-buf should add it to
>>>> select, so inline fallbacks are not going to be used. A link error
>>>> will make it obvious what went wrong, instead of silently doing
>>>> nothing at runtime.
>>>>
>>>    
>>>
>>> The whole patch series is in my tree, I use stg so things might
>>> move around, do not use for merging currently:
>>>
>>> http://cgit.freedesktop.org/~mlankhorst/linux/log/?h=v10-wip
>>>
>>> It contains everything in here plus the patches for ttm to make
>>> it work, I use a old snapshot of drm-next + merge of nouveau as
>>> base. Description of what the parts do:
>>>
>>> Series to fix small api issues when moving over:
>>>
>>> drm/ttm: Remove cpu_writers related code
>>> drm/ttm: Add ttm_bo_is_reserved function
>>> drm/radeon: Use ttm_bo_is_reserved
>>> drm/vmwgfx: use ttm_bo_is_reserved
>>>
>>> drm/vmwgfx: remove use of fence_obj_args
>>> drm/ttm: remove sync_obj_arg
>>> drm/ttm: remove sync_obj_arg from ttm_bo_move_accel_cleanup
>>> drm/ttm: remove sync_arg entirely
>>>
>>> drm/nouveau: unpin buffers before releasing to prevent lockdep warnings
>>> drm/nouveau: add reservation to nouveau_bo_vma_del
>>> drm/nouveau: add reservation to nouveau_gem_ioctl_cpu_prep
>>>
>>> Hey great, now we only have one user left for fence waiting before reserving,
>>> lets fix that and remove fence lock:
>>> ttm_bo_cleanup_refs_or_queue and ttm_bo_cleanup_refs have to reserve before
>>> waiting, lets do it in the squash commit so we don't have to throw lock order
>>> around everywhere:
>>>
>>> drm/ttm: remove fence_lock
>>>
>>> -- Up to this point should be mergeable now
>>>
>>> Then we start working on lru_lock removal slightly, this means the lru
>>> list no longer is empty but can contain only reserved buffers:
>>>
>>> drm/ttm: do not check if list is empty in ttm_bo_force_list_clean
>>> drm/ttm: move reservations for ttm_bo_cleanup_refs
>>>
>>> -- Still mergeable up to this point, just fixes
>>>
>>> Patch series from this email:
>>> dma-buf: remove fallback for !CONFIG_DMA_SHARED_BUFFER
>>> fence: dma-buf cross-device synchronization (v9)
>>> seqno-fence: Hardware dma-buf implementation of fencing (v3)
>>> reservation: cross-device reservation support
>>> reservation: Add lockdep annotation and selftests
>>>
>>> Now hook it up to drm/ttm in a few steps:
>>> usage around reservations:
>>> drm/ttm: make ttm reservation calls behave like reservation calls
>>> drm/ttm: use dma_reservation api
>>> dma-buf: use reservations
>>> drm/ttm: allow drivers to pass custom dma_reservation_objects for a bo
>>>
>>> then kill off the lru lock around reservation:
>>> drm/ttm: remove lru_lock around ttm_bo_reserve
>>> drm/ttm: simplify ttm_eu_*
>>>
>>> The lru_lock removal patch removes the lock around lru_lock around the
>>> reservation, this will break the assumption that items on the lru list
>>> and swap list can always be reserved, and this gets patched up too.
>>> Is there any part in ttm you disagree with? I believe that this
>>> is all mergeable, the lru_lock removal patch could be moved to before
>>> the reservation parts, this might make merging easier, but I don't
>>> think there is any ttm part of the series that are wrong on a conceptual
>>> level.
>>>
>>> ~Maarten
>>>
>> ....From another email
>>
>>>> As previously discussed, I'm unfortunately not prepared to accept removal of the reserve-lru atomicity
>>>>    into the TTM code at this point.
>>>> The current code is based on this assumption and removing it will end up with
>>>> efficiencies, breaking the delayed delete code and probably a locking nightmare when trying to write
>>>> new TTM code.
>>> The lru lock removal patch fixed the delayed delete code, it really is not different from the current
>>> situation. In fact it is more clear without the guarantee what various parts are trying to protect.
>>>
>>> Nothing prevents you from holding the lru_lock while trylocking,
>> [1]
>> While this would not cause any deadlocks, Any decent lockdep code would establish lru->reserve as the locking
>> order once a lru- reserve trylock succeeds, but the locking order is really reserve->lru for obvious reasons, which
>> means we will get a lot of lockdep errors? Yes, there are a two reversals like these already in the TTM code, and I'm
>> not very proud of them.
> I was doing a evil hack where I 'released' lru_lock to lockdep before doing the annotation
> for a blocking acquire, and left trylock annotations as they were. This made lockdep do the
> right thing.
I've never looked into how lockdep works. Is this something that can be 
done permanently or just for testing
purposes? Although not related to this, is it possible to do something 
similar to the trylock reversal in the
fault() code where mmap_sem() and reserve() change order using a reserve 
trylock?

>> And this is even before it starts to get interesting, like how you guarantee that when you release a buffer from
>> the delayed delete list, you're the only process having a reference?
> l thought list_kref made sure of that? Even if not the only one with a reference, the list_empty check would
> make sure it would only run once. I'l fix it up again so it doesn't become a WARN_ON_ONCE, I didn't know
> it could run multiple times at the time, so I'll change it back to unlikely.
Yes, you're probably right. A case we've seen earlier (before the 
atomicity was introduced) was one or more threads
picked up a bo from the LRU list and prepared to reserve it, while the 
delayed delete function removed them from the
ddestroy list. Then the first thread queued an accelerated eviction, 
adding a new fence and the bo was left hanging.
I don't think that can happen with the reserve trylocks within the lru 
spinlock, though.

>
>> Now, it's probably possible to achieve what you're trying to do, if we accept the lock reversal in
>> [1], but since I have newborn twins and I have about one hour of spare time a week with I now spent on this
>> review and I guess there are countless more hours before this can work. (These code paths were never tested, right?)
>> One of the biggest TTM reworks was to introduce the atomicity assumption and remove a lot of code that was
>> prone to deadlocks, races and buffer leaks. I'm not prepared to revert that work without an extremely
>> good reason, and "It can be done" is not such a reason.
> Deepest apologies, I only did a quick glance at the code part of this email, overlooked this part since
> I was swamped with other things and meant to do a full reply on monday. I didn't mean to make it sound
> like I only cared blindly about merging my code, just wanted to find a good solution.
>> We *need* to carefully weigh it against any benefits you have in your work, and you need to test these codepaths
>> in parallell cases subject to heavy aperture / vram thrashing and frequent signals causing interrupted waits.
> Agreed, is there already a tester for this or should I write my own?
Although I think it would be nice to have a highly parallel execbuf 
implementation on an extremely simple software GPU,
what I typically do is to take an existing driver (none of them 
implements parallel reserve yet, but vmware is about to soon)

a) Use an application that frequently recycles buffers, so that the 
delayed-delete code gets busy (Perhaps google-earth, panning over a 
landscape not too high above the earth)
b) Hack the drivers aperture / vram sizes to something small, so that 
you can see that the eviction code gets hit.
c) Adjust the memory limits in TTM sysfs memory accounting (You can 
write and change on the fly), so that you can see that the swapping
code gets hit.
d) Code a signal delivery so that every 20th time or so the eviction 
code is about to wait, it receives an -ERESTARTSYS with a harmless signal.
e) start another instance of google-earth.


/Thomas

>> And I think you need to present the gains in your work that can motivate the testing-and review time for this.
> Agreed.
>
> ~Maarten
Daniel Vetter Oct. 2, 2012, 8:03 a.m. UTC | #9
On Tue, Oct 02, 2012 at 08:46:32AM +0200, Thomas Hellstrom wrote:
> On 10/01/2012 11:47 AM, Maarten Lankhorst wrote:
> >I was doing a evil hack where I 'released' lru_lock to lockdep before doing the annotation
> >for a blocking acquire, and left trylock annotations as they were. This made lockdep do the
> >right thing.
> I've never looked into how lockdep works. Is this something that can
> be done permanently or just for testing
> purposes? Although not related to this, is it possible to do
> something similar to the trylock reversal in the
> fault() code where mmap_sem() and reserve() change order using a
> reserve trylock?

lockdep just requires a bunch of annotations, is a compile-time configure
option CONFIG_PROVE_LOCKING and if disabled, has zero overhead. And it's
rather awesome in detecting deadlocks and handling crazy locking schemes
correctly:
- correctly handles trylocks
- correctly handles nested locking (i.e. grabbing a global lock, then
  grabbing subordinate locks in an unordered sequence since the global
  lock ensures that no deadlocks can happen).
- any kinds of inversions with special contexts like hardirq, softirq
- same for page-reclaim, i.e. it will yell if you could (potentially)
  deadlock because your shrinker grabs a lock that you hold while calling
  kmalloc.
- there are special annotates for various subsystems, e.g. to check for
  del_timer_sync vs. locks held by that timer. Or the console_lock
  annotations I've just recently submitted.
- all that with a really flexible set of annotation primitives that afaics
  should work for almost any insane locking scheme. The fact that Maarten
  could come up with proper reservation annotations without any changes to
  lockdep testifies this (he only had to fix a tiny thing to make it a bit
  more strict in a corner case).

In short I think it's made of awesome. The only downside is that it lacks
documentation, you have to read the code to understand it :(

The reason I've suggested to Maarten to abolish the trylock_reservation
within the lru_lock is that in that way lockdep only ever sees the
trylock, and hence is less strict about complainig about deadlocks. But
semantically it's an unconditional reserve. Maarten had some horrible
hacks that leaked the lockdep annotations out of the new reservation code,
which allowed ttm to be properly annotated.  But those also reduced the
usefulness for any other users of the reservation code, and so Maarten
looked into whether he could remove that trylock dance in ttm.

Imo having excellent lockdep support for cross-device reservations is a
requirment, and ending up with less strict annotations for either ttm
based drivers or other drivers is not good. And imo the ugly layering that
Maarten had in his first proof-of-concept also indicates that something is
amiss in the design.

[I'll refrain from comment on ttm specifics to not make a fool of me ;-)]

> >>And this is even before it starts to get interesting, like how you guarantee that when you release a buffer from
> >>the delayed delete list, you're the only process having a reference?
> >l thought list_kref made sure of that? Even if not the only one with a reference, the list_empty check would
> >make sure it would only run once. I'l fix it up again so it doesn't become a WARN_ON_ONCE, I didn't know
> >it could run multiple times at the time, so I'll change it back to unlikely.
> Yes, you've probably right. A case we've seen earlier (before the
> atomicity was introduced) was one or more threads
> picked up a bo from the LRU list and prepared to reserve it, while
> the delayed delete function removed them from the
> ddestroy list. Then the first thread queued an accelerated eviction,
> adding a new fence and the bo was left hanging.
> I don't think that can happen with the reserve trylocks within the
> lru spinlock, though.
> 
> >
> >>Now, it's probably possible to achieve what you're trying to do, if we accept the lock reversal in
> >>[1], but since I have newborn twins and I have about one hour of spare time a week with I now spent on this
> >>review and I guess there are countless more hours before this can work. (These code paths were never tested, right?)
> >>One of the biggest TTM reworks was to introduce the atomicity assumption and remove a lot of code that was
> >>prone to deadlocks, races and buffer leaks. I'm not prepared to revert that work without an extremely
> >>good reason, and "It can be done" is not such a reason.
> >Deepest apologies, I only did a quick glance at the code part of this email, overlooked this part since
> >I was swamped with other things and meant to do a full reply on monday. I didn't mean to make it sound
> >like I only cared blindly about merging my code, just wanted to find a good solution.
> >>We *need* to carefully weigh it against any benefits you have in your work, and you need to test these codepaths
> >>in parallell cases subject to heavy aperture / vram thrashing and frequent signals causing interrupted waits.
> >Agreed, is there already a tester for this or should I write my own?
> Although I think it would be nice to have a highly parallel execbuf
> implementation on an extremely simple software GPU,
> what I typically do is to take an existing driver (none of them
> implements parallel reserve yet, but vmware is about to soon)
> 
> a) Use an application that frequently recycles buffers, so that the
> delayed-delete code gets busy (Perhaps google-earth, panning over a
> landscape not too high above the earth)
> b) Hack the drivers aperture / vram sizes to something small, so
> that you can see that the eviction code gets hit.
> c) Adjust the memory limits in TTM sysfs memory accounting (You can
> write and change on the fly), so that you can see that the swapping
> code gets hit.
> d) Code a signal delivery so that every 20th time or so the eviction
> code is about to wait, it receives an -ERESTARTSYS with a harmless
> signal.
> e) start another instance of google-earth.

tbh, this should be a simple testsuite that you can just run. Like we're
(slowly) building up for drm/i915 in intel-gpu-tools. At least that'll be
one of the merge requirements for i915.ko.

Cheers, Daniel
Thomas Hellstrom Oct. 3, 2012, 7:45 a.m. UTC | #10
On 10/02/2012 10:03 AM, Daniel Vetter wrote:
> On Tue, Oct 02, 2012 at 08:46:32AM +0200, Thomas Hellstrom wrote:
>> On 10/01/2012 11:47 AM, Maarten Lankhorst wrote:
>>> I was doing a evil hack where I 'released' lru_lock to lockdep before doing the annotation
>>> for a blocking acquire, and left trylock annotations as they were. This made lockdep do the
>>> right thing.
>> I've never looked into how lockdep works. Is this something that can
>> be done permanently or just for testing
>> purposes? Although not related to this, is it possible to do
>> something similar to the trylock reversal in the
>> fault() code where mmap_sem() and reserve() change order using a
>> reserve trylock?
> lockdep just requires a bunch of annotations, is a compile-time configure
> option CONFIG_PROVE_LOCKING and if disabled, has zero overhead. And it's
> rather awesome in detected deadlocks and handling crazy locking schemes
> correctly:
> - correctly handles trylocks
> - correctly handles nested locking (i.e. grabbing a global lock, then
>    grabbing subordinate locks in an unordered sequence since the global
>    lock ensures that no deadlocks can happen).
> - any kinds of inversions with special contexts like hardirq, softirq
> - same for page-reclaim, i.e. it will yell if you could (potentially)
>    deadlock because your shrinker grabs a lock that you hold while calling
>    kmalloc.
> - there are special annotates for various subsystems, e.g. to check for
>    del_timer_sync vs. locks held by that timer. Or the console_lock
>    annotations I've just recently submitted.
> - all that with a really flexible set of annotation primitives that afaics
>    should work for almost any insane locking scheme. The fact that Maarten
>    could come up with proper reservation annotations without any changes to
>    lockdep testifies this (he only had to fix a tiny thing to make it a bit
>    more strict in a corner case).
>
> In short I think it's made of awesome. The only downside is that it lacks
> documentation, you have to read the code to understand it :(
>
> The reason I've suggested to Maarten to abolish the trylock_reservation
> within the lru_lock is that in that way lockdep only ever sees the
> trylock, and hence is less strict about complainig about deadlocks. But
> semantically it's an unconditional reserve. Maarten had some horrible
> hacks that leaked the lockdep annotations out of the new reservation code,
> which allowed ttm to be properly annotated.  But those also reduced the
> usefulness for any other users of the reservation code, and so Maarten
> looked into whether he could remove that trylock dance in ttm.
>
> Imo having excellent lockdep support for cross-device reservations is a
> requirment, and ending up with less strict annotations for either ttm
> based drivers or other drivers is not good. And imo the ugly layering that
> Maarten had in his first proof-of-concept also indicates that something is
> amiss in the design.
>
>
So if I understand you correctly, the reservation changes in TTM are 
motivated by the
fact that otherwise, in the generic reservation code, lockdep can only be
annotated for a trylock and not a waiting lock, when it *is* in fact a 
waiting lock.

I'm completely unfamiliar with setting up lockdep annotations, but the 
only place a
deadlock might occur is if the trylock fails and we do a 
wait_for_unreserve().
Isn't it possible to annotate the call to wait_for_unreserve() just like 
an interruptible waiting lock
(that is always interrupted, but at least any deadlock will be caught?).

/Thomas
Daniel Vetter Oct. 3, 2012, 7:54 a.m. UTC | #11
On Wed, Oct 3, 2012 at 9:45 AM, Thomas Hellstrom <thellstrom@vmware.com> wrote:
> On 10/02/2012 10:03 AM, Daniel Vetter wrote:
>>
>> On Tue, Oct 02, 2012 at 08:46:32AM +0200, Thomas Hellstrom wrote:
>>>
>>> On 10/01/2012 11:47 AM, Maarten Lankhorst wrote:
>>>>
>>>> I was doing a evil hack where I 'released' lru_lock to lockdep before
>>>> doing the annotation
>>>> for a blocking acquire, and left trylock annotations as they were. This
>>>> made lockdep do the
>>>> right thing.
>>>
>>> I've never looked into how lockdep works. Is this something that can
>>> be done permanently or just for testing
>>> purposes? Although not related to this, is it possible to do
>>> something similar to the trylock reversal in the
>>> fault() code where mmap_sem() and reserve() change order using a
>>> reserve trylock?
>>
>> lockdep just requires a bunch of annotations, is a compile-time configure
>> option CONFIG_PROVE_LOCKING and if disabled, has zero overhead. And it's
>> rather awesome in detected deadlocks and handling crazy locking schemes
>> correctly:
>> - correctly handles trylocks
>> - correctly handles nested locking (i.e. grabbing a global lock, then
>>    grabbing subordinate locks in an unordered sequence since the global
>>    lock ensures that no deadlocks can happen).
>> - any kinds of inversions with special contexts like hardirq, softirq
>> - same for page-reclaim, i.e. it will yell if you could (potentially)
>>    deadlock because your shrinker grabs a lock that you hold while calling
>>    kmalloc.
>> - there are special annotates for various subsystems, e.g. to check for
>>    del_timer_sync vs. locks held by that timer. Or the console_lock
>>    annotations I've just recently submitted.
>> - all that with a really flexible set of annotation primitives that afaics
>>    should work for almost any insane locking scheme. The fact that Maarten
>>    could come up with proper reservation annotations without any changes
>> to
>>    lockdep testifies this (he only had to fix a tiny thing to make it a
>> bit
>>    more strict in a corner case).
>>
>> In short I think it's made of awesome. The only downside is that it lacks
>> documentation, you have to read the code to understand it :(
>>
>> The reason I've suggested to Maarten to abolish the trylock_reservation
>> within the lru_lock is that in that way lockdep only ever sees the
>> trylock, and hence is less strict about complainig about deadlocks. But
>> semantically it's an unconditional reserve. Maarten had some horrible
>> hacks that leaked the lockdep annotations out of the new reservation code,
>> which allowed ttm to be properly annotated.  But those also reduced the
>> usefulness for any other users of the reservation code, and so Maarten
>> looked into whether he could remove that trylock dance in ttm.
>>
>> Imo having excellent lockdep support for cross-device reservations is a
>> requirment, and ending up with less strict annotations for either ttm
>> based drivers or other drivers is not good. And imo the ugly layering that
>> Maarten had in his first proof-of-concept also indicates that something is
>> amiss in the design.
>>
>>
> So if I understand you correctly, the reservation changes in TTM are
> motivated by the
> fact that otherwise, in the generic reservation code, lockdep can only be
> annotated for a trylock and not a waiting lock, when it *is* in fact a
> waiting lock.
>
> I'm completely unfamiliar with setting up lockdep annotations, but the only
> place a
> deadlock might occur is if the trylock fails and we do a
> wait_for_unreserve().
> Isn't it possible to annotate the call to wait_for_unreserve() just like an
> interruptible waiting lock
> (that is always interrupted, but at least any deadlock will be catched?).

Hm, I have to admit that idea hasn't crossed my mind, but it's indeed
a hole in our current reservation lockdep annotations - since we're
blocking for the unreserve, other threads could potential block
waiting on us to release a lock we're holding already, resulting in a
deadlock.

Since no other locking primitive that I know of has this
wait_for_unlocked interface, I don't know how we could map this in
lockdep. One idea is to grab the lock and release it again immediately
(only in the annotations, not the real lock ofc). But I need to check
the lockdep code to see whether that doesn't trip it up.

Cheers, Daniel
Maarten Lankhorst Oct. 3, 2012, 7:57 a.m. UTC | #12
Hey,

Op 03-10-12 09:45, Thomas Hellstrom schreef:
> On 10/02/2012 10:03 AM, Daniel Vetter wrote:
>> On Tue, Oct 02, 2012 at 08:46:32AM +0200, Thomas Hellstrom wrote:
>>> On 10/01/2012 11:47 AM, Maarten Lankhorst wrote:
>>>> I was doing a evil hack where I 'released' lru_lock to lockdep before doing the annotation
>>>> for a blocking acquire, and left trylock annotations as they were. This made lockdep do the
>>>> right thing.
>>> I've never looked into how lockdep works. Is this something that can
>>> be done permanently or just for testing
>>> purposes? Although not related to this, is it possible to do
>>> something similar to the trylock reversal in the
>>> fault() code where mmap_sem() and reserve() change order using a
>>> reserve trylock?
>> lockdep just requires a bunch of annotations, is a compile-time configure
>> option CONFIG_PROVE_LOCKING and if disabled, has zero overhead. And it's
>> rather awesome in detected deadlocks and handling crazy locking schemes
>> correctly:
>> - correctly handles trylocks
>> - correctly handles nested locking (i.e. grabbing a global lock, then
>>    grabbing subordinate locks in an unordered sequence since the global
>>    lock ensures that no deadlocks can happen).
>> - any kinds of inversions with special contexts like hardirq, softirq
>> - same for page-reclaim, i.e. it will yell if you could (potentially)
>>    deadlock because your shrinker grabs a lock that you hold while calling
>>    kmalloc.
>> - there are special annotates for various subsystems, e.g. to check for
>>    del_timer_sync vs. locks held by that timer. Or the console_lock
>>    annotations I've just recently submitted.
>> - all that with a really flexible set of annotation primitives that afaics
>>    should work for almost any insane locking scheme. The fact that Maarten
>>    could come up with proper reservation annotations without any changes to
>>    lockdep testifies this (he only had to fix a tiny thing to make it a bit
>>    more strict in a corner case).
>>
>> In short I think it's made of awesome. The only downside is that it lacks
>> documentation, you have to read the code to understand it :(
>>
>> The reason I've suggested to Maarten to abolish the trylock_reservation
>> within the lru_lock is that in that way lockdep only ever sees the
>> trylock, and hence is less strict about complaining about deadlocks. But
>> semantically it's an unconditional reserve. Maarten had some horrible
>> hacks that leaked the lockdep annotations out of the new reservation code,
>> which allowed ttm to be properly annotated.  But those also reduced the
>> usefulness for any other users of the reservation code, and so Maarten
>> looked into whether he could remove that trylock dance in ttm.
>>
>> Imo having excellent lockdep support for cross-device reservations is a
>> requirement, and ending up with less strict annotations for either ttm
>> based drivers or other drivers is not good. And imo the ugly layering that
>> Maarten had in his first proof-of-concept also indicates that something is
>> amiss in the design.
>>
>>
> So if I understand you correctly, the reservation changes in TTM are motivated by the
> fact that otherwise, in the generic reservation code, lockdep can only be
> annotated for a trylock and not a waiting lock, when it *is* in fact a waiting lock.
>
> I'm completely unfamiliar with setting up lockdep annotations, but the only place a
> deadlock might occur is if the trylock fails and we do a wait_for_unreserve().
> Isn't it possible to annotate the call to wait_for_unreserve() just like an interruptible waiting lock
> (that is always interrupted, but at least any deadlock will be caught?).
That would not find all bugs, lockdep is meant to find even theoretical bugs, so annotating it as a
waiting lock makes more sense. Otherwise lockdep will only barf when the initial trylock fails.

~Maarten
Thomas Hellstrom Oct. 3, 2012, 8:35 a.m. UTC | #13
On 10/03/2012 09:57 AM, Maarten Lankhorst wrote:
> Hey,
>
> Op 03-10-12 09:45, Thomas Hellstrom schreef:
>> On 10/02/2012 10:03 AM, Daniel Vetter wrote:
>>> On Tue, Oct 02, 2012 at 08:46:32AM +0200, Thomas Hellstrom wrote:
>>>> On 10/01/2012 11:47 AM, Maarten Lankhorst wrote:
>>>>> I was doing a evil hack where I 'released' lru_lock to lockdep before doing the annotation
>>>>> for a blocking acquire, and left trylock annotations as they were. This made lockdep do the
>>>>> right thing.
>>>> I've never looked into how lockdep works. Is this something that can
>>>> be done permanently or just for testing
>>>> purposes? Although not related to this, is it possible to do
>>>> something similar to the trylock reversal in the
>>>> fault() code where mmap_sem() and reserve() change order using a
>>>> reserve trylock?
>>> lockdep just requires a bunch of annotations, is a compile-time configure
>>> option CONFIG_PROVE_LOCKING and if disabled, has zero overhead. And it's
>>> rather awesome in detected deadlocks and handling crazy locking schemes
>>> correctly:
>>> - correctly handles trylocks
>>> - correctly handles nested locking (i.e. grabbing a global lock, then
>>>     grabbing subordinate locks in an unordered sequence since the global
>>>     lock ensures that no deadlocks can happen).
>>> - any kinds of inversions with special contexts like hardirq, softirq
>>> - same for page-reclaim, i.e. it will yell if you could (potentially)
>>>     deadlock because your shrinker grabs a lock that you hold while calling
>>>     kmalloc.
>>> - there are special annotates for various subsystems, e.g. to check for
>>>     del_timer_sync vs. locks held by that timer. Or the console_lock
>>>     annotations I've just recently submitted.
>>> - all that with a really flexible set of annotation primitives that afaics
>>>     should work for almost any insane locking scheme. The fact that Maarten
>>>     could come up with proper reservation annotations without any changes to
>>>     lockdep testifies this (he only had to fix a tiny thing to make it a bit
>>>     more strict in a corner case).
>>>
>>> In short I think it's made of awesome. The only downside is that it lacks
>>> documentation, you have to read the code to understand it :(
>>>
>>> The reason I've suggested to Maarten to abolish the trylock_reservation
>>> within the lru_lock is that in that way lockdep only ever sees the
>>> trylock, and hence is less strict about complainig about deadlocks. But
>>> semantically it's an unconditional reserve. Maarten had some horrible
>>> hacks that leaked the lockdep annotations out of the new reservation code,
>>> which allowed ttm to be properly annotated.  But those also reduced the
>>> usefulness for any other users of the reservation code, and so Maarten
>>> looked into whether he could remove that trylock dance in ttm.
>>>
>>> Imo having excellent lockdep support for cross-device reservations is a
>>> requirment, and ending up with less strict annotations for either ttm
>>> based drivers or other drivers is not good. And imo the ugly layering that
>>> Maarten had in his first proof-of-concept also indicates that something is
>>> amiss in the design.
>>>
>>>
>> So if I understand you correctly, the reservation changes in TTM are motivated by the
>> fact that otherwise, in the generic reservation code, lockdep can only be
>> annotated for a trylock and not a waiting lock, when it *is* in fact a waiting lock.
>>
>> I'm completely unfamiliar with setting up lockdep annotations, but the only place a
>> deadlock might occur is if the trylock fails and we do a wait_for_unreserve().
>> Isn't it possible to annotate the call to wait_for_unreserve() just like an interruptible waiting lock
>> (that is always interrupted, but at least any deadlock will be catched?).
> That would not find all bugs, lockdep is meant to find even theoretical bugs, so annotating it as a
> waiting lock makes more sense. Otherwise lockdep will only barf when the initial trylock fails.

Really, starting a waiting reserve with a call to wait_for_unreserve() 
if CONFIG_LOCKDEP is set
shouldn't be that bad :)? That would also catch the theoretical errors.
In fact, it should suffice with annotating for such a call?

/Thomas

> ~Maarten
>
Thomas Hellstrom Oct. 3, 2012, 8:37 a.m. UTC | #14
On 10/03/2012 09:54 AM, Daniel Vetter wrote:
> On Wed, Oct 3, 2012 at 9:45 AM, Thomas Hellstrom <thellstrom@vmware.com> wrote:
>> On 10/02/2012 10:03 AM, Daniel Vetter wrote:
>>> On Tue, Oct 02, 2012 at 08:46:32AM +0200, Thomas Hellstrom wrote:
>>>> On 10/01/2012 11:47 AM, Maarten Lankhorst wrote:
>>>>> I was doing a evil hack where I 'released' lru_lock to lockdep before
>>>>> doing the annotation
>>>>> for a blocking acquire, and left trylock annotations as they were. This
>>>>> made lockdep do the
>>>>> right thing.
>>>> I've never looked into how lockdep works. Is this something that can
>>>> be done permanently or just for testing
>>>> purposes? Although not related to this, is it possible to do
>>>> something similar to the trylock reversal in the
>>>> fault() code where mmap_sem() and reserve() change order using a
>>>> reserve trylock?
>>> lockdep just requires a bunch of annotations, is a compile-time configure
>>> option CONFIG_PROVE_LOCKING and if disabled, has zero overhead. And it's
>>> rather awesome in detected deadlocks and handling crazy locking schemes
>>> correctly:
>>> - correctly handles trylocks
>>> - correctly handles nested locking (i.e. grabbing a global lock, then
>>>     grabbing subordinate locks in an unordered sequence since the global
>>>     lock ensures that no deadlocks can happen).
>>> - any kinds of inversions with special contexts like hardirq, softirq
>>> - same for page-reclaim, i.e. it will yell if you could (potentially)
>>>     deadlock because your shrinker grabs a lock that you hold while calling
>>>     kmalloc.
>>> - there are special annotates for various subsystems, e.g. to check for
>>>     del_timer_sync vs. locks held by that timer. Or the console_lock
>>>     annotations I've just recently submitted.
>>> - all that with a really flexible set of annotation primitives that afaics
>>>     should work for almost any insane locking scheme. The fact that Maarten
>>>     could come up with proper reservation annotations without any changes
>>> to
>>>     lockdep testifies this (he only had to fix a tiny thing to make it a
>>> bit
>>>     more strict in a corner case).
>>>
>>> In short I think it's made of awesome. The only downside is that it lacks
>>> documentation, you have to read the code to understand it :(
>>>
>>> The reason I've suggested to Maarten to abolish the trylock_reservation
>>> within the lru_lock is that in that way lockdep only ever sees the
>>> trylock, and hence is less strict about complainig about deadlocks. But
>>> semantically it's an unconditional reserve. Maarten had some horrible
>>> hacks that leaked the lockdep annotations out of the new reservation code,
>>> which allowed ttm to be properly annotated.  But those also reduced the
>>> usefulness for any other users of the reservation code, and so Maarten
>>> looked into whether he could remove that trylock dance in ttm.
>>>
>>> Imo having excellent lockdep support for cross-device reservations is a
>>> requirment, and ending up with less strict annotations for either ttm
>>> based drivers or other drivers is not good. And imo the ugly layering that
>>> Maarten had in his first proof-of-concept also indicates that something is
>>> amiss in the design.
>>>
>>>
>> So if I understand you correctly, the reservation changes in TTM are
>> motivated by the
>> fact that otherwise, in the generic reservation code, lockdep can only be
>> annotated for a trylock and not a waiting lock, when it *is* in fact a
>> waiting lock.
>>
>> I'm completely unfamiliar with setting up lockdep annotations, but the only
>> place a
>> deadlock might occur is if the trylock fails and we do a
>> wait_for_unreserve().
>> Isn't it possible to annotate the call to wait_for_unreserve() just like an
>> interruptible waiting lock
>> (that is always interrupted, but at least any deadlock will be catched?).
> Hm, I have to admit that idea hasn't crossed my mind, but it's indeed
> a hole in our current reservation lockdep annotations - since we're
> blocking for the unreserve, other threads could potentially block
> waiting on us to release a lock we're holding already, resulting in a
> deadlock.
>
> Since no other locking primitive that I know of has this
> wait_for_unlocked interface, I don't know how we could map this in
> lockdep. One idea is to grab the lock and release it again immediately
> (only in the annotations, not the real lock ofc). But I need to check
> the lockdep code to see whether that doesn't trip it up.

I imagine doing the same as mutex_lock_interruptible() does in the
interrupted path should work...

>
> Cheers, Daniel
Daniel Vetter Oct. 3, 2012, 8:53 a.m. UTC | #15
On Wed, Oct 3, 2012 at 10:37 AM, Thomas Hellstrom <thellstrom@vmware.com> wrote:
>>> So if I understand you correctly, the reservation changes in TTM are
>>> motivated by the
>>> fact that otherwise, in the generic reservation code, lockdep can only be
>>> annotated for a trylock and not a waiting lock, when it *is* in fact a
>>> waiting lock.
>>>
>>> I'm completely unfamiliar with setting up lockdep annotations, but the
>>> only
>>> place a
>>> deadlock might occur is if the trylock fails and we do a
>>> wait_for_unreserve().
>>> Isn't it possible to annotate the call to wait_for_unreserve() just like
>>> an
>>> interruptible waiting lock
>>> (that is always interrupted, but at least any deadlock will be catched?).
>>
>> Hm, I have to admit that idea hasn't crossed my mind, but it's indeed
>> a hole in our current reservation lockdep annotations - since we're
>> blocking for the unreserve, other threads could potential block
>> waiting on us to release a lock we're holding already, resulting in a
>> deadlock.
>>
>> Since no other locking primitive that I know of has this
>> wait_for_unlocked interface, I don't know how we could map this in
>> lockdep. One idea is to grab the lock and release it again immediately
>> (only in the annotations, not the real lock ofc). But I need to check
>> the lockdep code to see whether that doesn't trip it up.
>
>
> I imagine doing the same as mutex_lock_interruptible() does in the
> interrupted path should work...

It simply calls the unlock lockdep annotation function if it breaks
out. So doing a lock/unlock cycle in wait_unreserve should do what we
want.

And to properly annotate the ttm reserve paths we could just add an
unconditional wait_unreserve call at the beginning like you suggested
(maybe with #ifdef CONFIG_PROVE_LOCKING in case ppl freak out about
the added atomic read in the uncontended case).
-Daniel
Thomas Hellstrom Oct. 3, 2012, 10:53 a.m. UTC | #16
On 10/03/2012 10:53 AM, Daniel Vetter wrote:
> On Wed, Oct 3, 2012 at 10:37 AM, Thomas Hellstrom <thellstrom@vmware.com> wrote:
>>>> So if I understand you correctly, the reservation changes in TTM are
>>>> motivated by the
>>>> fact that otherwise, in the generic reservation code, lockdep can only be
>>>> annotated for a trylock and not a waiting lock, when it *is* in fact a
>>>> waiting lock.
>>>>
>>>> I'm completely unfamiliar with setting up lockdep annotations, but the
>>>> only
>>>> place a
>>>> deadlock might occur is if the trylock fails and we do a
>>>> wait_for_unreserve().
>>>> Isn't it possible to annotate the call to wait_for_unreserve() just like
>>>> an
>>>> interruptible waiting lock
>>>> (that is always interrupted, but at least any deadlock will be catched?).
>>> Hm, I have to admit that idea hasn't crossed my mind, but it's indeed
>>> a hole in our current reservation lockdep annotations - since we're
>>> blocking for the unreserve, other threads could potential block
>>> waiting on us to release a lock we're holding already, resulting in a
>>> deadlock.
>>>
>>> Since no other locking primitive that I know of has this
>>> wait_for_unlocked interface, I don't know how we could map this in
>>> lockdep. One idea is to grab the lock and release it again immediately
>>> (only in the annotations, not the real lock ofc). But I need to check
>>> the lockdep code to see whether that doesn't trip it up.
>>
>> I imagine doing the same as mutex_lock_interruptible() does in the
>> interrupted path should work...
> It simply calls the unlock lockdep annotation function if it breaks
> out. So doing a lock/unlock cycle in wait_unreserve should do what we
> want.
>
> And to properly annotate the ttm reserve paths we could just add an
> unconditional wait_unreserve call at the beginning like you suggested
> (maybe with #ifdef CONFIG_PROVE_LOCKING in case ppl freak out about
> the added atomic read in the uncontended case).
> -Daniel

I think atomic_read()s are cheap, at least on intel as IIRC they don't 
require bus locking,
still I think we should keep it within CONFIG_PROVE_LOCKING

which btw reminds me there's an optimization that can be done in that 
one should really only
call atomic_cmpxchg() if a preceding atomic_read() hints that it will 
succeed.

Now, does this mean TTM can keep the atomic reserve <-> lru list removal?

Thanks,
Thomas
Maarten Lankhorst Oct. 3, 2012, 12:46 p.m. UTC | #17
Op 03-10-12 12:53, Thomas Hellstrom schreef:
> On 10/03/2012 10:53 AM, Daniel Vetter wrote:
>> On Wed, Oct 3, 2012 at 10:37 AM, Thomas Hellstrom <thellstrom@vmware.com> wrote:
>>>>> So if I understand you correctly, the reservation changes in TTM are
>>>>> motivated by the
>>>>> fact that otherwise, in the generic reservation code, lockdep can only be
>>>>> annotated for a trylock and not a waiting lock, when it *is* in fact a
>>>>> waiting lock.
>>>>>
>>>>> I'm completely unfamiliar with setting up lockdep annotations, but the
>>>>> only
>>>>> place a
>>>>> deadlock might occur is if the trylock fails and we do a
>>>>> wait_for_unreserve().
>>>>> Isn't it possible to annotate the call to wait_for_unreserve() just like
>>>>> an
>>>>> interruptible waiting lock
>>>>> (that is always interrupted, but at least any deadlock will be catched?).
>>>> Hm, I have to admit that idea hasn't crossed my mind, but it's indeed
>>>> a hole in our current reservation lockdep annotations - since we're
>>>> blocking for the unreserve, other threads could potential block
>>>> waiting on us to release a lock we're holding already, resulting in a
>>>> deadlock.
>>>>
>>>> Since no other locking primitive that I know of has this
>>>> wait_for_unlocked interface, I don't know how we could map this in
>>>> lockdep. One idea is to grab the lock and release it again immediately
>>>> (only in the annotations, not the real lock ofc). But I need to check
>>>> the lockdep code to see whether that doesn't trip it up.
>>>
>>> I imagine doing the same as mutex_lock_interruptible() does in the
>>> interrupted path should work...
>> It simply calls the unlock lockdep annotation function if it breaks
>> out. So doing a lock/unlock cycle in wait_unreserve should do what we
>> want.
>>
>> And to properly annotate the ttm reserve paths we could just add an
>> unconditional wait_unreserve call at the beginning like you suggested
>> (maybe with #ifdef CONFIG_PROVE_LOCKING in case ppl freak out about
>> the added atomic read in the uncontended case).
>> -Daniel
>
> I think atomic_read()s are cheap, at least on intel as IIRC they don't require bus locking,
> still I think we should keep it within CONFIG_PROVE_LOCKING
>
> which btw reminds me there's an optimization that can be done in that one should really only
> call atomic_cmpxchg() if a preceding atomic_read() hints that it will succeed.
>
> Now, does this mean TTM can keep the atomic reserve <-> lru list removal?
I don't think it would be a good idea to keep this across devices, there's currently no
callback to remove buffers off the lru list.

However I am convinced that the current behavior where swapout and
eviction/destruction never ever do a blocking reserve should be preserved. I looked
more into it and it seems to allow recursing quite a few times between all the
related commands, and it wouldn't surprise me if that turned out to be cause of the
lockups before moving to the current code.
no_wait_reserve in those functions should be removed and always treated as true.

Atomic lru_lock + reserve can still be done in the places where it matters though,
but it might have to try the list for multiple bo's before it succeeds. As long as no
blocking is done the effective behavior would stay the same.

~Maarten
Thomas Hellstrom Oct. 3, 2012, 12:56 p.m. UTC | #18
On 10/03/2012 02:46 PM, Maarten Lankhorst wrote:
> Op 03-10-12 12:53, Thomas Hellstrom schreef:
>> On 10/03/2012 10:53 AM, Daniel Vetter wrote:
>>> On Wed, Oct 3, 2012 at 10:37 AM, Thomas Hellstrom <thellstrom@vmware.com> wrote:
>>>>>> So if I understand you correctly, the reservation changes in TTM are
>>>>>> motivated by the
>>>>>> fact that otherwise, in the generic reservation code, lockdep can only be
>>>>>> annotated for a trylock and not a waiting lock, when it *is* in fact a
>>>>>> waiting lock.
>>>>>>
>>>>>> I'm completely unfamiliar with setting up lockdep annotations, but the
>>>>>> only
>>>>>> place a
>>>>>> deadlock might occur is if the trylock fails and we do a
>>>>>> wait_for_unreserve().
>>>>>> Isn't it possible to annotate the call to wait_for_unreserve() just like
>>>>>> an
>>>>>> interruptible waiting lock
>>>>>> (that is always interrupted, but at least any deadlock will be catched?).
>>>>> Hm, I have to admit that idea hasn't crossed my mind, but it's indeed
>>>>> a hole in our current reservation lockdep annotations - since we're
>>>>> blocking for the unreserve, other threads could potential block
>>>>> waiting on us to release a lock we're holding already, resulting in a
>>>>> deadlock.
>>>>>
>>>>> Since no other locking primitive that I know of has this
>>>>> wait_for_unlocked interface, I don't know how we could map this in
>>>>> lockdep. One idea is to grab the lock and release it again immediately
>>>>> (only in the annotations, not the real lock ofc). But I need to check
>>>>> the lockdep code to see whether that doesn't trip it up.
>>>> I imagine doing the same as mutex_lock_interruptible() does in the
>>>> interrupted path should work...
>>> It simply calls the unlock lockdep annotation function if it breaks
>>> out. So doing a lock/unlock cycle in wait_unreserve should do what we
>>> want.
>>>
>>> And to properly annotate the ttm reserve paths we could just add an
>>> unconditional wait_unreserve call at the beginning like you suggested
>>> (maybe with #ifdef CONFIG_PROVE_LOCKING in case ppl freak out about
>>> the added atomic read in the uncontended case).
>>> -Daniel
>> I think atomic_read()s are cheap, at least on intel as IIRC they don't require bus locking,
>> still I think we should keep it within CONFIG_PROVE_LOCKING
>>
>> which btw reminds me there's an optimization that can be done in that one should really only
>> call atomic_cmpxchg() if a preceding atomic_read() hints that it will succeed.
>>
>> Now, does this mean TTM can keep the atomic reserve <-> lru list removal?
> I don't think it would be a good idea to keep this across devices,
Why?

>   there's currently no
> callback to remove buffers off the lru list.

So why don't we add one, and one to put them on the *correct* LRU list while
unreserving.

/Thomas
diff mbox

Patch

diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index eb48f38..bd2e52c 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -156,7 +156,6 @@  static inline void get_dma_buf(struct dma_buf *dmabuf)
 	get_file(dmabuf->file);
 }
 
-#ifdef CONFIG_DMA_SHARED_BUFFER
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 							struct device *dev);
 void dma_buf_detach(struct dma_buf *dmabuf,
@@ -184,103 +183,5 @@  int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
 		 unsigned long);
 void *dma_buf_vmap(struct dma_buf *);
 void dma_buf_vunmap(struct dma_buf *, void *vaddr);
-#else
-
-static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
-							struct device *dev)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_detach(struct dma_buf *dmabuf,
-				  struct dma_buf_attachment *dmabuf_attach)
-{
-	return;
-}
-
-static inline struct dma_buf *dma_buf_export(void *priv,
-					     const struct dma_buf_ops *ops,
-					     size_t size, int flags)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
-{
-	return -ENODEV;
-}
-
-static inline struct dma_buf *dma_buf_get(int fd)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_put(struct dma_buf *dmabuf)
-{
-	return;
-}
-
-static inline struct sg_table *dma_buf_map_attachment(
-	struct dma_buf_attachment *attach, enum dma_data_direction write)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
-			struct sg_table *sg, enum dma_data_direction dir)
-{
-	return;
-}
-
-static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-					   size_t start, size_t len,
-					   enum dma_data_direction dir)
-{
-	return -ENODEV;
-}
-
-static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-					  size_t start, size_t len,
-					  enum dma_data_direction dir)
-{
-}
-
-static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
-					unsigned long pnum)
-{
-	return NULL;
-}
-
-static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
-					 unsigned long pnum, void *vaddr)
-{
-}
-
-static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
-{
-	return NULL;
-}
-
-static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
-				  unsigned long pnum, void *vaddr)
-{
-}
-
-static inline int dma_buf_mmap(struct dma_buf *dmabuf,
-			       struct vm_area_struct *vma,
-			       unsigned long pgoff)
-{
-	return -ENODEV;
-}
-
-static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
-{
-	return NULL;
-}
-
-static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
-{
-}
-#endif /* CONFIG_DMA_SHARED_BUFFER */
 
 #endif /* __DMA_BUF_H__ */