diff mbox

drm/ttm: remove EBUSY handling in ttm_execbuf_util

Message ID 50323EAC.5060107@canonical.com (mailing list archive)
State New, archived
Headers show

Commit Message

Maarten Lankhorst Aug. 20, 2012, 1:42 p.m. UTC
How is this different from just calling with no_wait == false?
As far as I can tell, both paths end up with the same result..
    
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>

Comments

Jerome Glisse Aug. 20, 2012, 3:15 p.m. UTC | #1
On Mon, Aug 20, 2012 at 9:42 AM, Maarten Lankhorst
<maarten.lankhorst@canonical.com> wrote:
> How is this different from just calling with no_wait == false?
> As far as I can tell, both paths end up with the same result..
>
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>

NAK, this seriously modifies the behavior. The ttm_eu_del_from_lru_locked
part is important. It must happen with the lru lock held, without dropping
this lock at any point prior to waiting for the bo unreserve.

Cheers,
Jerome

>
> diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> index 3f48a46..4e7b596 100644
> --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> @@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
>         }
>  }
>
> -static int ttm_eu_wait_unreserved_locked(struct list_head *list,
> -                                        struct ttm_buffer_object *bo)
> -{
> -       struct ttm_bo_global *glob = bo->glob;
> -       int ret;
> -
> -       ttm_eu_del_from_lru_locked(list);
> -       spin_unlock(&glob->lru_lock);
> -       ret = ttm_bo_wait_unreserved(bo, true);
> -       spin_lock(&glob->lru_lock);
> -       if (unlikely(ret != 0))
> -               ttm_eu_backoff_reservation_locked(list);
> -       return ret;
> -}
> -
> -
>  void ttm_eu_backoff_reservation(struct list_head *list)
>  {
>         struct ttm_validate_buffer *entry;
> @@ -152,19 +136,10 @@ retry:
>         list_for_each_entry(entry, list, head) {
>                 struct ttm_buffer_object *bo = entry->bo;
>
> -retry_this_bo:
> -               ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
> +               ret = ttm_bo_reserve_locked(bo, true, false, true, val_seq);
>                 switch (ret) {
>                 case 0:
>                         break;
> -               case -EBUSY:
> -                       ret = ttm_eu_wait_unreserved_locked(list, bo);
> -                       if (unlikely(ret != 0)) {
> -                               spin_unlock(&glob->lru_lock);
> -                               ttm_eu_list_ref_sub(list);
> -                               return ret;
> -                       }
> -                       goto retry_this_bo;
>                 case -EAGAIN:
>                         ttm_eu_backoff_reservation_locked(list);
>                         spin_unlock(&glob->lru_lock);
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
Maarten Lankhorst Aug. 20, 2012, 5:55 p.m. UTC | #2
Hey,

Op 20-08-12 17:15, Jerome Glisse schreef:
> On Mon, Aug 20, 2012 at 9:42 AM, Maarten Lankhorst
> <maarten.lankhorst@canonical.com> wrote:
>> How is this different from just calling with no_wait == false?
>> As far as I can tell, both paths end up with the same result..
>>
>> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
> NAK this seriously modify the behavior. The ttm_eu_del_from_lru_locked
> part is important. It must happen with lru lock held and without any
> dropping of this lock prior to wait for bo unreserve.
>

You're right, I missed the part where it removed those, causing the later
patch to be wrong too. However, I still think the code can be made more
readable. Wouldn't it be better if it used the unlocked variants instead?
It would save a lot of extra list traversals, and you could drop
removed, reserved and put_count from ttm_validate_buffer.

~Maarten
Jerome Glisse Aug. 20, 2012, 6:04 p.m. UTC | #3
On Mon, Aug 20, 2012 at 1:55 PM, Maarten Lankhorst
<maarten.lankhorst@canonical.com> wrote:
> Hey,
>
> Op 20-08-12 17:15, Jerome Glisse schreef:
>> On Mon, Aug 20, 2012 at 9:42 AM, Maarten Lankhorst
>> <maarten.lankhorst@canonical.com> wrote:
>>> How is this different from just calling with no_wait == false?
>>> As far as I can tell, both paths end up with the same result..
>>>
>>> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
>> NAK this seriously modify the behavior. The ttm_eu_del_from_lru_locked
>> part is important. It must happen with lru lock held and without any
>> dropping of this lock prior to wait for bo unreserve.
>>
>
> You're right, I missed the part where it removed those, causing the later
> patch to be wrong too. However I  still think the code can be made more
> readable. Wouldn't it be better if it used the unlocked variants instead?
> It would save a lot of extra list traversals, and you could drop
> removed, reserved and put_count from ttm_validate_buffer.
>
> ~Maarten
>

No, as I said, the lock can not be dropped; I don't see much
simplification of this code. Also, the path you are trying to modify is
taken only if some bo is reserved by some other process, which is
supposed to be a rare event.

Cheers,
Jerome
diff mbox

Patch

diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 3f48a46..4e7b596 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@  static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
-					 struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	int ret;
-
-	ttm_eu_del_from_lru_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ret = ttm_bo_wait_unreserved(bo, true);
-	spin_lock(&glob->lru_lock);
-	if (unlikely(ret != 0))
-		ttm_eu_backoff_reservation_locked(list);
-	return ret;
-}
-
-
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
@@ -152,19 +136,10 @@  retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-retry_this_bo:
-		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_locked(bo, true, false, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
-		case -EBUSY:
-			ret = ttm_eu_wait_unreserved_locked(list, bo);
-			if (unlikely(ret != 0)) {
-				spin_unlock(&glob->lru_lock);
-				ttm_eu_list_ref_sub(list);
-				return ret;
-			}
-			goto retry_this_bo;
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);