From patchwork Mon Nov 12 14:00:06 2012
X-Patchwork-Submitter: Maarten Lankhorst
X-Patchwork-Id: 1728361
From: Maarten Lankhorst <maarten.lankhorst@canonical.com>
To: dri-devel@lists.freedesktop.org
Subject: [PATCH 05/10] drm/ttm: add sense to ttm_bo_cleanup_refs, v4
Date: Mon, 12 Nov 2012 15:00:06 +0100
Message-Id: <1352728811-21860-5-git-send-email-maarten.lankhorst@canonical.com>
X-Mailer: git-send-email 1.8.0
In-Reply-To: <1352728811-21860-1-git-send-email-maarten.lankhorst@canonical.com>
References: <1352728811-21860-1-git-send-email-maarten.lankhorst@canonical.com>

Require the lru_lock and a reservation to be held on entry, and kill off
the retry loop: no new sync objects should be attached at this point any
more.

v2:
 - moved upwards in the patch list and fixed conflicts.
v3:
 - rebased for the fence lock, and renamed to
   ttm_bo_cleanup_refs_and_unlock to make clear that it unlocks the
   lru_lock.
v4:
 - added WARN_ON(!atomic_read(&bo->kref.refcount)) in ttm_bo_reserve to
   ensure that nobody accidentally reserves a dead buffer.
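In caller terms the new contract looks roughly like this (a condensed
sketch of the ttm_bo_delayed_delete hunk below, with names simplified
and the refcounting trimmed; see the diff for the exact code):

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
	if (ret)
		/* reserve failed: we still own the lru_lock, drop it here */
		spin_unlock(&glob->lru_lock);
	else
		/* reserved: the callee drops reservation and lru_lock on every path */
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, !remove_all);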
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c           | 123 ++++++++++++++++-----------------
 drivers/gpu/drm/ttm/ttm_execbuf_util.c |   1 +
 2 files changed, 62 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 70285ff..e6df086 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -296,6 +296,8 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	int put_count = 0;
 	int ret;
 
+	WARN_ON(!atomic_read(&bo->kref.refcount));
+
 	spin_lock(&glob->lru_lock);
 	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
 				    sequence);
@@ -522,82 +524,78 @@ queue:
 }
 
 /**
- * function ttm_bo_cleanup_refs
+ * function ttm_bo_cleanup_refs_and_unlock
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, do nothing.
  *
+ * Must be called with lru_lock and reservation held, this function
+ * will drop both before returning.
+ *
  * @interruptible         Any sleeps should occur interruptibly.
- * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
  */
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
-			       bool interruptible,
-			       bool no_wait_reserve,
-			       bool no_wait_gpu)
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+					  bool interruptible,
+					  bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_driver *driver = bdev->driver;
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
 	int ret = 0;
-	void *sync_obj;
-
-retry:
-	spin_lock(&glob->lru_lock);
-
-	ret = ttm_bo_reserve_locked(bo, interruptible,
-				    no_wait_reserve, false, 0);
-
-	if (unlikely(ret)) {
-		spin_unlock(&glob->lru_lock);
-		return ret;
-	}
 
+	spin_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, false, true);
 
-	if (unlikely(list_empty(&bo->ddestroy))) {
+	if (ret && no_wait_gpu) {
+		spin_unlock(&bdev->fence_lock);
 		atomic_set(&bo->reserved, 0);
 		wake_up_all(&bo->event_queue);
 		spin_unlock(&glob->lru_lock);
-		return 0;
-	}
-
-	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, false, true);
-
-	if (ret) {
-		if (no_wait_gpu) {
-			spin_unlock(&bdev->fence_lock);
-			atomic_set(&bo->reserved, 0);
-			wake_up_all(&bo->event_queue);
-			spin_unlock(&glob->lru_lock);
-			return ret;
-		}
+		return ret;
+	} else if (ret) {
+		void *sync_obj;
 
 		/**
-		 * Take a reference to the fence and unreserve, if the wait
-		 * was succesful and no new sync_obj was attached,
-		 * ttm_bo_wait in retry will return ret = 0, and end the loop.
+		 * Take a reference to the fence and unreserve,
+		 * at this point the buffer should be dead, so
+		 * no new sync objects can be attached.
 		 */
-
 		sync_obj = driver->sync_obj_ref(&bo->sync_obj);
 		spin_unlock(&bdev->fence_lock);
 
 		atomic_set(&bo->reserved, 0);
 		wake_up_all(&bo->event_queue);
 		spin_unlock(&glob->lru_lock);
 
-		ret = driver->sync_obj_wait(bo->sync_obj, false, interruptible);
+		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
 		driver->sync_obj_unref(&sync_obj);
 		if (ret)
 			return ret;
-		goto retry;
+
+		spin_lock(&glob->lru_lock);
+
+		/* remove sync_obj with ttm_bo_wait */
+		spin_lock(&bdev->fence_lock);
+		ret = ttm_bo_wait(bo, false, false, true);
+		spin_unlock(&bdev->fence_lock);
+
+		WARN_ON(ret);
+
+	} else {
+		spin_unlock(&bdev->fence_lock);
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+	}
+
+	if (unlikely(list_empty(&bo->ddestroy))) {
+		spin_unlock(&glob->lru_lock);
+		return 0;
 	}
 
-	spin_unlock(&bdev->fence_lock);
 	put_count = ttm_bo_del_from_lru(bo);
 	list_del_init(&bo->ddestroy);
 	++put_count;
 
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
 	spin_unlock(&glob->lru_lock);
 
 	ttm_bo_list_ref_sub(bo, put_count, true);
@@ -606,8 +604,8 @@ retry:
 }
 
 /**
- * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
- * encountered buffers.
+ * Traverse the delayed list, and call ttm_bo_cleanup_refs_and_unlock
+ * on all encountered buffers.
  */
 
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
@@ -633,9 +631,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		spin_unlock(&glob->lru_lock);
-		ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
-					  !remove_all);
+		ret = ttm_bo_reserve_locked(entry, false, !remove_all,
+					    false, 0);
+		if (ret)
+			spin_unlock(&glob->lru_lock);
+		else
+			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+							     !remove_all);
+
 		kref_put(&entry->list_kref, ttm_bo_release_list);
 		entry = nentry;
 
@@ -788,15 +791,6 @@ retry:
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
-	if (!list_empty(&bo->ddestroy)) {
-		spin_unlock(&glob->lru_lock);
-		ret = ttm_bo_cleanup_refs(bo, interruptible,
-					  no_wait_reserve, no_wait_gpu);
-		kref_put(&bo->list_kref, ttm_bo_release_list);
-
-		return ret;
-	}
-
 	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
@@ -815,6 +809,13 @@ retry:
 		goto retry;
 	}
 
+	if (!list_empty(&bo->ddestroy)) {
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+						     no_wait_gpu);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+		return ret;
+	}
+
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
@@ -1778,14 +1779,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 			      struct ttm_buffer_object, swap);
 		kref_get(&bo->list_kref);
 
-		if (!list_empty(&bo->ddestroy)) {
-			spin_unlock(&glob->lru_lock);
-			(void) ttm_bo_cleanup_refs(bo, false, false, false);
-			kref_put(&bo->list_kref, ttm_bo_release_list);
-			spin_lock(&glob->lru_lock);
-			continue;
-		}
-
 		/**
 		 * Reserve buffer. Since we unlock while sleeping, we need
 		 * to re-check that nobody removed us from the swap-list while
@@ -1801,6 +1794,12 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 			}
 		}
 
+		if (!list_empty(&bo->ddestroy)) {
+			ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+			kref_put(&bo->list_kref, ttm_bo_release_list);
+			return ret;
+		}
+
 		BUG_ON(ret != 0);
 		put_count = ttm_bo_del_from_lru(bo);
 		spin_unlock(&glob->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e452..5490492 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -152,6 +152,7 @@ retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
+		WARN_ON(!atomic_read(&bo->kref.refcount));
 retry_this_bo:
 		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
 		switch (ret) {
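
Aside for readers following along: the locking hand-off this patch
introduces (caller acquires both locks; on success, ownership passes to
a callee that is documented to drop them on every return path) can be
shown as a minimal standalone sketch. This is plain userspace C with
pthreads, not TTM code; every name below is made up for illustration:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

	struct buffer {
		pthread_mutex_t reserve;  /* stands in for the bo reservation */
		int idle;                 /* stands in for "GPU is done with it" */
	};

	/* Called with lru_lock and buf->reserve held; drops both on every path. */
	static int cleanup_refs_and_unlock(struct buffer *buf)
	{
		int ret = buf->idle ? 0 : -16; /* -EBUSY */

		pthread_mutex_unlock(&buf->reserve);
		pthread_mutex_unlock(&lru_lock);
		return ret;
	}

	int main(void)
	{
		struct buffer buf = { PTHREAD_MUTEX_INITIALIZER, 1 };
		int ret;

		pthread_mutex_lock(&lru_lock);
		if (pthread_mutex_trylock(&buf.reserve) != 0) {
			/* Reservation failed: we still own lru_lock, drop it here. */
			pthread_mutex_unlock(&lru_lock);
			ret = -16; /* -EBUSY */
		} else {
			/* Success: from here the callee owns both locks. */
			ret = cleanup_refs_and_unlock(&buf);
		}
		printf("cleanup returned %d\n", ret);
		return 0;
	}

The design point the sketch mirrors: because the callee takes over both
locks only after a successful trylock/reserve, no path ever drops a lock
it does not own, which is exactly why the retry loop in the old
ttm_bo_cleanup_refs is no longer needed.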