From patchwork Wed Aug 4 11:20:36 2010
X-Patchwork-Submitter: Chris Wilson
X-Patchwork-Id: 116996
From: Chris Wilson
To: intel-gfx@lists.freedesktop.org
Date: Wed, 4 Aug 2010 12:20:36 +0100
Message-Id: <1280920837-20910-5-git-send-email-chris@chris-wilson.co.uk>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1280920837-20910-1-git-send-email-chris@chris-wilson.co.uk>
References: <1280920837-20910-1-git-send-email-chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 4/5] drm/i915: Implement fair lru eviction across both rings.

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 75fe397..61a8c90 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -668,6 +668,8 @@ struct drm_i915_gem_object {
         struct list_head list;
         /** This object's place on GPU write list */
         struct list_head gpu_write_list;
+        /** This object's place on eviction list */
+        struct list_head evict_list;

         /**
          * This is set if the object is on the active or flushing lists
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 479e450..4e5af69 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -31,167 +31,146 @@
 #include "i915_drv.h"
 #include "i915_drm.h"

-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
-{
-        return obj_priv->madv == I915_MADV_DONTNEED;
-}
-
-static int
-i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
-                                      unsigned alignment, int *found)
+static struct drm_i915_gem_object *
+i915_gem_next_active_object(struct drm_device *dev,
+                            struct list_head **render_iter,
+                            struct list_head **bsd_iter)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
-        struct drm_gem_object *obj;
-        struct drm_i915_gem_object *obj_priv;
-        struct drm_gem_object *best = NULL;
-        struct drm_gem_object *first = NULL;
+        struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;

-        /* Try to find the smallest clean object */
-        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-                struct drm_gem_object *obj = &obj_priv->base;
-                if (obj->size >= min_size) {
-                        if ((!obj_priv->dirty ||
-                             i915_gem_object_is_purgeable(obj_priv)) &&
-                            (!best || obj->size < best->size)) {
-                                best = obj;
-                                if (best->size == min_size)
-                                        break;
-                        }
-                        if (!first)
-                                first = obj;
-                }
-        }
-
-        obj = best ? best : first;
-
-        if (!obj) {
-                *found = 0;
-                return 0;
-        }
+        if (*render_iter != &dev_priv->render_ring.active_list)
+                render_obj = list_entry(*render_iter,
+                                        struct drm_i915_gem_object,
+                                        list);

-        *found = 1;
+        if (HAS_BSD(dev)) {
+                if (*bsd_iter != &dev_priv->bsd_ring.active_list)
+                        bsd_obj = list_entry(*bsd_iter,
+                                             struct drm_i915_gem_object,
+                                             list);

-#if WATCH_LRU
-        DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-        obj_priv = to_intel_bo(obj);
-        BUG_ON(obj_priv->pin_count != 0);
-        BUG_ON(obj_priv->active);
+                if (render_obj == NULL) {
+                        *bsd_iter = (*bsd_iter)->next;
+                        return bsd_obj;
+                }

-        /* Wait on the rendering and unbind the buffer. */
-        return i915_gem_object_unbind(obj);
-}
+                if (bsd_obj == NULL) {
+                        *render_iter = (*render_iter)->next;
+                        return render_obj;
+                }

-static void
-i915_gem_flush_ring(struct drm_device *dev,
-                    uint32_t invalidate_domains,
-                    uint32_t flush_domains,
-                    struct intel_ring_buffer *ring)
-{
-        if (flush_domains & I915_GEM_DOMAIN_CPU)
-                drm_agp_chipset_flush(dev);
-        ring->flush(dev, ring,
-                    invalidate_domains,
-                    flush_domains);
+                /* XXX can we handle seqno wrapping? */
+                if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
+                        *render_iter = (*render_iter)->next;
+                        return render_obj;
+                } else {
+                        *bsd_iter = (*bsd_iter)->next;
+                        return bsd_obj;
+                }
+        } else {
+                *render_iter = (*render_iter)->next;
+                return render_obj;
+        }
 }

 int
-i915_gem_evict_something(struct drm_device *dev,
-                         int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
-        int ret, found;
-
-        struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
-        struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
-        for (;;) {
-                i915_gem_retire_requests(dev);
-
-                /* If there's an inactive buffer available now, grab it
-                 * and be done.
-                 */
-                ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
-                                                            alignment,
-                                                            &found);
-                if (found)
-                        return ret;
+        struct list_head eviction_list, unwind_list;
+        struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+        struct list_head *render_iter, *bsd_iter;
+        int ret = 0;
+
+        i915_gem_retire_requests(dev);

-                /* If we didn't get anything, but the ring is still processing
-                 * things, wait for the next to finish and hopefully leave us
-                 * a buffer to evict.
-                 */
-                if (!list_empty(&render_ring->request_list)) {
-                        struct drm_i915_gem_request *request;
+        /* Re-check for free space after retiring requests */
+        if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+                               min_size, alignment, 0))
+                return 0;

-                        request = list_first_entry(&render_ring->request_list,
-                                                   struct drm_i915_gem_request,
-                                                   list);
+        INIT_LIST_HEAD(&unwind_list);
+        drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

-                        ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
-                        if (ret)
-                                return ret;
+        /* First see if there is a large enough contiguous idle region... */
+        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+                list_add(&obj_priv->evict_list, &unwind_list);
+                if (drm_mm_scan_add_block(obj_priv->gtt_space))
+                        goto found;
+        }
+
+        /* Now merge in the soon-to-be-expired objects... */
+        render_iter = dev_priv->render_ring.active_list.next;
+        bsd_iter = dev_priv->bsd_ring.active_list.next;
+        while ((obj_priv = i915_gem_next_active_object(dev, &render_iter, &bsd_iter)) != NULL) {
+                /* Does the object require an outstanding flush? */
+                if (obj_priv->base.write_domain || obj_priv->pin_count)
                         continue;
-                }
-
-                if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
-                        struct drm_i915_gem_request *request;
+                list_add(&obj_priv->evict_list, &unwind_list);
+                if (drm_mm_scan_add_block(obj_priv->gtt_space))
+                        goto found;
+        }

-                        request = list_first_entry(&bsd_ring->request_list,
-                                                   struct drm_i915_gem_request,
-                                                   list);
+        /* Finally add anything with a pending flush (in order of retirement). */
+        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+                if (obj_priv->pin_count)
+                        continue;
-                        ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
-                        if (ret)
-                                return ret;
+                list_add(&obj_priv->evict_list, &unwind_list);
+                if (drm_mm_scan_add_block(obj_priv->gtt_space))
+                        goto found;
+        }
+        render_iter = dev_priv->render_ring.active_list.next;
+        bsd_iter = dev_priv->bsd_ring.active_list.next;
+        while ((obj_priv = i915_gem_next_active_object(dev, &render_iter, &bsd_iter)) != NULL) {
+                if (! obj_priv->base.write_domain || obj_priv->pin_count)
                         continue;
-                }
-                /* If we didn't have anything on the request list but there
-                 * are buffers awaiting a flush, emit one and try again.
-                 * When we wait on it, those buffers waiting for that flush
-                 * will get moved to inactive.
-                 */
-                if (!list_empty(&dev_priv->mm.flushing_list)) {
-                        struct drm_gem_object *obj = NULL;
-                        struct drm_i915_gem_object *obj_priv;
-
-                        /* Find an object that we can immediately reuse */
-                        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-                                obj = &obj_priv->base;
-                                if (obj->size >= min_size)
-                                        break;
-
-                                obj = NULL;
-                        }
-
-                        if (obj != NULL) {
-                                uint32_t seqno;
-
-                                i915_gem_flush_ring(dev,
-                                                    obj->write_domain,
-                                                    obj->write_domain,
-                                                    obj_priv->ring);
-                                seqno = i915_add_request(dev, NULL,
-                                                         obj->write_domain,
-                                                         obj_priv->ring);
-                                if (seqno == 0)
-                                        return -ENOMEM;
-                                continue;
-                        }
+                list_add(&obj_priv->evict_list, &unwind_list);
+                if (drm_mm_scan_add_block(obj_priv->gtt_space))
+                        goto found;
+        }
+
+        /* Nothing found, clean up and bail out! */
+        list_for_each_entry(obj_priv, &unwind_list, evict_list) {
+                ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+                BUG_ON(ret);
+        }
+
+        return i915_gem_evict_everything(dev);
+
+found:
+        INIT_LIST_HEAD(&eviction_list);
+        list_for_each_entry_safe(obj_priv, tmp_obj_priv, &unwind_list, evict_list) {
+                if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
+                        /* drm_mm doesn't allow any other operations while
+                         * scanning, therefore store to be evicted objects on a
+                         * temporary list. */
+                        list_move(&obj_priv->evict_list, &eviction_list);
                 }
+        }

-                /* If we didn't do any of the above, there's no single buffer
-                 * large enough to swap out for the new one, so just evict
-                 * everything and start again. (This should be rare.)
-                 */
-                if (!list_empty(&dev_priv->mm.inactive_list))
-                        return i915_gem_evict_inactive(dev);
-                else
-                        return i915_gem_evict_everything(dev);
+        /* Unbinding will emit any required flushes */
+        list_for_each_entry_safe(obj_priv, tmp_obj_priv,
+                                 &eviction_list, evict_list) {
+#if WATCH_LRU
+                DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+                ret = i915_gem_object_unbind(&obj_priv->base);
+                if (ret)
+                        return ret;
         }
+
+        /* The just created free hole should be on the top of the free stack
+         * maintained by drm_mm, so this BUG_ON actually executes in O(1).
+         * Furthermore all accessed data has just recently been used, so it
+         * should be really fast, too. */
+        BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
+                                   alignment, 0));
+
+        return 0;
 }

 int
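
The new eviction path leans on the drm_mm scanning helpers: candidates are tentatively added to a scan in preference order (clean inactive buffers first, then active buffers with no pending writes, then the flushing list, then the remaining active buffers), the scan stops as soon as the accumulated blocks would open a contiguous hole big enough for the request, and every candidate that is not part of that hole is rolled back untouched before the chosen ones are unbound. The standalone sketch below models that idea in plain userspace C so the control flow is easier to follow. The slot-array layout and all names in it (GTT_SLOTS, find_hole, scan_for_space) are invented for illustration and are not the drm_mm or i915 API, and the four-phase preference order is collapsed into a simple oldest-seqno-first walk.

/*
 * Toy model of the "fair LRU eviction" idea in this patch: mark candidates
 * for eviction in LRU order, stop as soon as the marked blocks would open a
 * contiguous hole big enough for the new object, and roll back every mark
 * that is not part of that hole.  Not the drm_mm API; illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define GTT_SLOTS 16

struct slot {
        bool free;      /* nothing bound here */
        bool marked;    /* tentatively selected for eviction by the scan */
        unsigned seqno; /* last use; smaller == older == evict first */
};

static struct slot gtt[GTT_SLOTS];

/* Return the start of a run of >= want slots that are free or marked, else -1. */
static int find_hole(unsigned want)
{
        unsigned run = 0;
        for (int i = 0; i < GTT_SLOTS; i++) {
                run = (gtt[i].free || gtt[i].marked) ? run + 1 : 0;
                if (run >= want)
                        return i - (int)want + 1;
        }
        return -1;
}

/* Mark in-use slots oldest-first until a hole of 'want' slots appears. */
static int scan_for_space(unsigned want)
{
        for (;;) {
                int victim = -1;

                /* pick the oldest slot not yet marked */
                for (int i = 0; i < GTT_SLOTS; i++) {
                        if (gtt[i].free || gtt[i].marked)
                                continue;
                        if (victim < 0 || gtt[i].seqno < gtt[victim].seqno)
                                victim = i;
                }
                if (victim < 0)
                        return -1; /* nothing left to evict */

                gtt[victim].marked = true;

                int start = find_hole(want);
                if (start >= 0) {
                        /* roll back marks outside the chosen hole, evict the rest */
                        for (int i = 0; i < GTT_SLOTS; i++) {
                                if (!gtt[i].marked)
                                        continue;
                                gtt[i].marked = false;
                                if (i >= start && i < start + (int)want)
                                        gtt[i].free = true; /* "unbind" */
                        }
                        return start;
                }
        }
}

int main(void)
{
        /* populate a fragmented GTT: even slots busy, with increasing ages */
        for (int i = 0; i < GTT_SLOTS; i++) {
                gtt[i].free = (i % 2) != 0;
                gtt[i].seqno = 100 + i;
        }

        printf("hole of 4 slots at %d\n", scan_for_space(4));
        return 0;
}

Compared with the loop this patch removes, nothing is flushed or waited on until the scan has proven that evicting a specific set of buffers yields a usable hole, and because render and BSD objects are merged by last_rendering_seqno the two rings age out fairly rather than one ring always being raided first.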