From patchwork Mon Feb 1 12:59:23 2010
X-Patchwork-Submitter: Daniel Vetter
X-Patchwork-Id: 76062
From: Daniel Vetter
To: intel-gfx@lists.freedesktop.org
Cc: Daniel Vetter, stable@kernel.org
Date: Mon, 1 Feb 2010 13:59:23 +0100
Message-Id: <2d1d0aa645b8826498258b4a945598ab649e6a39.1265028983.git.daniel.vetter@ffwll.ch>
X-Mailer: git-send-email 1.6.6.1
Subject: [Intel-gfx] [PATCH 08/11] drm/i915: Update write_domains on active list after flush.

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6d9f83c..cdb8b8e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -494,6 +494,15 @@ typedef struct drm_i915_private {
 	struct list_head flushing_list;
 
 	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
@@ -599,6 +608,8 @@ struct drm_i915_gem_object {
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
+	/** This object's place on GPU write list */
+	struct list_head gpu_write_list;
 
 	/** This object's place on the fenced object LRU */
 	struct list_head fence_list;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 73e88b6..b154f63 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1568,6 +1568,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
 	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
@@ -1584,7 +1586,8 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 	struct drm_i915_gem_object *obj_priv, *next;
 
 	list_for_each_entry_safe(obj_priv, next,
-				 &dev_priv->mm.flushing_list, list) {
+				 &dev_priv->mm.gpu_write_list,
+				 gpu_write_list) {
 		struct drm_gem_object *obj = obj_priv->obj;
 
 		if ((obj->write_domain & flush_domains) ==
@@ -1592,6 +1595,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
+			list_del_init(&obj_priv->gpu_write_list);
 			i915_gem_object_move_to_active(obj, seqno);
 
 			/* update the fence lru list */
@@ -2138,7 +2142,6 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno;
 	int ret;
 	bool lists_empty;
 
@@ -2156,6 +2159,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
 	ret = i915_gem_evict_from_inactive_list(dev);
 	if (ret)
 		return ret;
@@ -2750,7 +2755,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
 	seqno = i915_add_request(dev, NULL, obj->write_domain);
-	obj->write_domain = 0;
+	BUG_ON(obj->write_domain);
 	i915_gem_object_move_to_active(obj, seqno);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3896,16 +3901,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
 			(void)i915_add_request(dev, file_priv,
 					       dev->flush_domains);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
+		if (obj->write_domain)
+			list_move_tail(&obj_priv->gpu_write_list,
+				       &dev_priv->mm.gpu_write_list);
+		else
+			list_del_init(&obj_priv->gpu_write_list);
+
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -4416,6 +4428,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj_priv->obj = obj;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
 	INIT_LIST_HEAD(&obj_priv->fence_list);
 	obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4787,6 +4800,7 @@ i915_gem_load(struct drm_device *dev)
 	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
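
For readers following along, below is a rough userspace sketch of the bookkeeping this patch introduces. It is not kernel code: the names (fake_object, process_flush) are made up, the domain bits are placeholders rather than the real I915_GEM_DOMAIN_* values, and the list helpers are a minimal stand-in for <linux/list.h>. The point it illustrates is the one the diff makes: execbuffer keeps every object with a pending GPU write on gpu_write_list, and the flush-processing path retires an object off that list only when the flush actually covered its write domain, instead of walking the flushing_list and assuming every entry there was flushed.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	list_init(e);
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_object {
	const char *name;
	unsigned int write_domain;		/* 0 == no pending GPU write */
	struct list_head gpu_write_list;	/* link into the global list */
};

/* Placeholder domain bits, not the real I915_GEM_DOMAIN_* values. */
#define FAKE_DOMAIN_A 0x1
#define FAKE_DOMAIN_B 0x2

/*
 * Roughly what i915_gem_process_flushing_list() does after this patch:
 * walk gpu_write_list instead of flushing_list, and only retire objects
 * whose write domain was actually covered by this flush.
 */
static void process_flush(struct list_head *gpu_write_list,
			  unsigned int flush_domains)
{
	struct list_head *pos, *next;

	for (pos = gpu_write_list->next; pos != gpu_write_list; pos = next) {
		struct fake_object *obj =
			container_of(pos, struct fake_object, gpu_write_list);

		next = pos->next;
		if ((obj->write_domain & flush_domains) == obj->write_domain) {
			obj->write_domain = 0;
			list_del_init(&obj->gpu_write_list);
			printf("%s: write flushed, would move to active list\n",
			       obj->name);
		}
	}
}

int main(void)
{
	struct list_head gpu_write_list;
	struct fake_object a = { "bo-a", FAKE_DOMAIN_A };
	struct fake_object b = { "bo-b", FAKE_DOMAIN_B };

	list_init(&gpu_write_list);
	list_init(&a.gpu_write_list);
	list_init(&b.gpu_write_list);

	/* execbuffer: every object with a pending write goes on the list */
	list_add_tail(&a.gpu_write_list, &gpu_write_list);
	list_add_tail(&b.gpu_write_list, &gpu_write_list);

	/* a flush covering only domain A retires only object a */
	process_flush(&gpu_write_list, FAKE_DOMAIN_A);

	printf("gpu_write_list empty after partial flush: %s\n",
	       list_empty(&gpu_write_list) ? "yes" : "no");
	return 0;
}

Compiled with something like gcc -Wall sketch.c, it should report that only the first object is retired and that the list is still non-empty after the partial flush, mirroring why a partial flush must not clear write_domain on every object the way the old flushing_list walk effectively did.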