From patchwork Thu Jun 27 23:30:40 2013
X-Patchwork-Submitter: Ben Widawsky
X-Patchwork-Id: 2796391
From: Ben Widawsky
To: Intel GFX
Date: Thu, 27 Jun 2013 16:30:40 -0700
Message-Id: <1372375867-1003-40-git-send-email-ben@bwidawsk.net>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1372375867-1003-1-git-send-email-ben@bwidawsk.net>
References: <1372375867-1003-1-git-send-email-ben@bwidawsk.net>
Cc: Ben Widawsky
Subject: [Intel-gfx] [PATCH 39/66] drm/i915: Move active to vma

Probably need to squash whole thing, or just the inactive part, tbd...

Signed-off-by: Ben Widawsky
---
 drivers/gpu/drm/i915/i915_drv.h | 14 ++++++------
 drivers/gpu/drm/i915/i915_gem.c | 47 ++++++++++++++++++++++++-----------------
 2 files changed, 35 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b3eb067..247a124 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -545,6 +545,13 @@ struct i915_vma {
 	/* Page aligned offset (helper for stolen) */
 	unsigned long deferred_offset;
 
+	/**
+	 * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * inactive (ready to be unbound) list.
+	 */
+	unsigned int active:1;
+
 	/** This object's place on the active/inactive lists */
 	struct list_head mm_list;
 
@@ -1245,13 +1252,6 @@ struct drm_i915_gem_object {
 	struct list_head exec_list;
 
 	/**
-	 * This is set if the object is on the active lists (has pending
-	 * rendering and so a non-zero seqno), and is not set if it is on
-	 * inactive (ready to be unbound) list.
-	 */
-	unsigned int active:1;
-
-	/**
 	 * This is set if the object has been written to since last bound
 	 * to the GTT
 	 */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f448804..a3e8c26 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -137,7 +137,13 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 /* NB: Not the same as !i915_gem_object_is_inactive */
 bool i915_gem_object_is_active(struct drm_i915_gem_object *obj)
 {
-	return obj->active;
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->active)
+			return true;
+
+	return false;
 }
 
 static inline bool
@@ -1889,14 +1895,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	BUG_ON(ring == NULL);
 	obj->ring = ring;
 
+	/* Move from whatever list we were on to the tail of execution. */
+	vma = i915_gem_obj_to_vma(obj, vm);
 	/* Add a reference if we're newly entering the active list. */
-	if (!i915_gem_object_is_active(obj)) {
+	if (!vma->active) {
 		drm_gem_object_reference(&obj->base);
-		obj->active = 1;
+		vma->active = 1;
 	}
 
-	/* Move from whatever list we were on to the tail of execution. */
-	vma = i915_gem_obj_to_vma(obj, vm);
 	list_move_tail(&vma->mm_list, &vm->active_list);
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
@@ -1917,16 +1923,23 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 }
 
 static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
-				 struct i915_address_space *vm)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct i915_address_space *vm;
 	struct i915_vma *vma;
+	int i = 0;
 
 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
-	BUG_ON(!i915_gem_object_is_active(obj));
 
-	vma = i915_gem_obj_to_vma(obj, vm);
-	list_move_tail(&vma->mm_list, &vm->inactive_list);
+	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+		vma = i915_gem_obj_to_vma(obj, vm);
+		if (!vma || !vma->active)
+			continue;
+		list_move_tail(&vma->mm_list, &vm->inactive_list);
+		vma->active = 0;
+		i++;
+	}
 
 	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
@@ -1938,8 +1951,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
 	obj->last_fenced_seqno = 0;
 	obj->fenced_gpu_access = false;
 
-	obj->active = 0;
-	drm_gem_object_unreference(&obj->base);
+	while (i--)
+		drm_gem_object_unreference(&obj->base);
 
 	WARN_ON(i915_verify_lists(dev));
 }
@@ -2273,15 +2286,13 @@ static bool i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 	}
 
 	while (!list_empty(&ring->active_list)) {
-		struct i915_address_space *vm;
 		struct drm_i915_gem_object *obj;
 
 		obj = list_first_entry(&ring->active_list,
 				       struct drm_i915_gem_object,
 				       ring_list);
 
-		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-			i915_gem_object_move_to_inactive(obj, vm);
+		i915_gem_object_move_to_inactive(obj);
 	}
 
 	return ctx_banned;
@@ -2365,8 +2376,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
 	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_private *dev_priv = ring->dev->dev_private;
-		struct i915_address_space *vm;
 		struct drm_i915_gem_object *obj;
 
 		obj = list_first_entry(&ring->active_list,
@@ -2376,8 +2385,8 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
 			break;
 
-		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-			i915_gem_object_move_to_inactive(obj, vm);
+		BUG_ON(!i915_gem_object_is_active(obj));
+		i915_gem_object_move_to_inactive(obj);
 	}
 
 	if (unlikely(ring->trace_irq_seqno &&
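
For readers following along: the refcounting rule this patch introduces is that
the object now holds one GEM reference per VMA that is active, taken on the
inactive->active edge in move_to_active(), and move_to_inactive() drops exactly
as many references as VMAs it retires (the "while (i--)" loop above). Below is
a stand-alone sketch of that invariant, not i915 code; every name in it
(sketch_obj, vma_move_to_active, obj_move_to_inactive, MAX_VMAS) is
hypothetical, and it compiles as ordinary userspace C.

/* Stand-alone model of per-VMA active tracking with one object
 * reference per active VMA. Hypothetical names throughout. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define MAX_VMAS 4	/* arbitrary bound for the sketch */

struct sketch_obj {
	int refcount;
	bool vma_active[MAX_VMAS];	/* one flag per (obj, vm) binding */
};

/* Analogue of i915_gem_object_is_active(): active iff any VMA is. */
static bool obj_is_active(const struct sketch_obj *obj)
{
	for (size_t i = 0; i < MAX_VMAS; i++)
		if (obj->vma_active[i])
			return true;
	return false;
}

/* Analogue of move_to_active() for one VM: take a reference only on
 * the inactive->active edge of that VMA (mirrors vma->active = 1). */
static void vma_move_to_active(struct sketch_obj *obj, size_t vm)
{
	if (!obj->vma_active[vm]) {
		obj->refcount++;
		obj->vma_active[vm] = true;
	}
}

/* Analogue of move_to_inactive(): retire every active VMA, then drop
 * exactly as many references as were taken. */
static void obj_move_to_inactive(struct sketch_obj *obj)
{
	int retired = 0;

	for (size_t i = 0; i < MAX_VMAS; i++) {
		if (!obj->vma_active[i])
			continue;
		obj->vma_active[i] = false;
		retired++;
	}
	while (retired--)
		obj->refcount--;
}

int main(void)
{
	struct sketch_obj obj = { .refcount = 1 };	/* creation ref */

	vma_move_to_active(&obj, 0);	/* e.g. a GGTT binding goes active */
	vma_move_to_active(&obj, 1);	/* e.g. a PPGTT binding goes active */
	vma_move_to_active(&obj, 1);	/* re-activation: no extra ref */
	assert(obj.refcount == 3 && obj_is_active(&obj));

	obj_move_to_inactive(&obj);	/* retires both VMAs, drops both refs */
	assert(obj.refcount == 1 && !obj_is_active(&obj));
	return 0;
}

The point of the model: the object cannot reach refcount 0 while any address
space still has it on an active list, and repeated activation of the same VMA
takes no extra reference, which is why the retire path can safely drop one
reference per retired VMA.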