From patchwork Sat May 25 19:26:59 2013
X-Patchwork-Submitter: Ben Widawsky
X-Patchwork-Id: 2614161
From: Ben Widawsky
To: Intel GFX
Cc: Ben Widawsky
Date: Sat, 25 May 2013 12:26:59 -0700
Message-Id: <1369510028-3343-26-git-send-email-ben@bwidawsk.net>
In-Reply-To: <1369510028-3343-1-git-send-email-ben@bwidawsk.net>
References: <1369510028-3343-1-git-send-email-ben@bwidawsk.net>
Subject: [Intel-gfx] [PATCH 25/34] drm/i915: Put the mm in the parent address space

Every address space should support object allocation. It therefore makes
sense to have the allocator be part of the "superclass" from which GGTT and
PPGTT will derive. Since our maximum address space size is only 2GB, we're
not yet able to avoid doing allocation/eviction, but we'd hope that one day
this becomes almost irrelevant.
Signed-off-by: Ben Widawsky
---
 drivers/gpu/drm/i915/i915_dma.c        |  4 ++--
 drivers/gpu/drm/i915/i915_drv.h        |  3 +--
 drivers/gpu/drm/i915/i915_gem.c        |  4 ++--
 drivers/gpu/drm/i915/i915_gem_evict.c  | 10 +++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.c    | 20 ++++++++++++--------
 drivers/gpu/drm/i915/i915_gem_stolen.c |  4 ++--
 6 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 003e4e7..b36be25 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1361,7 +1361,7 @@ cleanup_gem:
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
-	drm_mm_takedown(&dev_priv->mm.gtt_space);
+	drm_mm_takedown(&i915_gtt_vm->mm);
 cleanup_irq:
 	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1778,7 +1778,7 @@ int i915_driver_unload(struct drm_device *dev)
 		i915_free_hws(dev);
 	}

-	drm_mm_takedown(&dev_priv->mm.gtt_space);
+	drm_mm_takedown(&i915_gtt_vm->mm);
 	if (dev_priv->regs != NULL)
 		pci_iounmap(dev->pdev, dev_priv->regs);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a6ef1ae..d9c9b7b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -401,6 +401,7 @@ enum i915_cache_level {
 typedef uint32_t gen6_gtt_pte_t;

 struct i915_address_space {
+	struct drm_mm mm;
 	struct drm_device *dev;
 	unsigned long start;		/* Start offset always 0 for dri2 */
 	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */
@@ -776,8 +777,6 @@ struct intel_l3_parity {
 };

 struct i915_gem_mm {
-	/** Memory allocator for GTT */
-	struct drm_mm gtt_space;
 	/** List of all objects in gtt_space. Used to restore gtt
 	 *  mappings on resume */
 	struct list_head bound_list;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0e7bfe3..5d476a7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2995,7 +2995,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	}

 search_free:
-	ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+	ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm, node,
 						  size, alignment,
 						  obj->cache_level, 0, max,
 						  DRM_MM_CREATE_DEFAULT,
@@ -4077,7 +4077,7 @@ int i915_gem_init(struct drm_device *dev)
 					  i915_gtt_vm->total, false);
 	i915_gem_context_init(dev);
 	if (dev_priv->hw_contexts_disabled) {
-		drm_mm_takedown(&dev_priv->mm.gtt_space);
+		drm_mm_takedown(&i915_gtt_vm->mm);
 		goto ggtt_only;
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c86d5d9..6e620f86 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -78,12 +78,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 	INIT_LIST_HEAD(&unwind_list);

 	if (mappable)
-		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
-					    min_size, alignment, cache_level,
-					    0, dev_priv->gtt.mappable_end);
+		drm_mm_init_scan_with_range(&i915_gtt_vm->mm, min_size,
+					    alignment, cache_level, 0,
+					    dev_priv->gtt.mappable_end);
 	else
-		drm_mm_init_scan(&dev_priv->mm.gtt_space,
-				 min_size, alignment, cache_level);
+		drm_mm_init_scan(&i915_gtt_vm->mm, min_size, alignment,
+				 cache_level);

 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0b08a65..a161213 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -247,6 +247,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
 	int i;

 	drm_mm_remove_node(&ppgtt->node);
+	drm_mm_takedown(&ppgtt->base.mm);

 	if (ppgtt->pt_dma_addr) {
 		for (i = 0; i < ppgtt->num_pd_entries; i++)
@@ -275,8 +276,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	 * multiplied by page size. We allocate at the top of the GTT to avoid
 	 * fragmentation.
 	 */
-	BUG_ON(!drm_mm_initialized(&dev_priv->mm.gtt_space));
-	ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
+	BUG_ON(!drm_mm_initialized(&i915_gtt_vm->mm));
+	ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm,
 						  &ppgtt->node, GEN6_PD_SIZE,
 						  GEN6_PD_ALIGN, 0,
 						  dev_priv->gtt.mappable_end,
@@ -371,6 +372,10 @@ int i915_gem_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 	else
 		BUG();

+	if (!ret)
+		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
+			    ppgtt->base.total);
+
 	return ret;
 }

@@ -643,13 +648,12 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 	BUG_ON(mappable_end > end);

 	if (!guard_page)
-		drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
+		drm_mm_init(&i915_gtt_vm->mm, start, end - start);
 	else
-		drm_mm_init(&dev_priv->mm.gtt_space, start,
-			    end - start - PAGE_SIZE); /* Guard page */
+		drm_mm_init(&i915_gtt_vm->mm, start, end - start - PAGE_SIZE);

 	if (!HAS_LLC(dev))
-		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+		i915_gtt_vm->mm.color_adjust = i915_gtt_color_adjust;

 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -657,7 +661,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 				      obj->gtt_offset, obj->base.size);

 		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
-		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+		obj->gtt_space = drm_mm_create_block(&i915_gtt_vm->mm,
 						     obj->gtt_offset,
 						     obj->base.size,
 						     false);
@@ -668,7 +672,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 	i915_gtt_vm->total = end - start;

 	/* Clear any non-preallocated blocks */
-	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+	drm_mm_for_each_hole(entry, &i915_gtt_vm->mm,
 			     hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 1b7c604..d398333 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -357,8 +357,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	 * setting up the GTT space. The actual reservation will occur
 	 * later.
 	 */
-	if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
-		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+	if (drm_mm_initialized(&i915_gtt_vm->mm)) {
+		obj->gtt_space = drm_mm_create_block(&i915_gtt_vm->mm,
 						     gtt_offset, size,
 						     false);
 		if (obj->gtt_space == NULL) {
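
For reference, below is a minimal C sketch of the layout this change aims for:
the drm_mm range allocator sits in the i915_address_space "superclass", a
PPGTT "derives" from it by embedding that struct as its base, and every
address space can then bring up and tear down its allocator the same way.
This is an illustration only, not the driver code: the field lists are
trimmed, and the example_* helpers are hypothetical names for the pattern the
diff applies inline at each call site.

#include <drm/drmP.h>
#include <drm/drm_mm.h>

/* The allocator lives in the shared "superclass" (fields trimmed). */
struct i915_address_space {
	struct drm_mm mm;		/* range allocator for this space */
	struct drm_device *dev;
	unsigned long start;		/* start offset, 0 for the GGTT */
	size_t total;			/* size the space maps, e.g. 2GB */
};

/* A PPGTT derives from it by embedding the base struct (fields trimmed). */
struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct drm_mm_node node;	/* where the page directory is placed */
	/* page table state elided */
};

/* Hypothetical helper: each space initializes its allocator over its range. */
static void example_address_space_init(struct i915_address_space *vm)
{
	drm_mm_init(&vm->mm, vm->start, vm->total);
}

/* Hypothetical helper: and tears it down symmetrically when it goes away. */
static void example_address_space_fini(struct i915_address_space *vm)
{
	drm_mm_takedown(&vm->mm);
}

With the allocator in the base struct, allocation and eviction code only needs
a pointer to an i915_address_space and no longer cares whether it is handed
the global GTT or a per-process one.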