@@ -1361,7 +1361,7 @@ cleanup_gem:
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
- drm_mm_takedown(&dev_priv->mm.gtt_space);
+ drm_mm_takedown(&i915_gtt_vm->mm);
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
@@ -1778,7 +1778,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
- drm_mm_takedown(&dev_priv->mm.gtt_space);
+ drm_mm_takedown(&i915_gtt_vm->mm);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
@@ -401,6 +401,7 @@ enum i915_cache_level {
typedef uint32_t gen6_gtt_pte_t;
struct i915_address_space {
+ struct drm_mm mm;
struct drm_device *dev;
unsigned long start; /* Start offset always 0 for dri2 */
size_t total; /* size addr space maps (ex. 2GB for ggtt) */
@@ -776,8 +777,6 @@ struct intel_l3_parity {
};
struct i915_gem_mm {
- /** Memory allocator for GTT */
- struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
@@ -2995,7 +2995,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
}
search_free:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+ ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm, node,
size, alignment,
obj->cache_level, 0, max,
DRM_MM_CREATE_DEFAULT,
@@ -4077,7 +4077,7 @@ int i915_gem_init(struct drm_device *dev)
i915_gtt_vm->total, false);
i915_gem_context_init(dev);
if (dev_priv->hw_contexts_disabled) {
- drm_mm_takedown(&dev_priv->mm.gtt_space);
+ drm_mm_takedown(&i915_gtt_vm->mm);
goto ggtt_only;
}
}
@@ -78,12 +78,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
- drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
- min_size, alignment, cache_level,
- 0, dev_priv->gtt.mappable_end);
+ drm_mm_init_scan_with_range(&i915_gtt_vm->mm, min_size,
+ alignment, cache_level, 0,
+ dev_priv->gtt.mappable_end);
else
- drm_mm_init_scan(&dev_priv->mm.gtt_space,
- min_size, alignment, cache_level);
+ drm_mm_init_scan(&i915_gtt_vm->mm, min_size, alignment,
+ cache_level);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -247,6 +247,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
int i;
drm_mm_remove_node(&ppgtt->node);
+ drm_mm_takedown(&ppgtt->base.mm);
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
@@ -275,8 +276,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
* multiplied by page size. We allocate at the top of the GTT to avoid
* fragmentation.
*/
- BUG_ON(!drm_mm_initialized(&dev_priv->mm.gtt_space));
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
+ BUG_ON(!drm_mm_initialized(&i915_gtt_vm->mm));
+ ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm,
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
dev_priv->gtt.mappable_end,
@@ -371,6 +372,10 @@ int i915_gem_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
else
BUG();
+ if (!ret)
+ drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
+ ppgtt->base.total);
+
return ret;
}
@@ -643,13 +648,12 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end);
if (!guard_page)
- drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
+ drm_mm_init(&i915_gtt_vm->mm, start, end - start);
else
- drm_mm_init(&dev_priv->mm.gtt_space, start,
- end - start - PAGE_SIZE); /* Guard page */
+ drm_mm_init(&i915_gtt_vm->mm, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
- dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+ i915_gtt_vm->mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -657,7 +661,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
obj->gtt_offset, obj->base.size);
BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
- obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+ obj->gtt_space = drm_mm_create_block(&i915_gtt_vm->mm,
obj->gtt_offset,
obj->base.size,
false);
@@ -668,7 +672,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
i915_gtt_vm->total = end - start;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+ drm_mm_for_each_hole(entry, &i915_gtt_vm->mm,
hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
@@ -357,8 +357,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
* setting up the GTT space. The actual reservation will occur
* later.
*/
- if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
- obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+ if (drm_mm_initialized(&i915_gtt_vm->mm)) {
+ obj->gtt_space = drm_mm_create_block(&i915_gtt_vm->mm,
gtt_offset, size,
false);
if (obj->gtt_space == NULL) {
Every address space should support object allocation. It therefore makes sense to have the allocator be part of the "superclass" which GGTT and PPGTT will derive. Since our maximum address space size is only 2GB we're not yet able to avoid doing allocation/eviction; but we'd hope one day this becomes almost irrelevant. Signed-off-by: Ben Widawsky <ben@bwidawsk.net> --- drivers/gpu/drm/i915/i915_dma.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 3 +-- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_evict.c | 10 +++++----- drivers/gpu/drm/i915/i915_gem_gtt.c | 20 ++++++++++++-------- drivers/gpu/drm/i915/i915_gem_stolen.c | 4 ++-- 6 files changed, 24 insertions(+), 21 deletions(-)