
[2/4] drm/i915: kill obj->gtt_offset

Message ID 1302893858-8234-3-git-send-email-daniel.vetter@ffwll.ch (mailing list archive)
State New, archived

Commit Message

Daniel Vetter April 15, 2011, 6:57 p.m. UTC
Yet another massive round of sed'ing.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |   12 +++---
 drivers/gpu/drm/i915/i915_drv.h            |    7 ----
 drivers/gpu/drm/i915/i915_gem.c            |   48 +++++++++++++--------------
 drivers/gpu/drm/i915/i915_gem_debug.c      |    6 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   10 +++---
 drivers/gpu/drm/i915/i915_gem_tiling.c     |   10 +++---
 drivers/gpu/drm/i915/i915_irq.c            |   10 +++---
 drivers/gpu/drm/i915/intel_display.c       |   22 ++++++------
 drivers/gpu/drm/i915/intel_fb.c            |    6 ++--
 drivers/gpu/drm/i915/intel_overlay.c       |   14 ++++----
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   12 +++---
 11 files changed, 74 insertions(+), 83 deletions(-)
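
In essence every hunk below makes the same substitution: the cached obj->gtt_offset field is removed from struct drm_i915_gem_object and callers read the offset straight out of the embedded drm_mm allocation. A minimal before/after sketch (not taken from the patch itself), assuming, as the rest of the series sets up, that gtt_space is an embedded struct drm_mm_node:

    /* before this patch: a separately cached copy of the bind address */
    u32 offset = obj->gtt_offset;

    /* after this patch: read the offset from the drm_mm node itself;
     * drm_mm_node.start is unsigned long, hence the %08x -> %08lx
     * format-string changes in the debugfs/WARN hunks below
     */
    unsigned long offset = obj->gtt_space.start;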

Comments

Chris Wilson April 15, 2011, 6:56 p.m. UTC | #1
On Fri, 15 Apr 2011 20:57:36 +0200, Daniel Vetter <daniel.vetter@ffwll.ch> wrote:
> Yet another massive round of sed'ing.

The only hitch here is that in the vmap code obj->gtt_offset !=
obj->gtt_space.offset.

There obj->gtt_space.offset is the base of the page aligned region allocated
in the GTT and obj->gtt_offset is obj->gtt_space.offset +
offset_in_page(user_addr).

I haven't checked but is obj->gtt_space immutable by the caller, i.e. can
we modify obj->gtt_space.offset and drm_mm still function correctly? Bake
the page aligned assumption into drm_mm? Or simply undo the page_offset
when releasing the gtt_space...? The latter sounds like it would work
best.
-Chris
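
A minimal sketch of the vmap relationship Chris describes, written with the field name used in the patch (gtt_space.start); the helper name and user_addr are illustrative, and offset_in_page() is the standard kernel macro for the sub-page part of an address:

    /* The drm_mm allocation is page aligned, but the user pointer backing
     * a vmap object usually is not, so the GTT address the GPU must use
     * carries the sub-page offset of user_addr on top of the node start.
     */
    static unsigned long vmap_gtt_offset(struct drm_i915_gem_object *obj,
                                         unsigned long user_addr)
    {
            return obj->gtt_space.start + offset_in_page(user_addr);
    }
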
Daniel Vetter April 15, 2011, 7:19 p.m. UTC | #2
On Fri, Apr 15, 2011 at 07:56:10PM +0100, Chris Wilson wrote:
> On Fri, 15 Apr 2011 20:57:36 +0200, Daniel Vetter <daniel.vetter@ffwll.ch> wrote:
> > Yet another massive round of sed'ing.
> 
> The only hitch here is that in the vmap code obj->gtt_offset !=
> obj->gtt_space.offset.
> 
> There obj->gtt_space.offset is the base of the page aligned region allocated
> in the GTT and obj->gtt_offset is obj->gtt_space.offset +
> offset_in_page(user_addr).
> 
> I haven't checked but is obj->gtt_space immutable by the caller, i.e. can
> we modify obj->gtt_space.offset and drm_mm still function correctly? Bake
> the page aligned assumption into drm_mm? Or simply undo the page_offset
> when releasing the gtt_space...? The latter sounds like it would work
> best.

Mucking around with drm_mm_node->start is a bad idea; it's used to track
the end of the preceding free area (if there is one).

Also I find having a bo with a not-page-aligned gtt offset kinda creepy
... So if the kernel really needs to track this, could it be tracked in a
special vmap handle object? Or is this really required, given that all the
normal memory mapper syscalls only work on page boundaries, too? I.e. why
can't userspace keep track of the offset?
-Daniel
Chris Wilson April 15, 2011, 8:04 p.m. UTC | #3
On Fri, 15 Apr 2011 21:19:00 +0200, Daniel Vetter <daniel@ffwll.ch> wrote:
> Mucking around with drm_mm_node->start is a bad idea; it's used to track
> the end of the preceding free area (if there is one).
> 
> Also I find having a bo with a not-page-aligned gtt offset kinda creepy
> ... So if the kernel really needs to track this, could it be tracked in a
> special vmap handle object?

All the relocation handling code is generic: gtt_offset + user delta.
Since the gtt_offset is computed once, the vmap code applies the offset to
it directly. I suppose drm_i915_gem_object could grow an additional
gtt_offset_offset...

> Or is this really required, given that all the
> normal memory mapper syscalls only work on page boundaries, too? I.e. why
> can't userspace keep track of the offset?

Because that is ugly. Userspace passes in user_addr + user_length, which
can be precisely checked for the proposed access.

The alternative you propose is to pass in (user_page_base_addr,
user_page_offset) + user_length and then continue to track
user_page_offset in the userspace code to apply to reloc.delta as well.

In all, I'm favouring keeping gtt_offset. Can we postpone this one until
you've had a chance to review vmap, which I promise will be in the next
set for drm-intel-next-proposed...
-Chris
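
A sketch of the two schemes being contrasted; reloc->delta and target_offset follow the execbuffer code in the patch below, the rest is illustrative:

    /* Generic relocation step: the batch is patched with the target's
     * GTT address plus the user-supplied delta.
     */
    u32 value = target_offset + reloc->delta;

    /* Keeping gtt_offset (Chris): for vmap objects the kernel folds the
     * sub-page offset of the user pointer in once,
     *     gtt_offset = gtt_space.start + offset_in_page(user_addr),
     * so the step above needs no special casing.
     *
     * Userspace-tracked alternative (Daniel): userspace passes a page
     * aligned base plus a separate page offset, and has to add that page
     * offset into reloc->delta for every relocation itself.
     */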

Patch

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ad94a12..1a6783f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -136,8 +136,8 @@  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	if (drm_mm_node_allocated(&obj->gtt_space))
-		seq_printf(m, " (gtt offset: %08x, size: %08x)",
-			   obj->gtt_offset, (unsigned int)obj->gtt_space.size);
+		seq_printf(m, " (gtt offset: %08lx, size: %08x)",
+			   obj->gtt_space.start, (unsigned int)obj->gtt_space.size);
 	if (obj->pin_mappable || obj->fault_mappable) {
 		char s[3], *t = s;
 		if (obj->pin_mappable)
@@ -353,12 +353,12 @@  static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 			if (work->old_fb_obj) {
 				struct drm_i915_gem_object *obj = work->old_fb_obj;
 				if (obj)
-					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", obj->gtt_space.start);
 			}
 			if (work->pending_flip_obj) {
 				struct drm_i915_gem_object *obj = work->pending_flip_obj;
 				if (obj)
-					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n", obj->gtt_space.start);
 			}
 		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -570,7 +570,7 @@  static void i915_dump_object(struct seq_file *m,
 	page_count = obj->base.size / PAGE_SIZE;
 	for (page = 0; page < page_count; page++) {
 		u32 *mem = io_mapping_map_wc(mapping,
-					     obj->gtt_offset + page * PAGE_SIZE);
+					     obj->gtt_space.start + page * PAGE_SIZE);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
 		io_mapping_unmap(mem);
@@ -591,7 +591,7 @@  static int i915_batchbuffer_info(struct seq_file *m, void *data)
 
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
-		    seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+		    seq_printf(m, "--- gtt_offset = 0x%08lx\n", obj->gtt_space.start);
 		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21ac706..2301a6a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -820,13 +820,6 @@  struct drm_i915_gem_object {
 	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;
 
-	/**
-	 * Current offset of the object in GTT space.
-	 *
-	 * This is the same as gtt_space->start
-	 */
-	uint32_t gtt_offset;
-
 	/** Breadcrumb of last rendering to the buffer. */
 	uint32_t last_rendering_seqno;
 	struct intel_ring_buffer *ring;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 429c5d3..d08ad01 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -632,7 +632,7 @@  i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	offset = obj->gtt_offset + args->offset;
+	offset = obj->gtt_space.start + args->offset;
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -721,7 +721,7 @@  i915_gem_gtt_pwrite_slow(struct drm_device *dev,
 	if (ret)
 		goto out_unpin_pages;
 
-	offset = obj->gtt_offset + args->offset;
+	offset = obj->gtt_space.start + args->offset;
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -1250,7 +1250,7 @@  int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	obj->fault_mappable = true;
 
-	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
+	pfn = ((dev->agp->base + obj->gtt_space.start) >> PAGE_SHIFT) +
 		page_offset;
 
 	/* Finally, remap it using the new GTT offset */
@@ -2245,7 +2245,7 @@  i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	obj->map_and_fenceable = true;
 
 	drm_mm_remove_node(&obj->gtt_space);
-	obj->gtt_offset = 0;
+	obj->gtt_space.start = 0;
 
 	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
@@ -2323,9 +2323,9 @@  static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
 	int regnum = obj->fence_reg;
 	uint64_t val;
 
-	val = (uint64_t)((obj->gtt_offset + size - 4096) &
+	val = (uint64_t)((obj->gtt_space.start + size - 4096) &
 			 0xfffff000) << 32;
-	val |= obj->gtt_offset & 0xfffff000;
+	val |= obj->gtt_space.start & 0xfffff000;
 	val |= (uint64_t)((obj->stride / 128) - 1) <<
 		SANDYBRIDGE_FENCE_PITCH_SHIFT;
 
@@ -2360,9 +2360,9 @@  static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
 	int regnum = obj->fence_reg;
 	uint64_t val;
 
-	val = (uint64_t)((obj->gtt_offset + size - 4096) &
+	val = (uint64_t)((obj->gtt_space.start + size - 4096) &
 		    0xfffff000) << 32;
-	val |= obj->gtt_offset & 0xfffff000;
+	val |= obj->gtt_space.start & 0xfffff000;
 	val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
 	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2395,11 +2395,11 @@  static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
 	u32 fence_reg, val, pitch_val;
 	int tile_width;
 
-	if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+	if (WARN((obj->gtt_space.start & ~I915_FENCE_START_MASK) ||
 		 (size & -size) != size ||
-		 (obj->gtt_offset & (size - 1)),
-		 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		 obj->gtt_offset, obj->map_and_fenceable, size))
+		 (obj->gtt_space.start & (size - 1)),
+		 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		 obj->gtt_space.start, obj->map_and_fenceable, size))
 		return -EINVAL;
 
 	if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
@@ -2411,7 +2411,7 @@  static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
 	pitch_val = obj->stride / tile_width;
 	pitch_val = ffs(pitch_val) - 1;
 
-	val = obj->gtt_offset;
+	val = obj->gtt_space.start;
 	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
 	val |= I915_FENCE_SIZE_BITS(size);
@@ -2450,17 +2450,17 @@  static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
 	uint32_t val;
 	uint32_t pitch_val;
 
-	if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+	if (WARN((obj->gtt_space.start & ~I830_FENCE_START_MASK) ||
 		 (size & -size) != size ||
-		 (obj->gtt_offset & (size - 1)),
-		 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
-		 obj->gtt_offset, size))
+		 (obj->gtt_space.start & (size - 1)),
+		 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+		 obj->gtt_space.start, size))
 		return -EINVAL;
 
 	pitch_val = obj->stride / 128;
 	pitch_val = ffs(pitch_val) - 1;
 
-	val = obj->gtt_offset;
+	val = obj->gtt_space.start;
 	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
 	val |= I830_FENCE_SIZE_BITS(size);
@@ -2880,14 +2880,12 @@  i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	obj->gtt_offset = obj->gtt_space.start;
-
 	fenceable =
 		obj->gtt_space.size == fence_size &&
 		(obj->gtt_space.start & (fence_alignment -1)) == 0;
 
 	mappable =
-		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+		obj->gtt_space.start + obj->base.size <= dev_priv->mm.gtt_mappable_end;
 
 	obj->map_and_fenceable = mappable && fenceable;
 
@@ -3409,13 +3407,13 @@  i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	WARN_ON(i915_verify_lists(dev));
 
 	if (drm_mm_node_allocated(&obj->gtt_space)) {
-		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+		if ((alignment && obj->gtt_space.start & (alignment - 1)) ||
 		    (map_and_fenceable && !obj->map_and_fenceable)) {
 			WARN(obj->pin_count,
 			     "bo is already pinned with incorrect alignment:"
-			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
-			     obj->gtt_offset, alignment,
+			     obj->gtt_space.start, alignment,
 			     map_and_fenceable,
 			     obj->map_and_fenceable);
 			ret = i915_gem_object_unbind(obj);
@@ -3504,7 +3502,7 @@  i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	 * as the X server doesn't manage domains yet
 	 */
 	i915_gem_object_flush_cpu_write_domain(obj);
-	args->offset = obj->gtt_offset;
+	args->offset = obj->gtt_space.start;
 out:
 	drm_gem_object_unreference(&obj->base);
 unlock:
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 8da1899..1af7b9d 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -145,10 +145,10 @@  i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
 	int bad_count = 0;
 
 	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
-		 __func__, obj, obj->gtt_offset, handle,
+		 __func__, obj, obj->gtt_space.start, handle,
 		 obj->size / 1024);
 
-	gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
+	gtt_mapping = ioremap(dev->agp->base + obj->gtt_space.start, obj->base.size);
 	if (gtt_mapping == NULL) {
 		DRM_ERROR("failed to map GTT space\n");
 		return;
@@ -172,7 +172,7 @@  i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
 			if (cpuval != gttval) {
 				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
 					 "0x%08x vs 0x%08x\n",
-					 (int)(obj->gtt_offset +
+					 (int)(obj->gtt_space.start +
 					       page * PAGE_SIZE + i * 4),
 					 cpuval, gttval);
 				if (bad_count++ >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7774843..a06fac5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -284,7 +284,7 @@  i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	if (unlikely(target_obj == NULL))
 		return -ENOENT;
 
-	target_offset = to_intel_bo(target_obj)->gtt_offset;
+	target_offset = to_intel_bo(target_obj)->gtt_space.start;
 
 	/* The target buffer should have appeared before us in the
 	 * exec_object list, so it should have a GTT space bound by now.
@@ -376,7 +376,7 @@  i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 			return ret;
 
 		/* Map the page containing the relocation we're going to perform.  */
-		reloc->offset += obj->gtt_offset;
+		reloc->offset += obj->gtt_space.start;
 		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 						      reloc->offset & PAGE_MASK);
 		reloc_entry = (uint32_t __iomem *)
@@ -531,7 +531,7 @@  i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			need_mappable =
 				entry->relocation_count ? true : need_fence;
 
-			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+			if ((entry->alignment && obj->gtt_space.start & (entry->alignment - 1)) ||
 			    (need_mappable && !obj->map_and_fenceable))
 				ret = i915_gem_object_unbind(obj);
 			else
@@ -580,7 +580,7 @@  i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 				obj->pending_fenced_gpu_access = need_fence;
 			}
 
-			entry->offset = obj->gtt_offset;
+			entry->offset = obj->gtt_space.start;
 		}
 
 		/* Decrement pin count for bound objects */
@@ -1164,7 +1164,7 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	trace_i915_gem_ring_dispatch(ring, seqno);
 
-	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+	exec_start = batch_obj->gtt_space.start + args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
 		for (i = 0; i < args->num_cliprects; i++) {
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e894a81..820c984 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -245,10 +245,10 @@  i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 		return true;
 
 	if (INTEL_INFO(obj->base.dev)->gen == 3) {
-		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+		if (obj->gtt_space.start & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
-		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+		if (obj->gtt_space.start & ~I830_FENCE_START_MASK)
 			return false;
 	}
 
@@ -267,7 +267,7 @@  i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 	if (obj->gtt_space.size != size)
 		return false;
 
-	if (obj->gtt_offset & (size - 1))
+	if (obj->gtt_space.start & (size - 1))
 		return false;
 
 	return true;
@@ -350,14 +350,14 @@  i915_gem_set_tiling(struct drm_device *dev, void *data,
 
 		obj->map_and_fenceable =
 			!drm_mm_node_allocated(&obj->gtt_space) ||
-			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+			(obj->gtt_space.start + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
 			 i915_gem_object_fence_ok(obj, args->tiling_mode));
 
 		/* Rebind if we need a change of alignment */
 		if (!obj->map_and_fenceable) {
 			u32 unfenced_alignment =
 				i915_gem_get_unfenced_gtt_alignment(obj);
-			if (obj->gtt_offset & (unfenced_alignment - 1))
+			if (obj->gtt_space.start & (unfenced_alignment - 1))
 				ret = i915_gem_object_unbind(obj);
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5c0466e..e9d43e7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -588,7 +588,7 @@  i915_error_object_create(struct drm_i915_private *dev_priv,
 	if (dst == NULL)
 		return NULL;
 
-	reloc_offset = src->gtt_offset;
+	reloc_offset = src->gtt_space.start;
 	for (page = 0; page < page_count; page++) {
 		unsigned long flags;
 		void __iomem *s;
@@ -610,7 +610,7 @@  i915_error_object_create(struct drm_i915_private *dev_priv,
 		reloc_offset += PAGE_SIZE;
 	}
 	dst->page_count = page_count;
-	dst->gtt_offset = src->gtt_offset;
+	dst->gtt_offset = src->gtt_space.start;
 
 	return dst;
 
@@ -663,7 +663,7 @@  static u32 capture_bo_list(struct drm_i915_error_buffer *err,
 		err->size = obj->base.size;
 		err->name = obj->base.name;
 		err->seqno = obj->last_rendering_seqno;
-		err->gtt_offset = obj->gtt_offset;
+		err->gtt_offset = obj->gtt_space.start;
 		err->read_domains = obj->base.read_domains;
 		err->write_domain = obj->base.write_domain;
 		err->fence_reg = obj->fence_reg;
@@ -1071,10 +1071,10 @@  static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 	obj = work->pending_flip_obj;
 	if (INTEL_INFO(dev)->gen >= 4) {
 		int dspsurf = DSPSURF(intel_crtc->plane);
-		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
+		stall_detected = I915_READ(dspsurf) == obj->gtt_space.start;
 	} else {
 		int dspaddr = DSPADDR(intel_crtc->plane);
-		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+		stall_detected = I915_READ(dspaddr) == (obj->gtt_space.start +
 							crtc->y * crtc->fb->pitch +
 							crtc->x * crtc->fb->bits_per_pixel/8);
 	}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 62f9e52..c55f5ac 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1571,7 +1571,7 @@  static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
 		    dev_priv->cfb_fence == obj->fence_reg &&
 		    dev_priv->cfb_plane == intel_crtc->plane &&
-		    dev_priv->cfb_offset == obj->gtt_offset &&
+		    dev_priv->cfb_offset == obj->gtt_space.start &&
 		    dev_priv->cfb_y == crtc->y)
 			return;
 
@@ -1582,7 +1582,7 @@  static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
 	dev_priv->cfb_fence = obj->fence_reg;
 	dev_priv->cfb_plane = intel_crtc->plane;
-	dev_priv->cfb_offset = obj->gtt_offset;
+	dev_priv->cfb_offset = obj->gtt_space.start;
 	dev_priv->cfb_y = crtc->y;
 
 	dpfc_ctl &= DPFC_RESERVED;
@@ -1598,7 +1598,7 @@  static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_space.start | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -1894,7 +1894,7 @@  intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	I915_WRITE(reg, dspcntr);
 
-	Start = obj->gtt_offset;
+	Start = obj->gtt_space.start;
 	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
 
 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -5362,7 +5362,7 @@  static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 			goto fail_unpin;
 		}
 
-		addr = obj->gtt_offset;
+		addr = obj->gtt_space.start;
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
 		ret = i915_gem_attach_phys_object(dev, obj,
@@ -6144,7 +6144,7 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset + offset);
+		OUT_RING(obj->gtt_space.start + offset);
 		OUT_RING(MI_NOOP);
 		break;
 
@@ -6152,7 +6152,7 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		OUT_RING(MI_DISPLAY_FLIP_I915 |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset + offset);
+		OUT_RING(obj->gtt_space.start + offset);
 		OUT_RING(MI_NOOP);
 		break;
 
@@ -6165,7 +6165,7 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj->gtt_offset | obj->tiling_mode);
+		OUT_RING(obj->gtt_space.start | obj->tiling_mode);
 
 		/* XXX Enabling the panel-fitter across page-flip is so far
 		 * untested on non-native modes, so ignore it for now.
@@ -6180,7 +6180,7 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch | obj->tiling_mode);
-		OUT_RING(obj->gtt_offset);
+		OUT_RING(obj->gtt_space.start);
 
 		pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
 		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
@@ -7197,7 +7197,7 @@  void ironlake_enable_rc6(struct drm_device *dev)
 
 	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
 	OUT_RING(MI_SET_CONTEXT);
-	OUT_RING(dev_priv->renderctx->gtt_offset |
+	OUT_RING(dev_priv->renderctx->gtt_space.start |
 		 MI_MM_SPACE_GTT |
 		 MI_SAVE_EXT_STATE_EN |
 		 MI_RESTORE_EXT_STATE_EN |
@@ -7220,7 +7220,7 @@  void ironlake_enable_rc6(struct drm_device *dev)
 		return;
 	}
 
-	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_space.start | PWRCTX_EN);
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 	mutex_unlock(&dev->struct_mutex);
 }
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 5127827..9e8a67d 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -136,10 +136,10 @@  static int intelfb_create(struct intel_fbdev *ifbdev,
 	info->apertures->ranges[0].size =
 		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
-	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_space.start;
 	info->fix.smem_len = size;
 
-	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
+	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_space.start, size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
 		goto out_unpin;
@@ -159,7 +159,7 @@  static int intelfb_create(struct intel_fbdev *ifbdev,
 
 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
 		      fb->width, fb->height,
-		      obj->gtt_offset, obj);
+		      obj->gtt_space.start, obj);
 
 
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index fcf6fcb..8fa3597 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -199,7 +199,7 @@  intel_overlay_map_regs(struct intel_overlay *overlay)
 		regs = overlay->reg_bo->phys_obj->handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
-					 overlay->reg_bo->gtt_offset);
+					 overlay->reg_bo->gtt_space.start);
 
 	return regs;
 }
@@ -817,7 +817,7 @@  static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
 				       params->offset_Y, tmp_width);
 	regs->SHEIGHT = params->src_h;
-	regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
+	regs->OBUF_0Y = new_bo->gtt_space.start + params-> offset_Y;
 	regs->OSTRIDE = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -831,8 +831,8 @@  static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 				      params->src_w/uv_hscale);
 		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
 		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
-		regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
-		regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
+		regs->OBUF_0U = new_bo->gtt_space.start + params->offset_U;
+		regs->OBUF_0V = new_bo->gtt_space.start + params->offset_V;
 		regs->OSTRIDE |= params->stride_UV << 16;
 	}
 
@@ -1427,7 +1427,7 @@  void intel_setup_overlay(struct drm_device *dev)
                         DRM_ERROR("failed to pin overlay register bo\n");
                         goto out_free_bo;
                 }
-		overlay->flip_addr = reg_bo->gtt_offset;
+		overlay->flip_addr = reg_bo->gtt_space.start;
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
@@ -1501,7 +1501,7 @@  intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 		regs = overlay->reg_bo->phys_obj->handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-						overlay->reg_bo->gtt_offset);
+						overlay->reg_bo->gtt_space.start);
 
 	return regs;
 }
@@ -1534,7 +1534,7 @@  intel_overlay_capture_error_state(struct drm_device *dev)
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
 		error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
 	else
-		error->base = (long) overlay->reg_bo->gtt_offset;
+		error->base = (long) overlay->reg_bo->gtt_space.start;
 
 	regs = intel_overlay_map_regs_atomic(overlay);
 	if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f15d80f..638e63f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -151,7 +151,7 @@  static int init_ring_common(struct intel_ring_buffer *ring)
 	ring->write_tail(ring, 0);
 
 	/* Initialize the ring. */
-	I915_WRITE_START(ring, obj->gtt_offset);
+	I915_WRITE_START(ring, obj->gtt_space.start);
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
@@ -183,7 +183,7 @@  static int init_ring_common(struct intel_ring_buffer *ring)
 
 	/* If the head is still not zero, the ring is dead */
 	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
-	    I915_READ_START(ring) != obj->gtt_offset ||
+	    I915_READ_START(ring) != obj->gtt_space.start ||
 	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
 		DRM_ERROR("%s initialization failed "
 				"ctl %08x head %08x tail %08x start %08x\n",
@@ -243,7 +243,7 @@  init_pipe_control(struct intel_ring_buffer *ring)
 	if (ret)
 		goto err_unref;
 
-	pc->gtt_offset = obj->gtt_offset;
+	pc->gtt_offset = obj->gtt_space.start;
 	pc->cpu_page =  kmap(obj->pages[0]);
 	if (pc->cpu_page == NULL)
 		goto err_unpin;
@@ -768,7 +768,7 @@  static int init_status_page(struct intel_ring_buffer *ring)
 		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.gfx_addr = obj->gtt_space.start;
 	ring->status_page.page_addr = kmap(obj->pages[0]);
 	if (ring->status_page.page_addr == NULL) {
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -825,7 +825,7 @@  int intel_init_ring_buffer(struct drm_device *dev,
 		goto err_unref;
 
 	ring->map.size = ring->size;
-	ring->map.offset = dev->agp->base + obj->gtt_offset;
+	ring->map.offset = dev->agp->base + obj->gtt_space.start;
 	ring->map.type = 0;
 	ring->map.flags = 0;
 	ring->map.mtrr = 0;
@@ -1211,7 +1211,7 @@  static int blt_ring_begin(struct intel_ring_buffer *ring,
 			return ret;
 
 		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
-		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_space.start);
 
 		return 0;
 	} else