
[5/8] drm/i915: Convert active API to VMA

Message ID 1378936675-27587-5-git-send-email-benjamin.widawsky@intel.com (mailing list archive)
State New, archived

Commit Message

Ben Widawsky Sept. 11, 2013, 9:57 p.m. UTC
From: Ben Widawsky <ben@bwidawsk.net>

Even though we track activity per object and not per VMA, the
active_list is based on the VM, so it makes the most sense to use VMAs
in the APIs.

NOTE: Daniel intends to eventually rip out active/inactive LRUs, but for
now, leave them be.

v2: Remove a leftover hunk from the previous patch, which did not keep
i915_gem_object_move_to_active. That patch had to rely on the ring,
rather than the obj, to get the dev. (Chris)

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h            | 5 ++---
 drivers/gpu/drm/i915/i915_gem.c            | 9 ++++++++-
 drivers/gpu/drm/i915/i915_gem_context.c    | 8 ++++----
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3 +--
 4 files changed, 15 insertions(+), 10 deletions(-)
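
In caller terms, the conversion collapses the old two-step pattern into
a single VMA-based entry point; a minimal before/after sketch drawn
from the call sites touched in the diff below:

	/* before: each caller moved the vma onto its VM's active_list
	 * by hand, then marked the backing object active on the ring */
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	i915_gem_object_move_to_active(obj, ring);

	/* after: one call does both, keyed on the vma */
	i915_vma_move_to_active(vma, ring);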

Comments

Chris Wilson Sept. 11, 2013, 10:14 p.m. UTC | #1
On Wed, Sep 11, 2013 at 02:57:52PM -0700, Ben Widawsky wrote:
>  	if (from != NULL) {
> -		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
> -		struct i915_address_space *ggtt = &dev_priv->gtt.base;
> +		struct drm_i915_private *dev_priv = ring->dev->dev_private;
> +		struct i915_vma *vma =
> +			i915_gem_obj_to_vma(from->obj, &dev_priv->gtt.base);
>  		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
> -		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
> -		i915_gem_object_move_to_active(from->obj, ring);
> +		i915_vma_move_to_active(vma, ring);

We fairly often look up the ggtt vma for an obj. Is this worth a
specialised function call?

i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);

	struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
	{
		struct i915_vma *vma;

		if (WARN_ON(list_empty(&obj->vma_list)))
			return NULL;

		vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
		if (WARN_ON(vma->vm != &to_i915(obj->base.dev)->gtt.base))
			return NULL;

		return vma;
	}
-Chris
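
Both WARN paths in the helper sketched above return NULL, so a caller
would presumably need to guard the result before passing it along; a
hypothetical sketch of the do_switch() usage:

	struct i915_vma *vma = i915_gem_obj_to_ggtt(from->obj);

	if (vma)	/* NULL if the obj carries no GGTT binding */
		i915_vma_move_to_active(vma, ring);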

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7caf71d..09a72c8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1884,9 +1884,8 @@  static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0a99979..5796e31 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1913,7 +1913,7 @@  i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_ring_buffer *ring)
 {
@@ -1952,6 +1952,13 @@  i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	}
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring)
+{
+	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 26c3fcc..b71649a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -436,11 +436,11 @@  static int do_switch(struct i915_hw_context *to)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
-		struct i915_address_space *ggtt = &dev_priv->gtt.base;
+		struct drm_i915_private *dev_priv = ring->dev->dev_private;
+		struct i915_vma *vma =
+			i915_gem_obj_to_vma(from->obj, &dev_priv->gtt.base);
 		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
-		i915_gem_object_move_to_active(from->obj, ring);
+		i915_vma_move_to_active(vma, ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ee93357..b26d979 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -872,8 +872,7 @@  i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-		list_move_tail(&vma->mm_list, &vma->vm->active_list);
-		i915_gem_object_move_to_active(obj, ring);
+		i915_vma_move_to_active(vma, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = intel_ring_get_seqno(ring);