
[7/8] drm/i915: Use the new vm [un]bind functions

Message ID 1378936675-27587-7-git-send-email-benjamin.widawsky@intel.com (mailing list archive)
State New, archived

Commit Message

Ben Widawsky Sept. 11, 2013, 9:57 p.m. UTC
From: Ben Widawsky <ben@bwidawsk.net>

Building on the last patch, which created the new function pointers in
the VM for bind/unbind, here we actually put those new function pointers
to use.

Split out as a separate patch to aid in review. I'm fine with squashing
into the previous patch if people request it.

v2: Updated to address the smart ggtt which can do aliasing as needed.
Make sure we bind to the global GTT when mappable and fenceable. I thought
we could get away without this initially, but we cannot.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h            |  9 ------
 drivers/gpu/drm/i915/i915_gem.c            | 31 ++++++++-----------
 drivers/gpu/drm/i915/i915_gem_context.c    |  8 +++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 29 ++++++++----------
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 48 ++----------------------------
 5 files changed, 34 insertions(+), 91 deletions(-)
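
For reference, a minimal sketch of the interface this patch exercises. The
signatures are inferred from the call sites below (vma->vm->bind_vma(vma,
cache_level, flags) and vma->vm->unbind_vma(vma)); their exact placement in
struct i915_address_space comes from the previous patch and is assumed here:

struct i915_address_space {
        ...
        /* Map the pages backing @vma into this address space. @flags may
         * include GLOBAL_BIND to force a global GTT mapping even when an
         * aliasing PPGTT is present. */
        void (*bind_vma)(struct i915_vma *vma,
                         enum i915_cache_level cache_level,
                         u32 flags);
        /* Remove the mapping for @vma from this address space. */
        void (*unbind_vma)(struct i915_vma *vma);
};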

Comments

Chris Wilson Sept. 11, 2013, 10:39 p.m. UTC | #1
On Wed, Sep 11, 2013 at 02:57:54PM -0700, Ben Widawsky wrote:
> @@ -464,11 +465,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
>  				struct intel_ring_buffer *ring,
>  				bool *need_reloc)
>  {
> -	struct drm_i915_private *dev_priv = ring->dev->dev_private;
> +	struct drm_i915_gem_object *obj = vma->obj;
>  	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
>  	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
>  	bool need_fence, need_mappable;
> -	struct drm_i915_gem_object *obj = vma->obj;
> +	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
> +		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
>  	int ret;
>  
>  	need_fence =
> @@ -497,14 +499,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
>  		}
>  	}
>  
> -	/* Ensure ppgtt mapping exists if needed */
> -	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
> -		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
> -				       obj, obj->cache_level);
> -
> -		obj->has_aliasing_ppgtt_mapping = 1;
> -	}
> -
>  	if (entry->offset != vma->node.start) {
>  		entry->offset = vma->node.start;
>  		*need_reloc = true;
> @@ -515,9 +509,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
>  		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
>  	}
>  
> -	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
> -	    !obj->has_global_gtt_mapping)
> -		i915_gem_gtt_bind_object(obj, obj->cache_level);
> +	vma->vm->bind_vma(vma, obj->cache_level, flags);

This results in us rebinding into the vma unconditionally once/twice for
every object on every execbuffer, right?
-Chris
Ben Widawsky Sept. 14, 2013, 1:08 a.m. UTC | #2
On Wed, Sep 11, 2013 at 11:39:30PM +0100, Chris Wilson wrote:
> On Wed, Sep 11, 2013 at 02:57:54PM -0700, Ben Widawsky wrote:
> > @@ -464,11 +465,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
> >  				struct intel_ring_buffer *ring,
> >  				bool *need_reloc)
> >  {
> > -	struct drm_i915_private *dev_priv = ring->dev->dev_private;
> > +	struct drm_i915_gem_object *obj = vma->obj;
> >  	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
> >  	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
> >  	bool need_fence, need_mappable;
> > -	struct drm_i915_gem_object *obj = vma->obj;
> > +	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
> > +		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
> >  	int ret;
> >  
> >  	need_fence =
> > @@ -497,14 +499,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
> >  		}
> >  	}
> >  
> > -	/* Ensure ppgtt mapping exists if needed */
> > -	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
> > -		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
> > -				       obj, obj->cache_level);
> > -
> > -		obj->has_aliasing_ppgtt_mapping = 1;
> > -	}
> > -
> >  	if (entry->offset != vma->node.start) {
> >  		entry->offset = vma->node.start;
> >  		*need_reloc = true;
> > @@ -515,9 +509,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
> >  		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
> >  	}
> >  
> > -	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
> > -	    !obj->has_global_gtt_mapping)
> > -		i915_gem_gtt_bind_object(obj, obj->cache_level);
> > +	vma->vm->bind_vma(vma, obj->cache_level, flags);
> 
> This results in us rebinding into the vma unconditionally once/twice for
> every object on every execbuffer, right?
> -Chris
> 

Hmm, how does this suit you (untested)? I think it is addressing your concern.
Also, I suspect squashing this patch with the previous is probably a good thing
to do.


static void gen6_ggtt_bind_vma(struct i915_vma *vma,
                               enum i915_cache_level cache_level,
                               u32 flags)
{
        struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = vma->obj;
        const unsigned long entry = vma->node.start >> PAGE_SHIFT;

        /* Bind into the global GTT only when there is no aliasing PPGTT,
         * or the caller explicitly asked for a global mapping; otherwise
         * the aliasing PPGTT mapping below is sufficient. */
        if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
                /* If the object is unbound, or we're changing the cache bits */
                if (!obj->has_global_gtt_mapping ||
                    (cache_level != obj->cache_level)) {
                        gen6_ggtt_insert_entries(vma->vm, obj->pages, entry,
                                                 cache_level);
                        obj->has_global_gtt_mapping = 1;
                }
        }

        /* If we have an aliasing PPGTT, also put the mapping there, even
         * when the user requested a global mapping. */
        if (dev_priv->mm.aliasing_ppgtt &&
            (!obj->has_aliasing_ppgtt_mapping ||
             (cache_level != obj->cache_level))) {
                gen6_ppgtt_insert_entries(&dev_priv->mm.aliasing_ppgtt->base,
                                          vma->obj->pages, entry, cache_level);
                vma->obj->has_aliasing_ppgtt_mapping = 1;
        }
}
Chris Wilson Sept. 14, 2013, 3:31 p.m. UTC | #3
On Fri, Sep 13, 2013 at 06:08:17PM -0700, Ben Widawsky wrote:
> On Wed, Sep 11, 2013 at 11:39:30PM +0100, Chris Wilson wrote:
> > On Wed, Sep 11, 2013 at 02:57:54PM -0700, Ben Widawsky wrote:
> > > @@ -464,11 +465,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
> > >  				struct intel_ring_buffer *ring,
> > >  				bool *need_reloc)
> > >  {
> > > -	struct drm_i915_private *dev_priv = ring->dev->dev_private;
> > > +	struct drm_i915_gem_object *obj = vma->obj;
> > >  	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
> > >  	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
> > >  	bool need_fence, need_mappable;
> > > -	struct drm_i915_gem_object *obj = vma->obj;
> > > +	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
> > > +		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
> > >  	int ret;
> > >  
> > >  	need_fence =
> > > @@ -497,14 +499,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
> > >  		}
> > >  	}
> > >  
> > > -	/* Ensure ppgtt mapping exists if needed */
> > > -	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
> > > -		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
> > > -				       obj, obj->cache_level);
> > > -
> > > -		obj->has_aliasing_ppgtt_mapping = 1;
> > > -	}
> > > -
> > >  	if (entry->offset != vma->node.start) {
> > >  		entry->offset = vma->node.start;
> > >  		*need_reloc = true;
> > > @@ -515,9 +509,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
> > >  		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
> > >  	}
> > >  
> > > -	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
> > > -	    !obj->has_global_gtt_mapping)
> > > -		i915_gem_gtt_bind_object(obj, obj->cache_level);
> > > +	vma->vm->bind_vma(vma, obj->cache_level, flags);
> > 
> > This results in us rebinding into the vma unconditionally once/twice for
> > every object on every execbuffer, right?
> 
> Hmm, how does this suit you (untested)? I think it is addressing your concern.
> Also, I suspect squashing this patch with the previous is probably a good thing
> to do.

That looks like it should do the trick. However, would it not be cleaner
to move the vma->vm->bind_vma() into the pin()?
-Chris
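
For illustration, an untested sketch of what that suggestion might look like,
based on the i915_gem_object_pin() hunk in this patch (hypothetical; it leans
on the conditional checks from the reworked gen6_ggtt_bind_vma above so that
binding from pin() does not rewrite PTEs needlessly):

        if (!i915_gem_obj_bound(obj, vm)) {
                ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
                                                 map_and_fenceable,
                                                 nonblocking);
                if (ret)
                        return ret;
        }

        /* Always route through bind_vma; the per-VM implementation decides
         * whether any PTE writes are actually needed, and GLOBAL_BIND is
         * requested whenever the pin must be mappable and fenceable. */
        vma = i915_gem_obj_to_vma(obj, vm);
        vm->bind_vma(vma, obj->cache_level,
                     map_and_fenceable ? GLOBAL_BIND : 0);

        obj->pin_count++;
        obj->pin_mappable |= map_and_fenceable;

Callers such as i915_gem_execbuffer_reserve_vma() could then drop their direct
bind_vma() calls entirely.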

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a67b9a0..8dfe6e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2083,17 +2083,8 @@  int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 
 /* i915_gem_gtt.c */
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-			    struct drm_i915_gem_object *obj,
-			    enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-			      struct drm_i915_gem_object *obj);
-
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-				enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5796e31..3cd6c21 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2696,12 +2696,8 @@  int i915_vma_unbind(struct i915_vma *vma)
 
 	trace_i915_vma_unbind(vma);
 
-	if (obj->has_global_gtt_mapping)
-		i915_gem_gtt_unbind_object(obj);
-	if (obj->has_aliasing_ppgtt_mapping) {
-		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
-		obj->has_aliasing_ppgtt_mapping = 0;
-	}
+	vma->vm->unbind_vma(vma);
+
 	i915_gem_gtt_finish_object(obj);
 	i915_gem_object_unpin_pages(obj);
 
@@ -3428,7 +3424,6 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_vma *vma;
 	int ret;
 
@@ -3467,11 +3462,8 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				return ret;
 		}
 
-		if (obj->has_global_gtt_mapping)
-			i915_gem_gtt_bind_object(obj, cache_level);
-		if (obj->has_aliasing_ppgtt_mapping)
-			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-					       obj, cache_level);
+		list_for_each_entry(vma, &obj->vma_list, vma_link)
+			vma->vm->bind_vma(vma, cache_level, 0);
 	}
 
 	list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3799,6 +3791,7 @@  i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    bool map_and_fenceable,
 		    bool nonblocking)
 {
+	const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
 	struct i915_vma *vma;
 	int ret;
 
@@ -3827,20 +3820,22 @@  i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (!i915_gem_obj_bound(obj, vm)) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
 		ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
 						 map_and_fenceable,
 						 nonblocking);
 		if (ret)
 			return ret;
 
-		if (!dev_priv->mm.aliasing_ppgtt)
-			i915_gem_gtt_bind_object(obj, obj->cache_level);
-	}
+		vma = i915_gem_obj_to_vma(obj, vm);
+		vm->bind_vma(vma, obj->cache_level, flags);
+	} else
+		vma = i915_gem_obj_to_vma(obj, vm);
 
+	/* Objects are created map and fenceable. If we bind an object
+	 * the first time, and we had aliasing PPGTT (and didn't request
+	 * GLOBAL), we'll need to do this on the second bind.*/
 	if (!obj->has_global_gtt_mapping && map_and_fenceable)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+		vm->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
 	obj->pin_count++;
 	obj->pin_mappable |= map_and_fenceable;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b71649a..ce62a4a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -391,6 +391,7 @@  mi_set_context(struct intel_ring_buffer *ring,
 static int do_switch(struct i915_hw_context *to)
 {
 	struct intel_ring_buffer *ring = to->ring;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct i915_hw_context *from = ring->last_context;
 	u32 hw_flags = 0;
 	int ret;
@@ -415,8 +416,11 @@  static int do_switch(struct i915_hw_context *to)
 		return ret;
 	}
 
-	if (!to->obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+	if (!to->obj->has_global_gtt_mapping) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+							   &dev_priv->gtt.base);
+		vma->vm->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+	}
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b26d979..bfe8cef 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -286,8 +286,9 @@  i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	if (unlikely(IS_GEN6(dev) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
 	    !target_i915_obj->has_global_gtt_mapping)) {
-		i915_gem_gtt_bind_object(target_i915_obj,
-					 target_i915_obj->cache_level);
+		struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
+		vma->vm->bind_vma(vma, target_i915_obj->cache_level,
+				 GLOBAL_BIND);
 	}
 
 	/* Validate that the target is in a valid r/w GPU domain */
@@ -464,11 +465,12 @@  i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 				struct intel_ring_buffer *ring,
 				bool *need_reloc)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
-	struct drm_i915_gem_object *obj = vma->obj;
+	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
 	int ret;
 
 	need_fence =
@@ -497,14 +499,6 @@  i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 		}
 	}
 
-	/* Ensure ppgtt mapping exists if needed */
-	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-				       obj, obj->cache_level);
-
-		obj->has_aliasing_ppgtt_mapping = 1;
-	}
-
 	if (entry->offset != vma->node.start) {
 		entry->offset = vma->node.start;
 		*need_reloc = true;
@@ -515,9 +509,7 @@  i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
 	}
 
-	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
-	    !obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+	vma->vm->bind_vma(vma, obj->cache_level, flags);
 
 	return 0;
 }
@@ -1117,8 +1109,11 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but let's be paranoid and do it
 	 * unconditionally for now. */
-	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+	if (flags & I915_DISPATCH_SECURE &&
+	    !batch_obj->has_global_gtt_mapping) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(batch_obj, vm);
+		vm->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+	}
 
 	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1d4e430..714f836 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -437,15 +437,6 @@  void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 	dev_priv->mm.aliasing_ppgtt = NULL;
 }
 
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-			    struct drm_i915_gem_object *obj,
-			    enum i915_cache_level cache_level)
-{
-	ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
-				   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-				   cache_level);
-}
-
 static void __always_unused
 gen6_ppgtt_bind_vma(struct i915_vma *vma,
 		    enum i915_cache_level cache_level,
@@ -458,14 +449,6 @@  gen6_ppgtt_bind_vma(struct i915_vma *vma,
 	gen6_ppgtt_insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
 }
 
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-			      struct drm_i915_gem_object *obj)
-{
-	ppgtt->base.clear_range(&ppgtt->base,
-				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-				obj->base.size >> PAGE_SHIFT);
-}
-
 static void __always_unused gen6_ppgtt_unbind_vma(struct i915_vma *vma)
 {
 	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
@@ -523,8 +506,10 @@  void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 				       dev_priv->gtt.base.total / PAGE_SIZE);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+							   &dev_priv->gtt.base);
 		i915_gem_clflush_object(obj, obj->pin_display);
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+		vma->vm->bind_vma(vma, obj->cache_level, 0);
 	}
 
 	i915_gem_chipset_flush(dev);
@@ -677,33 +662,6 @@  static void gen6_ggtt_bind_vma(struct i915_vma *vma,
 	}
 }
 
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-			      enum i915_cache_level cache_level)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
-	dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
-					  entry,
-					  cache_level);
-
-	obj->has_global_gtt_mapping = 1;
-}
-
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
-	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-				       entry,
-				       obj->base.size >> PAGE_SHIFT);
-
-	obj->has_global_gtt_mapping = 0;
-}
-
 static void gen6_ggtt_unbind_vma(struct i915_vma *vma)
 {
 	struct drm_device *dev = vma->vm->dev;