diff mbox

[3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object

Message ID 1458335784-1773-3-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show

Commit Message

Chris Wilson March 18, 2016, 9:16 p.m. UTC
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
 drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
 drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
 drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
 drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
 drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
 10 files changed, 46 insertions(+), 51 deletions(-)

Comments

Daniel Vetter March 21, 2016, 9:47 a.m. UTC | #1
On Fri, Mar 18, 2016 at 09:16:21PM +0000, Chris Wilson wrote:
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

I'm not sure this is too much magic ... If it gathers raving applause and
support from others then I'm ok ;-)
-Daniel
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
>  drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
>  drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
>  drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
>  drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
>  drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
>  drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
>  drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
>  drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
>  10 files changed, 46 insertions(+), 51 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index e0ba3e38000f..33ddcdf6d046 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -128,7 +128,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
>  static void
>  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>  	struct intel_engine_cs *engine;
>  	struct i915_vma *vma;
>  	int pin_count = 0;
> @@ -143,13 +142,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>  		   obj->base.size / 1024,
>  		   obj->base.read_domains,
>  		   obj->base.write_domain);
> -	for_each_engine(engine, dev_priv, i)
> +	for_each_engine(engine, obj, i)
>  		seq_printf(m, "%x ",
>  				i915_gem_request_get_seqno(obj->last_read_req[i]));
>  	seq_printf(m, "] %x %x%s%s%s",
>  		   i915_gem_request_get_seqno(obj->last_write_req),
>  		   i915_gem_request_get_seqno(obj->last_fenced_req),
> -		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
> +		   i915_cache_level_str(to_i915(obj), obj->cache_level),
>  		   obj->dirty ? " dirty" : "",
>  		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
>  	if (obj->base.name)
> @@ -339,7 +338,7 @@ static int per_file_stats(int id, void *ptr, void *data)
>  	if (obj->base.name || obj->base.dma_buf)
>  		stats->shared += obj->base.size;
>  
> -	if (USES_FULL_PPGTT(obj->base.dev)) {
> +	if (USES_FULL_PPGTT(obj)) {
>  		list_for_each_entry(vma, &obj->vma_list, obj_link) {
>  			struct i915_hw_ppgtt *ppgtt;
>  
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 0c9fe00d3e83..92365f047e53 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2186,6 +2186,12 @@ struct drm_i915_gem_object {
>  };
>  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
>  
> +static inline struct drm_i915_private *
> +__obj_to_i915(const struct drm_i915_gem_object *obj)
> +{
> +	return __to_i915(obj->base.dev);
> +}
> +
>  void i915_gem_track_fb(struct drm_i915_gem_object *old,
>  		       struct drm_i915_gem_object *new,
>  		       unsigned frontbuffer_bits);
> @@ -2455,6 +2461,8 @@ struct drm_i915_cmd_table {
>  		__p = (struct drm_i915_private *)p; \
>  	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
>  		__p = __to_i915((struct drm_device *)p); \
> +	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
> +		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
>  	else \
>  		BUILD_BUG(); \
>  	__p; \
> @@ -3132,8 +3140,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
>  bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
>  
>  /* Some GGTT VM helpers */
> -#define i915_obj_to_ggtt(obj) \
> -	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
> +#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
>  
>  static inline struct i915_hw_ppgtt *
>  i915_vm_to_ppgtt(struct i915_address_space *vm)
> @@ -3282,9 +3289,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
>  /* i915_gem_tiling.c */
>  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -
> -	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
> +	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
>  		obj->tiling_mode != I915_TILING_NONE;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 8588c83abb35..710a6bbc985e 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -361,14 +361,12 @@ out:
>  
>  void *i915_gem_object_alloc(struct drm_device *dev)
>  {
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> -	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
> +	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
>  }
>  
>  void i915_gem_object_free(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -	kmem_cache_free(dev_priv->objects, obj);
> +	kmem_cache_free(to_i915(obj)->objects, obj);
>  }
>  
>  static int
> @@ -2028,7 +2026,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
>  
>  static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	int ret;
>  
>  	if (drm_vma_node_has_offset(&obj->base.vma_node))
> @@ -2241,7 +2239,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>  static int
>  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	int page_count, i;
>  	struct address_space *mapping;
>  	struct sg_table *st;
> @@ -2372,7 +2370,7 @@ err_pages:
>  int
>  i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	const struct drm_i915_gem_object_ops *ops = obj->ops;
>  	int ret;
>  
> @@ -2449,8 +2447,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
>  	 * so that we don't steal from recently used but inactive objects
>  	 * (unless we are forced to ofc!)
>  	 */
> -	list_move_tail(&obj->global_list,
> -		       &to_i915(obj->base.dev)->mm.bound_list);
> +	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
>  
>  	list_for_each_entry(vma, &obj->vma_list, obj_link) {
>  		if (!list_empty(&vma->vm_link))
> @@ -3172,7 +3169,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  		return 0;
>  
>  	if (!i915_semaphore_is_enabled(obj->base.dev)) {
> -		struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +		struct drm_i915_private *i915 = to_i915(obj);
>  		ret = __i915_wait_request(from_req,
>  					  atomic_read(&i915->gpu_error.reset_counter),
>  					  i915->mm.interruptible,
> @@ -3312,7 +3309,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
>  static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>  {
>  	struct drm_i915_gem_object *obj = vma->obj;
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	int ret;
>  
>  	if (list_empty(&vma->obj_link))
> @@ -3772,7 +3769,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>  	vma = i915_gem_obj_to_ggtt(obj);
>  	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
>  		list_move_tail(&vma->vm_link,
> -			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
> +			       &to_i915(obj)->ggtt.base.inactive_list);
>  
>  	return 0;
>  }
> @@ -4010,7 +4007,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
>  	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
>  	 */
>  	ret = i915_gem_object_set_cache_level(obj,
> -					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
> +					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
>  	if (ret)
>  		goto err_unpin_display;
>  
> @@ -4209,7 +4206,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
>  		     (vma->node.start & (fence_alignment - 1)) == 0);
>  
>  	mappable = (vma->node.start + fence_size <=
> -		    to_i915(obj->base.dev)->ggtt.mappable_end);
> +		    to_i915(obj)->ggtt.mappable_end);
>  
>  	obj->map_and_fenceable = mappable && fenceable;
>  }
> @@ -4221,7 +4218,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
>  		       uint32_t alignment,
>  		       uint64_t flags)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	struct i915_vma *vma;
>  	unsigned bound;
>  	int ret;
> @@ -4456,7 +4453,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>  	obj->fence_reg = I915_FENCE_REG_NONE;
>  	obj->madv = I915_MADV_WILLNEED;
>  
> -	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
> +	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
>  }
>  
>  static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
> @@ -4545,8 +4542,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
>  void i915_gem_free_object(struct drm_gem_object *gem_obj)
>  {
>  	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	struct i915_vma *vma, *next;
>  
>  	intel_runtime_pm_get(dev_priv);
> @@ -4647,7 +4643,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
>  
>  	list_del(&vma->obj_link);
>  
> -	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
> +	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
>  }
>  
>  static void
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 374a0cb7a092..39ed403b9de3 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
>  
>  static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
>  {
> -	return (HAS_LLC(obj->base.dev) ||
> +	return (HAS_LLC(obj) ||
>  		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
>  		obj->cache_level != I915_CACHE_NONE);
>  }
> @@ -312,8 +312,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>  		   struct drm_i915_gem_relocation_entry *reloc,
>  		   uint64_t target_offset)
>  {
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	uint64_t delta = relocation_target(reloc, target_offset);
>  	uint64_t offset;
>  	void __iomem *reloc_page;
> @@ -334,7 +333,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>  					      offset & PAGE_MASK);
>  	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
>  
> -	if (INTEL_INFO(dev)->gen >= 8) {
> +	if (INTEL_INFO(obj)->gen >= 8) {
>  		offset += sizeof(uint32_t);
>  
>  		if (offset_in_page(offset) == 0) {
> @@ -367,7 +366,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>  		       struct drm_i915_gem_relocation_entry *reloc,
>  		       uint64_t target_offset)
>  {
> -	struct drm_device *dev = obj->base.dev;
>  	uint32_t page_offset = offset_in_page(reloc->offset);
>  	uint64_t delta = relocation_target(reloc, target_offset);
>  	char *vaddr;
> @@ -381,7 +379,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>  				reloc->offset >> PAGE_SHIFT));
>  	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
>  
> -	if (INTEL_INFO(dev)->gen >= 8) {
> +	if (INTEL_INFO(obj)->gen >= 8) {
>  		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
>  
>  		if (page_offset == 0) {
> @@ -403,7 +401,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>  				   struct eb_vmas *eb,
>  				   struct drm_i915_gem_relocation_entry *reloc)
>  {
> -	struct drm_device *dev = obj->base.dev;
>  	struct drm_gem_object *target_obj;
>  	struct drm_i915_gem_object *target_i915_obj;
>  	struct i915_vma *target_vma;
> @@ -422,7 +419,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>  	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
>  	 * pipe_control writes because the gpu doesn't properly redirect them
>  	 * through the ppgtt for non_secure batchbuffers. */
> -	if (unlikely(IS_GEN6(dev) &&
> +	if (unlikely(IS_GEN6(obj) &&
>  	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
>  		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
>  				    PIN_GLOBAL);
> @@ -464,7 +461,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>  
>  	/* Check that the relocation address is valid... */
>  	if (unlikely(reloc->offset >
> -		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
> +		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
>  		DRM_DEBUG("Relocation beyond object bounds: "
>  			  "obj %p target %d offset %d size %d.\n",
>  			  obj, reloc->target_handle,
> @@ -672,7 +669,7 @@ need_reloc_mappable(struct i915_vma *vma)
>  		return false;
>  
>  	/* See also use_cpu_reloc() */
> -	if (HAS_LLC(vma->obj->base.dev))
> +	if (HAS_LLC(vma->obj))
>  		return false;
>  
>  	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
> diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
> index 598198543dcd..1ef75bc2220c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_fence.c
> +++ b/drivers/gpu/drm/i915/i915_gem_fence.c
> @@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
>  					 struct drm_i915_fence_reg *fence,
>  					 bool enable)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	int reg = fence_number(dev_priv, fence);
>  
>  	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
> @@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
>  int
>  i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	struct drm_i915_fence_reg *fence;
>  	int ret;
>  
> @@ -433,7 +433,7 @@ bool
>  i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
>  {
>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +		struct drm_i915_private *dev_priv = to_i915(obj);
>  		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
>  
>  		WARN_ON(!ggtt_vma ||
> @@ -457,7 +457,7 @@ void
>  i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
>  {
>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +		struct drm_i915_private *dev_priv = to_i915(obj);
>  		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
>  		dev_priv->fence_regs[obj->fence_reg].pin_count--;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 0715bb74d306..6447a5f9661e 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -3305,7 +3305,7 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
>  	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
>  		return ERR_PTR(-EINVAL);
>  
> -	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
> +	vma = kmem_cache_zalloc(to_i915(obj)->vmas, GFP_KERNEL);
>  	if (vma == NULL)
>  		return ERR_PTR(-ENOMEM);
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index de891c928b2f..224389d077c7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -540,10 +540,8 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
>  static void
>  i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -
>  	if (obj->stolen) {
> -		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
> +		i915_gem_stolen_remove_node(to_i915(obj), obj->stolen);
>  		kfree(obj->stolen);
>  		obj->stolen = NULL;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> index 7410f6c962e7..bc4cb7f4fe80 100644
> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> @@ -122,10 +122,10 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
>  	if (tiling_mode == I915_TILING_NONE)
>  		return true;
>  
> -	if (INTEL_INFO(obj->base.dev)->gen >= 4)
> +	if (INTEL_INFO(obj)->gen >= 4)
>  		return true;
>  
> -	if (INTEL_INFO(obj->base.dev)->gen == 3) {
> +	if (INTEL_INFO(obj)->gen == 3) {
>  		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
>  			return false;
>  	} else {
> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> index 54088a4d6498..f0e3ade59177 100644
> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> @@ -303,7 +303,7 @@ __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
>  static int
>  i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	struct i915_mm_struct *mm;
>  	int ret = 0;
>  
> @@ -376,7 +376,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
>  
>  	kref_put_mutex(&obj->userptr.mm->kref,
>  		       __i915_mm_struct_free,
> -		       &to_i915(obj->base.dev)->mm_lock);
> +		       &to_i915(obj)->mm_lock);
>  	obj->userptr.mm = NULL;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index df0ef5bba8e5..f981bddc9bbf 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2069,7 +2069,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
>  
>  void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
>  {
> -	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
> +	if (HAS_LLC(ringbuf->obj) && !ringbuf->obj->stolen)
>  		vunmap(ringbuf->virtual_start);
>  	else
>  		iounmap(ringbuf->virtual_start);
> -- 
> 2.8.0.rc3
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
Tvrtko Ursulin March 21, 2016, 9:55 a.m. UTC | #2
On 18/03/16 21:16, Chris Wilson wrote:
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
>   drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
>   drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
>   drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
>   drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
>   drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
>   drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
>   drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
>   drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
>   drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
>   10 files changed, 46 insertions(+), 51 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index e0ba3e38000f..33ddcdf6d046 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -128,7 +128,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
>   static void
>   describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>   	struct intel_engine_cs *engine;
>   	struct i915_vma *vma;
>   	int pin_count = 0;
> @@ -143,13 +142,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>   		   obj->base.size / 1024,
>   		   obj->base.read_domains,
>   		   obj->base.write_domain);
> -	for_each_engine(engine, dev_priv, i)
> +	for_each_engine(engine, obj, i)
>   		seq_printf(m, "%x ",
>   				i915_gem_request_get_seqno(obj->last_read_req[i]));
>   	seq_printf(m, "] %x %x%s%s%s",
>   		   i915_gem_request_get_seqno(obj->last_write_req),
>   		   i915_gem_request_get_seqno(obj->last_fenced_req),
> -		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
> +		   i915_cache_level_str(to_i915(obj), obj->cache_level),
>   		   obj->dirty ? " dirty" : "",
>   		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
>   	if (obj->base.name)
> @@ -339,7 +338,7 @@ static int per_file_stats(int id, void *ptr, void *data)
>   	if (obj->base.name || obj->base.dma_buf)
>   		stats->shared += obj->base.size;
>
> -	if (USES_FULL_PPGTT(obj->base.dev)) {
> +	if (USES_FULL_PPGTT(obj)) {
>   		list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   			struct i915_hw_ppgtt *ppgtt;
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 0c9fe00d3e83..92365f047e53 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2186,6 +2186,12 @@ struct drm_i915_gem_object {
>   };
>   #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
>
> +static inline struct drm_i915_private *
> +__obj_to_i915(const struct drm_i915_gem_object *obj)
> +{
> +	return __to_i915(obj->base.dev);
> +}
> +
>   void i915_gem_track_fb(struct drm_i915_gem_object *old,
>   		       struct drm_i915_gem_object *new,
>   		       unsigned frontbuffer_bits);
> @@ -2455,6 +2461,8 @@ struct drm_i915_cmd_table {
>   		__p = (struct drm_i915_private *)p; \
>   	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
>   		__p = __to_i915((struct drm_device *)p); \
> +	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
> +		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
>   	else \
>   		BUILD_BUG(); \
>   	__p; \
> @@ -3132,8 +3140,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
>   bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
>
>   /* Some GGTT VM helpers */
> -#define i915_obj_to_ggtt(obj) \
> -	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
> +#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
>
>   static inline struct i915_hw_ppgtt *
>   i915_vm_to_ppgtt(struct i915_address_space *vm)
> @@ -3282,9 +3289,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
>   /* i915_gem_tiling.c */
>   static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -
> -	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
> +	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
>   		obj->tiling_mode != I915_TILING_NONE;
>   }
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 8588c83abb35..710a6bbc985e 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -361,14 +361,12 @@ out:
>
>   void *i915_gem_object_alloc(struct drm_device *dev)
>   {
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> -	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
> +	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
>   }
>
>   void i915_gem_object_free(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -	kmem_cache_free(dev_priv->objects, obj);
> +	kmem_cache_free(to_i915(obj)->objects, obj);
>   }
>
>   static int
> @@ -2028,7 +2026,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
>
>   static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	int ret;
>
>   	if (drm_vma_node_has_offset(&obj->base.vma_node))
> @@ -2241,7 +2239,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>   static int
>   i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	int page_count, i;
>   	struct address_space *mapping;
>   	struct sg_table *st;
> @@ -2372,7 +2370,7 @@ err_pages:
>   int
>   i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	const struct drm_i915_gem_object_ops *ops = obj->ops;
>   	int ret;
>
> @@ -2449,8 +2447,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
>   	 * so that we don't steal from recently used but inactive objects
>   	 * (unless we are forced to ofc!)
>   	 */
> -	list_move_tail(&obj->global_list,
> -		       &to_i915(obj->base.dev)->mm.bound_list);
> +	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
>
>   	list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   		if (!list_empty(&vma->vm_link))
> @@ -3172,7 +3169,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>   		return 0;
>
>   	if (!i915_semaphore_is_enabled(obj->base.dev)) {
> -		struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +		struct drm_i915_private *i915 = to_i915(obj);
>   		ret = __i915_wait_request(from_req,
>   					  atomic_read(&i915->gpu_error.reset_counter),
>   					  i915->mm.interruptible,
> @@ -3312,7 +3309,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
>   static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>   {
>   	struct drm_i915_gem_object *obj = vma->obj;
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	int ret;
>
>   	if (list_empty(&vma->obj_link))
> @@ -3772,7 +3769,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>   	vma = i915_gem_obj_to_ggtt(obj);
>   	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
>   		list_move_tail(&vma->vm_link,
> -			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
> +			       &to_i915(obj)->ggtt.base.inactive_list);
>
>   	return 0;
>   }
> @@ -4010,7 +4007,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
>   	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
>   	 */
>   	ret = i915_gem_object_set_cache_level(obj,
> -					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
> +					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
>   	if (ret)
>   		goto err_unpin_display;
>
> @@ -4209,7 +4206,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
>   		     (vma->node.start & (fence_alignment - 1)) == 0);
>
>   	mappable = (vma->node.start + fence_size <=
> -		    to_i915(obj->base.dev)->ggtt.mappable_end);
> +		    to_i915(obj)->ggtt.mappable_end);
>
>   	obj->map_and_fenceable = mappable && fenceable;
>   }
> @@ -4221,7 +4218,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
>   		       uint32_t alignment,
>   		       uint64_t flags)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	struct i915_vma *vma;
>   	unsigned bound;
>   	int ret;
> @@ -4456,7 +4453,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>   	obj->fence_reg = I915_FENCE_REG_NONE;
>   	obj->madv = I915_MADV_WILLNEED;
>
> -	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
> +	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
>   }
>
>   static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
> @@ -4545,8 +4542,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
>   void i915_gem_free_object(struct drm_gem_object *gem_obj)
>   {
>   	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	struct i915_vma *vma, *next;
>
>   	intel_runtime_pm_get(dev_priv);
> @@ -4647,7 +4643,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
>
>   	list_del(&vma->obj_link);
>
> -	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
> +	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
>   }
>
>   static void
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 374a0cb7a092..39ed403b9de3 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
>
>   static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
>   {
> -	return (HAS_LLC(obj->base.dev) ||
> +	return (HAS_LLC(obj) ||
>   		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
>   		obj->cache_level != I915_CACHE_NONE);
>   }
> @@ -312,8 +312,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>   		   struct drm_i915_gem_relocation_entry *reloc,
>   		   uint64_t target_offset)
>   {
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	uint64_t delta = relocation_target(reloc, target_offset);
>   	uint64_t offset;
>   	void __iomem *reloc_page;
> @@ -334,7 +333,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>   					      offset & PAGE_MASK);
>   	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
>
> -	if (INTEL_INFO(dev)->gen >= 8) {
> +	if (INTEL_INFO(obj)->gen >= 8) {
>   		offset += sizeof(uint32_t);
>
>   		if (offset_in_page(offset) == 0) {
> @@ -367,7 +366,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>   		       struct drm_i915_gem_relocation_entry *reloc,
>   		       uint64_t target_offset)
>   {
> -	struct drm_device *dev = obj->base.dev;
>   	uint32_t page_offset = offset_in_page(reloc->offset);
>   	uint64_t delta = relocation_target(reloc, target_offset);
>   	char *vaddr;
> @@ -381,7 +379,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>   				reloc->offset >> PAGE_SHIFT));
>   	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
>
> -	if (INTEL_INFO(dev)->gen >= 8) {
> +	if (INTEL_INFO(obj)->gen >= 8) {
>   		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
>
>   		if (page_offset == 0) {
> @@ -403,7 +401,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>   				   struct eb_vmas *eb,
>   				   struct drm_i915_gem_relocation_entry *reloc)
>   {
> -	struct drm_device *dev = obj->base.dev;
>   	struct drm_gem_object *target_obj;
>   	struct drm_i915_gem_object *target_i915_obj;
>   	struct i915_vma *target_vma;
> @@ -422,7 +419,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>   	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
>   	 * pipe_control writes because the gpu doesn't properly redirect them
>   	 * through the ppgtt for non_secure batchbuffers. */
> -	if (unlikely(IS_GEN6(dev) &&
> +	if (unlikely(IS_GEN6(obj) &&
>   	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
>   		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
>   				    PIN_GLOBAL);
> @@ -464,7 +461,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>
>   	/* Check that the relocation address is valid... */
>   	if (unlikely(reloc->offset >
> -		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
> +		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
>   		DRM_DEBUG("Relocation beyond object bounds: "
>   			  "obj %p target %d offset %d size %d.\n",
>   			  obj, reloc->target_handle,
> @@ -672,7 +669,7 @@ need_reloc_mappable(struct i915_vma *vma)
>   		return false;
>
>   	/* See also use_cpu_reloc() */
> -	if (HAS_LLC(vma->obj->base.dev))
> +	if (HAS_LLC(vma->obj))

Things like HAS_LLC(obj) and HAS_LLC(engine) are IMHO illogical and 
non-intuitive.

to_i915(various) makes sense, because any i915 object is exactly that - 
an i915 object. for_each_engine is also OK. But going further than that 
is not desirable.

Regards,

Tvrtko
Chris Wilson March 21, 2016, 10:04 a.m. UTC | #3
On Mon, Mar 21, 2016 at 09:55:10AM +0000, Tvrtko Ursulin wrote:
> >  	/* See also use_cpu_reloc() */
> >-	if (HAS_LLC(vma->obj->base.dev))
> >+	if (HAS_LLC(vma->obj))
> 
> Things like HAS_LLC(obj) and HAS_LLC(engine) are IMHO illogical and
> non-intuitive.

Does this object/engine have last-level cache coherency? That seems to make
sense to me. The goal has been to shift these to using dev_priv, for two
reasons: on the hotpaths, careless pointer dereferencing adds extra memory
loads that are not immediately obvious unless you are familiar with the
macros, and the second reason is that widespread dev -> dev_private -> dev
conversions add a few extra pages of object size just from the extra mov
instructions!

INTEL_INFO() is definitely a more subjective matter of taste, but anything
other than INTEL_INFO(i915) is a stopgap imo.
-Chris
Jani Nikula March 21, 2016, 1:01 p.m. UTC | #4
On Mon, 21 Mar 2016, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Fri, Mar 18, 2016 at 09:16:21PM +0000, Chris Wilson wrote:
>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>
> I'm not sure this is too much magic ... If it gathers raving applause and
> support from others then I'm ok ;-)

I'm not thrilled, like I said in [1].

If you guys really insist on having this, please at least make all the
*other* macros require dev_priv, and use to_i915() at the call sites.

BR,
Jani.



[1] http://mid.gmane.org/871t79hriq.fsf@intel.com

> -Daniel
>> ---
>>  drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
>>  drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
>>  drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
>>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
>>  drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
>>  drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
>>  drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
>>  drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
>>  drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
>>  drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
>>  10 files changed, 46 insertions(+), 51 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
>> index e0ba3e38000f..33ddcdf6d046 100644
>> --- a/drivers/gpu/drm/i915/i915_debugfs.c
>> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
>> @@ -128,7 +128,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
>>  static void
>>  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>>  	struct intel_engine_cs *engine;
>>  	struct i915_vma *vma;
>>  	int pin_count = 0;
>> @@ -143,13 +142,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>>  		   obj->base.size / 1024,
>>  		   obj->base.read_domains,
>>  		   obj->base.write_domain);
>> -	for_each_engine(engine, dev_priv, i)
>> +	for_each_engine(engine, obj, i)
>>  		seq_printf(m, "%x ",
>>  				i915_gem_request_get_seqno(obj->last_read_req[i]));
>>  	seq_printf(m, "] %x %x%s%s%s",
>>  		   i915_gem_request_get_seqno(obj->last_write_req),
>>  		   i915_gem_request_get_seqno(obj->last_fenced_req),
>> -		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
>> +		   i915_cache_level_str(to_i915(obj), obj->cache_level),
>>  		   obj->dirty ? " dirty" : "",
>>  		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
>>  	if (obj->base.name)
>> @@ -339,7 +338,7 @@ static int per_file_stats(int id, void *ptr, void *data)
>>  	if (obj->base.name || obj->base.dma_buf)
>>  		stats->shared += obj->base.size;
>>  
>> -	if (USES_FULL_PPGTT(obj->base.dev)) {
>> +	if (USES_FULL_PPGTT(obj)) {
>>  		list_for_each_entry(vma, &obj->vma_list, obj_link) {
>>  			struct i915_hw_ppgtt *ppgtt;
>>  
>> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
>> index 0c9fe00d3e83..92365f047e53 100644
>> --- a/drivers/gpu/drm/i915/i915_drv.h
>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>> @@ -2186,6 +2186,12 @@ struct drm_i915_gem_object {
>>  };
>>  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
>>  
>> +static inline struct drm_i915_private *
>> +__obj_to_i915(const struct drm_i915_gem_object *obj)
>> +{
>> +	return __to_i915(obj->base.dev);
>> +}
>> +
>>  void i915_gem_track_fb(struct drm_i915_gem_object *old,
>>  		       struct drm_i915_gem_object *new,
>>  		       unsigned frontbuffer_bits);
>> @@ -2455,6 +2461,8 @@ struct drm_i915_cmd_table {
>>  		__p = (struct drm_i915_private *)p; \
>>  	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
>>  		__p = __to_i915((struct drm_device *)p); \
>> +	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
>> +		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
>>  	else \
>>  		BUILD_BUG(); \
>>  	__p; \
>> @@ -3132,8 +3140,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
>>  bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
>>  
>>  /* Some GGTT VM helpers */
>> -#define i915_obj_to_ggtt(obj) \
>> -	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
>> +#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
>>  
>>  static inline struct i915_hw_ppgtt *
>>  i915_vm_to_ppgtt(struct i915_address_space *vm)
>> @@ -3282,9 +3289,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
>>  /* i915_gem_tiling.c */
>>  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> -
>> -	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
>> +	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
>>  		obj->tiling_mode != I915_TILING_NONE;
>>  }
>>  
>> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>> index 8588c83abb35..710a6bbc985e 100644
>> --- a/drivers/gpu/drm/i915/i915_gem.c
>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>> @@ -361,14 +361,12 @@ out:
>>  
>>  void *i915_gem_object_alloc(struct drm_device *dev)
>>  {
>> -	struct drm_i915_private *dev_priv = dev->dev_private;
>> -	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
>> +	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
>>  }
>>  
>>  void i915_gem_object_free(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> -	kmem_cache_free(dev_priv->objects, obj);
>> +	kmem_cache_free(to_i915(obj)->objects, obj);
>>  }
>>  
>>  static int
>> @@ -2028,7 +2026,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
>>  
>>  static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	int ret;
>>  
>>  	if (drm_vma_node_has_offset(&obj->base.vma_node))
>> @@ -2241,7 +2239,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>>  static int
>>  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	int page_count, i;
>>  	struct address_space *mapping;
>>  	struct sg_table *st;
>> @@ -2372,7 +2370,7 @@ err_pages:
>>  int
>>  i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	const struct drm_i915_gem_object_ops *ops = obj->ops;
>>  	int ret;
>>  
>> @@ -2449,8 +2447,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
>>  	 * so that we don't steal from recently used but inactive objects
>>  	 * (unless we are forced to ofc!)
>>  	 */
>> -	list_move_tail(&obj->global_list,
>> -		       &to_i915(obj->base.dev)->mm.bound_list);
>> +	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
>>  
>>  	list_for_each_entry(vma, &obj->vma_list, obj_link) {
>>  		if (!list_empty(&vma->vm_link))
>> @@ -3172,7 +3169,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>>  		return 0;
>>  
>>  	if (!i915_semaphore_is_enabled(obj->base.dev)) {
>> -		struct drm_i915_private *i915 = to_i915(obj->base.dev);
>> +		struct drm_i915_private *i915 = to_i915(obj);
>>  		ret = __i915_wait_request(from_req,
>>  					  atomic_read(&i915->gpu_error.reset_counter),
>>  					  i915->mm.interruptible,
>> @@ -3312,7 +3309,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
>>  static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>>  {
>>  	struct drm_i915_gem_object *obj = vma->obj;
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	int ret;
>>  
>>  	if (list_empty(&vma->obj_link))
>> @@ -3772,7 +3769,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>>  	vma = i915_gem_obj_to_ggtt(obj);
>>  	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
>>  		list_move_tail(&vma->vm_link,
>> -			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
>> +			       &to_i915(obj)->ggtt.base.inactive_list);
>>  
>>  	return 0;
>>  }
>> @@ -4010,7 +4007,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
>>  	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
>>  	 */
>>  	ret = i915_gem_object_set_cache_level(obj,
>> -					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
>> +					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
>>  	if (ret)
>>  		goto err_unpin_display;
>>  
>> @@ -4209,7 +4206,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
>>  		     (vma->node.start & (fence_alignment - 1)) == 0);
>>  
>>  	mappable = (vma->node.start + fence_size <=
>> -		    to_i915(obj->base.dev)->ggtt.mappable_end);
>> +		    to_i915(obj)->ggtt.mappable_end);
>>  
>>  	obj->map_and_fenceable = mappable && fenceable;
>>  }
>> @@ -4221,7 +4218,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
>>  		       uint32_t alignment,
>>  		       uint64_t flags)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	struct i915_vma *vma;
>>  	unsigned bound;
>>  	int ret;
>> @@ -4456,7 +4453,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>>  	obj->fence_reg = I915_FENCE_REG_NONE;
>>  	obj->madv = I915_MADV_WILLNEED;
>>  
>> -	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
>> +	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
>>  }
>>  
>>  static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
>> @@ -4545,8 +4542,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
>>  void i915_gem_free_object(struct drm_gem_object *gem_obj)
>>  {
>>  	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
>> -	struct drm_device *dev = obj->base.dev;
>> -	struct drm_i915_private *dev_priv = dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	struct i915_vma *vma, *next;
>>  
>>  	intel_runtime_pm_get(dev_priv);
>> @@ -4647,7 +4643,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
>>  
>>  	list_del(&vma->obj_link);
>>  
>> -	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
>> +	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
>>  }
>>  
>>  static void
>> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> index 374a0cb7a092..39ed403b9de3 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> @@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
>>  
>>  static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
>>  {
>> -	return (HAS_LLC(obj->base.dev) ||
>> +	return (HAS_LLC(obj) ||
>>  		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
>>  		obj->cache_level != I915_CACHE_NONE);
>>  }
>> @@ -312,8 +312,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>>  		   struct drm_i915_gem_relocation_entry *reloc,
>>  		   uint64_t target_offset)
>>  {
>> -	struct drm_device *dev = obj->base.dev;
>> -	struct drm_i915_private *dev_priv = dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	uint64_t delta = relocation_target(reloc, target_offset);
>>  	uint64_t offset;
>>  	void __iomem *reloc_page;
>> @@ -334,7 +333,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>>  					      offset & PAGE_MASK);
>>  	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
>>  
>> -	if (INTEL_INFO(dev)->gen >= 8) {
>> +	if (INTEL_INFO(obj)->gen >= 8) {
>>  		offset += sizeof(uint32_t);
>>  
>>  		if (offset_in_page(offset) == 0) {
>> @@ -367,7 +366,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>>  		       struct drm_i915_gem_relocation_entry *reloc,
>>  		       uint64_t target_offset)
>>  {
>> -	struct drm_device *dev = obj->base.dev;
>>  	uint32_t page_offset = offset_in_page(reloc->offset);
>>  	uint64_t delta = relocation_target(reloc, target_offset);
>>  	char *vaddr;
>> @@ -381,7 +379,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>>  				reloc->offset >> PAGE_SHIFT));
>>  	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
>>  
>> -	if (INTEL_INFO(dev)->gen >= 8) {
>> +	if (INTEL_INFO(obj)->gen >= 8) {
>>  		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
>>  
>>  		if (page_offset == 0) {
>> @@ -403,7 +401,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>>  				   struct eb_vmas *eb,
>>  				   struct drm_i915_gem_relocation_entry *reloc)
>>  {
>> -	struct drm_device *dev = obj->base.dev;
>>  	struct drm_gem_object *target_obj;
>>  	struct drm_i915_gem_object *target_i915_obj;
>>  	struct i915_vma *target_vma;
>> @@ -422,7 +419,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>>  	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
>>  	 * pipe_control writes because the gpu doesn't properly redirect them
>>  	 * through the ppgtt for non_secure batchbuffers. */
>> -	if (unlikely(IS_GEN6(dev) &&
>> +	if (unlikely(IS_GEN6(obj) &&
>>  	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
>>  		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
>>  				    PIN_GLOBAL);
>> @@ -464,7 +461,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>>  
>>  	/* Check that the relocation address is valid... */
>>  	if (unlikely(reloc->offset >
>> -		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
>> +		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
>>  		DRM_DEBUG("Relocation beyond object bounds: "
>>  			  "obj %p target %d offset %d size %d.\n",
>>  			  obj, reloc->target_handle,
>> @@ -672,7 +669,7 @@ need_reloc_mappable(struct i915_vma *vma)
>>  		return false;
>>  
>>  	/* See also use_cpu_reloc() */
>> -	if (HAS_LLC(vma->obj->base.dev))
>> +	if (HAS_LLC(vma->obj))
>>  		return false;
>>  
>>  	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
>> diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
>> index 598198543dcd..1ef75bc2220c 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_fence.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_fence.c
>> @@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
>>  					 struct drm_i915_fence_reg *fence,
>>  					 bool enable)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	int reg = fence_number(dev_priv, fence);
>>  
>>  	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
>> @@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
>>  int
>>  i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	struct drm_i915_fence_reg *fence;
>>  	int ret;
>>  
>> @@ -433,7 +433,7 @@ bool
>>  i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
>>  {
>>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
>> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +		struct drm_i915_private *dev_priv = to_i915(obj);
>>  		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
>>  
>>  		WARN_ON(!ggtt_vma ||
>> @@ -457,7 +457,7 @@ void
>>  i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
>>  {
>>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
>> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +		struct drm_i915_private *dev_priv = to_i915(obj);
>>  		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
>>  		dev_priv->fence_regs[obj->fence_reg].pin_count--;
>>  	}
>> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
>> index 0715bb74d306..6447a5f9661e 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
>> @@ -3305,7 +3305,7 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
>>  	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
>>  		return ERR_PTR(-EINVAL);
>>  
>> -	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
>> +	vma = kmem_cache_zalloc(to_i915(obj)->vmas, GFP_KERNEL);
>>  	if (vma == NULL)
>>  		return ERR_PTR(-ENOMEM);
>>  
>> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
>> index de891c928b2f..224389d077c7 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
>> @@ -540,10 +540,8 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
>>  static void
>>  i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> -
>>  	if (obj->stolen) {
>> -		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
>> +		i915_gem_stolen_remove_node(to_i915(obj), obj->stolen);
>>  		kfree(obj->stolen);
>>  		obj->stolen = NULL;
>>  	}
>> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
>> index 7410f6c962e7..bc4cb7f4fe80 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
>> @@ -122,10 +122,10 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
>>  	if (tiling_mode == I915_TILING_NONE)
>>  		return true;
>>  
>> -	if (INTEL_INFO(obj->base.dev)->gen >= 4)
>> +	if (INTEL_INFO(obj)->gen >= 4)
>>  		return true;
>>  
>> -	if (INTEL_INFO(obj->base.dev)->gen == 3) {
>> +	if (INTEL_INFO(obj)->gen == 3) {
>>  		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
>>  			return false;
>>  	} else {
>> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
>> index 54088a4d6498..f0e3ade59177 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
>> @@ -303,7 +303,7 @@ __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
>>  static int
>>  i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	struct i915_mm_struct *mm;
>>  	int ret = 0;
>>  
>> @@ -376,7 +376,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
>>  
>>  	kref_put_mutex(&obj->userptr.mm->kref,
>>  		       __i915_mm_struct_free,
>> -		       &to_i915(obj->base.dev)->mm_lock);
>> +		       &to_i915(obj)->mm_lock);
>>  	obj->userptr.mm = NULL;
>>  }
>>  
>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> index df0ef5bba8e5..f981bddc9bbf 100644
>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> @@ -2069,7 +2069,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
>>  
>>  void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
>>  {
>> -	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
>> +	if (HAS_LLC(ringbuf->obj) && !ringbuf->obj->stolen)
>>  		vunmap(ringbuf->virtual_start);
>>  	else
>>  		iounmap(ringbuf->virtual_start);
>> -- 
>> 2.8.0.rc3
>> 
>> _______________________________________________
>> Intel-gfx mailing list
>> Intel-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
Daniel Vetter March 21, 2016, 5:44 p.m. UTC | #5
On Mon, Mar 21, 2016 at 03:01:22PM +0200, Jani Nikula wrote:
> On Mon, 21 Mar 2016, Daniel Vetter <daniel@ffwll.ch> wrote:
> > On Fri, Mar 18, 2016 at 09:16:21PM +0000, Chris Wilson wrote:
> >> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >
> > I'm not sure this is too much magic ... If it gathers raving applause and
> > support from others then I'm ok ;-)
> 
> I'm not thrilled, like I said in [1].
> 
> If you guys really insist on having this, please at least make all the
> *other* macros require dev_priv, and use to_i915() at the call sites.

tbh personally leaning the exact same way, if someone really wants to hear
my bikeshed. I already dropped it in the first thread too: *_to_i915 seems
like a much more C-like approach.
-Daniel

> 
> BR,
> Jani.
> 
> 
> 
> [1] http://mid.gmane.org/871t79hriq.fsf@intel.com
> 
> > -Daniel
> >> ---
> >>  drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
> >>  drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
> >>  drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
> >>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
> >>  drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
> >>  drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
> >>  drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
> >>  drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
> >>  drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
> >>  drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
> >>  10 files changed, 46 insertions(+), 51 deletions(-)
> >> 
> >> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> >> index e0ba3e38000f..33ddcdf6d046 100644
> >> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> >> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> >> @@ -128,7 +128,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
> >>  static void
> >>  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> >>  	struct intel_engine_cs *engine;
> >>  	struct i915_vma *vma;
> >>  	int pin_count = 0;
> >> @@ -143,13 +142,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
> >>  		   obj->base.size / 1024,
> >>  		   obj->base.read_domains,
> >>  		   obj->base.write_domain);
> >> -	for_each_engine(engine, dev_priv, i)
> >> +	for_each_engine(engine, obj, i)
> >>  		seq_printf(m, "%x ",
> >>  				i915_gem_request_get_seqno(obj->last_read_req[i]));
> >>  	seq_printf(m, "] %x %x%s%s%s",
> >>  		   i915_gem_request_get_seqno(obj->last_write_req),
> >>  		   i915_gem_request_get_seqno(obj->last_fenced_req),
> >> -		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
> >> +		   i915_cache_level_str(to_i915(obj), obj->cache_level),
> >>  		   obj->dirty ? " dirty" : "",
> >>  		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
> >>  	if (obj->base.name)
> >> @@ -339,7 +338,7 @@ static int per_file_stats(int id, void *ptr, void *data)
> >>  	if (obj->base.name || obj->base.dma_buf)
> >>  		stats->shared += obj->base.size;
> >>  
> >> -	if (USES_FULL_PPGTT(obj->base.dev)) {
> >> +	if (USES_FULL_PPGTT(obj)) {
> >>  		list_for_each_entry(vma, &obj->vma_list, obj_link) {
> >>  			struct i915_hw_ppgtt *ppgtt;
> >>  
> >> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> >> index 0c9fe00d3e83..92365f047e53 100644
> >> --- a/drivers/gpu/drm/i915/i915_drv.h
> >> +++ b/drivers/gpu/drm/i915/i915_drv.h
> >> @@ -2186,6 +2186,12 @@ struct drm_i915_gem_object {
> >>  };
> >>  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
> >>  
> >> +static inline struct drm_i915_private *
> >> +__obj_to_i915(const struct drm_i915_gem_object *obj)
> >> +{
> >> +	return __to_i915(obj->base.dev);
> >> +}
> >> +
> >>  void i915_gem_track_fb(struct drm_i915_gem_object *old,
> >>  		       struct drm_i915_gem_object *new,
> >>  		       unsigned frontbuffer_bits);
> >> @@ -2455,6 +2461,8 @@ struct drm_i915_cmd_table {
> >>  		__p = (struct drm_i915_private *)p; \
> >>  	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
> >>  		__p = __to_i915((struct drm_device *)p); \
> >> +	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
> >> +		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
> >>  	else \
> >>  		BUILD_BUG(); \
> >>  	__p; \
> >> @@ -3132,8 +3140,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
> >>  bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
> >>  
> >>  /* Some GGTT VM helpers */
> >> -#define i915_obj_to_ggtt(obj) \
> >> -	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
> >> +#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
> >>  
> >>  static inline struct i915_hw_ppgtt *
> >>  i915_vm_to_ppgtt(struct i915_address_space *vm)
> >> @@ -3282,9 +3289,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
> >>  /* i915_gem_tiling.c */
> >>  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> -
> >> -	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
> >> +	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
> >>  		obj->tiling_mode != I915_TILING_NONE;
> >>  }
> >>  
> >> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> >> index 8588c83abb35..710a6bbc985e 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem.c
> >> @@ -361,14 +361,12 @@ out:
> >>  
> >>  void *i915_gem_object_alloc(struct drm_device *dev)
> >>  {
> >> -	struct drm_i915_private *dev_priv = dev->dev_private;
> >> -	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
> >> +	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
> >>  }
> >>  
> >>  void i915_gem_object_free(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> -	kmem_cache_free(dev_priv->objects, obj);
> >> +	kmem_cache_free(to_i915(obj)->objects, obj);
> >>  }
> >>  
> >>  static int
> >> @@ -2028,7 +2026,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
> >>  
> >>  static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	int ret;
> >>  
> >>  	if (drm_vma_node_has_offset(&obj->base.vma_node))
> >> @@ -2241,7 +2239,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
> >>  static int
> >>  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	int page_count, i;
> >>  	struct address_space *mapping;
> >>  	struct sg_table *st;
> >> @@ -2372,7 +2370,7 @@ err_pages:
> >>  int
> >>  i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	const struct drm_i915_gem_object_ops *ops = obj->ops;
> >>  	int ret;
> >>  
> >> @@ -2449,8 +2447,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
> >>  	 * so that we don't steal from recently used but inactive objects
> >>  	 * (unless we are forced to ofc!)
> >>  	 */
> >> -	list_move_tail(&obj->global_list,
> >> -		       &to_i915(obj->base.dev)->mm.bound_list);
> >> +	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
> >>  
> >>  	list_for_each_entry(vma, &obj->vma_list, obj_link) {
> >>  		if (!list_empty(&vma->vm_link))
> >> @@ -3172,7 +3169,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
> >>  		return 0;
> >>  
> >>  	if (!i915_semaphore_is_enabled(obj->base.dev)) {
> >> -		struct drm_i915_private *i915 = to_i915(obj->base.dev);
> >> +		struct drm_i915_private *i915 = to_i915(obj);
> >>  		ret = __i915_wait_request(from_req,
> >>  					  atomic_read(&i915->gpu_error.reset_counter),
> >>  					  i915->mm.interruptible,
> >> @@ -3312,7 +3309,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
> >>  static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
> >>  {
> >>  	struct drm_i915_gem_object *obj = vma->obj;
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	int ret;
> >>  
> >>  	if (list_empty(&vma->obj_link))
> >> @@ -3772,7 +3769,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
> >>  	vma = i915_gem_obj_to_ggtt(obj);
> >>  	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
> >>  		list_move_tail(&vma->vm_link,
> >> -			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
> >> +			       &to_i915(obj)->ggtt.base.inactive_list);
> >>  
> >>  	return 0;
> >>  }
> >> @@ -4010,7 +4007,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
> >>  	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
> >>  	 */
> >>  	ret = i915_gem_object_set_cache_level(obj,
> >> -					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
> >> +					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
> >>  	if (ret)
> >>  		goto err_unpin_display;
> >>  
> >> @@ -4209,7 +4206,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
> >>  		     (vma->node.start & (fence_alignment - 1)) == 0);
> >>  
> >>  	mappable = (vma->node.start + fence_size <=
> >> -		    to_i915(obj->base.dev)->ggtt.mappable_end);
> >> +		    to_i915(obj)->ggtt.mappable_end);
> >>  
> >>  	obj->map_and_fenceable = mappable && fenceable;
> >>  }
> >> @@ -4221,7 +4218,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
> >>  		       uint32_t alignment,
> >>  		       uint64_t flags)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	struct i915_vma *vma;
> >>  	unsigned bound;
> >>  	int ret;
> >> @@ -4456,7 +4453,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
> >>  	obj->fence_reg = I915_FENCE_REG_NONE;
> >>  	obj->madv = I915_MADV_WILLNEED;
> >>  
> >> -	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
> >> +	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
> >>  }
> >>  
> >>  static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
> >> @@ -4545,8 +4542,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
> >>  void i915_gem_free_object(struct drm_gem_object *gem_obj)
> >>  {
> >>  	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> >> -	struct drm_device *dev = obj->base.dev;
> >> -	struct drm_i915_private *dev_priv = dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	struct i915_vma *vma, *next;
> >>  
> >>  	intel_runtime_pm_get(dev_priv);
> >> @@ -4647,7 +4643,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
> >>  
> >>  	list_del(&vma->obj_link);
> >>  
> >> -	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
> >> +	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
> >>  }
> >>  
> >>  static void
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> >> index 374a0cb7a092..39ed403b9de3 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> >> @@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
> >>  
> >>  static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
> >>  {
> >> -	return (HAS_LLC(obj->base.dev) ||
> >> +	return (HAS_LLC(obj) ||
> >>  		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
> >>  		obj->cache_level != I915_CACHE_NONE);
> >>  }
> >> @@ -312,8 +312,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
> >>  		   struct drm_i915_gem_relocation_entry *reloc,
> >>  		   uint64_t target_offset)
> >>  {
> >> -	struct drm_device *dev = obj->base.dev;
> >> -	struct drm_i915_private *dev_priv = dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	uint64_t delta = relocation_target(reloc, target_offset);
> >>  	uint64_t offset;
> >>  	void __iomem *reloc_page;
> >> @@ -334,7 +333,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
> >>  					      offset & PAGE_MASK);
> >>  	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
> >>  
> >> -	if (INTEL_INFO(dev)->gen >= 8) {
> >> +	if (INTEL_INFO(obj)->gen >= 8) {
> >>  		offset += sizeof(uint32_t);
> >>  
> >>  		if (offset_in_page(offset) == 0) {
> >> @@ -367,7 +366,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
> >>  		       struct drm_i915_gem_relocation_entry *reloc,
> >>  		       uint64_t target_offset)
> >>  {
> >> -	struct drm_device *dev = obj->base.dev;
> >>  	uint32_t page_offset = offset_in_page(reloc->offset);
> >>  	uint64_t delta = relocation_target(reloc, target_offset);
> >>  	char *vaddr;
> >> @@ -381,7 +379,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
> >>  				reloc->offset >> PAGE_SHIFT));
> >>  	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
> >>  
> >> -	if (INTEL_INFO(dev)->gen >= 8) {
> >> +	if (INTEL_INFO(obj)->gen >= 8) {
> >>  		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
> >>  
> >>  		if (page_offset == 0) {
> >> @@ -403,7 +401,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> >>  				   struct eb_vmas *eb,
> >>  				   struct drm_i915_gem_relocation_entry *reloc)
> >>  {
> >> -	struct drm_device *dev = obj->base.dev;
> >>  	struct drm_gem_object *target_obj;
> >>  	struct drm_i915_gem_object *target_i915_obj;
> >>  	struct i915_vma *target_vma;
> >> @@ -422,7 +419,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> >>  	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
> >>  	 * pipe_control writes because the gpu doesn't properly redirect them
> >>  	 * through the ppgtt for non_secure batchbuffers. */
> >> -	if (unlikely(IS_GEN6(dev) &&
> >> +	if (unlikely(IS_GEN6(obj) &&
> >>  	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
> >>  		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
> >>  				    PIN_GLOBAL);
> >> @@ -464,7 +461,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> >>  
> >>  	/* Check that the relocation address is valid... */
> >>  	if (unlikely(reloc->offset >
> >> -		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
> >> +		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
> >>  		DRM_DEBUG("Relocation beyond object bounds: "
> >>  			  "obj %p target %d offset %d size %d.\n",
> >>  			  obj, reloc->target_handle,
> >> @@ -672,7 +669,7 @@ need_reloc_mappable(struct i915_vma *vma)
> >>  		return false;
> >>  
> >>  	/* See also use_cpu_reloc() */
> >> -	if (HAS_LLC(vma->obj->base.dev))
> >> +	if (HAS_LLC(vma->obj))
> >>  		return false;
> >>  
> >>  	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
> >> index 598198543dcd..1ef75bc2220c 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_fence.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_fence.c
> >> @@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
> >>  					 struct drm_i915_fence_reg *fence,
> >>  					 bool enable)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	int reg = fence_number(dev_priv, fence);
> >>  
> >>  	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
> >> @@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
> >>  int
> >>  i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	struct drm_i915_fence_reg *fence;
> >>  	int ret;
> >>  
> >> @@ -433,7 +433,7 @@ bool
> >>  i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
> >>  {
> >>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
> >> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +		struct drm_i915_private *dev_priv = to_i915(obj);
> >>  		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
> >>  
> >>  		WARN_ON(!ggtt_vma ||
> >> @@ -457,7 +457,7 @@ void
> >>  i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
> >>  {
> >>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
> >> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +		struct drm_i915_private *dev_priv = to_i915(obj);
> >>  		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
> >>  		dev_priv->fence_regs[obj->fence_reg].pin_count--;
> >>  	}
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> >> index 0715bb74d306..6447a5f9661e 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> >> @@ -3305,7 +3305,7 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
> >>  	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
> >>  		return ERR_PTR(-EINVAL);
> >>  
> >> -	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
> >> +	vma = kmem_cache_zalloc(to_i915(obj)->vmas, GFP_KERNEL);
> >>  	if (vma == NULL)
> >>  		return ERR_PTR(-ENOMEM);
> >>  
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> >> index de891c928b2f..224389d077c7 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> >> @@ -540,10 +540,8 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
> >>  static void
> >>  i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> -
> >>  	if (obj->stolen) {
> >> -		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
> >> +		i915_gem_stolen_remove_node(to_i915(obj), obj->stolen);
> >>  		kfree(obj->stolen);
> >>  		obj->stolen = NULL;
> >>  	}
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> >> index 7410f6c962e7..bc4cb7f4fe80 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> >> @@ -122,10 +122,10 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
> >>  	if (tiling_mode == I915_TILING_NONE)
> >>  		return true;
> >>  
> >> -	if (INTEL_INFO(obj->base.dev)->gen >= 4)
> >> +	if (INTEL_INFO(obj)->gen >= 4)
> >>  		return true;
> >>  
> >> -	if (INTEL_INFO(obj->base.dev)->gen == 3) {
> >> +	if (INTEL_INFO(obj)->gen == 3) {
> >>  		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
> >>  			return false;
> >>  	} else {
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> >> index 54088a4d6498..f0e3ade59177 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> >> @@ -303,7 +303,7 @@ __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
> >>  static int
> >>  i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	struct i915_mm_struct *mm;
> >>  	int ret = 0;
> >>  
> >> @@ -376,7 +376,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
> >>  
> >>  	kref_put_mutex(&obj->userptr.mm->kref,
> >>  		       __i915_mm_struct_free,
> >> -		       &to_i915(obj->base.dev)->mm_lock);
> >> +		       &to_i915(obj)->mm_lock);
> >>  	obj->userptr.mm = NULL;
> >>  }
> >>  
> >> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> >> index df0ef5bba8e5..f981bddc9bbf 100644
> >> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> >> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> >> @@ -2069,7 +2069,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
> >>  
> >>  void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
> >>  {
> >> -	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
> >> +	if (HAS_LLC(ringbuf->obj) && !ringbuf->obj->stolen)
> >>  		vunmap(ringbuf->virtual_start);
> >>  	else
> >>  		iounmap(ringbuf->virtual_start);
> >> -- 
> >> 2.8.0.rc3
> >> 
> >> _______________________________________________
> >> Intel-gfx mailing list
> >> Intel-gfx@lists.freedesktop.org
> >> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
> -- 
> Jani Nikula, Intel Open Source Technology Center
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e0ba3e38000f..33ddcdf6d046 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -128,7 +128,6 @@  static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
 	int pin_count = 0;
@@ -143,13 +142,13 @@  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain);
-	for_each_engine(engine, dev_priv, i)
+	for_each_engine(engine, obj, i)
 		seq_printf(m, "%x ",
 				i915_gem_request_get_seqno(obj->last_read_req[i]));
 	seq_printf(m, "] %x %x%s%s%s",
 		   i915_gem_request_get_seqno(obj->last_write_req),
 		   i915_gem_request_get_seqno(obj->last_fenced_req),
-		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+		   i915_cache_level_str(to_i915(obj), obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -339,7 +338,7 @@  static int per_file_stats(int id, void *ptr, void *data)
 	if (obj->base.name || obj->base.dma_buf)
 		stats->shared += obj->base.size;
 
-	if (USES_FULL_PPGTT(obj->base.dev)) {
+	if (USES_FULL_PPGTT(obj)) {
 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			struct i915_hw_ppgtt *ppgtt;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0c9fe00d3e83..92365f047e53 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2186,6 +2186,12 @@  struct drm_i915_gem_object {
 };
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
+static inline struct drm_i915_private *
+__obj_to_i915(const struct drm_i915_gem_object *obj)
+{
+	return __to_i915(obj->base.dev);
+}
+
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits);
@@ -2455,6 +2461,8 @@  struct drm_i915_cmd_table {
 		__p = (struct drm_i915_private *)p; \
 	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
 		__p = __to_i915((struct drm_device *)p); \
+	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
+		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
 	else \
 		BUILD_BUG(); \
 	__p; \
@@ -3132,8 +3140,7 @@  i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 
 /* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
-	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
+#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
 
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
@@ -3282,9 +3289,7 @@  void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
-	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8588c83abb35..710a6bbc985e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -361,14 +361,12 @@  out:
 
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
+	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	kmem_cache_free(dev_priv->objects, obj);
+	kmem_cache_free(to_i915(obj)->objects, obj);
 }
 
 static int
@@ -2028,7 +2026,7 @@  i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int ret;
 
 	if (drm_vma_node_has_offset(&obj->base.vma_node))
@@ -2241,7 +2239,7 @@  i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int page_count, i;
 	struct address_space *mapping;
 	struct sg_table *st;
@@ -2372,7 +2370,7 @@  err_pages:
 int
 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	const struct drm_i915_gem_object_ops *ops = obj->ops;
 	int ret;
 
@@ -2449,8 +2447,7 @@  i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
-	list_move_tail(&obj->global_list,
-		       &to_i915(obj->base.dev)->mm.bound_list);
+	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!list_empty(&vma->vm_link))
@@ -3172,7 +3169,7 @@  __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		return 0;
 
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		struct drm_i915_private *i915 = to_i915(obj);
 		ret = __i915_wait_request(from_req,
 					  atomic_read(&i915->gpu_error.reset_counter),
 					  i915->mm.interruptible,
@@ -3312,7 +3309,7 @@  static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int ret;
 
 	if (list_empty(&vma->obj_link))
@@ -3772,7 +3769,7 @@  i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
 		list_move_tail(&vma->vm_link,
-			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
+			       &to_i915(obj)->ggtt.base.inactive_list);
 
 	return 0;
 }
@@ -4010,7 +4007,7 @@  i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
 	 */
 	ret = i915_gem_object_set_cache_level(obj,
-					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
 	if (ret)
 		goto err_unpin_display;
 
@@ -4209,7 +4206,7 @@  void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 		     (vma->node.start & (fence_alignment - 1)) == 0);
 
 	mappable = (vma->node.start + fence_size <=
-		    to_i915(obj->base.dev)->ggtt.mappable_end);
+		    to_i915(obj)->ggtt.mappable_end);
 
 	obj->map_and_fenceable = mappable && fenceable;
 }
@@ -4221,7 +4218,7 @@  i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		       uint32_t alignment,
 		       uint64_t flags)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct i915_vma *vma;
 	unsigned bound;
 	int ret;
@@ -4456,7 +4453,7 @@  void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	obj->madv = I915_MADV_WILLNEED;
 
-	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4545,8 +4542,7 @@  static bool discard_backing_storage(struct drm_i915_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct i915_vma *vma, *next;
 
 	intel_runtime_pm_get(dev_priv);
@@ -4647,7 +4643,7 @@  void i915_gem_vma_destroy(struct i915_vma *vma)
 
 	list_del(&vma->obj_link);
 
-	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 374a0cb7a092..39ed403b9de3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -241,7 +241,7 @@  static void eb_destroy(struct eb_vmas *eb)
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
-	return (HAS_LLC(obj->base.dev) ||
+	return (HAS_LLC(obj) ||
 		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 		obj->cache_level != I915_CACHE_NONE);
 }
@@ -312,8 +312,7 @@  relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc,
 		   uint64_t target_offset)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	uint64_t delta = relocation_target(reloc, target_offset);
 	uint64_t offset;
 	void __iomem *reloc_page;
@@ -334,7 +333,7 @@  relocate_entry_gtt(struct drm_i915_gem_object *obj,
 					      offset & PAGE_MASK);
 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(obj)->gen >= 8) {
 		offset += sizeof(uint32_t);
 
 		if (offset_in_page(offset) == 0) {
@@ -367,7 +366,6 @@  relocate_entry_clflush(struct drm_i915_gem_object *obj,
 		       struct drm_i915_gem_relocation_entry *reloc,
 		       uint64_t target_offset)
 {
-	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
 	uint64_t delta = relocation_target(reloc, target_offset);
 	char *vaddr;
@@ -381,7 +379,7 @@  relocate_entry_clflush(struct drm_i915_gem_object *obj,
 				reloc->offset >> PAGE_SHIFT));
 	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(obj)->gen >= 8) {
 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 
 		if (page_offset == 0) {
@@ -403,7 +401,6 @@  i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc)
 {
-	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
 	struct i915_vma *target_vma;
@@ -422,7 +419,7 @@  i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	 * pipe_control writes because the gpu doesn't properly redirect them
 	 * through the ppgtt for non_secure batchbuffers. */
-	if (unlikely(IS_GEN6(dev) &&
+	if (unlikely(IS_GEN6(obj) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
 		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
 				    PIN_GLOBAL);
@@ -464,7 +461,7 @@  i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 
 	/* Check that the relocation address is valid... */
 	if (unlikely(reloc->offset >
-		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
+		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
 		DRM_DEBUG("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
 			  obj, reloc->target_handle,
@@ -672,7 +669,7 @@  need_reloc_mappable(struct i915_vma *vma)
 		return false;
 
 	/* See also use_cpu_reloc() */
-	if (HAS_LLC(vma->obj->base.dev))
+	if (HAS_LLC(vma->obj))
 		return false;
 
 	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 598198543dcd..1ef75bc2220c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -229,7 +229,7 @@  static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int reg = fence_number(dev_priv, fence);
 
 	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
@@ -286,7 +286,7 @@  i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct drm_i915_fence_reg *fence;
 	int ret;
 
@@ -433,7 +433,7 @@  bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(obj);
 		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
 
 		WARN_ON(!ggtt_vma ||
@@ -457,7 +457,7 @@  void
 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(obj);
 		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
 		dev_priv->fence_regs[obj->fence_reg].pin_count--;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0715bb74d306..6447a5f9661e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3305,7 +3305,7 @@  __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
 		return ERR_PTR(-EINVAL);
 
-	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+	vma = kmem_cache_zalloc(to_i915(obj)->vmas, GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index de891c928b2f..224389d077c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -540,10 +540,8 @@  static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
 	if (obj->stolen) {
-		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
+		i915_gem_stolen_remove_node(to_i915(obj), obj->stolen);
 		kfree(obj->stolen);
 		obj->stolen = NULL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7410f6c962e7..bc4cb7f4fe80 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -122,10 +122,10 @@  i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (INTEL_INFO(obj->base.dev)->gen >= 4)
+	if (INTEL_INFO(obj)->gen >= 4)
 		return true;
 
-	if (INTEL_INFO(obj->base.dev)->gen == 3) {
+	if (INTEL_INFO(obj)->gen == 3) {
 		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 54088a4d6498..f0e3ade59177 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -303,7 +303,7 @@  __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
 static int
 i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct i915_mm_struct *mm;
 	int ret = 0;
 
@@ -376,7 +376,7 @@  i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
 
 	kref_put_mutex(&obj->userptr.mm->kref,
 		       __i915_mm_struct_free,
-		       &to_i915(obj->base.dev)->mm_lock);
+		       &to_i915(obj)->mm_lock);
 	obj->userptr.mm = NULL;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index df0ef5bba8e5..f981bddc9bbf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2069,7 +2069,7 @@  static int init_phys_status_page(struct intel_engine_cs *engine)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
+	if (HAS_LLC(ringbuf->obj) && !ringbuf->obj->stolen)
 		vunmap(ringbuf->virtual_start);
 	else
 		iounmap(ringbuf->virtual_start);