[07/19] drm/i915: vma is always backed by an object.

Message ID: 20210830121006.2978297-8-maarten.lankhorst@linux.intel.com
State: New, archived
Series: drm/i915: Short-term pinning and async eviction.

Commit Message

Maarten Lankhorst Aug. 30, 2021, 12:09 p.m. UTC
vma->obj and vma->resv are now never NULL, and some checks can be removed.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/i915/gt/intel_context.c       |  2 +-
 .../gpu/drm/i915/gt/intel_ring_submission.c   |  2 +-
 drivers/gpu/drm/i915/i915_vma.c               | 48 ++++++++-----------
 drivers/gpu/drm/i915/i915_vma.h               |  3 --
 4 files changed, 22 insertions(+), 33 deletions(-)
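
As background, the invariant being relied on can be sketched as below. This is a simplified, hypothetical reconstruction of the creation path (the real vma_create() in i915_vma.c takes a GGTT view argument and does considerably more); it only illustrates why vma->obj and vma->resv are never NULL:

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj, struct i915_address_space *vm)
{
	struct i915_vma *vma = i915_vma_alloc();

	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	vma->vm = i915_vm_get(vm);
	/* A VMA is always created from a GEM object, so both fields
	 * are set here and stay valid for the VMA's lifetime. */
	vma->obj = obj;
	vma->resv = obj->base.resv;

	return vma;
}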

Comments

Tvrtko Ursulin Aug. 31, 2021, 9:18 a.m. UTC | #1
On 30/08/2021 13:09, Maarten Lankhorst wrote:
> vma->obj and vma->resv are now never NULL, and some checks can be removed.

Is the direction here compatible with SVM / VM_BIND?

Regards,

Tvrtko

> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
> 
> [full patch quote snipped; see the complete patch below]
Maarten Lankhorst Aug. 31, 2021, 9:34 a.m. UTC | #2
Op 31-08-2021 om 11:18 schreef Tvrtko Ursulin:
>
> On 30/08/2021 13:09, Maarten Lankhorst wrote:
>> vma->obj and vma->resv are now never NULL, and some checks can be removed.
>
> Is the direction here compatible with SVM / VM_BIND? 


Yeah, it should be. The changes here make the obj->resv->lock the main lock, so it should at least simplify locking for VM_BIND.
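
A minimal sketch of what that looks like, assuming this patch's invariant that vma->obj is never NULL (lock_vma() is a hypothetical helper, not from the series; i915_gem_object_lock() is the existing helper visible in the diff):

static int lock_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	/* vma->resv aliases vma->obj->base.resv, so taking the object
	 * lock once covers every VMA backed by that object. */
	return i915_gem_object_lock(vma->obj, ww);
}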

I also have some patches on top to remove i915_vma->active, which was 1 of the requirements for VM_BIND iirc.
Tvrtko Ursulin Aug. 31, 2021, 10:29 a.m. UTC | #3
On 31/08/2021 10:34, Maarten Lankhorst wrote:
> Op 31-08-2021 om 11:18 schreef Tvrtko Ursulin:
>>
>> On 30/08/2021 13:09, Maarten Lankhorst wrote:
>>> vma->obj and vma->resv are now never NULL, and some checks can be removed.
>>
>> Is the direction here compatible with SVM / VM_BIND?
> 
> 
> Yeah, it should be. The changes here make the obj->resv->lock the main lock, so it should at least simplify locking for VM_BIND.

Hm but what will vma->obj point to in case of SVM, when there is no GEM BO?

Regards,

Tvrtko

> 
> I also have some patches on top to remove i915_vma->active, which was 1 of the requirements for VM_BIND iirc.
>
Maarten Lankhorst Sept. 3, 2021, 9:31 a.m. UTC | #4
Op 31-08-2021 om 12:29 schreef Tvrtko Ursulin:
>
> On 31/08/2021 10:34, Maarten Lankhorst wrote:
>> Op 31-08-2021 om 11:18 schreef Tvrtko Ursulin:
>>>
>>> On 30/08/2021 13:09, Maarten Lankhorst wrote:
>>>> vma->obj and vma->resv are now never NULL, and some checks can be removed.
>>>
>>> Is the direction here compatible with SVM / VM_BIND?
>>
>>
>> Yeah, it should be. The changes here make the obj->resv->lock the main lock, so it should at least simplify locking for VM_BIND.
>
> Hm but what will vma->obj point to in case of SVM, when there is no GEM BO? 

Probably to one of the bo's in i915_vm, or a dummy bo that at least shares the vm_resv object, similar to the aliasing gtt handling.
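
A hypothetical sketch of that dummy-bo idea, modelled on how the page-table objects already share the VM's reservation lock. i915_gem_object_create_internal() and i915_vm_resv_get() are existing helpers, but the helper below and its exact wiring are assumptions for illustration only:

static struct drm_i915_gem_object *
create_vm_dummy_bo(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;

	/* Page-sized internal object with no user-visible backing. */
	obj = i915_gem_object_create_internal(vm->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return obj;

	/* Alias the VM's reservation object, as the aliasing-ggtt and
	 * page-table paths do, so vma->resv stays a valid lock. */
	obj->base.resv = i915_vm_resv_get(vm);
	obj->shares_resv_from = vm;

	return obj;
}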
Tvrtko Ursulin Sept. 3, 2021, 10:48 a.m. UTC | #5
On 03/09/2021 10:31, Maarten Lankhorst wrote:
> Op 31-08-2021 om 12:29 schreef Tvrtko Ursulin:
>>
>> On 31/08/2021 10:34, Maarten Lankhorst wrote:
>>> Op 31-08-2021 om 11:18 schreef Tvrtko Ursulin:
>>>>
>>>> On 30/08/2021 13:09, Maarten Lankhorst wrote:
>>>>> vma->obj and vma->resv are now never NULL, and some checks can be removed.
>>>>
>>>> Is the direction here compatible with SVM / VM_BIND?
>>>
>>>
>>> Yeah, it should be. The changes here make the obj->resv->lock the main lock, so it should at least simplify locking for VM_BIND.
>>
>> Hm but what will vma->obj point to in case of SVM, when there is no GEM BO?
> 
> Probably to one of the bo's in i915_vm, or a dummy bo that at least shares the vm_resv object, similar to the aliasing gtt handling.

As a long term or short term solution? Worried that would waste a lot of 
SLAB space just for convenience (whole struct drm_i915_gem_object just 
to store a single pointer to a dma_resv object, if I got that right), 
while it should be possible to come up with a cleaner design.

Even for the upcoming page granularity work we will need multiple VMAs 
pointing to a single GEM bo in ppgtt and that, when SVM is considered, could 
for instance mean that VMAs should instead be backed by some new backing 
store objects, which in turn may (or may not) point to GEM BOs.

Regards,

Tvrtko
Maarten Lankhorst Sept. 16, 2021, 1:41 p.m. UTC | #6
Op 03-09-2021 om 12:48 schreef Tvrtko Ursulin:
>
> On 03/09/2021 10:31, Maarten Lankhorst wrote:
>> Op 31-08-2021 om 12:29 schreef Tvrtko Ursulin:
>>>
>>> On 31/08/2021 10:34, Maarten Lankhorst wrote:
>>>> Op 31-08-2021 om 11:18 schreef Tvrtko Ursulin:
>>>>>
>>>>> On 30/08/2021 13:09, Maarten Lankhorst wrote:
>>>>>> vma->obj and vma->resv are now never NULL, and some checks can be removed.
>>>>>
>>>>> Is the direction here compatible with SVM / VM_BIND?
>>>>
>>>>
>>>> Yeah, it should be. The changes here make the obj->resv->lock the main lock, so it should at least simplify locking for VM_BIND.
>>>
>>> Hm but what will vma->obj point to in case of SVM, when there is no GEM BO?
>>
>> Probably to one of the bo's in i915_vm, or a dummy bo that at least shares the vm_resv object, similar to the aliasing gtt handling.
>
> As a long term or short term solution? Worried that would waste a lot of SLAB space just for convenience (whole struct drm_i915_gem_object just to store a single pointer to a dma_resv object, if I got that right), while it should be possible to come up with a cleaner design.
>
> Even for the upcoming page granularity work we will need multiple VMAs pointing to a single GEM bo in ppgtt and that, when SVM is considered, could for instance mean that VMAs should instead be backed by some new backing store objects, which in turn may (or may not) point to GEM BOs.
>
> Regards,
>
> Tvrtko

We could revisit this if it's required for SVM, since we can always optimize that code later. I don't think it's a problem to remove it for now, especially since it's a lot easier if the VMA becomes a pointer to a memory slab for an object only, without its own lifetime rules. :)
Tvrtko Ursulin Sept. 16, 2021, 2:30 p.m. UTC | #7
On 16/09/2021 14:41, Maarten Lankhorst wrote:
> Op 03-09-2021 om 12:48 schreef Tvrtko Ursulin:
>>
>> On 03/09/2021 10:31, Maarten Lankhorst wrote:
>>> Op 31-08-2021 om 12:29 schreef Tvrtko Ursulin:
>>>>
>>>> On 31/08/2021 10:34, Maarten Lankhorst wrote:
>>>>> Op 31-08-2021 om 11:18 schreef Tvrtko Ursulin:
>>>>>>
>>>>>> On 30/08/2021 13:09, Maarten Lankhorst wrote:
>>>>>>> vma->obj and vma->resv are now never NULL, and some checks can be removed.
>>>>>>
>>>>>> Is the direction here compatible with SVM / VM_BIND?
>>>>>
>>>>>
>>>>> Yeah, it should be. The changes here make the obj->resv->lock the main lock, so it should at least simplify locking for VM_BIND.
>>>>
>>>> Hm but what will vma->obj point to in case of SVM, when there is no GEM BO?
>>>
>>> Probably to one of the bo's in i915_vm, or a dummy bo that at least shares the vm_resv object, similar to the aliasing gtt handling.
>>
>> As a long term or short term solution? Worried that would waste a lot of SLAB space just for convenience (whole struct drm_i915_gem_object just to store a single pointer to a dma_resv object, if I got that right), while it should be possible to come up with a cleaner design.
>>
>> Even for the upcoming page granularity work we will need multiple VMAs pointing to a single GEM bo in ppgtt and that, when SVM is considered, could for instance mean that VMAs should instead be backed by some new backing store objects, which in turn may (or may not) point to GEM BOs.
>>
>> Regards,
>>
>> Tvrtko
> 
> We could revisit this if it's required for SVM, since we can always optimize that code later. I don't think it's a problem to remove it for now, especially since it's a lot easier if the VMA becomes a pointer to a memory slab for an object only, without its own lifetime rules. :)

Not sure what you meant with "pointer to memory slab for an object"?

But on the high level, what worries me is whether the direction is not 
wrong. Sure you can change it all again later, but if we are moving 
towards the world where VMAs are fundamentally and predominantly *not* 
backed by GEM buffer objects, then I have to ask the question whether 
this plan of rewriting everything twice is the most efficient one.

Maybe it's not that scary, not sure, but again, all I see is a large-ish 
series which implements some very important functionality, and _seems_ 
to rely on what I think is a design direction incompatible with where I 
thought we needed to go.

I suppose all I can do is ask you to verify this direction with Daniel. 
Maybe you already have but I did not see it in public at least. So 
adding him to CC.

Regards,

Tvrtko
Tvrtko Ursulin Sept. 16, 2021, 3:05 p.m. UTC | #8
On 16/09/2021 15:30, Tvrtko Ursulin wrote:
> 
> On 16/09/2021 14:41, Maarten Lankhorst wrote:
>> Op 03-09-2021 om 12:48 schreef Tvrtko Ursulin:
>>>
>>> On 03/09/2021 10:31, Maarten Lankhorst wrote:
>>>> Op 31-08-2021 om 12:29 schreef Tvrtko Ursulin:
>>>>>
>>>>> On 31/08/2021 10:34, Maarten Lankhorst wrote:
>>>>>> Op 31-08-2021 om 11:18 schreef Tvrtko Ursulin:
>>>>>>>
>>>>>>> On 30/08/2021 13:09, Maarten Lankhorst wrote:
>>>>>>>> vma->obj and vma->resv are now never NULL, and some checks can 
>>>>>>>> be removed.
>>>>>>>
>>>>>>> Is the direction here compatible with SVM / VM_BIND?
>>>>>>
>>>>>>
>>>>>> Yeah, it should be. The changes here make the obj->resv->lock the 
>>>>>> main lock, so it should at least simplify locking for VM_BIND.
>>>>>
>>>>> Hm but what will vma->obj point to in case of SVM, when there is no 
>>>>> GEM BO?
>>>>
>>>> Probably to one of the bo's in i915_vm, or a dummy bo that at least 
>>>> shares the vm_resv object, similar to the aliasing gtt handling.
>>>
>>> As a long term or short term solution? Worried that would waste a lot 
>>> of SLAB space just for convenience (whole struct drm_i915_gem_object 
>>> just to store a single pointer to a dma_resv object, if I got that 
>>> right), while it should be possible to come up with a cleaner design.
>>>
>>> Even for the upcoming page granularity work we will need multiple 
>>> VMAs pointing to a single GEM bo in ppgtt and that, when SVM is 
>>> considered, could for instance mean that VMAs should instead be 
>>> backed by some new backing store objects, which in turn may (or may 
>>> not) point to GEM BOs.
>>>
>>> Regards,
>>>
>>> Tvrtko
>>
>> We could revisit this if it's required for SVM, since we can always 
>> optimize that code later. I don't think it's a problem to remove it 
>> for now, especially since it's a lot easier if the VMA becomes a pointer 
>> to a memory slab for an object only, without its own lifetime rules. :)
> 
> Not sure what you meant with "pointer to memory slab for an object"?
> 
> But on the high level, what worries me is whether the direction is not 
> wrong. Sure you can change it all again later, but if we are moving 
> towards the world where VMAs are fundamentally and predominantly *not* 
> backed by GEM buffer objects, then I have to ask the question whether 
> this plan of rewriting everything twice is the most efficient one.
> 
> Maybe it's not that scary, not sure, but again, all I see is a large-ish 
> series which implements some very important functionality, and _seems_ 
> to rely on what I think is a design direction incompatible with where I 
> thought we needed to go.
> 
> I suppose all I can do is ask you to verify this direction with Daniel. 
> Maybe you already have but I did not see it in public at least. So 
> adding him to CC.

Okay, I reminded myself of how the SVM is implemented and perhaps it is 
not a concern. It seems that it doesn't use the VMA for more than a 
temporary vehicle during the PTE setup stage.

And for page granularity paths over legacy binding I think it should 
also be fine since, as you say, all VMAs can and should point to the 
same obj.
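
For reference, that many-VMAs-per-object shape already exists for GGTT partial views. A sketch with illustrative parameters (i915_vma_instance() is the real lookup helper; partial views are currently GGTT-only, so the page granularity work would extend the same pattern to ppgtt):

static void two_vmas_one_object(struct i915_ggtt *ggtt,
				struct drm_i915_gem_object *obj)
{
	struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_PARTIAL,
		.partial = { .offset = 0, .size = 1 },	/* first page */
	};
	struct i915_vma *whole = i915_vma_instance(obj, &ggtt->vm, NULL);
	struct i915_vma *part = i915_vma_instance(obj, &ggtt->vm, &view);

	/* Both VMAs point at the same object and share obj->base.resv,
	 * so one i915_gem_object_lock() covers whole and part alike. */
	GEM_BUG_ON(whole->obj != part->obj);
}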

Regards,

Tvrtko

Patch

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 745e84c72c90..d3ad16df3ca2 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -219,7 +219,7 @@  int __intel_context_do_pin_ww(struct intel_context *ce,
 	 */
 
 	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
-	if (!err && ce->ring->vma->obj)
+	if (!err)
 		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
 	if (!err && ce->state)
 		err = i915_gem_object_lock(ce->state->obj, ww);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 3c65efcb7bed..cc31ccc13bfb 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1354,7 +1354,7 @@  int intel_ring_submission_setup(struct intel_engine_cs *engine)
 	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
 	if (!err && gen7_wa_vma)
 		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
-	if (!err && engine->legacy.ring->vma->obj)
+	if (!err)
 		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
 	if (!err)
 		err = intel_timeline_pin(timeline, &ww);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index f9ac33e0bac9..ad5d52b33eb6 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -40,12 +40,12 @@ 
 
 static struct kmem_cache *slab_vmas;
 
-struct i915_vma *i915_vma_alloc(void)
+static struct i915_vma *i915_vma_alloc(void)
 {
 	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
 }
 
-void i915_vma_free(struct i915_vma *vma)
+static void i915_vma_free(struct i915_vma *vma)
 {
 	return kmem_cache_free(slab_vmas, vma);
 }
@@ -426,10 +426,8 @@  int i915_vma_bind(struct i915_vma *vma,
 
 		work->base.dma.error = 0; /* enable the queue_work() */
 
-		if (vma->obj) {
-			__i915_gem_object_pin_pages(vma->obj);
-			work->pinned = i915_gem_object_get(vma->obj);
-		}
+		__i915_gem_object_pin_pages(vma->obj);
+		work->pinned = i915_gem_object_get(vma->obj);
 	} else {
 		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
 	}
@@ -670,7 +668,7 @@  i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	}
 
 	color = 0;
-	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
+	if (i915_vm_has_cache_coloring(vma->vm))
 		color = vma->obj->cache_level;
 
 	if (flags & PIN_OFFSET_FIXED) {
@@ -795,17 +793,14 @@  static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
 static int vma_get_pages(struct i915_vma *vma)
 {
 	int err = 0;
-	bool pinned_pages = false;
+	bool pinned_pages = true;
 
 	if (atomic_add_unless(&vma->pages_count, 1, 0))
 		return 0;
 
-	if (vma->obj) {
-		err = i915_gem_object_pin_pages(vma->obj);
-		if (err)
-			return err;
-		pinned_pages = true;
-	}
+	err = i915_gem_object_pin_pages(vma->obj);
+	if (err)
+		return err;
 
 	/* Allocations ahoy! */
 	if (mutex_lock_interruptible(&vma->pages_mutex)) {
@@ -838,8 +833,8 @@  static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
 	if (atomic_sub_return(count, &vma->pages_count) == 0) {
 		vma->ops->clear_pages(vma);
 		GEM_BUG_ON(vma->pages);
-		if (vma->obj)
-			i915_gem_object_unpin_pages(vma->obj);
+
+		i915_gem_object_unpin_pages(vma->obj);
 	}
 	mutex_unlock(&vma->pages_mutex);
 }
@@ -875,7 +870,7 @@  int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	int err;
 
 #ifdef CONFIG_PROVE_LOCKING
-	if (debug_locks && !WARN_ON(!ww) && vma->resv)
+	if (debug_locks && !WARN_ON(!ww))
 		assert_vma_held(vma);
 #endif
 
@@ -983,7 +978,7 @@  int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 
 	GEM_BUG_ON(!vma->pages);
 	err = i915_vma_bind(vma,
-			    vma->obj ? vma->obj->cache_level : 0,
+			    vma->obj->cache_level,
 			    flags, work);
 	if (err)
 		goto err_remove;
@@ -1037,7 +1032,7 @@  int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 
 #ifdef CONFIG_LOCKDEP
-	WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv));
+	WARN_ON(!ww && dma_resv_held(vma->resv));
 #endif
 
 	do {
@@ -1116,6 +1111,7 @@  void i915_vma_reopen(struct i915_vma *vma)
 void i915_vma_release(struct kref *ref)
 {
 	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+	struct drm_i915_gem_object *obj = vma->obj;
 
 	if (drm_mm_node_allocated(&vma->node)) {
 		mutex_lock(&vma->vm->mutex);
@@ -1126,15 +1122,11 @@  void i915_vma_release(struct kref *ref)
 	}
 	GEM_BUG_ON(i915_vma_is_active(vma));
 
-	if (vma->obj) {
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		spin_lock(&obj->vma.lock);
-		list_del(&vma->obj_link);
-		if (!RB_EMPTY_NODE(&vma->obj_node))
-			rb_erase(&vma->obj_node, &obj->vma.tree);
-		spin_unlock(&obj->vma.lock);
-	}
+	spin_lock(&obj->vma.lock);
+	list_del(&vma->obj_link);
+	if (!RB_EMPTY_NODE(&vma->obj_node))
+		rb_erase(&vma->obj_node, &obj->vma.tree);
+	spin_unlock(&obj->vma.lock);
 
 	__i915_vma_remove_closed(vma);
 	i915_vm_put(vma->vm);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 1c930515ec3d..1ba82bf863a5 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -409,9 +409,6 @@  static inline void i915_vma_clear_scanout(struct i915_vma *vma)
 	list_for_each_entry(V, &(OBJ)->vma.list, obj_link)		\
 		for_each_until(!i915_vma_is_ggtt(V))
 
-struct i915_vma *i915_vma_alloc(void);
-void i915_vma_free(struct i915_vma *vma);
-
 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
 void i915_vma_make_shrinkable(struct i915_vma *vma);
 void i915_vma_make_purgeable(struct i915_vma *vma);