@@ -211,7 +211,7 @@ void ppgtt_unbind_vma(struct i915_address_space *vm,
return;
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
- vma_invalidate_tlb(vm, vma_res->tlb);
+ vma_invalidate_tlb(vm, vma_res->tlb, vma_res->start, vma_res->vma_size);
}
static unsigned long pd_count(u64 size, int shift)
@@ -1339,7 +1339,8 @@ I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
return err;
}
-void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb,
+ u64 start, u64 size)
{
struct intel_gt *gt;
int id;
@@ -1355,9 +1356,11 @@ void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
* the most recent TLB invalidation seqno, and if we have not yet
* flushed the TLBs upon release, perform a full invalidation.
*/
- for_each_gt(gt, vm->i915, id)
- WRITE_ONCE(tlb[id],
- intel_gt_next_invalidate_tlb_full(gt));
+ for_each_gt(gt, vm->i915, id) {
+ if (!intel_gt_invalidate_tlb_range(gt, start, size))
+ WRITE_ONCE(tlb[id],
+ intel_gt_next_invalidate_tlb_full(gt));
+ }
}
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
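For readers between the hunks: a minimal, compilable userspace model of the fallback pattern above. It assumes intel_gt_invalidate_tlb_range() returns true only when a ranged invalidation covering [start, start + size) was actually issued, and that intel_gt_next_invalidate_tlb_full() hands back the seqno of the next full flush; every type and helper below is an illustrative stand-in, not driver API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct intel_gt; not the driver type. */
struct gt {
	bool has_range_inval;		/* HW supports selective invalidation */
	uint32_t next_full_seqno;	/* seqno of the next full TLB flush */
};

/*
 * Models the assumed contract of intel_gt_invalidate_tlb_range():
 * return true only if a ranged invalidation covering
 * [start, start + size) was actually issued.
 */
static bool gt_invalidate_tlb_range(struct gt *gt, uint64_t start, uint64_t size)
{
	if (!gt->has_range_inval)
		return false;
	printf("ranged invalidate [%#llx, %#llx)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size));
	return true;
}

/*
 * The fallback pattern from the patch: record the seqno of the next
 * full invalidation only when the ranged path was not taken, so the
 * TLBs are still flushed before the backing pages are released.
 */
static void invalidate_tlb(struct gt *gts, int ngt, uint32_t *tlb,
			   uint64_t start, uint64_t size)
{
	for (int id = 0; id < ngt; id++) {
		if (!gt_invalidate_tlb_range(&gts[id], start, size))
			tlb[id] = ++gts[id].next_full_seqno;
	}
}

int main(void)
{
	struct gt gts[2] = {
		{ .has_range_inval = true },
		{ .has_range_inval = false },
	};
	uint32_t tlb[2] = { 0, 0 };

	invalidate_tlb(gts, 2, tlb, 0x10000, 0x4000);
	printf("tlb[0]=%u tlb[1]=%u\n", (unsigned)tlb[0], (unsigned)tlb[1]);
	return 0;
}

Run against two modeled GTs, only the one without ranged support records a pending full flush, which mirrors how the patch skips the WRITE_ONCE() of the full-invalidation seqno on the ranged path.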
@@ -2041,7 +2044,8 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
dma_fence_put(unbind_fence);
unbind_fence = NULL;
}
- vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
+ vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb,
+ vma->node.start, vma->size);
}
/*
@@ -260,7 +260,8 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
-void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb,
+ u64 start, u64 size);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);