Message ID: 20180709130208.11730-3-chris@chris-wilson.co.uk (mailing list archive)
State: New, archived
On 09/07/2018 14:02, Chris Wilson wrote:
> Our goal is to remove struct_mutex and replace it with fine grained
> locking. One of the thorny issues is our eviction logic for reclaiming
> space for an execbuffer (or GTT mmapping, among a few other examples).
> While eviction itself is easy to move under a per-VM mutex, performing
> the activity tracking is less agreeable. One solution is not to do any
> MRU tracking and do a simple coarse evaluation during eviction of
> active/inactive, with a loose temporal ordering of last
> insertion/evaluation. That keeps all the locking constrained to when we
> are manipulating the VM itself, neatly avoiding the tricky handling of
> possible recursive locking during execbuf and elsewhere.
>
> Note that discarding the MRU is unlikely to impact upon our efficiency
> to reclaim VM space (where we think an LRU model is best) as our
> current strategy is to use random idle replacement first before doing
> a search, and over time the use of softpinned 48b per-ppGTT is growing
> (thereby eliminating any need to perform any eviction searches, in
> theory at least).

It's a big change to the eviction logic, but it also mostly affects the
GGTT, which is diminishing in significance. We still probably need some
real-world mmap_gtt users to benchmark, plus general 3D pre-ppGTT
workloads.

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_gem.c               | 10 +---
>  drivers/gpu/drm/i915/i915_gem_evict.c         | 56 ++++++++++---------
>  drivers/gpu/drm/i915/i915_gem_gtt.c           | 15 ++---
>  drivers/gpu/drm/i915/i915_gem_gtt.h           | 26 +--------
>  drivers/gpu/drm/i915/i915_gem_shrinker.c      |  8 ++-
>  drivers/gpu/drm/i915/i915_gem_stolen.c        |  2 +-
>  drivers/gpu/drm/i915/i915_gpu_error.c         | 37 ++++++------
>  drivers/gpu/drm/i915/i915_vma.c               |  9 +--
>  .../gpu/drm/i915/selftests/i915_gem_evict.c   |  4 +-
>  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |  2 +-
>  10 files changed, 68 insertions(+), 101 deletions(-)

[snip]

> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 02b83a5ed96c..6a3608398d2a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -127,14 +127,10 @@ i915_gem_evict_something(struct i915_address_space *vm,
>  	struct drm_i915_private *dev_priv = vm->i915;
>  	struct drm_mm_scan scan;
>  	struct list_head eviction_list;
> -	struct list_head *phases[] = {
> -		&vm->inactive_list,
> -		&vm->active_list,
> -		NULL,
> -	}, **phase;
>  	struct i915_vma *vma, *next;
>  	struct drm_mm_node *node;
>  	enum drm_mm_insert_mode mode;
> +	struct i915_vma *active;
>  	int ret;
>
>  	lockdep_assert_held(&vm->i915->drm.struct_mutex);
> @@ -170,17 +166,31 @@ i915_gem_evict_something(struct i915_address_space *vm,
>  	 */
>  	if (!(flags & PIN_NONBLOCK))
>  		i915_retire_requests(dev_priv);
> -	else
> -		phases[1] = NULL;

There are comments around here referring to the active/inactive lists
which will need updating/removing.

>  search_again:
> +	active = NULL;
>  	INIT_LIST_HEAD(&eviction_list);
> -	phase = phases;
> -	do {
> -		list_for_each_entry(vma, *phase, vm_link)
> -			if (mark_free(&scan, vma, flags, &eviction_list))
> -				goto found;
> -	} while (*++phase);
> +	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
> +		if (i915_vma_is_active(vma)) {
> +			if (vma == active) {
> +				if (flags & PIN_NONBLOCK)
> +					break;
> +
> +				active = ERR_PTR(-EAGAIN);
> +			}
> +
> +			if (active != ERR_PTR(-EAGAIN)) {
> +				if (!active)
> +					active = vma;
> +
> +				list_move_tail(&vma->vm_link, &vm->bound_list);
> +				continue;
> +			}
> +		}
> +
> +		if (mark_free(&scan, vma, flags, &eviction_list))
> +			goto found;
> +	}

This loop needs a narrative to explain the process.

[snip]

> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index feda45dfd481..fa494e861ca3 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h

[snip]

>  	/**
> -	 * List of vma that have been unbound.
> -	 *
> -	 * A reference is not held on the buffer while on this list.
> +	 * List of vma that are not unbound.
>  	 */
>  	struct list_head unbound_list;

"vma" should be plural here, and s/unbound/bound/ I think.

[snip]

> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index ed4e0fb558f7..44cc3390eee4 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c

[snip]

> @@ -996,10 +993,8 @@ int i915_vma_move_to_active(struct i915_vma *vma,
>  	 * add the active reference first and queue for it to be dropped
>  	 * *last*.
>  	 */
> -	if (!i915_gem_active_isset(active) && !vma->active_count++) {
> -		list_move_tail(&vma->vm_link, &vma->vm->active_list);

Is it not workable to bump it to the head of the bound list here?

Regards,

Tvrtko

> +	if (!i915_gem_active_isset(active) && !vma->active_count++)
>  		obj->active_count++;
> -	}
>  	i915_gem_active_set(active, rq);
>  	GEM_BUG_ON(!i915_vma_is_active(vma));
>  	GEM_BUG_ON(!obj->active_count);

[snip]
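Taken together, the documentation nits above suggest wording along the
following lines for the kernel-doc in i915_gem_gtt.h. This is only a
sketch of what the review is asking for, written against the loose
temporal ordering described in the commit message; it is not the text
that was eventually applied:

struct i915_address_space {
	/* ...existing members... */

	/**
	 * List of vmas currently bound into this address space.
	 *
	 * Kept in only a loose temporal order: eviction rotates
	 * still-active vmas to the tail, so inactive vmas tend to
	 * collect towards the head over time.
	 */
	struct list_head bound_list;

	/**
	 * List of vmas that are not bound, i.e. that have no GTT
	 * space allocated. A reference is not held on the buffer
	 * while on this list.
	 */
	struct list_head unbound_list;

	/* ... */
};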
Quoting Tvrtko Ursulin (2018-07-10 13:19:51)
>
> On 09/07/2018 14:02, Chris Wilson wrote:
> > Our goal is to remove struct_mutex and replace it with fine grained
> > locking. One of the thorny issues is our eviction logic for reclaiming
> > space for an execbuffer (or GTT mmapping, among a few other examples).
> > While eviction itself is easy to move under a per-VM mutex, performing
> > the activity tracking is less agreeable. One solution is not to do any
> > MRU tracking and do a simple coarse evaluation during eviction of
> > active/inactive, with a loose temporal ordering of last
> > insertion/evaluation. That keeps all the locking constrained to when we
> > are manipulating the VM itself, neatly avoiding the tricky handling of
> > possible recursive locking during execbuf and elsewhere.
> >
> > Note that discarding the MRU is unlikely to impact upon our efficiency
> > to reclaim VM space (where we think an LRU model is best) as our
> > current strategy is to use random idle replacement first before doing
> > a search, and over time the use of softpinned 48b per-ppGTT is growing
> > (thereby eliminating any need to perform any eviction searches, in
> > theory at least).
>
> It's a big change to the eviction logic, but it also mostly affects the
> GGTT, which is diminishing in significance. We still probably need some
> real-world mmap_gtt users to benchmark, plus general 3D pre-ppGTT
> workloads.

mmap_gtt is not really significant here, as we use random replacement
almost exclusively for them.

Any workload that relies on thrashing is a lost cause more or less; we
cannot implement an optimal eviction strategy (no foreknowledge) and
even MRU is suggested by some of the literature as not being a good
strategy for graphics (the evidence in games is that the MRU-reused
surfaces in a frame are typically use-once, whereas the older surfaces
are typically used at the start of each frame; it is those use-once
surfaces that then distort the MRU into making a bad prediction).

Fwiw, you will never get a demo with a >GTT working set that isn't a
slideshow, simply because we don't have the memory bw :-p
(Those machines have ~6GB/s main memory bw, a few fold higher while
in the LLC cache, but not enough to sustain ~180GB/s for the example,
even ignoring all the other rate-limiting steps.)

> > diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> > index 02b83a5ed96c..6a3608398d2a 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> > @@ -127,14 +127,10 @@ i915_gem_evict_something(struct i915_address_space *vm,
> >  	struct drm_i915_private *dev_priv = vm->i915;
> >  	struct drm_mm_scan scan;
> >  	struct list_head eviction_list;
> > -	struct list_head *phases[] = {
> > -		&vm->inactive_list,
> > -		&vm->active_list,
> > -		NULL,
> > -	}, **phase;
> >  	struct i915_vma *vma, *next;
> >  	struct drm_mm_node *node;
> >  	enum drm_mm_insert_mode mode;
> > +	struct i915_vma *active;
> >  	int ret;
> >
> >  	lockdep_assert_held(&vm->i915->drm.struct_mutex);
> > @@ -170,17 +166,31 @@ i915_gem_evict_something(struct i915_address_space *vm,
> >  	 */
> >  	if (!(flags & PIN_NONBLOCK))
> >  		i915_retire_requests(dev_priv);
> > -	else
> > -		phases[1] = NULL;
>
> There are comments around here referring to the active/inactive lists
> which will need updating/removing.
>
> >  search_again:
> > +	active = NULL;
> >  	INIT_LIST_HEAD(&eviction_list);
> > -	phase = phases;
> > -	do {
> > -		list_for_each_entry(vma, *phase, vm_link)
> > -			if (mark_free(&scan, vma, flags, &eviction_list))
> > -				goto found;
> > -	} while (*++phase);
> > +	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
> > +		if (i915_vma_is_active(vma)) {
> > +			if (vma == active) {
> > +				if (flags & PIN_NONBLOCK)
> > +					break;
> > +
> > +				active = ERR_PTR(-EAGAIN);
> > +			}
> > +
> > +			if (active != ERR_PTR(-EAGAIN)) {
> > +				if (!active)
> > +					active = vma;
> > +
> > +				list_move_tail(&vma->vm_link, &vm->bound_list);
> > +				continue;
> > +			}
> > +		}
> > +
> > +		if (mark_free(&scan, vma, flags, &eviction_list))
> > +			goto found;
> > +	}
>
> This loop needs a narrative to explain the process.

active/inactive doesn't cover it?

The comments still make sense to me explaining the code flow. There's
only the conceptual juggle that the two lists are combined into one.

> > @@ -996,10 +993,8 @@ int i915_vma_move_to_active(struct i915_vma *vma,
> >  	 * add the active reference first and queue for it to be dropped
> >  	 * *last*.
> >  	 */
> > -	if (!i915_gem_active_isset(active) && !vma->active_count++) {
> > -		list_move_tail(&vma->vm_link, &vma->vm->active_list);
>
> Is it not workable to bump it to the head of the bound list here?

See the opening statement of intent: killing the list_move here is
something that appears on the profiles for everyone but is very rarely
used (and never for the softpinned cases).
-Chris
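As a sanity check on the bandwidth aside earlier in this message: the
~180GB/s figure is consistent with streaming a working set of roughly
3GB once per frame at 60fps. The 3GB working set is an assumption made
here purely for illustration, since the thread does not restate the
example:

\[
3\,\text{GB/frame} \times 60\,\text{fps} = 180\,\text{GB/s}
\approx 30 \times 6\,\text{GB/s}
\]

That is, a thrashing >GTT workload would be roughly 30x short of the
required memory bandwidth no matter how clever the eviction policy is.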
On 10/07/2018 13:37, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2018-07-10 13:19:51)
>>
>> On 09/07/2018 14:02, Chris Wilson wrote:
>>> Our goal is to remove struct_mutex and replace it with fine grained
>>> locking. One of the thorny issues is our eviction logic for reclaiming
>>> space for an execbuffer (or GTT mmapping, among a few other examples).
>>> While eviction itself is easy to move under a per-VM mutex, performing
>>> the activity tracking is less agreeable. One solution is not to do any
>>> MRU tracking and do a simple coarse evaluation during eviction of
>>> active/inactive, with a loose temporal ordering of last
>>> insertion/evaluation. That keeps all the locking constrained to when we
>>> are manipulating the VM itself, neatly avoiding the tricky handling of
>>> possible recursive locking during execbuf and elsewhere.
>>>
>>> Note that discarding the MRU is unlikely to impact upon our efficiency
>>> to reclaim VM space (where we think an LRU model is best) as our
>>> current strategy is to use random idle replacement first before doing
>>> a search, and over time the use of softpinned 48b per-ppGTT is growing
>>> (thereby eliminating any need to perform any eviction searches, in
>>> theory at least).
>>
>> It's a big change to the eviction logic, but it also mostly affects the
>> GGTT, which is diminishing in significance. We still probably need some
>> real-world mmap_gtt users to benchmark, plus general 3D pre-ppGTT
>> workloads.
>
> mmap_gtt is not really significant here, as we use random replacement
> almost exclusively for them.

You mean ggtt mmaps can be kicked out by someone doing random
replacement, but mmap_gtt faults cannot necessarily kick out other ggtt
vmas using random replacement. In that case it falls back to LRU today,
so I think there is still something to verify if we want to be 100%
nice.

The challenge is knowing such workloads. From memory, transcode jobs
used to be quite heavy in this respect, with many, many contexts each
using some large mmap_gtt. I know the message was to move away from
mmap_gtt, and I don't know if that has been done yet, but maybe there
are other similar workloads.

There is also GVT, which, with its reduced aperture, will run eviction
more often in its more constrained ggtt space, so anything running
under it could show the effect amplified.

> Any workload that relies on thrashing is a lost cause more or less; we
> cannot implement an optimal eviction strategy (no foreknowledge) and
> even MRU is suggested by some of the literature as not being a good
> strategy for graphics (the evidence in games is that the MRU-reused
> surfaces in a frame are typically use-once, whereas the older surfaces
> are typically used at the start of each frame; it is those use-once
> surfaces that then distort the MRU into making a bad prediction).
>
> Fwiw, you will never get a demo with a >GTT working set that isn't a
> slideshow, simply because we don't have the memory bw :-p
> (Those machines have ~6GB/s main memory bw, a few fold higher while
> in the LLC cache, but not enough to sustain ~180GB/s for the example,
> even ignoring all the other rate-limiting steps.)

I agree with that.

>>> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
>>> index 02b83a5ed96c..6a3608398d2a 100644
>>> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
>>> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c

[snip]

>>> @@ -170,17 +166,31 @@ i915_gem_evict_something(struct i915_address_space *vm,
>>>  	 */
>>>  	if (!(flags & PIN_NONBLOCK))
>>>  		i915_retire_requests(dev_priv);
>>> -	else
>>> -		phases[1] = NULL;
>>
>> There are comments around here referring to the active/inactive lists
>> which will need updating/removing.
>>
>>>  search_again:
>>> +	active = NULL;
>>>  	INIT_LIST_HEAD(&eviction_list);
>>> -	phase = phases;
>>> -	do {
>>> -		list_for_each_entry(vma, *phase, vm_link)
>>> -			if (mark_free(&scan, vma, flags, &eviction_list))
>>> -				goto found;
>>> -	} while (*++phase);
>>> +	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
>>> +		if (i915_vma_is_active(vma)) {
>>> +			if (vma == active) {
>>> +				if (flags & PIN_NONBLOCK)
>>> +					break;
>>> +
>>> +				active = ERR_PTR(-EAGAIN);
>>> +			}
>>> +
>>> +			if (active != ERR_PTR(-EAGAIN)) {
>>> +				if (!active)
>>> +					active = vma;
>>> +
>>> +				list_move_tail(&vma->vm_link, &vm->bound_list);
>>> +				continue;
>>> +			}
>>> +		}
>>> +
>>> +		if (mark_free(&scan, vma, flags, &eviction_list))
>>> +			goto found;
>>> +	}
>>
>> This loop needs a narrative to explain the process.
>
> active/inactive doesn't cover it?
>
> The comments still make sense to me explaining the code flow. There's
> only the conceptual juggle that the two lists are combined into one.

I don't completely agree. There is talk of LRU and retirement order
etc., and talking about active and inactive is just misleading when,
with this change, it is bound/pinned/active. That applies to the
pre-existing comments.

For this particular loop I don't want the reader to have to think too
much; a short comment shouldn't be too big an ask for such an important
loop.

For instance, "if (vma == active)": when does this happen? I see
"active = vma; continue;", which will walk to the next vma, so it is
not immediately obvious. Okay, the loop modifies the list. So maybe
s/active/first_active_vma/ for more self-documentation. But it is also
important to say why the first active vma is interesting. There is no
ordering/grouping of active/inactive with the proposed patch, so why is
the first active vma important? Maybe it is a way of saying we have
walked the whole list and found nothing but active vmas. So my point
is: why not just say that in a comment, instead of making everyone who
needs to understand it in the future spend a few extra minutes figuring
it out?

>>> @@ -996,10 +993,8 @@ int i915_vma_move_to_active(struct i915_vma *vma,
>>>  	 * add the active reference first and queue for it to be dropped
>>>  	 * *last*.
>>>  	 */
>>> -	if (!i915_gem_active_isset(active) && !vma->active_count++) {
>>> -		list_move_tail(&vma->vm_link, &vma->vm->active_list);
>>
>> Is it not workable to bump it to the head of the bound list here?
>
> See the opening statement of intent: killing the list_move here is
> something that appears on the profiles for everyone but is very rarely
> used (and never for the softpinned cases).

You mean in the commit message? I don't see anything about the cost of
this list_move_tail there. But it would mean taking a lock in the
following patches; does that create the cost, or does it create a
locking inversion problem?

Regards,

Tvrtko
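Since the thread keeps circling the same few lines, a compact
standalone model of the scan may serve as the missing narrative. This
is a sketch, not the driver code: the kernel list plumbing is reduced
to a toy singly linked queue, and LAPPED stands in for
ERR_PTR(-EAGAIN) (in the kernel, <linux/err.h> encodes small negative
errnos as invalid pointer values, which is why a pointer variable can
double as a state flag in the patch).

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a vma on the combined bound_list. */
struct vma {
	struct vma *next;
	bool active;
	int id;
};

/* Stand-in for ERR_PTR(-EAGAIN): "we have already lapped the actives". */
#define LAPPED ((struct vma *)-1)

static struct vma *head, *tail;

static struct vma *pop_head(void)
{
	struct vma *v = head;

	if (v) {
		head = v->next;
		if (!head)
			tail = NULL;
		v->next = NULL;
	}
	return v;
}

static void push_tail(struct vma *v)
{
	if (tail)
		tail->next = v;
	else
		head = v;
	tail = v;
}

/*
 * The narrative: walk the single bound list from the head. Inactive
 * vmas are eviction candidates straight away. Active vmas are rotated
 * to the tail, and the first one rotated is remembered as a lap
 * marker. Meeting the marker again means everything left is active:
 * in nonblocking mode give up; otherwise flip to LAPPED and start
 * treating active vmas as candidates too (the real code waits on them).
 */
static void scan(bool nonblock)
{
	struct vma *first_active = NULL;	/* 'active' in the patch */
	struct vma *v;

	while ((v = pop_head())) {
		if (v->active) {
			if (v == first_active) {
				if (nonblock)
					return;	/* only active vmas remain */
				first_active = LAPPED;
			}
			if (first_active != LAPPED) {
				if (!first_active)
					first_active = v;
				push_tail(v);	/* rotate and keep going */
				continue;
			}
		}
		printf("evict candidate: vma %d (active=%d)\n",
		       v->id, v->active);
	}
}

int main(void)
{
	struct vma nodes[5];
	int i;

	for (i = 0; i < 5; i++) {
		nodes[i].id = i;
		nodes[i].active = !(i & 1);	/* 0, 2, 4 are active */
		nodes[i].next = NULL;
		push_tail(&nodes[i]);
	}
	scan(false);
	return 0;
}

Run as above, the inactive vmas 1 and 3 are reported first; the scan
then laps the rotated actives and reports 0, 2 and 4 as well. With
scan(true) it stops at the lap instead, mirroring the PIN_NONBLOCK
break in the patch.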
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b35cbfd16c9c..3bc2d40e6e0f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -251,10 +251,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
 	pinned = ggtt->vm.reserved;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
-		if (i915_vma_is_pinned(vma))
-			pinned += vma->node.size;
-	list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
+	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
 		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
@@ -1684,13 +1681,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
 	for_each_ggtt_vma(vma, obj) {
-		if (i915_vma_is_active(vma))
-			continue;
-
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+		list_move_tail(&vma->vm_link, &vma->vm->bound_list);
 	}
 
 	i915 = to_i915(obj->base.dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 02b83a5ed96c..6a3608398d2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -127,14 +127,10 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	struct drm_i915_private *dev_priv = vm->i915;
 	struct drm_mm_scan scan;
 	struct list_head eviction_list;
-	struct list_head *phases[] = {
-		&vm->inactive_list,
-		&vm->active_list,
-		NULL,
-	}, **phase;
 	struct i915_vma *vma, *next;
 	struct drm_mm_node *node;
 	enum drm_mm_insert_mode mode;
+	struct i915_vma *active;
 	int ret;
 
 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
@@ -170,17 +166,31 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 */
 	if (!(flags & PIN_NONBLOCK))
 		i915_retire_requests(dev_priv);
-	else
-		phases[1] = NULL;
 
 search_again:
+	active = NULL;
 	INIT_LIST_HEAD(&eviction_list);
-	phase = phases;
-	do {
-		list_for_each_entry(vma, *phase, vm_link)
-			if (mark_free(&scan, vma, flags, &eviction_list))
-				goto found;
-	} while (*++phase);
+	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
+		if (i915_vma_is_active(vma)) {
+			if (vma == active) {
+				if (flags & PIN_NONBLOCK)
+					break;
+
+				active = ERR_PTR(-EAGAIN);
+			}
+
+			if (active != ERR_PTR(-EAGAIN)) {
+				if (!active)
+					active = vma;
+
+				list_move_tail(&vma->vm_link, &vm->bound_list);
+				continue;
+			}
+		}
+
+		if (mark_free(&scan, vma, flags, &eviction_list))
+			goto found;
+	}
 
 	/* Nothing found, clean up and bail out! */
 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
@@ -389,11 +399,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
  */
 int i915_gem_evict_vm(struct i915_address_space *vm)
 {
-	struct list_head *phases[] = {
-		&vm->inactive_list,
-		&vm->active_list,
-		NULL
-	}, **phase;
 	struct list_head eviction_list;
 	struct i915_vma *vma, *next;
 	int ret;
@@ -413,16 +418,13 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 	}
 
 	INIT_LIST_HEAD(&eviction_list);
-	phase = phases;
-	do {
-		list_for_each_entry(vma, *phase, vm_link) {
-			if (i915_vma_is_pinned(vma))
-				continue;
+	list_for_each_entry(vma, &vm->bound_list, vm_link) {
+		if (i915_vma_is_pinned(vma))
+			continue;
 
-			__i915_vma_pin(vma);
-			list_add(&vma->evict_link, &eviction_list);
-		}
-	} while (*++phase);
+		__i915_vma_pin(vma);
+		list_add(&vma->evict_link, &eviction_list);
+	}
 
 	ret = 0;
 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index abd81fb9b0b6..16841a25be04 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -537,9 +537,8 @@ static void i915_address_space_init(struct i915_address_space *vm,
 
 	stash_init(&vm->free_pages);
 
-	INIT_LIST_HEAD(&vm->active_list);
-	INIT_LIST_HEAD(&vm->inactive_list);
 	INIT_LIST_HEAD(&vm->unbound_list);
+	INIT_LIST_HEAD(&vm->bound_list);
 }
 
 static void i915_address_space_fini(struct i915_address_space *vm)
@@ -2280,8 +2279,7 @@ void i915_ppgtt_close(struct i915_address_space *vm)
 static void ppgtt_destroy_vma(struct i915_address_space *vm)
 {
 	struct list_head *phases[] = {
-		&vm->active_list,
-		&vm->inactive_list,
+		&vm->bound_list,
 		&vm->unbound_list,
 		NULL,
 	}, **phase;
@@ -2304,8 +2302,7 @@ void i915_ppgtt_release(struct kref *kref)
 
 	ppgtt_destroy_vma(&ppgtt->vm);
 
-	GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
-	GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+	GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
 	GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
 
 	ppgtt->vm.cleanup(&ppgtt->vm);
@@ -2958,8 +2955,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	i915_gem_fini_aliasing_ppgtt(dev_priv);
 
-	GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
-	list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
+	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
 		WARN_ON(i915_vma_unbind(vma));
 
 	if (drm_mm_node_allocated(&ggtt->error_capture))
@@ -3649,8 +3645,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
 
 	/* clflush objects bound into the GGTT and rebind them. */
-	GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
-	list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
+	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index feda45dfd481..fa494e861ca3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -299,32 +299,12 @@ struct i915_address_space {
 	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
 
 	/**
-	 * List of objects currently involved in rendering.
-	 *
-	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives. last_read_req
-	 * represents when the rendering involved will be completed.
-	 *
-	 * A reference is held on the buffer while on this list.
+	 * List of vma currently bound.
 	 */
-	struct list_head active_list;
+	struct list_head bound_list;
 
 	/**
-	 * LRU list of objects which are not in the ringbuffer and
-	 * are ready to unbind, but are still in the GTT.
-	 *
-	 * last_read_req is NULL while an object is in this list.
-	 *
-	 * A reference is not held on the buffer while on this list,
-	 * as merely being GTT-bound shouldn't prevent its being
-	 * freed, and we'll pull it off the list in the free path.
-	 */
-	struct list_head inactive_list;
-
-	/**
-	 * List of vma that have been unbound.
-	 *
-	 * A reference is not held on the buffer while on this list.
+	 * List of vma that are not unbound.
 	 */
 	struct list_head unbound_list;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index c61f5b80fee3..234eb33f2f71 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -485,9 +485,13 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
 	list_for_each_entry_safe(vma, next,
-				 &i915->ggtt.vm.inactive_list, vm_link) {
+				 &i915->ggtt.vm.bound_list, vm_link) {
 		unsigned long count = vma->node.size >> PAGE_SHIFT;
-		if (vma->iomap && i915_vma_unbind(vma) == 0)
+
+		if (!vma->iomap || i915_vma_is_active(vma))
+			continue;
+
+		if (i915_vma_unbind(vma) == 0)
 			freed_pages += count;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 055f8687776d..93686782b675 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -667,7 +667,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	vma->pages = obj->mm.pages;
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
-	list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
+	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
 
 	spin_lock(&dev_priv->mm.obj_lock);
 	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8c81cf3aa182..a3692c36814b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1036,7 +1036,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 
 static u32 capture_error_bo(struct drm_i915_error_buffer *err,
 			    int count, struct list_head *head,
-			    bool pinned_only)
+			    bool active_only, bool pinned_only)
 {
 	struct i915_vma *vma;
 	int i = 0;
@@ -1045,6 +1045,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
 		if (!vma->obj)
 			continue;
 
+		if (active_only && !i915_vma_is_active(vma))
+			continue;
+
 		if (pinned_only && !i915_vma_is_pinned(vma))
 			continue;
 
@@ -1516,14 +1519,16 @@ static void gem_capture_vm(struct i915_gpu_state *error,
 	int count;
 
 	count = 0;
-	list_for_each_entry(vma, &vm->active_list, vm_link)
-		count++;
+	list_for_each_entry(vma, &vm->bound_list, vm_link)
+		if (i915_vma_is_active(vma))
+			count++;
 
 	active_bo = NULL;
 	if (count)
 		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
 	if (active_bo)
-		count = capture_error_bo(active_bo, count, &vm->active_list, false);
+		count = capture_error_bo(active_bo, count, &vm->bound_list,
+					 true, false);
 	else
 		count = 0;
 
@@ -1561,28 +1566,20 @@ static void capture_pinned_buffers(struct i915_gpu_state *error)
 	struct i915_address_space *vm = &error->i915->ggtt.vm;
 	struct drm_i915_error_buffer *bo;
 	struct i915_vma *vma;
-	int count_inactive, count_active;
-
-	count_inactive = 0;
-	list_for_each_entry(vma, &vm->inactive_list, vm_link)
-		count_inactive++;
+	int count;
 
-	count_active = 0;
-	list_for_each_entry(vma, &vm->active_list, vm_link)
-		count_active++;
+	count = 0;
+	list_for_each_entry(vma, &vm->bound_list, vm_link)
+		count++;
 
 	bo = NULL;
-	if (count_inactive + count_active)
-		bo = kcalloc(count_inactive + count_active,
-			     sizeof(*bo), GFP_ATOMIC);
+	if (count)
+		bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
 	if (!bo)
 		return;
 
-	count_inactive = capture_error_bo(bo, count_inactive,
-					  &vm->active_list, true);
-	count_active = capture_error_bo(bo + count_inactive, count_active,
-					&vm->inactive_list, true);
-	error->pinned_bo_count = count_inactive + count_active;
+	error->pinned_bo_count =
+		capture_error_bo(bo, count, &vm->bound_list, false, true);
 	error->pinned_bo = bo;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ed4e0fb558f7..44cc3390eee4 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -79,9 +79,6 @@ __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
 	if (--vma->active_count)
 		return;
 
-	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-
 	GEM_BUG_ON(!i915_gem_object_is_active(obj));
 	if (--obj->active_count)
 		return;
@@ -657,7 +654,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
 
-	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
 
 	if (vma->obj) {
 		struct drm_i915_gem_object *obj = vma->obj;
@@ -996,10 +993,8 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	if (!i915_gem_active_isset(active) && !vma->active_count++) {
-		list_move_tail(&vma->vm_link, &vma->vm->active_list);
+	if (!i915_gem_active_isset(active) && !vma->active_count++)
 		obj->active_count++;
-	}
 	i915_gem_active_set(active, rq);
 	GEM_BUG_ON(!i915_vma_is_active(vma));
 	GEM_BUG_ON(!obj->active_count);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 128ad1cf0647..f29d3f7a23fd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
 		return -EINVAL;
 	}
 
-	if (list_empty(&i915->ggtt.vm.inactive_list)) {
+	if (list_empty(&i915->ggtt.vm.bound_list)) {
 		pr_err("No objects on the GGTT inactive list!\n");
 		return -EINVAL;
 	}
@@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
+	list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
 		i915_vma_unpin(vma);
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 600a3bcbd3d6..4b5ae1b92392 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1235,7 +1235,7 @@ static void track_vma_bind(struct i915_vma *vma)
 	__i915_gem_object_pin_pages(obj);
 
 	vma->pages = obj->mm.pages;
-	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
 }
 
 static int exercise_mock(struct drm_i915_private *i915,
Our goal is to remove struct_mutex and replace it with fine grained
locking. One of the thorny issues is our eviction logic for reclaiming
space for an execbuffer (or GTT mmapping, among a few other examples).
While eviction itself is easy to move under a per-VM mutex, performing
the activity tracking is less agreeable. One solution is not to do any
MRU tracking and do a simple coarse evaluation during eviction of
active/inactive, with a loose temporal ordering of last
insertion/evaluation. That keeps all the locking constrained to when we
are manipulating the VM itself, neatly avoiding the tricky handling of
possible recursive locking during execbuf and elsewhere.

Note that discarding the MRU is unlikely to impact upon our efficiency
to reclaim VM space (where we think an LRU model is best) as our
current strategy is to use random idle replacement first before doing
a search, and over time the use of softpinned 48b per-ppGTT is growing
(thereby eliminating any need to perform any eviction searches, in
theory at least).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c               | 10 +---
 drivers/gpu/drm/i915/i915_gem_evict.c         | 56 ++++++++++---------
 drivers/gpu/drm/i915/i915_gem_gtt.c           | 15 ++---
 drivers/gpu/drm/i915/i915_gem_gtt.h           | 26 +--------
 drivers/gpu/drm/i915/i915_gem_shrinker.c      |  8 ++-
 drivers/gpu/drm/i915/i915_gem_stolen.c        |  2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c         | 37 ++++++------
 drivers/gpu/drm/i915/i915_vma.c               |  9 +--
 .../gpu/drm/i915/selftests/i915_gem_evict.c   |  4 +-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |  2 +-
 10 files changed, 68 insertions(+), 101 deletions(-)
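The "random idle replacement first before doing a search" strategy is
the reason the commit message expects little regression from dropping
the MRU. A standalone toy of that two-tier approach follows; all names
and the slot array are inventions for illustration, only the shape of
the strategy comes from the commit message:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define N 8

struct slot { bool active, pinned; };

static struct slot gtt[N];

/* Cheap path: probe a few random slots, evict the first idle one. */
static int evict_random(unsigned int tries)
{
	while (tries--) {
		int i = rand() % N;

		if (!gtt[i].active && !gtt[i].pinned)
			return i;	/* no list ordering consulted */
	}
	return -1;
}

/* Slow path: the ordered scan (cf. i915_gem_evict_something above). */
static int evict_scan(void)
{
	for (int i = 0; i < N; i++)
		if (!gtt[i].active && !gtt[i].pinned)
			return i;
	return -1;
}

int main(void)
{
	for (int i = 0; i < N; i++)
		gtt[i].active = i & 1;	/* odd slots busy */

	int victim = evict_random(4);
	if (victim < 0)
		victim = evict_scan();
	printf("evicting slot %d\n", victim);
	return 0;
}

Because the fast path consults no ordering at all, the quality of the
list ordering only matters on the fallback path, which softpinned
per-ppGTT workloads are expected to hit rarely, if ever.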