Message ID | 20190214145740.14521-6-matthew.auld@intel.com (mailing list archive)
---|---
State | New, archived
Series | Introduce memory region concept (including device local memory)
Quoting Matthew Auld (2019-02-14 14:57:03)
> Support basic eviction for regions.
>
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_drv.h               |  2 +
>  drivers/gpu/drm/i915/i915_gem.c               | 16 ++++
>  drivers/gpu/drm/i915/i915_gem_object.h        |  7 ++
>  drivers/gpu/drm/i915/i915_gem_shrinker.c      | 59 ++++++++++++++
>  drivers/gpu/drm/i915/intel_memory_region.c    | 40 +++++++++-
>  drivers/gpu/drm/i915/intel_memory_region.h    |  7 ++
>  .../drm/i915/selftests/intel_memory_region.c  | 76 +++++++++++++++++++
>  drivers/gpu/drm/i915/selftests/mock_region.c  |  1 +
>  8 files changed, 204 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 0bea7d889284..3df27769b978 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3196,6 +3196,8 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915);
>  void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
>  void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
>  				    struct mutex *mutex);
> +int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
> +				  resource_size_t target);
>
>  /* i915_gem_tiling.c */
>  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 92768ab294a4..7f044b643a75 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -4095,6 +4095,22 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
>  	    !i915_gem_object_has_pages(obj))
>  		i915_gem_object_truncate(obj);
>
> +	if (obj->memory_region) {
> +		mutex_lock(&obj->memory_region->obj_lock);
> +
> +		switch (obj->mm.madv) {
> +		case I915_MADV_WILLNEED:
> +			list_move(&obj->region_link, &obj->memory_region->objects);
> +			break;
> +		default:
> +			list_move(&obj->region_link,
> +				  &obj->memory_region->purgeable);
> +			break;
> +		}
> +
> +		mutex_unlock(&obj->memory_region->obj_lock);
> +	}
> +
>  	args->retained = obj->mm.madv != __I915_MADV_PURGED;
>  	mutex_unlock(&obj->mm.lock);
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
> index ac52f61e8ad1..76947a6f49f1 100644
> --- a/drivers/gpu/drm/i915/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/i915_gem_object.h
> @@ -95,6 +95,13 @@ struct drm_i915_gem_object {
>  	 * List of memory region blocks allocated for this object.
>  	 */
>  	struct list_head blocks;
> +	/**
> +	 * Element within memory_region->objects or memory_region->purgeable if
> +	 * the object is marked as DONTNEED. Access is protected by
> +	 * memory_region->obj_lock.

Lies. ;-p

> +	 */
> +	struct list_head region_link;
> +	struct list_head tmp_link;
>
>  	struct {
>  		/**
> diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> index 6da795c7e62e..713c6c93cf30 100644
> --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> @@ -308,6 +308,65 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
>  	return freed;
>  }
>
> +int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
> +				  resource_size_t target)

If it's not going to be coupled into the mm.shrinker callback, do not put
it here! And there's no reason why we would ever couple local memory to
the generic mm shrinker!

> +{
> +	struct drm_i915_private *i915 = mem->i915;
> +	struct drm_i915_gem_object *obj, *on;
> +	resource_size_t found;
> +	LIST_HEAD(purgeable);
> +	bool unlock;
> +	int err;
> +
> +	if (!shrinker_lock(i915, 0, &unlock))
> +		return 0;

Don't...

> +
> +	i915_retire_requests(i915);

And this, don't do this.

> +	err = 0;
> +	found = 0;
> +
> +	mutex_lock(&mem->obj_lock);

That's all the top-level locking we should ever need.

> +	list_for_each_entry(obj, &mem->purgeable, region_link) {
> +		if (!i915_gem_object_has_pages(obj))
> +			continue;
> +
> +		if (READ_ONCE(obj->pin_global))
> +			continue;
> +
> +		if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
> +			continue;
> +
> +		list_add(&obj->tmp_link, &purgeable);

Oh crikey.

> +
> +		found += obj->base.size;
> +		if (found >= target)
> +			goto found;
> +	}
> +
> +	err = -ENOSPC;
> +found:
> +	mutex_unlock(&mem->obj_lock);
> +
> +	list_for_each_entry_safe(obj, on, &purgeable, tmp_link) {
> +		if (!err)
> +			err = i915_gem_object_unbind(obj);

I would advise not to worry about unbinding until you have it decoupled
from struct_mutex. Or at least defer struct_mutex until you truly need
it to access the vma, so that it doesn't get abused for anything else.

> +		if (!err) {
> +			__i915_gem_object_put_pages(obj,
> +						    I915_MM_SHRINKER);
> +			if (!i915_gem_object_has_pages(obj))
> +				obj->mm.madv = __I915_MADV_PURGED;

But this is racy.

> +		}
> +
> +		list_del(&obj->tmp_link);

I'm still going crikey. That's no excuse to invoke struct_mutex.
-Chris
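For context on the race flagged above: the patch flips obj->mm.madv without
holding obj->mm.lock, while i915_gem_shrink() of this era closes the same
window by retaking the lock around the update. A minimal sketch of that
pattern, assuming it carries over to the region shrinker; region_purge_object()
is a hypothetical helper, not part of the patch:

	/*
	 * Sketch only: follow the i915_gem_shrink() pattern and retake
	 * obj->mm.lock (nested, shrinker subclass) before testing the page
	 * state and marking the object purged, so a concurrent get_pages()
	 * cannot race against the unlocked madv update.
	 */
	static void region_purge_object(struct drm_i915_gem_object *obj)
	{
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);

		mutex_lock_nested(&obj->mm.lock, I915_MM_SHRINKER);
		if (!i915_gem_object_has_pages(obj))
			obj->mm.madv = __I915_MADV_PURGED;
		mutex_unlock(&obj->mm.lock);
	}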
On Thu, 14 Feb 2019 at 15:25, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> Quoting Matthew Auld (2019-02-14 14:57:03)
> > Support basic eviction for regions.
> >
> > Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> > Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> > Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
> > ---
> >  drivers/gpu/drm/i915/i915_drv.h               |  2 +
> >  drivers/gpu/drm/i915/i915_gem.c               | 16 ++++
> >  drivers/gpu/drm/i915/i915_gem_object.h        |  7 ++
> >  drivers/gpu/drm/i915/i915_gem_shrinker.c      | 59 ++++++++++++++
> >  drivers/gpu/drm/i915/intel_memory_region.c    | 40 +++++++++-
> >  drivers/gpu/drm/i915/intel_memory_region.h    |  7 ++
> >  .../drm/i915/selftests/intel_memory_region.c  | 76 +++++++++++++++++++
> >  drivers/gpu/drm/i915/selftests/mock_region.c  |  1 +
> >  8 files changed, 204 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index 0bea7d889284..3df27769b978 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -3196,6 +3196,8 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915);
> >  void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
> >  void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
> >  				    struct mutex *mutex);
> > +int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
> > +				  resource_size_t target);
> >
> >  /* i915_gem_tiling.c */
> >  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 92768ab294a4..7f044b643a75 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -4095,6 +4095,22 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
> >  	    !i915_gem_object_has_pages(obj))
> >  		i915_gem_object_truncate(obj);
> >
> > +	if (obj->memory_region) {
> > +		mutex_lock(&obj->memory_region->obj_lock);
> > +
> > +		switch (obj->mm.madv) {
> > +		case I915_MADV_WILLNEED:
> > +			list_move(&obj->region_link, &obj->memory_region->objects);
> > +			break;
> > +		default:
> > +			list_move(&obj->region_link,
> > +				  &obj->memory_region->purgeable);
> > +			break;
> > +		}
> > +
> > +		mutex_unlock(&obj->memory_region->obj_lock);
> > +	}
> > +
> >  	args->retained = obj->mm.madv != __I915_MADV_PURGED;
> >  	mutex_unlock(&obj->mm.lock);
> >
> > diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
> > index ac52f61e8ad1..76947a6f49f1 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_object.h
> > +++ b/drivers/gpu/drm/i915/i915_gem_object.h
> > @@ -95,6 +95,13 @@ struct drm_i915_gem_object {
> >  	 * List of memory region blocks allocated for this object.
> >  	 */
> >  	struct list_head blocks;
> > +	/**
> > +	 * Element within memory_region->objects or memory_region->purgeable if
> > +	 * the object is marked as DONTNEED. Access is protected by
> > +	 * memory_region->obj_lock.
>
> Lies. ;-p
>
> > +	 */
> > +	struct list_head region_link;
> > +	struct list_head tmp_link;
> >
> >  	struct {
> >  		/**
> > diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> > index 6da795c7e62e..713c6c93cf30 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> > @@ -308,6 +308,65 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
> >  	return freed;
> >  }
> >
> > +int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
> > +				  resource_size_t target)
>
> If it's not going to be coupled into the mm.shrinker callback, do not put
> it here! And there's no reason why we would ever couple local memory to
> the generic mm shrinker!

Yup.

> > +{
> > +	struct drm_i915_private *i915 = mem->i915;
> > +	struct drm_i915_gem_object *obj, *on;
> > +	resource_size_t found;
> > +	LIST_HEAD(purgeable);
> > +	bool unlock;
> > +	int err;
> > +
> > +	if (!shrinker_lock(i915, 0, &unlock))
> > +		return 0;
>
> Don't...
>
> > +
> > +	i915_retire_requests(i915);
>
> And this, don't do this.
>
> > +	err = 0;
> > +	found = 0;
> > +
> > +	mutex_lock(&mem->obj_lock);
>
> That's all the top-level locking we should ever need.
>
> > +	list_for_each_entry(obj, &mem->purgeable, region_link) {
> > +		if (!i915_gem_object_has_pages(obj))
> > +			continue;
> > +
> > +		if (READ_ONCE(obj->pin_global))
> > +			continue;
> > +
> > +		if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
> > +			continue;
> > +
> > +		list_add(&obj->tmp_link, &purgeable);
>
> Oh crikey.

What's the crikey for? We do the purging in two passes? Yeah, I guess
that's kinda garbage. There is definitely some leftover baggage from
when we did "interesting" things in here, which needs to be fixed up.

> > +
> > +		found += obj->base.size;
> > +		if (found >= target)
> > +			goto found;
> > +	}
> > +
> > +	err = -ENOSPC;
> > +found:
> > +	mutex_unlock(&mem->obj_lock);
> > +
> > +	list_for_each_entry_safe(obj, on, &purgeable, tmp_link) {
> > +		if (!err)
> > +			err = i915_gem_object_unbind(obj);
>
> I would advise not to worry about unbinding until you have it decoupled
> from struct_mutex. Or at least defer struct_mutex until you truly need
> it to access the vma, so that it doesn't get abused for anything else.

Ok, I'll just drop the unbind, and kiss for now.

> > +		if (!err) {
> > +			__i915_gem_object_put_pages(obj,
> > +						    I915_MM_SHRINKER);
> > +			if (!i915_gem_object_has_pages(obj))
> > +				obj->mm.madv = __I915_MADV_PURGED;
>
> But this is racy.
>
> > +		}
> > +
> > +		list_del(&obj->tmp_link);
>
> I'm still going crikey. That's no excuse to invoke struct_mutex.
> -Chris
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
Quoting Matthew Auld (2019-02-26 14:58:31)
> On Thu, 14 Feb 2019 at 15:25, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> >
> > Quoting Matthew Auld (2019-02-14 14:57:03)
> > > +	list_for_each_entry(obj, &mem->purgeable, region_link) {
> > > +		if (!i915_gem_object_has_pages(obj))
> > > +			continue;
> > > +
> > > +		if (READ_ONCE(obj->pin_global))
> > > +			continue;
> > > +
> > > +		if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
> > > +			continue;
> > > +
> > > +		list_add(&obj->tmp_link, &purgeable);
> >
> > Oh crikey.
>
> What's the crikey for? We do the purging in two passes? Yeah, I guess
> that's kinda garbage. There is definitely some leftover baggage from
> when we did "interesting" things in here, which needs to be fixed up.

"tmp_link" has a very bad taste (prior experience turned sour), and this
turns out to be lacking in locking. Pesky little global thing.

Alternatives tend to be to dynamically allocate list entries. My
favourite half-baked idea is to use an XArray for a chunked list (so we
get storage allocated for a bunch of entries at once).
-Chris
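To make the XArray idea concrete, a rough sketch under the patch's own
obj_lock; region_collect_purgeable() and the caller-initialized victims
array are invented for illustration, not part of the patch:

	/*
	 * Chunked-list sketch: stash the victims in an XArray, whose node
	 * storage is allocated in chunks internally, so no tmp_link field
	 * is needed in the object. Take a reference so each victim
	 * survives dropping obj_lock.
	 */
	static int region_collect_purgeable(struct intel_memory_region *mem,
					    resource_size_t target,
					    struct xarray *victims)
	{
		struct drm_i915_gem_object *obj;
		resource_size_t found = 0;
		unsigned long idx = 0;
		int err = -ENOSPC;

		mutex_lock(&mem->obj_lock);
		list_for_each_entry(obj, &mem->purgeable, region_link) {
			if (!i915_gem_object_has_pages(obj))
				continue;

			if (xa_is_err(xa_store(victims, idx, obj, GFP_KERNEL)))
				break;
			i915_gem_object_get(obj); /* keep alive past obj_lock */
			idx++;

			found += obj->base.size;
			if (found >= target) {
				err = 0;
				break;
			}
		}
		mutex_unlock(&mem->obj_lock);

		return err;
	}

The caller would then walk the collected victims with xa_for_each(), drop
their pages, put each reference, and xa_destroy() the array; no per-object
scratch field, and the lifetime gap left by dropping obj_lock is covered by
the extra reference.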
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0bea7d889284..3df27769b978 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3196,6 +3196,8 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915);
 void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
 void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
 				    struct mutex *mutex);
+int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
+				  resource_size_t target);
 
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 92768ab294a4..7f044b643a75 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4095,6 +4095,22 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	    !i915_gem_object_has_pages(obj))
 		i915_gem_object_truncate(obj);
 
+	if (obj->memory_region) {
+		mutex_lock(&obj->memory_region->obj_lock);
+
+		switch (obj->mm.madv) {
+		case I915_MADV_WILLNEED:
+			list_move(&obj->region_link, &obj->memory_region->objects);
+			break;
+		default:
+			list_move(&obj->region_link,
+				  &obj->memory_region->purgeable);
+			break;
+		}
+
+		mutex_unlock(&obj->memory_region->obj_lock);
+	}
+
 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
 	mutex_unlock(&obj->mm.lock);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index ac52f61e8ad1..76947a6f49f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -95,6 +95,13 @@ struct drm_i915_gem_object {
 	 * List of memory region blocks allocated for this object.
 	 */
 	struct list_head blocks;
+	/**
+	 * Element within memory_region->objects or memory_region->purgeable if
+	 * the object is marked as DONTNEED. Access is protected by
+	 * memory_region->obj_lock.
+	 */
+	struct list_head region_link;
+	struct list_head tmp_link;
 
 	struct {
 		/**
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 6da795c7e62e..713c6c93cf30 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -308,6 +308,65 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
 	return freed;
 }
 
+int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
+				  resource_size_t target)
+{
+	struct drm_i915_private *i915 = mem->i915;
+	struct drm_i915_gem_object *obj, *on;
+	resource_size_t found;
+	LIST_HEAD(purgeable);
+	bool unlock;
+	int err;
+
+	if (!shrinker_lock(i915, 0, &unlock))
+		return 0;
+
+	i915_retire_requests(i915);
+
+	err = 0;
+	found = 0;
+
+	mutex_lock(&mem->obj_lock);
+
+	list_for_each_entry(obj, &mem->purgeable, region_link) {
+		if (!i915_gem_object_has_pages(obj))
+			continue;
+
+		if (READ_ONCE(obj->pin_global))
+			continue;
+
+		if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
+			continue;
+
+		list_add(&obj->tmp_link, &purgeable);
+
+		found += obj->base.size;
+		if (found >= target)
+			goto found;
+	}
+
+	err = -ENOSPC;
+found:
+	mutex_unlock(&mem->obj_lock);
+
+	list_for_each_entry_safe(obj, on, &purgeable, tmp_link) {
+		if (!err)
+			err = i915_gem_object_unbind(obj);
+		if (!err) {
+			__i915_gem_object_put_pages(obj,
+						    I915_MM_SHRINKER);
+			if (!i915_gem_object_has_pages(obj))
+				obj->mm.madv = __I915_MADV_PURGED;
+		}
+
+		list_del(&obj->tmp_link);
+	}
+
+	shrinker_unlock(i915, unlock);
+
+	return err;
+}
+
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 405d6d51194f..f7fdc3e942e6 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -89,7 +89,8 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
 	unsigned int order;
 	u64 block_size;
 	u64 offset;
-
+	bool retry = false;
+retry:
 	order = fls(n_pages) - 1;
 	GEM_BUG_ON(order > mem->mm.max_order);
 
@@ -98,9 +99,25 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
 		if (!IS_ERR(block))
 			break;
 
-		/* XXX: some kind of eviction pass, local to the device */
-		if (!order--)
-			goto err_free_blocks;
+		if (!order--) {
+			resource_size_t target;
+			int err;
+
+			if (retry)
+				goto err_free_blocks;
+
+			target = n_pages * mem->mm.min_size;
+
+			mutex_unlock(&mem->mm_lock);
+			err = i915_gem_shrink_memory_region(mem,
+							    target);
+			mutex_lock(&mem->mm_lock);
+			if (err)
+				goto err_free_blocks;
+
+			retry = true;
+			goto retry;
+		}
 	} while (1);
 
 	n_pages -= 1 << order;
@@ -151,6 +168,13 @@ void i915_memory_region_release_buddy(struct intel_memory_region *mem)
 	i915_gem_buddy_fini(&mem->mm);
 }
 
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
+{
+	mutex_lock(&obj->memory_region->obj_lock);
+	list_del(&obj->region_link);
+	mutex_unlock(&obj->memory_region->obj_lock);
+}
+
 struct drm_i915_gem_object *
 i915_gem_object_create_region(struct intel_memory_region *mem,
 			      resource_size_t size,
@@ -179,6 +203,10 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
 	INIT_LIST_HEAD(&obj->blocks);
 	obj->memory_region = mem;
 
+	mutex_lock(&mem->obj_lock);
+	list_add(&obj->region_link, &mem->objects);
+	mutex_unlock(&mem->obj_lock);
+
 	i915_gem_object_set_cache_coherency(obj, obj->cache_level);
 
 	return obj;
@@ -205,6 +233,10 @@ intel_memory_region_create(struct drm_i915_private *i915,
 	mem->min_page_size = min_page_size;
 	mem->ops = ops;
 
+	mutex_init(&mem->obj_lock);
+	INIT_LIST_HEAD(&mem->objects);
+	INIT_LIST_HEAD(&mem->purgeable);
+
 	mutex_init(&mem->mm_lock);
 
 	if (ops->init) {
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 6d8a954ca75e..b1546afb0b6e 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -99,6 +99,11 @@ struct intel_memory_region {
 	unsigned int type;
 	unsigned int instance;
 	unsigned int id;
+
+	/* Protects access to objects and purgeable */
+	struct mutex obj_lock;
+	struct list_head objects;
+	struct list_head purgeable;
 };
 
 int i915_memory_region_init_buddy(struct intel_memory_region *mem);
@@ -108,6 +113,8 @@ int i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj);
 void i915_memory_region_put_pages_buddy(struct drm_i915_gem_object *obj,
 					struct sg_table *pages);
 
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
+
 struct intel_memory_region *
 intel_memory_region_create(struct drm_i915_private *i915,
 			   resource_size_t start,
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 2b8d28216d87..1cea381d2d5e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -93,10 +93,86 @@ static int igt_mock_fill(void *arg)
 	return err;
 }
 
+static void igt_mark_evictable(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_pages(obj);
+	obj->mm.madv = I915_MADV_DONTNEED;
+	list_move(&obj->region_link, &obj->memory_region->purgeable);
+}
+
+static int igt_mock_shrink(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_gem_object *obj;
+	unsigned long n_objects;
+	LIST_HEAD(objects);
+	resource_size_t target;
+	resource_size_t total;
+	int err = 0;
+
+	target = mem->mm.min_size;
+	total = resource_size(&mem->region);
+	n_objects = total / target;
+
+	while (n_objects--) {
+		obj = i915_gem_object_create_region(mem,
+						    target,
+						    0);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err_close_objects;
+		}
+
+		list_add(&obj->st_link, &objects);
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			goto err_close_objects;
+
+		/*
+		 * Make half of the region evictable, though do so in a
+		 * horribly fragmented fashion.
+		 */
+		if (n_objects % 2)
+			igt_mark_evictable(obj);
+	}
+
+	while (target <= total / 2) {
+		obj = i915_gem_object_create_region(mem, target, 0);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err_close_objects;
+		}
+
+		list_add(&obj->st_link, &objects);
+
+		/* Provoke the shrinker to start violently swinging its axe! */
+		err = i915_gem_object_pin_pages(obj);
+		if (err) {
+			pr_err("failed to shrink for target=%pa", &target);
+			goto err_close_objects;
+		}
+
+		/* Again, half of the region should remain evictable */
+		igt_mark_evictable(obj);
+
+		target <<= 1;
+	}
+
+err_close_objects:
+	close_objects(&objects);
+
+	if (err == -ENOMEM)
+		err = 0;
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_mock_fill),
+		SUBTEST(igt_mock_shrink),
 	};
 	struct intel_memory_region *mem;
 	struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 2c83711f780d..11e9f379aaca 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -27,6 +27,7 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
 	.get_pages = i915_memory_region_get_pages_buddy,
 	.put_pages = i915_memory_region_put_pages_buddy,
+	.release = i915_gem_object_release_memory_region,
 };
 
 static struct drm_i915_gem_object *
Support basic eviction for regions.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h               |  2 +
 drivers/gpu/drm/i915/i915_gem.c               | 16 ++++
 drivers/gpu/drm/i915/i915_gem_object.h        |  7 ++
 drivers/gpu/drm/i915/i915_gem_shrinker.c      | 59 ++++++++++++++
 drivers/gpu/drm/i915/intel_memory_region.c    | 40 +++++++++-
 drivers/gpu/drm/i915/intel_memory_region.h    |  7 ++
 .../drm/i915/selftests/intel_memory_region.c  | 76 +++++++++++++++++++
 drivers/gpu/drm/i915/selftests/mock_region.c  |  1 +
 8 files changed, 204 insertions(+), 4 deletions(-)