@@ -434,7 +434,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
if (ret)
return ret;
- seq_printf(m, "%u objects, %llu bytes\n",
+ seq_printf(m, "%u active objects, %llu bytes\n",
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
@@ -986,7 +986,6 @@ struct i915_gem_mm {
uint32_t bit_6_swizzle_y;
/* accounting, useful for userland debugging */
- spinlock_t object_stat_lock;
u64 object_memory;
u32 object_count;
};
@@ -77,25 +77,6 @@ remove_mappable_node(struct drm_mm_node *node)
drm_mm_remove_node(node);
}
-/* some bookkeeping */
-static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
- u64 size)
-{
- spin_lock(&dev_priv->mm.object_stat_lock);
- dev_priv->mm.object_count++;
- dev_priv->mm.object_memory += size;
- spin_unlock(&dev_priv->mm.object_stat_lock);
-}
-
-static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
- u64 size)
-{
- spin_lock(&dev_priv->mm.object_stat_lock);
- dev_priv->mm.object_count--;
- dev_priv->mm.object_memory -= size;
- spin_unlock(&dev_priv->mm.object_stat_lock);
-}
-
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
@@ -2419,6 +2400,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
+ i915->mm.object_count--;
+ i915->mm.object_memory -= obj->base.size;
spin_unlock(&i915->mm.obj_lock);
if (obj->mm.mapping) {
@@ -2705,6 +2688,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
spin_lock(&i915->mm.obj_lock);
+ i915->mm.object_count++;
+ i915->mm.object_memory += obj->base.size;
list_add(&obj->mm.link, &i915->mm.unbound_list);
spin_unlock(&i915->mm.obj_lock);
}
@@ -4674,8 +4659,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
-
- i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4863,7 +4846,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
reservation_object_fini(&obj->__builtin_resv);
drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(i915, obj->base.size);
kfree(obj->bit_17);
i915_gem_object_free(obj);
@@ -5589,7 +5571,6 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
- spin_lock_init(&i915->mm.object_stat_lock);
spin_lock_init(&i915->mm.obj_lock);
spin_lock_init(&i915->mm.free_lock);
@@ -309,30 +309,14 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_i915_gem_object *obj;
- unsigned long num_objects = 0;
- unsigned long count = 0;
+ unsigned long num_objects;
+ unsigned long count;
- spin_lock(&i915->mm.obj_lock);
- list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
- if (can_release_pages(obj)) {
- count += obj->base.size >> PAGE_SHIFT;
- num_objects++;
- }
+ count = READ_ONCE(i915->mm.object_memory) >> PAGE_SHIFT;
+ num_objects = READ_ONCE(i915->mm.object_count);
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
- if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
- count += obj->base.size >> PAGE_SHIFT;
- num_objects++;
- }
- list_for_each_entry(obj, &i915->mm.purge_list, mm.link)
- if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
- count += obj->base.size >> PAGE_SHIFT;
- num_objects++;
- }
- spin_unlock(&i915->mm.obj_lock);
-
- /* Update our preferred vmscan batch size for the next pass.
+ /*
+ * Update our preferred vmscan batch size for the next pass.
* Our rough guess for an effective batch size is roughly 2
* available GEM objects worth of pages. That is we don't want
* the shrinker to fire, until it is worth the cost of freeing an
Currently, we try to report to the shrinker the precise number of
objects (pages) that are available to be reaped at this moment. This
requires searching all objects with allocated pages to see if they
fulfill the search criteria, and this count is performed quite
frequently. (The shrinker tries to free ~128 pages on each invocation,
before which we count all the objects; counting takes longer than
unbinding the objects!) If we take the pragmatic view that, with
sufficient desire, all objects are eventually reapable (they become
inactive, or are no longer used as a framebuffer, etc.), we can simply
return the count of pinned pages maintained during get_pages/put_pages
rather than walk the lists every time.

The downside is that we may (slightly) over-report the number of
objects/pages we could shrink and so penalize ourselves by shrinking
more than required. This is mitigated by keeping the order in which we
shrink objects such that we avoid penalizing active and frequently used
objects, and if memory is so tight that we need to free them, we would
have to do so anyway.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c      |  2 +-
 drivers/gpu/drm/i915/i915_drv.h          |  1 -
 drivers/gpu/drm/i915/i915_gem.c          | 27 ++++-------------------
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 28 +++++-------------------
 4 files changed, 11 insertions(+), 47 deletions(-)
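
To make the tradeoff concrete, here is a minimal userspace C sketch of
the pattern (illustrative names only, not i915 code): a pthread mutex
stands in for mm.obj_lock on the update paths, and C11 relaxed atomic
loads stand in for READ_ONCE() on the read path. The counter read is
O(1) but only a hint; a reader racing an update may observe a
count/memory pair that is momentarily inconsistent, which is acceptable
for a shrink estimate.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/*
 * Sketch of the accounting in this patch: the add/remove paths keep a
 * running (count, bytes) pair under a lock, while the shrinker-count
 * path reads the counters locklessly, tolerating a momentarily stale
 * pair, since the result is only a hint to vmscan.
 */
struct obj_stats {
	pthread_mutex_t lock;		/* plays the role of mm.obj_lock */
	atomic_ulong object_count;	/* objects with pages allocated */
	atomic_ullong object_memory;	/* bytes of pages allocated */
};

static void stats_add(struct obj_stats *s, unsigned long long size)
{
	/* mirrors the update in __i915_gem_object_set_pages() */
	pthread_mutex_lock(&s->lock);
	atomic_fetch_add_explicit(&s->object_count, 1,
				  memory_order_relaxed);
	atomic_fetch_add_explicit(&s->object_memory, size,
				  memory_order_relaxed);
	pthread_mutex_unlock(&s->lock);
}

static void stats_remove(struct obj_stats *s, unsigned long long size)
{
	/* mirrors the update in __i915_gem_object_unset_pages() */
	pthread_mutex_lock(&s->lock);
	atomic_fetch_sub_explicit(&s->object_count, 1,
				  memory_order_relaxed);
	atomic_fetch_sub_explicit(&s->object_memory, size,
				  memory_order_relaxed);
	pthread_mutex_unlock(&s->lock);
}

/*
 * Lockless O(1) count, standing in for i915_gem_shrinker_count(): the
 * relaxed loads correspond to READ_ONCE() in the patch. No list walk,
 * no lock, just the maintained totals.
 */
static unsigned long shrinker_count(struct obj_stats *s,
				    unsigned long *num_objects)
{
	*num_objects = atomic_load_explicit(&s->object_count,
					    memory_order_relaxed);
	return atomic_load_explicit(&s->object_memory,
				    memory_order_relaxed) >> PAGE_SHIFT;
}

int main(void)
{
	struct obj_stats s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	unsigned long num_objects, pages;

	stats_add(&s, 64ULL << PAGE_SHIFT);	/* one 64-page object */
	stats_add(&s, 16ULL << PAGE_SHIFT);	/* one 16-page object */
	stats_remove(&s, 16ULL << PAGE_SHIFT);

	pages = shrinker_count(&s, &num_objects);
	printf("%lu objects, %lu pages reapable (upper bound)\n",
	       num_objects, pages);
	return 0;
}

One incidental difference from the kernel code: the relaxed atomics in
the sketch also avoid torn 64-bit reads on 32-bit targets, which a bare
READ_ONCE() of a u64 does not guarantee; for a heuristic count, either
behaviour is tolerable.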