@@ -143,7 +143,7 @@ using a number of wrapper functions:
Query the address space, and return true if it is completely
unevictable.
-These are currently used in two places in the kernel:
+These are currently used in three places in the kernel:
(1) By ramfs to mark the address spaces of its inodes when they are created,
and this mark remains for the life of the inode.
@@ -154,6 +154,8 @@ These are currently used in two places in the kernel:
swapped out; the application must touch the pages manually if it wants to
ensure they're in memory.
+ (3) By the i915 driver to mark pinned address spaces until they are unpinned.
+
Detecting Unevictable Pages
---------------------------
@@ -2390,6 +2390,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
{
struct sgt_iter sgt_iter;
struct page *page;
+ struct address_space *mapping;
__i915_gem_object_release_shmem(obj, pages, true);
@@ -2409,6 +2410,10 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
}
obj->mm.dirty = false;
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ mapping_clear_unevictable(mapping);
+ shmem_unlock_mapping(mapping);
+
sg_free_table(pages);
kfree(pages);
}
@@ -2551,6 +2556,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* Fail silently without starting the shrinker
*/
mapping = obj->base.filp->f_mapping;
+ mapping_set_unevictable(mapping);
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
@@ -2664,6 +2670,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
+ mapping_clear_unevictable(mapping);
+ shmem_unlock_mapping(mapping);
sg_free_table(st);
kfree(st);
@@ -786,6 +786,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
cond_resched();
}
}
+EXPORT_SYMBOL_GPL(shmem_unlock_mapping);
/*
* Remove range of pages and swap entries from radix tree, and free them.
@@ -3874,6 +3875,7 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
void shmem_unlock_mapping(struct address_space *mapping)
{
}
+EXPORT_SYMBOL_GPL(shmem_unlock_mapping);
#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
The i915 driver uses shmemfs to allocate backing storage for gem objects.
These shmemfs pages can be pinned (increased ref count) by
shmem_read_mapping_page_gfp(). When a lot of pages are pinned, vmscan
wastes a lot of time scanning these pinned pages. In some extreme cases,
all pages in the inactive anon lru are pinned, and only the inactive anon
lru is scanned due to inactive_ratio; the system cannot swap and invokes
the oom-killer. Mark these pinned pages as unevictable to speed up vmscan.

By exporting shmem_unlock_mapping, drivers can:

1. mark a shmemfs address space as unevictable with
   mapping_set_unevictable(); pages in the address space will be moved to
   the unevictable list in vmscan.

2. mark an address space as evictable again with
   mapping_clear_unevictable(), and move these pages back to the
   evictable list with shmem_unlock_mapping().

This patch was inspired by Chris Wilson's change [1].

[1]: https://patchwork.kernel.org/patch/9768741/

Signed-off-by: Kuo-Hsin Yang <vovoy@chromium.org>
---
Changes for v2: Squashed the two patches.

 Documentation/vm/unevictable-lru.rst | 4 +++-
 drivers/gpu/drm/i915/i915_gem.c      | 8 ++++++++
 mm/shmem.c                           | 2 ++
 3 files changed, 13 insertions(+), 1 deletion(-)