@@ -756,6 +756,7 @@ void mem_cgroup_split_huge_fixup(struct page *head);
#define MEM_CGROUP_ID_MAX 0
struct mem_cgroup;
+#define root_mem_cgroup NULL
static inline bool mem_cgroup_disabled(void)
{
@@ -376,6 +376,7 @@ int prealloc_shrinker(struct shrinker *shrinker)
goto free_deferred;
}
+ INIT_LIST_HEAD(&shrinker->list);
return 0;
free_deferred:
@@ -547,6 +548,63 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
return freed;
}
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
+ struct mem_cgroup *memcg, int priority)
+{
+ struct memcg_shrinker_map *map;
+ unsigned long freed = 0;
+ int ret, i;
+
+ if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
+ return 0;
+
+ if (!down_read_trylock(&shrinker_rwsem))
+ return 0;
+
+ /*
+ * 1) Caller passes only alive memcg, so map can't be NULL.
+ * 2) shrinker_rwsem protects from maps expanding.
+ */
+ map = rcu_dereference_protected(MEMCG_SHRINKER_MAP(memcg, nid), true);
+ BUG_ON(!map);
+
+ for_each_set_bit(i, map->map, memcg_shrinker_nr_max) {
+ struct shrink_control sc = {
+ .gfp_mask = gfp_mask,
+ .nid = nid,
+ .memcg = memcg,
+ };
+ struct shrinker *shrinker;
+
+ shrinker = idr_find(&shrinker_idr, i);
+ if (!shrinker) {
+ clear_bit(i, map->map);
+ continue;
+ }
+ if (list_empty(&shrinker->list))
+ continue;
+
+ ret = do_shrink_slab(&sc, shrinker, priority);
+ freed += ret;
+
+ if (rwsem_is_contended(&shrinker_rwsem)) {
+ freed = freed ? : 1;
+ break;
+ }
+ }
+
+ up_read(&shrinker_rwsem);
+ return freed;
+}
+#else
+static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
+ struct mem_cgroup *memcg, int priority)
+{
+ return 0;
+}
+#endif
+
/**
* shrink_slab - shrink slab caches
* @gfp_mask: allocation context
@@ -576,8 +634,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
struct shrinker *shrinker;
unsigned long freed = 0;
- if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
- return 0;
+ if (memcg && memcg != root_mem_cgroup)
+ return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
if (!down_read_trylock(&shrinker_rwsem))
goto out;
@@ -589,13 +647,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
.memcg = memcg,
};
- /*
- * If kernel memory accounting is disabled, we ignore
- * SHRINKER_MEMCG_AWARE flag and call all shrinkers
- * passing NULL for memcg.
- */
- if (memcg_kmem_enabled() &&
- !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
+ if (!!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
continue;
if (!(shrinker->flags & SHRINKER_NUMA_AWARE))

Using the preparations made in the previous patches, in the case of memcg
shrink we can now skip shrinkers that are not set in the memcg's shrinkers
bitmap. To do that, we separate the iterations over memcg-aware and
!memcg-aware shrinkers, and memcg-aware shrinkers are chosen via
for_each_set_bit() from the bitmap. On big machines with many isolated
environments, this gives a significant performance improvement; see the
next patches for the details.

Note that the patch does not yet handle shrinkers that have become empty
for a memcg, since we never clear a bitmap bit once it is set. Such
shrinkers will be called again and will free no objects. That functionality
is provided by the next patches.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 include/linux/memcontrol.h |    1 +
 mm/vmscan.c                |   70 ++++++++++++++++++++++++++++++++++++++------
 2 files changed, 62 insertions(+), 9 deletions(-)
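
As a side note for readers new to the approach, below is a minimal userspace
sketch (plain C, compilable with gcc) of the idea: only the set bits of a
per-memcg map are visited, so the cost of a memcg shrink scales with the
number of shrinkers that were ever charged against that memcg rather than
with the total number of registered shrinkers. All names here (toy_shrinker,
toy_memcg, toy_shrink) are hypothetical and exist only for illustration; the
real code uses shrinker_idr, the memcg shrinker map and for_each_set_bit()
as in the hunks above.

/*
 * Toy userspace model of bitmap-driven shrinking: one bit per possible
 * shrinker id, set when a shrinker may have objects charged to the memcg.
 */
#include <stdio.h>

#define NR_SHRINKERS	64	/* toy upper bound on shrinker ids */

struct toy_shrinker {
	const char *name;
	unsigned long objects;	/* pretend freeable objects for this memcg */
};

struct toy_memcg {
	unsigned long long map;	/* bit i set => shrinker i may have objects */
};

static struct toy_shrinker *shrinker_table[NR_SHRINKERS];	/* id -> shrinker */

static void charge_shrinker(struct toy_memcg *memcg, int id,
			    struct toy_shrinker *s)
{
	shrinker_table[id] = s;
	memcg->map |= 1ULL << id;	/* analogue of setting the map bit */
}

static unsigned long toy_shrink(struct toy_memcg *memcg)
{
	unsigned long long map = memcg->map;
	unsigned long freed = 0;
	int i;

	/* Walk only the set bits: the loop body runs once per set bit,
	 * not once per registered shrinker. */
	while (map) {
		i = __builtin_ctzll(map);	/* index of lowest set bit */
		map &= map - 1;			/* clear it in the local copy */

		if (!shrinker_table[i]) {
			/* shrinker went away: drop the stale bit */
			memcg->map &= ~(1ULL << i);
			continue;
		}
		freed += shrinker_table[i]->objects;
		shrinker_table[i]->objects = 0;
	}
	return freed;
}

int main(void)
{
	struct toy_memcg memcg = { .map = 0 };
	struct toy_shrinker dentry = { "dentry", 128 };
	struct toy_shrinker inode  = { "inode",   64 };

	/* Only two of the 64 possible ids are set, so toy_shrink() visits
	 * exactly two shrinkers. */
	charge_shrinker(&memcg, 3, &dentry);
	charge_shrinker(&memcg, 17, &inode);

	printf("freed %lu objects\n", toy_shrink(&memcg));
	return 0;
}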