Similar to memcg-level statistics, per-node data isn't expected to be
hot after cgroup removal. Switching over to atomics and prematurely
releasing percpu data helps to reduce the memory footprint of dying
cgroups.

Signed-off-by: Roman Gushchin <guro@fb.com>
---
 include/linux/memcontrol.h |  1 +
 mm/memcontrol.c            | 24 +++++++++++++++++++++++-
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -127,6 +127,7 @@ struct mem_cgroup_per_node {
struct lruvec lruvec;

struct lruvec_stat __rcu /* __percpu */ *lruvec_stat_cpu;
+ struct lruvec_stat __percpu *lruvec_stat_cpu_offlined;
atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4459,7 +4459,7 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
if (!pn)
return;

- free_percpu(pn->lruvec_stat_cpu);
+ WARN_ON_ONCE(pn->lruvec_stat_cpu);
kfree(pn);
}
@@ -4615,7 +4615,17 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
static void mem_cgroup_free_percpu(struct rcu_head *rcu)
{
struct mem_cgroup *memcg = container_of(rcu, struct mem_cgroup, rcu);
+ int node;
+
+ for_each_node(node) {
+ struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
+ if (!pn)
+ continue;
+
+ free_percpu(pn->lruvec_stat_cpu_offlined);
+ WARN_ON_ONCE(pn->lruvec_stat_cpu);
+ }

free_percpu(memcg->vmstats_percpu_offlined);
WARN_ON_ONCE(memcg->vmstats_percpu);
@@ -4624,6 +4634,18 @@ static void mem_cgroup_free_percpu(struct rcu_head *rcu)
static void mem_cgroup_offline_percpu(struct mem_cgroup *memcg)
{
+ int node;
+
+ for_each_node(node) {
+ struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
+
+ if (!pn)
+ continue;
+
+ pn->lruvec_stat_cpu_offlined = (struct lruvec_stat __percpu *)
+ rcu_dereference(pn->lruvec_stat_cpu);
+ rcu_assign_pointer(pn->lruvec_stat_cpu, NULL);
+ }
memcg->vmstats_percpu_offlined = (struct memcg_vmstats_percpu __percpu *)
rcu_dereference(memcg->vmstats_percpu);
rcu_assign_pointer(memcg->vmstats_percpu, NULL);
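The premature release is only safe because statistics updaters fall
back to the atomic counters once the RCU-protected percpu pointer has
been cleared (presumably done earlier in this series, as the
__rcu /* __percpu */ annotation on lruvec_stat_cpu suggests). Below is
a minimal sketch of that updater-side pattern, assuming the fields
shown above; mod_lruvec_stat_sketch() is a hypothetical name, not a
function from this series:

/*
 * Hypothetical sketch, not part of the patch: once
 * mem_cgroup_offline_percpu() has cleared lruvec_stat_cpu, updates
 * land in the atomic lruvec_stat[] counters instead.
 */
static void mod_lruvec_stat_sketch(struct mem_cgroup_per_node *pn,
				   enum node_stat_item idx, int val)
{
	struct lruvec_stat __percpu *stat_cpu;

	rcu_read_lock();
	stat_cpu = (struct lruvec_stat __percpu *)
		rcu_dereference(pn->lruvec_stat_cpu);
	if (likely(stat_cpu)) {
		/* fast path: the cgroup is alive, percpu data is in place */
		this_cpu_add(stat_cpu->count[idx], val);
	} else {
		/* percpu data was released at offline; fall back to atomics */
		atomic_long_add(val, &pn->lruvec_stat[idx]);
	}
	rcu_read_unlock();
}

Because mem_cgroup_free_percpu() runs as an RCU callback, an updater
that observed a non-NULL pointer inside its read-side critical section
is guaranteed to finish before free_percpu() actually runs.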