[v6,07/11] mm: vmscan: add per memcg shrinker nr_deferred

Message ID 20210203172042.800474-8-shy828301@gmail.com (mailing list archive)
State New, archived
Series Make shrinker's nr_deferred memcg aware

Commit Message

Yang Shi Feb. 3, 2021, 5:20 p.m. UTC
Currently the number of deferred objects is per shrinker, but some slabs, for example,
the vfs inode/dentry caches, are per memcg.  This results in poor isolation among memcgs.

The deferred objects are typically generated by __GFP_NOFS allocations; one memcg with
excessive __GFP_NOFS allocations may blow up the deferred objects, and then other innocent
memcgs may suffer from over-shrinking, excessive reclaim latency, etc.
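
As background, a deliberately simplified user-space sketch of the mechanism described
above (the names and the scanning policy here are illustrative, not the kernel code):
every memcg's reclaim against a given shrinker shares one per-shrinker, per-node deferred
counter, so a backlog piled up by one memcg's __GFP_NOFS reclaim is replayed against
whichever memcg happens to be reclaimed next.

#include <stdatomic.h>
#include <stdbool.h>

#define NR_NODES 2				/* illustrative */

struct shrinker_model {
	/* one counter per node, shared by every memcg (pre-patch behaviour) */
	atomic_long nr_deferred[NR_NODES];
};

/* one reclaim pass against this shrinker on behalf of some memcg */
static long shrink_model(struct shrinker_model *s, int nid, long delta,
			 bool nofs_context)
{
	/* take over everything deferred so far, no matter who deferred it */
	long nr = atomic_exchange(&s->nr_deferred[nid], 0);
	long total_scan = nr + delta;

	if (nofs_context) {
		/* fs objects can't be touched here: defer the whole batch */
		atomic_fetch_add(&s->nr_deferred[nid], total_scan);
		return 0;
	}

	/* otherwise this memcg scans its own delta plus everyone's backlog */
	return total_scan;
}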

For example, two workloads run in memcgA and memcgB respectively, and the workload in B
is vfs heavy.  The workload in A generates excessive deferred objects, then B's vfs cache
might be hit heavily (half of the caches dropped) by B's limit reclaim or global reclaim.

We observed this in our production environment, which was running a vfs heavy workload,
as shown in the tracing log below:

<...>-409454 [016] .... 28286961.747146: mm_shrink_slab_start: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
nid: 1 objects to shrink 3641681686040 gfp_flags GFP_HIGHUSER_MOVABLE|__GFP_ZERO pgs_scanned 1 lru_pgs 15721
cache items 246404277 delta 31345 total_scan 123202138
<...>-409454 [022] .... 28287105.928018: mm_shrink_slab_end: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
nid: 1 unused scan count 3641681686040 new scan count 3641798379189 total_scan 602
last shrinker return val 123186855

The vfs cache to page cache ratio was 10:1 on this machine, and half of the caches were
dropped (note that total_scan, 123202138, is half of the 246404277 cache items).  This
also resulted in a significant amount of page cache being dropped due to inode eviction.

Making nr_deferred per memcg for memcg aware shrinkers would solve the unfairness and
bring better isolation.

When memcg is not enabled (!CONFIG_MEMCG or memcg disabled), the shrinker's nr_deferred
is used.  Non memcg aware shrinkers use the shrinker's nr_deferred all the time.
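
As an illustration of that selection rule, a self-contained sketch (all names below are
hypothetical stand-ins; the real lookup helpers are introduced later in the series):

#include <stddef.h>
#include <stdbool.h>

struct memcg_ref;	/* opaque stand-in for struct mem_cgroup */

struct shrinker_sketch {
	bool memcg_aware;
	long *own_nr_deferred;		/* per-node counters embedded in the shrinker */
};

/* hypothetical lookup into the per-memcg shrinker_info added by this patch */
static long *per_memcg_nr_deferred(struct memcg_ref *memcg, int nid, int shrinker_id)
{
	(void)memcg; (void)nid; (void)shrinker_id;
	return NULL;	/* stubbed out; the real helper returns the memcg's counter */
}

static long *pick_nr_deferred(struct shrinker_sketch *s, struct memcg_ref *memcg,
			      int nid, int shrinker_id)
{
	/* memcg reclaim with a memcg-aware shrinker: use the memcg's counter */
	if (s->memcg_aware && memcg)
		return per_memcg_nr_deferred(memcg, nid, shrinker_id);

	/* !CONFIG_MEMCG, memcg disabled, or a non memcg-aware shrinker */
	return &s->own_nr_deferred[nid];
}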

Signed-off-by: Yang Shi <shy828301@gmail.com>
---
 include/linux/memcontrol.h |  7 +++---
 mm/vmscan.c                | 45 ++++++++++++++++++++++++--------------
 2 files changed, 33 insertions(+), 19 deletions(-)
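
For reference, a minimal user-space sketch of the allocation layout the patch switches
to: per memcg, per node, a single buffer holding the nr_deferred counters followed by the
shrinker bitmap, mirroring the pointer arithmetic in alloc_shrinker_info() and
expand_one_shrinker_info() below (calloc() and the rounding here are illustrative
stand-ins for kvzalloc_node() and NR_MAX_TO_SHR_MAP_SIZE()):

#include <stdlib.h>

typedef struct { long counter; } atomic_long_t;	/* stand-in */

struct shrinker_info {
	/* struct rcu_head rcu; omitted in this sketch */
	atomic_long_t *nr_deferred;	/* shrinker_nr_max counters */
	unsigned long *map;		/* shrinker_nr_max bits, rounded up */
};

static struct shrinker_info *alloc_info_sketch(int shrinker_nr_max)
{
	size_t bits_per_long = 8 * sizeof(unsigned long);
	size_t d_size = shrinker_nr_max * sizeof(atomic_long_t);
	size_t m_size = (shrinker_nr_max + bits_per_long - 1) /
			bits_per_long * sizeof(unsigned long);
	struct shrinker_info *info = calloc(1, sizeof(*info) + d_size + m_size);

	if (!info)
		return NULL;
	/* same trailing layout as the patch: counters first, then the bitmap */
	info->nr_deferred = (atomic_long_t *)(info + 1);
	info->map = (unsigned long *)((char *)info->nr_deferred + d_size);
	return info;
}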

Comments

Kirill Tkhai Feb. 4, 2021, 8:30 a.m. UTC | #1
On 03.02.2021 20:20, Yang Shi wrote:
> [...]
> @@ -268,10 +276,16 @@ static int expand_shrinker_info(int new_id)
>  {
>  	int size, old_size, ret = 0;
>  	int new_nr_max = new_id + 1;
> +	int m_size, d_size = 0;
> +	int old_m_size, old_d_size = 0;
>  	struct mem_cgroup *memcg;
>  
> -	size = NR_MAX_TO_SHR_MAP_SIZE(new_nr_max);
> -	old_size = NR_MAX_TO_SHR_MAP_SIZE(shrinker_nr_max);
> +	m_size = NR_MAX_TO_SHR_MAP_SIZE(new_nr_max);
> +	d_size = new_nr_max * sizeof(atomic_long_t);
> +	size = m_size + d_size;
> +	old_m_size = NR_MAX_TO_SHR_MAP_SIZE(shrinker_nr_max);
> +	old_d_size = shrinker_nr_max * sizeof(atomic_long_t);
> +	old_size = old_m_size + old_d_size;
>  	if (size <= old_size)
>  		goto out;

Before this patch we used to allocate shrinker_info with BITS_PER_LONG batching.
So, the first registered shrinker allocated a map of unsigned long size, and
we could register 63 more shrinkers without expanding the maps.

After this patch we will expand maps on every shrinker registration, won't we?
What do you think about batching here?

Yang Shi Feb. 4, 2021, 5:17 p.m. UTC | #2
On Thu, Feb 4, 2021 at 12:31 AM Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>
> On 03.02.2021 20:20, Yang Shi wrote:
> > [...]
>
> Before this patch we used to allocate shrinker_info with BITS_PER_LONG batching.
> So, first registered shrinker used to allocate a map of unsigned long size, and
> we could to allocate 63 more shrinkers without maps expanding.
>
> After this patch we will expand maps on every shrinker registration, won't we?

Yes, I suppose "maps" means "info". I assumed that most shrinkers are
registered at boot time, and typically very few memcgs are created at boot
time, so I didn't treat it as a hot path.

> What do you think about batching here?

Just off the top of my head, we could allocate, for example, 64
nr_deferred entries (64 * sizeof(atomic_long_t)) so that we would only need
to expand the info once every 64 shrinker registrations. Maybe make it
depend on the machine (64 bit - 64, 32 bit - 32).

Why 64? Basically a magic number. And when I was investigating that
list_lru reparent race issue
(https://lore.kernel.org/linux-mm/20201202171749.264354-1-shy828301@gmail.com/)
I happened to notice that there are at most 64 shrinkers registered in
our production environment (a typical data center configuration).

What do you think about it?

Kirill Tkhai Feb. 5, 2021, 2:37 p.m. UTC | #3
On 04.02.2021 20:17, Yang Shi wrote:
> On Thu, Feb 4, 2021 at 12:31 AM Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>>
>> On 03.02.2021 20:20, Yang Shi wrote:
>>> [...]
>>
>> Before this patch we used to allocate shrinker_info with BITS_PER_LONG batching.
>> So, first registered shrinker used to allocate a map of unsigned long size, and
>> we could to allocate 63 more shrinkers without maps expanding.
>>
>> After this patch we will expand maps on every shrinker registration, won't we?
> 
> Yes, I'm supposed "maps" means "info".I'm supposed the most shrinkers
> should be registered at boot time, and typically very few memcgs are
> created at boot time so I didn't treat it as a hot path.

Not so. Every mount adds at least one shrinker, so shrinkers can actively be
added during normal system operation.

E.g., on our production systems (containers), having several thousand
shrinkers is not a rare situation.

>> What do you think about batching here?
> 
> Just off the top of my head, we could allocate, for example, 64
> nr_deferred (64 * sizeof(atomic_long_t)) so that we just need to
> expand info for every 64 shrinker registrations. Maybe define it
> depends on the machine (64 bit - 64, 32 bit - 32).
> 
> Why 64? Basically a magic number. And when I was investigating that
> list_lru reparent race issue
> (https://lore.kernel.org/linux-mm/20201202171749.264354-1-shy828301@gmail.com/)
> I happened to notice that there are at most 64 shrinkers registered in
> our production environment (a typical data center configuration).
> 
> How do you think about it?

I think 64 is OK for now. We may use some #define to set this value, so we will
be able to change it easily in the future.

Yang Shi Feb. 5, 2021, 4:49 p.m. UTC | #4
On Fri, Feb 5, 2021 at 6:38 AM Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>
> On 04.02.2021 20:17, Yang Shi wrote:
> > On Thu, Feb 4, 2021 at 12:31 AM Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
> >>
> >> On 03.02.2021 20:20, Yang Shi wrote:
> >>> [...]
> >>
> >> Before this patch we used to allocate shrinker_info with BITS_PER_LONG batching.
> >> So, first registered shrinker used to allocate a map of unsigned long size, and
> >> we could to allocate 63 more shrinkers without maps expanding.
> >>
> >> After this patch we will expand maps on every shrinker registration, won't we?
> >
> > Yes, I'm supposed "maps" means "info".I'm supposed the most shrinkers
> > should be registered at boot time, and typically very few memcgs are
> > created at boot time so I didn't treat it as a hot path.
>
> Not so. Every mount adds at least one shrinker, so they can actively be added
> during normal system work.
>
> E.g., on our production system (containers) several thousand shrinkers
> is not a rare situation.

Aha, yes, I missed this point.

>
> >> What do you think about batching here?
> >
> > Just off the top of my head, we could allocate, for example, 64
> > nr_deferred (64 * sizeof(atomic_long_t)) so that we just need to
> > expand info for every 64 shrinker registrations. Maybe define it
> > depends on the machine (64 bit - 64, 32 bit - 32).
> >
> > Why 64? Basically a magic number. And when I was investigating that
> > list_lru reparent race issue
> > (https://lore.kernel.org/linux-mm/20201202171749.264354-1-shy828301@gmail.com/)
> > I happened to notice that there are at most 64 shrinkers registered in
> > our production environment (a typical data center configuration).
> >
> > How do you think about it?
>
> I think 64 is OK for now. We may use some #define to set this value, so we will
> be able to change it easily in the future.

BITS_PER_LONG might be better. We could reuse all the existing logic
without adding too much new code.
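
A rough sketch of the batching idea being discussed, assuming the id count is rounded
up to a BITS_PER_LONG multiple before sizing both the bitmap and the nr_deferred array
(helper names here are made up; this is not necessarily what the series ends up merging):

#define BATCH_BITS	(8 * sizeof(unsigned long))	/* i.e. BITS_PER_LONG */

static int round_up_to_batch(int nr)
{
	return (nr + BATCH_BITS - 1) / BATCH_BITS * BATCH_BITS;
}

/* expansion is only needed when the new id spills into a fresh batch */
static int need_expand(int new_id, int shrinker_nr_max)
{
	return round_up_to_batch(new_id + 1) > round_up_to_batch(shrinker_nr_max);
}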


Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4c9253896e25..c457fc7bc631 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -93,12 +93,13 @@  struct lruvec_stat {
 };
 
 /*
- * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
- * which have elements charged to this memcg.
+ * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
+ * shrinkers, which have elements charged to this memcg.
  */
 struct shrinker_info {
 	struct rcu_head rcu;
-	unsigned long map[];
+	atomic_long_t *nr_deferred;
+	unsigned long *map;
 };
 
 /*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dc0d69e081b0..d9126f12890f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -196,10 +196,12 @@  static void free_shrinker_info_rcu(struct rcu_head *head)
 }
 
 static int expand_one_shrinker_info(struct mem_cgroup *memcg,
-				   int size, int old_size)
+				    int m_size, int d_size,
+				    int old_m_size, int old_d_size)
 {
 	struct shrinker_info *new, *old;
 	int nid;
+	int size = m_size + d_size;
 
 	for_each_node(nid) {
 		old = rcu_dereference_protected(
@@ -212,9 +214,15 @@  static int expand_one_shrinker_info(struct mem_cgroup *memcg,
 		if (!new)
 			return -ENOMEM;
 
-		/* Set all old bits, clear all new bits */
-		memset(new->map, (int)0xff, old_size);
-		memset((void *)new->map + old_size, 0, size - old_size);
+		new->nr_deferred = (atomic_long_t *)(new + 1);
+		new->map = (void *)new->nr_deferred + d_size;
+
+		/* map: set all old bits, clear all new bits */
+		memset(new->map, (int)0xff, old_m_size);
+		memset((void *)new->map + old_m_size, 0, m_size - old_m_size);
+		/* nr_deferred: copy old values, clear all new values */
+		memcpy(new->nr_deferred, old->nr_deferred, old_d_size);
+		memset((void *)new->nr_deferred + old_d_size, 0, d_size - old_d_size);
 
 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, new);
 		call_rcu(&old->rcu, free_shrinker_info_rcu);
@@ -229,9 +237,6 @@  void free_shrinker_info(struct mem_cgroup *memcg)
 	struct shrinker_info *info;
 	int nid;
 
-	if (mem_cgroup_is_root(memcg))
-		return;
-
 	for_each_node(nid) {
 		pn = mem_cgroup_nodeinfo(memcg, nid);
 		info = rcu_dereference_protected(pn->shrinker_info, true);
@@ -244,12 +249,13 @@  int alloc_shrinker_info(struct mem_cgroup *memcg)
 {
 	struct shrinker_info *info;
 	int nid, size, ret = 0;
-
-	if (mem_cgroup_is_root(memcg))
-		return 0;
+	int m_size, d_size = 0;
 
 	down_write(&shrinker_rwsem);
-	size = NR_MAX_TO_SHR_MAP_SIZE(shrinker_nr_max);
+	m_size = NR_MAX_TO_SHR_MAP_SIZE(shrinker_nr_max);
+	d_size = shrinker_nr_max * sizeof(atomic_long_t);
+	size = m_size + d_size;
+
 	for_each_node(nid) {
 		info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
 		if (!info) {
@@ -257,6 +263,8 @@  int alloc_shrinker_info(struct mem_cgroup *memcg)
 			ret = -ENOMEM;
 			break;
 		}
+		info->nr_deferred = (atomic_long_t *)(info + 1);
+		info->map = (void *)info->nr_deferred + d_size;
 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
 	}
 	up_write(&shrinker_rwsem);
@@ -268,10 +276,16 @@  static int expand_shrinker_info(int new_id)
 {
 	int size, old_size, ret = 0;
 	int new_nr_max = new_id + 1;
+	int m_size, d_size = 0;
+	int old_m_size, old_d_size = 0;
 	struct mem_cgroup *memcg;
 
-	size = NR_MAX_TO_SHR_MAP_SIZE(new_nr_max);
-	old_size = NR_MAX_TO_SHR_MAP_SIZE(shrinker_nr_max);
+	m_size = NR_MAX_TO_SHR_MAP_SIZE(new_nr_max);
+	d_size = new_nr_max * sizeof(atomic_long_t);
+	size = m_size + d_size;
+	old_m_size = NR_MAX_TO_SHR_MAP_SIZE(shrinker_nr_max);
+	old_d_size = shrinker_nr_max * sizeof(atomic_long_t);
+	old_size = old_m_size + old_d_size;
 	if (size <= old_size)
 		goto out;
 
@@ -280,9 +294,8 @@  static int expand_shrinker_info(int new_id)
 
 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
 	do {
-		if (mem_cgroup_is_root(memcg))
-			continue;
-		ret = expand_one_shrinker_info(memcg, size, old_size);
+		ret = expand_one_shrinker_info(memcg, m_size, d_size,
+					       old_m_size, old_d_size);
 		if (ret) {
 			mem_cgroup_iter_break(NULL, memcg);
 			goto out;