
[v5,11/13] mm: Iterate only over charged shrinkers during memcg shrink_slab()

Message ID 152594603565.22949.12428911301395699065.stgit@localhost.localdomain (mailing list archive)
State New, archived

Commit Message

Kirill Tkhai May 10, 2018, 9:53 a.m. UTC
Using the preparations made in the previous patches, in case of memcg
shrink we may skip shrinkers that are not set in the memcg's shrinker
bitmap. To do that, we separate the iterations over memcg-aware and
!memcg-aware shrinkers, and memcg-aware shrinkers are chosen
via for_each_set_bit() from the bitmap. On big nodes with
many isolated environments, this gives significant
performance growth. See the next patches for the details.

Note that the patch does not yet handle empty memcg shrinkers,
since we never clear a bitmap bit once it is set. Such shrinkers
will be called again, with no objects shrunk as a result. This
functionality is provided by the next patches.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 include/linux/memcontrol.h |    1 +
 mm/vmscan.c                |   70 ++++++++++++++++++++++++++++++++++++++------
 2 files changed, 62 insertions(+), 9 deletions(-)

Comments

Vladimir Davydov May 15, 2018, 5:44 a.m. UTC | #1
On Thu, May 10, 2018 at 12:53:55PM +0300, Kirill Tkhai wrote:
> Using the preparations made in the previous patches, in case of memcg
> shrink we may skip shrinkers that are not set in the memcg's shrinker
> bitmap. To do that, we separate the iterations over memcg-aware and
> !memcg-aware shrinkers, and memcg-aware shrinkers are chosen
> via for_each_set_bit() from the bitmap. On big nodes with
> many isolated environments, this gives significant
> performance growth. See the next patches for the details.
>
> Note that the patch does not yet handle empty memcg shrinkers,
> since we never clear a bitmap bit once it is set. Such shrinkers
> will be called again, with no objects shrunk as a result. This
> functionality is provided by the next patches.
> 
> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
> ---
>  include/linux/memcontrol.h |    1 +
>  mm/vmscan.c                |   70 ++++++++++++++++++++++++++++++++++++++------
>  2 files changed, 62 insertions(+), 9 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 82f892e77637..436691a66500 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -760,6 +760,7 @@ void mem_cgroup_split_huge_fixup(struct page *head);
>  #define MEM_CGROUP_ID_MAX	0
>  
>  struct mem_cgroup;
> +#define root_mem_cgroup NULL

Let's instead export mem_cgroup_is_root(). If MEMCG is disabled,
it will always return false.
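
For reference, a minimal sketch of how that could look in
include/linux/memcontrol.h (the exact hunk placement here is an
assumption, not something spelled out in this thread):

	#ifdef CONFIG_MEMCG
	extern struct mem_cgroup *root_mem_cgroup;

	/* true only for the global root_mem_cgroup */
	static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
	{
		return (memcg == root_mem_cgroup);
	}
	#else /* !CONFIG_MEMCG */
	static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
	{
		return false;	/* stub: no memcgs when MEMCG is disabled */
	}
	#endif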

>  
>  static inline bool mem_cgroup_disabled(void)
>  {
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index d8a2870710e0..a2e38e05adb5 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -376,6 +376,7 @@ int prealloc_shrinker(struct shrinker *shrinker)
>  			goto free_deferred;
>  	}
>  
> +	INIT_LIST_HEAD(&shrinker->list);

IMO this shouldn't be here, see my comment below.

>  	return 0;
>  
>  free_deferred:
> @@ -547,6 +548,63 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>  	return freed;
>  }
>  
> +#ifdef CONFIG_MEMCG_SHRINKER
> +static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
> +			struct mem_cgroup *memcg, int priority)
> +{
> +	struct memcg_shrinker_map *map;
> +	unsigned long freed = 0;
> +	int ret, i;
> +
> +	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
> +		return 0;
> +
> +	if (!down_read_trylock(&shrinker_rwsem))
> +		return 0;
> +
> +	/*
> +	 * 1)Caller passes only alive memcg, so map can't be NULL.
> +	 * 2)shrinker_rwsem protects from maps expanding.

            ^^
Nit: space missing here :-)

> +	 */
> +	map = rcu_dereference_protected(MEMCG_SHRINKER_MAP(memcg, nid), true);
> +	BUG_ON(!map);
> +
> +	for_each_set_bit(i, map->map, memcg_shrinker_nr_max) {
> +		struct shrink_control sc = {
> +			.gfp_mask = gfp_mask,
> +			.nid = nid,
> +			.memcg = memcg,
> +		};
> +		struct shrinker *shrinker;
> +
> +		shrinker = idr_find(&shrinker_idr, i);
> +		if (!shrinker) {
> +			clear_bit(i, map->map);
> +			continue;
> +		}

The shrinker must be memcg aware so please add

  BUG_ON((shrinker->flags & SHRINKER_MEMCG_AWARE) == 0);

> +		if (list_empty(&shrinker->list))
> +			continue;

I don't like using shrinker->list as an indicator that the shrinker has
been initialized. IMO if you do need such a check, you should split
shrinker_idr registration in two steps - allocate a slot in 'prealloc'
and set the pointer in 'register'. However, can we really encounter an
unregistered shrinker here? AFAIU a bit can be set in the shrinker map
only after the corresponding shrinker has been initialized, no?
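
For illustration, such a two-step registration could look roughly like
this (a sketch only; error handling and locking are omitted):

	/* prealloc_shrinker(): reserve an id, but don't publish the pointer */
	id = idr_alloc(&shrinker_idr, NULL, 0, 0, GFP_KERNEL);
	if (id < 0)
		goto free_deferred;
	shrinker->id = id;

	/* register_shrinker_prepared(): publish the initialized shrinker */
	idr_replace(&shrinker_idr, shrinker, shrinker->id);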

> +
> +		ret = do_shrink_slab(&sc, shrinker, priority);
> +		freed += ret;
> +
> +		if (rwsem_is_contended(&shrinker_rwsem)) {
> +			freed = freed ? : 1;
> +			break;
> +		}
> +	}
> +
> +	up_read(&shrinker_rwsem);
> +	return freed;
> +}
> +#else /* CONFIG_MEMCG_SHRINKER */
> +static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
> +			struct mem_cgroup *memcg, int priority)
> +{
> +	return 0;
> +}
> +#endif /* CONFIG_MEMCG_SHRINKER */
> +
>  /**
>   * shrink_slab - shrink slab caches
>   * @gfp_mask: allocation context
> @@ -576,8 +634,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
>  	struct shrinker *shrinker;
>  	unsigned long freed = 0;
>  
> -	if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
> -		return 0;
> +	if (memcg && memcg != root_mem_cgroup)

if (!mem_cgroup_is_root(memcg))

> +		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
>  
>  	if (!down_read_trylock(&shrinker_rwsem))
>  		goto out;
> @@ -589,13 +647,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
>  			.memcg = memcg,
>  		};
>  
> -		/*
> -		 * If kernel memory accounting is disabled, we ignore
> -		 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
> -		 * passing NULL for memcg.
> -		 */
> -		if (memcg_kmem_enabled() &&
> -		    !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
> +		if (!!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
>  			continue;

I want this check gone. It's easy to achieve, actually - just remove the
following lines from shrink_node()

		if (global_reclaim(sc))
			shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
				    sc->priority);

>  
>  		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
>
Kirill Tkhai May 15, 2018, 10:12 a.m. UTC | #2
On 15.05.2018 08:44, Vladimir Davydov wrote:
> On Thu, May 10, 2018 at 12:53:55PM +0300, Kirill Tkhai wrote:
>> Using the preparations made in the previous patches, in case of memcg
>> shrink we may skip shrinkers that are not set in the memcg's shrinker
>> bitmap. To do that, we separate the iterations over memcg-aware and
>> !memcg-aware shrinkers, and memcg-aware shrinkers are chosen
>> via for_each_set_bit() from the bitmap. On big nodes with
>> many isolated environments, this gives significant
>> performance growth. See the next patches for the details.
>>
>> Note that the patch does not yet handle empty memcg shrinkers,
>> since we never clear a bitmap bit once it is set. Such shrinkers
>> will be called again, with no objects shrunk as a result. This
>> functionality is provided by the next patches.
>>
>> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
>> ---
>>  include/linux/memcontrol.h |    1 +
>>  mm/vmscan.c                |   70 ++++++++++++++++++++++++++++++++++++++------
>>  2 files changed, 62 insertions(+), 9 deletions(-)
>>
>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>> index 82f892e77637..436691a66500 100644
>> --- a/include/linux/memcontrol.h
>> +++ b/include/linux/memcontrol.h
>> @@ -760,6 +760,7 @@ void mem_cgroup_split_huge_fixup(struct page *head);
>>  #define MEM_CGROUP_ID_MAX	0
>>  
>>  struct mem_cgroup;
>> +#define root_mem_cgroup NULL
> 
> Let's instead export mem_cgroup_is_root(). If MEMCG is disabled,
> it will always return false.

export == move to header file

>>  
>>  static inline bool mem_cgroup_disabled(void)
>>  {
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index d8a2870710e0..a2e38e05adb5 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -376,6 +376,7 @@ int prealloc_shrinker(struct shrinker *shrinker)
>>  			goto free_deferred;
>>  	}
>>  
>> +	INIT_LIST_HEAD(&shrinker->list);
> 
> IMO this shouldn't be here, see my comment below.
> 
>>  	return 0;
>>  
>>  free_deferred:
>> @@ -547,6 +548,63 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>>  	return freed;
>>  }
>>  
>> +#ifdef CONFIG_MEMCG_SHRINKER
>> +static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
>> +			struct mem_cgroup *memcg, int priority)
>> +{
>> +	struct memcg_shrinker_map *map;
>> +	unsigned long freed = 0;
>> +	int ret, i;
>> +
>> +	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
>> +		return 0;
>> +
>> +	if (!down_read_trylock(&shrinker_rwsem))
>> +		return 0;
>> +
>> +	/*
>> +	 * 1)Caller passes only alive memcg, so map can't be NULL.
>> +	 * 2)shrinker_rwsem protects from maps expanding.
> 
>             ^^
> Nit: space missing here :-)

I don't understand what you mean here. Please clarify...

>> +	 */
>> +	map = rcu_dereference_protected(MEMCG_SHRINKER_MAP(memcg, nid), true);
>> +	BUG_ON(!map);
>> +
>> +	for_each_set_bit(i, map->map, memcg_shrinker_nr_max) {
>> +		struct shrink_control sc = {
>> +			.gfp_mask = gfp_mask,
>> +			.nid = nid,
>> +			.memcg = memcg,
>> +		};
>> +		struct shrinker *shrinker;
>> +
>> +		shrinker = idr_find(&shrinker_idr, i);
>> +		if (!shrinker) {
>> +			clear_bit(i, map->map);
>> +			continue;
>> +		}
> 
> The shrinker must be memcg aware so please add
> 
>   BUG_ON((shrinker->flags & SHRINKER_MEMCG_AWARE) == 0);
> 
>> +		if (list_empty(&shrinker->list))
>> +			continue;
> 
> I don't like using shrinker->list as an indicator that the shrinker has
> been initialized. IMO if you do need such a check, you should split
> shrinker_idr registration in two steps - allocate a slot in 'prealloc'
> and set the pointer in 'register'. However, can we really encounter an
> unregistered shrinker here? AFAIU a bit can be set in the shrinker map
> only after the corresponding shrinker has been initialized, no?

1)No, it's not so. Here is a race:
cpu#0                        cpu#1                                   cpu#2
prealloc_shrinker()
                             prealloc_shrinker()
                               memcg_expand_shrinker_maps()
                                 memcg_expand_one_shrinker_map()
                                   memset(&new->map, 0xff);          
                                                                     do_shrink_slab() (on uninitialized LRUs)
init LRUs
register_shrinker_prepared()

So, the check is needed.

2)Assigning a NULL pointer can't be used here, since NULL is already used
to detect unregistered shrinkers, which get cleared from the map. See the
check right after idr_find().

list_empty() is used since it's an already existing indicator, which does
not require an additional member in struct shrinker.

>> +
>> +		ret = do_shrink_slab(&sc, shrinker, priority);
>> +		freed += ret;
>> +
>> +		if (rwsem_is_contended(&shrinker_rwsem)) {
>> +			freed = freed ? : 1;
>> +			break;
>> +		}
>> +	}
>> +
>> +	up_read(&shrinker_rwsem);
>> +	return freed;
>> +}
>> +#else /* CONFIG_MEMCG_SHRINKER */
>> +static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
>> +			struct mem_cgroup *memcg, int priority)
>> +{
>> +	return 0;
>> +}
>> +#endif /* CONFIG_MEMCG_SHRINKER */
>> +
>>  /**
>>   * shrink_slab - shrink slab caches
>>   * @gfp_mask: allocation context
>> @@ -576,8 +634,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
>>  	struct shrinker *shrinker;
>>  	unsigned long freed = 0;
>>  
>> -	if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
>> -		return 0;
>> +	if (memcg && memcg != root_mem_cgroup)
> 
> if (!mem_cgroup_is_root(memcg))
> 
>> +		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
>>  
>>  	if (!down_read_trylock(&shrinker_rwsem))
>>  		goto out;
>> @@ -589,13 +647,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
>>  			.memcg = memcg,
>>  		};
>>  
>> -		/*
>> -		 * If kernel memory accounting is disabled, we ignore
>> -		 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
>> -		 * passing NULL for memcg.
>> -		 */
>> -		if (memcg_kmem_enabled() &&
>> -		    !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
>> +		if (!!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
>>  			continue;
> 
> I want this check gone. It's easy to achieve, actually - just remove the
> following lines from shrink_node()
> 
> 		if (global_reclaim(sc))
> 			shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
> 				    sc->priority);
> 
>>  
>>  		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
>>
Kirill Tkhai May 15, 2018, 2:49 p.m. UTC | #3
On 15.05.2018 08:44, Vladimir Davydov wrote:
> On Thu, May 10, 2018 at 12:53:55PM +0300, Kirill Tkhai wrote:
>> Using the preparations made in the previous patches, in case of memcg
>> shrink we may skip shrinkers that are not set in the memcg's shrinker
>> bitmap. To do that, we separate the iterations over memcg-aware and
>> !memcg-aware shrinkers, and memcg-aware shrinkers are chosen
>> via for_each_set_bit() from the bitmap. On big nodes with
>> many isolated environments, this gives significant
>> performance growth. See the next patches for the details.
>>
>> Note that the patch does not yet handle empty memcg shrinkers,
>> since we never clear a bitmap bit once it is set. Such shrinkers
>> will be called again, with no objects shrunk as a result. This
>> functionality is provided by the next patches.
>>
>> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
>> ---
>>  include/linux/memcontrol.h |    1 +
>>  mm/vmscan.c                |   70 ++++++++++++++++++++++++++++++++++++++------
>>  2 files changed, 62 insertions(+), 9 deletions(-)
>>
>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>> index 82f892e77637..436691a66500 100644
>> --- a/include/linux/memcontrol.h
>> +++ b/include/linux/memcontrol.h
>> @@ -760,6 +760,7 @@ void mem_cgroup_split_huge_fixup(struct page *head);
>>  #define MEM_CGROUP_ID_MAX	0
>>  
>>  struct mem_cgroup;
>> +#define root_mem_cgroup NULL
> 
> Let's instead export mem_cgroup_is_root(). If MEMCG is disabled,
> it will always return false.
> 
>>  
>>  static inline bool mem_cgroup_disabled(void)
>>  {
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index d8a2870710e0..a2e38e05adb5 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -376,6 +376,7 @@ int prealloc_shrinker(struct shrinker *shrinker)
>>  			goto free_deferred;
>>  	}
>>  
>> +	INIT_LIST_HEAD(&shrinker->list);
> 
> IMO this shouldn't be here, see my comment below.
> 
>>  	return 0;
>>  
>>  free_deferred:
>> @@ -547,6 +548,63 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>>  	return freed;
>>  }
>>  
>> +#ifdef CONFIG_MEMCG_SHRINKER
>> +static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
>> +			struct mem_cgroup *memcg, int priority)
>> +{
>> +	struct memcg_shrinker_map *map;
>> +	unsigned long freed = 0;
>> +	int ret, i;
>> +
>> +	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
>> +		return 0;
>> +
>> +	if (!down_read_trylock(&shrinker_rwsem))
>> +		return 0;
>> +
>> +	/*
>> +	 * 1)Caller passes only alive memcg, so map can't be NULL.
>> +	 * 2)shrinker_rwsem protects from maps expanding.
> 
>             ^^
> Nit: space missing here :-)
> 
>> +	 */
>> +	map = rcu_dereference_protected(MEMCG_SHRINKER_MAP(memcg, nid), true);
>> +	BUG_ON(!map);
>> +
>> +	for_each_set_bit(i, map->map, memcg_shrinker_nr_max) {
>> +		struct shrink_control sc = {
>> +			.gfp_mask = gfp_mask,
>> +			.nid = nid,
>> +			.memcg = memcg,
>> +		};
>> +		struct shrinker *shrinker;
>> +
>> +		shrinker = idr_find(&shrinker_idr, i);
>> +		if (!shrinker) {
>> +			clear_bit(i, map->map);
>> +			continue;
>> +		}
> 
> The shrinker must be memcg aware so please add
> 
>   BUG_ON((shrinker->flags & SHRINKER_MEMCG_AWARE) == 0);
> 
>> +		if (list_empty(&shrinker->list))
>> +			continue;
> 
> I don't like using shrinker->list as an indicator that the shrinker has
> been initialized. IMO if you do need such a check, you should split
> shrinker_idr registration in two steps - allocate a slot in 'prealloc'
> and set the pointer in 'register'. However, can we really encounter an
> unregistered shrinker here? AFAIU a bit can be set in the shrinker map
> only after the corresponding shrinker has been initialized, no?
> 
>> +
>> +		ret = do_shrink_slab(&sc, shrinker, priority);
>> +		freed += ret;
>> +
>> +		if (rwsem_is_contended(&shrinker_rwsem)) {
>> +			freed = freed ? : 1;
>> +			break;
>> +		}
>> +	}
>> +
>> +	up_read(&shrinker_rwsem);
>> +	return freed;
>> +}
>> +#else /* CONFIG_MEMCG_SHRINKER */
>> +static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
>> +			struct mem_cgroup *memcg, int priority)
>> +{
>> +	return 0;
>> +}
>> +#endif /* CONFIG_MEMCG_SHRINKER */
>> +
>>  /**
>>   * shrink_slab - shrink slab caches
>>   * @gfp_mask: allocation context
>> @@ -576,8 +634,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
>>  	struct shrinker *shrinker;
>>  	unsigned long freed = 0;
>>  
>> -	if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
>> -		return 0;
>> +	if (memcg && memcg != root_mem_cgroup)
> 
> if (!mem_cgroup_is_root(memcg))
> 
>> +		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
>>  
>>  	if (!down_read_trylock(&shrinker_rwsem))
>>  		goto out;
>> @@ -589,13 +647,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
>>  			.memcg = memcg,
>>  		};
>>  
>> -		/*
>> -		 * If kernel memory accounting is disabled, we ignore
>> -		 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
>> -		 * passing NULL for memcg.
>> -		 */
>> -		if (memcg_kmem_enabled() &&
>> -		    !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
>> +		if (!!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
>>  			continue;
> 
> I want this check gone. It's easy to achieve, actually - just remove the
> following lines from shrink_node()
> 
> 		if (global_reclaim(sc))
> 			shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
> 				    sc->priority);

This check is not related to the patchset. Let's not mix everything
into a single series of patches, because after your last remarks it will
grow to at least 15 patches. This patchset can't be responsible for
everything.

>>  
>>  		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
>>
Vladimir Davydov May 17, 2018, 4:16 a.m. UTC | #4
On Tue, May 15, 2018 at 05:49:59PM +0300, Kirill Tkhai wrote:
> >> @@ -589,13 +647,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
> >>  			.memcg = memcg,
> >>  		};
> >>  
> >> -		/*
> >> -		 * If kernel memory accounting is disabled, we ignore
> >> -		 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
> >> -		 * passing NULL for memcg.
> >> -		 */
> >> -		if (memcg_kmem_enabled() &&
> >> -		    !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
> >> +		if (!!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
> >>  			continue;
> > 
> > I want this check gone. It's easy to achieve, actually - just remove the
> > following lines from shrink_node()
> > 
> > 		if (global_reclaim(sc))
> > 			shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
> > 				    sc->priority);
> 
> This check is not related to the patchset.

Yes, it is. This patch modifies shrink_slab which is used only by
shrink_node. Simplifying shrink_node along the way looks right to me.

> Let's not mix everything into a single series of patches, because
> after your last remarks it will grow to at least 15 patches.

Most of which are trivial so I don't see any problem here.

> This patchset can't be responsible for everything.

I don't understand why you balk at simplifying the code a bit while you
are patching related functions anyway.

> 
> >>  
> >>  		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> >>
Vladimir Davydov May 17, 2018, 4:33 a.m. UTC | #5
On Tue, May 15, 2018 at 01:12:20PM +0300, Kirill Tkhai wrote:
> >> +#define root_mem_cgroup NULL
> > 
> > Let's instead export mem_cgroup_is_root(). If MEMCG is disabled,
> > it will always return false.
> 
> export == move to header file

That and adding a stub function in case !MEMCG.

> >> +static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
> >> +			struct mem_cgroup *memcg, int priority)
> >> +{
> >> +	struct memcg_shrinker_map *map;
> >> +	unsigned long freed = 0;
> >> +	int ret, i;
> >> +
> >> +	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
> >> +		return 0;
> >> +
> >> +	if (!down_read_trylock(&shrinker_rwsem))
> >> +		return 0;
> >> +
> >> +	/*
> >> +	 * 1)Caller passes only alive memcg, so map can't be NULL.
> >> +	 * 2)shrinker_rwsem protects from maps expanding.
> > 
> >             ^^
> > Nit: space missing here :-)
> 
> I don't understand what you mean here. Please clarify...

This is just a trivial remark regarding comment formatting. They usually
put a space between the number and the first word in the sentence, i.e.
between '1)' and 'Caller' in your case.

> 
> >> +	 */
> >> +	map = rcu_dereference_protected(MEMCG_SHRINKER_MAP(memcg, nid), true);
> >> +	BUG_ON(!map);
> >> +
> >> +	for_each_set_bit(i, map->map, memcg_shrinker_nr_max) {
> >> +		struct shrink_control sc = {
> >> +			.gfp_mask = gfp_mask,
> >> +			.nid = nid,
> >> +			.memcg = memcg,
> >> +		};
> >> +		struct shrinker *shrinker;
> >> +
> >> +		shrinker = idr_find(&shrinker_idr, i);
> >> +		if (!shrinker) {
> >> +			clear_bit(i, map->map);
> >> +			continue;
> >> +		}
> >> +		if (list_empty(&shrinker->list))
> >> +			continue;
> > 
> > I don't like using shrinker->list as an indicator that the shrinker has
> > been initialized. IMO if you do need such a check, you should split
> > shrinker_idr registration in two steps - allocate a slot in 'prealloc'
> > and set the pointer in 'register'. However, can we really encounter an
> > unregistered shrinker here? AFAIU a bit can be set in the shrinker map
> > only after the corresponding shrinker has been initialized, no?
> 
> 1)No, it's not so. Here is a race:
> cpu#0                        cpu#1                                   cpu#2
> prealloc_shrinker()
>                              prealloc_shrinker()
>                                memcg_expand_shrinker_maps()
>                                  memcg_expand_one_shrinker_map()
>                                    memset(&new->map, 0xff);          
>                                                                      do_shrink_slab() (on uninitialized LRUs)
> init LRUs
> register_shrinker_prepared()
> 
> So, the check is needed.

OK, I see.

> 
> 2)Assigning a NULL pointer can't be used here, since NULL is already used
> to detect unregistered shrinkers, which get cleared from the map. See the
> check right after idr_find().

But it won't break anything if we clear the bit for preallocated but not
yet registered shrinkers, will it?

> 
> list_empty() is used since it's an already existing indicator, which does
> not require an additional member in struct shrinker.

It just looks rather counter-intuitive to me to use shrinker->list to
differentiate between registered and unregistered shrinkers. Maybe I'm
wrong. If you are sure that this is OK, I'm fine with it, but then
please add a comment here explaining what this check is needed for.
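
For instance, something along these lines (the wording is only a
suggestion):

	/*
	 * The bit can be set before the shrinker is fully registered:
	 * map expansion memsets all new bits to 0xff. Skip shrinkers
	 * that were only preallocated; shrinker->list gets populated
	 * in register_shrinker_prepared().
	 */
	if (list_empty(&shrinker->list))
		continue;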

Thanks.
Kirill Tkhai May 17, 2018, 11:39 a.m. UTC | #6
On 17.05.2018 07:33, Vladimir Davydov wrote:
> On Tue, May 15, 2018 at 01:12:20PM +0300, Kirill Tkhai wrote:
>>>> +#define root_mem_cgroup NULL
>>>
>>> Let's instead export mem_cgroup_is_root(). If MEMCG is disabled,
>>> it will always return false.
>>
>> export == move to header file
> 
> That and adding a stub function in case !MEMCG.
> 
>>>> +static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
>>>> +			struct mem_cgroup *memcg, int priority)
>>>> +{
>>>> +	struct memcg_shrinker_map *map;
>>>> +	unsigned long freed = 0;
>>>> +	int ret, i;
>>>> +
>>>> +	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
>>>> +		return 0;
>>>> +
>>>> +	if (!down_read_trylock(&shrinker_rwsem))
>>>> +		return 0;
>>>> +
>>>> +	/*
>>>> +	 * 1)Caller passes only alive memcg, so map can't be NULL.
>>>> +	 * 2)shrinker_rwsem protects from maps expanding.
>>>
>>>             ^^
>>> Nit: space missing here :-)
>>
>> I don't understand what you mean here. Please clarify...
> 
> This is just a trivial remark regarding comment formatting. They usually
> put a space between the number and the first word in the sentence, i.e.
> between '1)' and 'Caller' in your case.
> 
>>
>>>> +	 */
>>>> +	map = rcu_dereference_protected(MEMCG_SHRINKER_MAP(memcg, nid), true);
>>>> +	BUG_ON(!map);
>>>> +
>>>> +	for_each_set_bit(i, map->map, memcg_shrinker_nr_max) {
>>>> +		struct shrink_control sc = {
>>>> +			.gfp_mask = gfp_mask,
>>>> +			.nid = nid,
>>>> +			.memcg = memcg,
>>>> +		};
>>>> +		struct shrinker *shrinker;
>>>> +
>>>> +		shrinker = idr_find(&shrinker_idr, i);
>>>> +		if (!shrinker) {
>>>> +			clear_bit(i, map->map);
>>>> +			continue;
>>>> +		}
>>>> +		if (list_empty(&shrinker->list))
>>>> +			continue;
>>>
>>> I don't like using shrinker->list as an indicator that the shrinker has
>>> been initialized. IMO if you do need such a check, you should split
>>> shrinker_idr registration in two steps - allocate a slot in 'prealloc'
>>> and set the pointer in 'register'. However, can we really encounter an
>>> unregistered shrinker here? AFAIU a bit can be set in the shrinker map
>>> only after the corresponding shrinker has been initialized, no?
>>
>> 1)No, it's not so. Here is a race:
>> cpu#0                        cpu#1                                   cpu#2
>> prealloc_shrinker()
>>                              prealloc_shrinker()
>>                                memcg_expand_shrinker_maps()
>>                                  memcg_expand_one_shrinker_map()
>>                                    memset(&new->map, 0xff);          
>>                                                                      do_shrink_slab() (on uninitialized LRUs)
>> init LRUs
>> register_shrinker_prepared()
>>
>> So, the check is needed.
> 
> OK, I see.
> 
>>
>> 2)Assigning a NULL pointer can't be used here, since NULL is already used
>> to detect unregistered shrinkers, which get cleared from the map. See the
>> check right after idr_find().
> 
> But it won't break anything if we clear the bit for preallocated but not
> yet registered shrinkers, will it?

This would impose restrictions on the code that registers a shrinker,
because there is no rule or guarantee in the kernel that a list LRU can't
be populated before the shrinker is completely registered. The separate
subsystems of the kernel have to be modular, while clearing the bit would
break that modularity and impose restrictions on the users of this
interface.

Also, if we go another way and delegate this to the users, and they have
to follow this rule, it may require a non-trivial locking scheme on their
side. So let's keep the modularity.

Also, we can't move the memset(0xff) to register_shrinker_prepared(),
since then we would have to remember whether the maps were expanded in
prealloc_shrinker().

>>
>> list_empty() is used since it's an already existing indicator, which does
>> not require an additional member in struct shrinker.
> 
> It just looks rather counter-intuitive to me to use shrinker->list to
> differentiate between registered and unregistered shrinkers. Maybe I'm
> wrong. If you are sure that this is OK, I'm fine with it, but then
> please add a comment here explaining what this check is needed for.

We may introduce a new flag in shrinker::flags to indicate this instead,
but to me it seems the same.
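
A rough sketch of that alternative (the flag name and bit value are
hypothetical):

	#define SHRINKER_REGISTERED	(1 << 2)	/* hypothetical */

	/* set in register_shrinker_prepared(), under shrinker_rwsem */
	shrinker->flags |= SHRINKER_REGISTERED;

	/* then shrink_slab_memcg() checks it instead of list_empty() */
	if (!(shrinker->flags & SHRINKER_REGISTERED))
		continue;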

Thanks,
Kirill
Kirill Tkhai May 17, 2018, 11:49 a.m. UTC | #7
On 17.05.2018 07:16, Vladimir Davydov wrote:
> On Tue, May 15, 2018 at 05:49:59PM +0300, Kirill Tkhai wrote:
>>>> @@ -589,13 +647,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
>>>>  			.memcg = memcg,
>>>>  		};
>>>>  
>>>> -		/*
>>>> -		 * If kernel memory accounting is disabled, we ignore
>>>> -		 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
>>>> -		 * passing NULL for memcg.
>>>> -		 */
>>>> -		if (memcg_kmem_enabled() &&
>>>> -		    !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
>>>> +		if (!!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
>>>>  			continue;
>>>
>>> I want this check gone. It's easy to achieve, actually - just remove the
>>> following lines from shrink_node()
>>>
>>> 		if (global_reclaim(sc))
>>> 			shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
>>> 				    sc->priority);
>>
>> This check is not related to the patchset.
> 
> Yes, it is. This patch modifies shrink_slab which is used only by
> shrink_node. Simplifying shrink_node along the way looks right to me.

shrink_slab() is used not only in this place. It does not seem a trivial
change to me.

>> Let's not mix everything into a single series of patches, because
>> after your last remarks it will grow to at least 15 patches.
> 
> Most of which are trivial so I don't see any problem here.
> 
>> This patchset can't be responsible for everything.
> 
> I don't understand why you balk at simplifying the code a bit while you
> are patching related functions anyway.

Because this function is used in several places, we have some particulars
of root_mem_cgroup initialization, and this function is called from those
places with different states of root_mem_cgroup. It does not seem a
trivial fix to me.

Let's do it on top of the series later; what is the problem? It does not
seem a critical problem.

Kirill

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 82f892e77637..436691a66500 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -760,6 +760,7 @@  void mem_cgroup_split_huge_fixup(struct page *head);
 #define MEM_CGROUP_ID_MAX	0
 
 struct mem_cgroup;
+#define root_mem_cgroup NULL
 
 static inline bool mem_cgroup_disabled(void)
 {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d8a2870710e0..a2e38e05adb5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -376,6 +376,7 @@  int prealloc_shrinker(struct shrinker *shrinker)
 			goto free_deferred;
 	}
 
+	INIT_LIST_HEAD(&shrinker->list);
 	return 0;
 
 free_deferred:
@@ -547,6 +548,63 @@  static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	return freed;
 }
 
+#ifdef CONFIG_MEMCG_SHRINKER
+static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
+			struct mem_cgroup *memcg, int priority)
+{
+	struct memcg_shrinker_map *map;
+	unsigned long freed = 0;
+	int ret, i;
+
+	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
+		return 0;
+
+	if (!down_read_trylock(&shrinker_rwsem))
+		return 0;
+
+	/*
+	 * 1)Caller passes only alive memcg, so map can't be NULL.
+	 * 2)shrinker_rwsem protects from maps expanding.
+	 */
+	map = rcu_dereference_protected(MEMCG_SHRINKER_MAP(memcg, nid), true);
+	BUG_ON(!map);
+
+	for_each_set_bit(i, map->map, memcg_shrinker_nr_max) {
+		struct shrink_control sc = {
+			.gfp_mask = gfp_mask,
+			.nid = nid,
+			.memcg = memcg,
+		};
+		struct shrinker *shrinker;
+
+		shrinker = idr_find(&shrinker_idr, i);
+		if (!shrinker) {
+			clear_bit(i, map->map);
+			continue;
+		}
+		if (list_empty(&shrinker->list))
+			continue;
+
+		ret = do_shrink_slab(&sc, shrinker, priority);
+		freed += ret;
+
+		if (rwsem_is_contended(&shrinker_rwsem)) {
+			freed = freed ? : 1;
+			break;
+		}
+	}
+
+	up_read(&shrinker_rwsem);
+	return freed;
+}
+#else /* CONFIG_MEMCG_SHRINKER */
+static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
+			struct mem_cgroup *memcg, int priority)
+{
+	return 0;
+}
+#endif /* CONFIG_MEMCG_SHRINKER */
+
 /**
  * shrink_slab - shrink slab caches
  * @gfp_mask: allocation context
@@ -576,8 +634,8 @@  static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 	struct shrinker *shrinker;
 	unsigned long freed = 0;
 
-	if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
-		return 0;
+	if (memcg && memcg != root_mem_cgroup)
+		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
 
 	if (!down_read_trylock(&shrinker_rwsem))
 		goto out;
@@ -589,13 +647,7 @@  static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 			.memcg = memcg,
 		};
 
-		/*
-		 * If kernel memory accounting is disabled, we ignore
-		 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
-		 * passing NULL for memcg.
-		 */
-		if (memcg_kmem_enabled() &&
-		    !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
+		if (!!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
 			continue;
 
 		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))