
[v6,12/17] mm: Set bit in memcg shrinker bitmap on first list_lru item appearance

Message ID 152663302275.5308.7476660277265020067.stgit@localhost.localdomain (mailing list archive)
State New, archived

Commit Message

Kirill Tkhai May 18, 2018, 8:43 a.m. UTC
Introduce the memcg_set_shrinker_bit() function to set a
shrinker-related bit in the memcg shrinker bitmap. The bit is set after
the first item is added to a list_lru, and when reparenting a destroyed
memcg's items.

This will allow the next patch to call shrinkers only if they have
charged objects at the moment, improving shrink_slab() performance.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 include/linux/memcontrol.h |   14 ++++++++++++++
 mm/list_lru.c              |   22 ++++++++++++++++++++--
 2 files changed, 34 insertions(+), 2 deletions(-)
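
For context, the point of the bitmap is that shrink_slab() can later
visit only the shrinkers whose bits are set, instead of iterating all
registered shrinkers. A minimal sketch of that consumer side follows;
the real change lands in the next patch of the series, and the names
shrinker_nr_max and do_shrink_slab() are assumptions here, not
definitions from this patch:

	/*
	 * Sketch only: assumes the next patch in the series walks the
	 * per-memcg bitmap roughly like this. shrinker_nr_max and
	 * do_shrink_slab() are assumed names, not defined by this patch.
	 */
	static void shrink_slab_memcg_sketch(struct mem_cgroup *memcg, int nid)
	{
		struct memcg_shrinker_map *map;
		int i;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		for_each_set_bit(i, map->map, shrinker_nr_max) {
			/*
			 * Only bits set by memcg_set_shrinker_bit() are
			 * visited, so shrinkers with no charged objects
			 * are skipped without being called at all.
			 */
			do_shrink_slab(i, memcg, nid);
		}
		rcu_read_unlock();
	}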

Comments

Vladimir Davydov May 20, 2018, 7:55 a.m. UTC | #1
On Fri, May 18, 2018 at 11:43:42AM +0300, Kirill Tkhai wrote:
> Introduce the memcg_set_shrinker_bit() function to set a
> shrinker-related bit in the memcg shrinker bitmap. The bit is set after
> the first item is added to a list_lru, and when reparenting a destroyed
> memcg's items.
> 
> This will allow the next patch to call shrinkers only if they have
> charged objects at the moment, improving shrink_slab() performance.
> 
> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
> ---
>  include/linux/memcontrol.h |   14 ++++++++++++++
>  mm/list_lru.c              |   22 ++++++++++++++++++++--
>  2 files changed, 34 insertions(+), 2 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index e51c6e953d7a..7ae1b94becf3 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -1275,6 +1275,18 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
>  
>  extern int memcg_expand_shrinker_maps(int new_id);
>  
> +static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
> +					  int nid, int shrinker_id)
> +{

> +	if (shrinker_id >= 0 && memcg && memcg != root_mem_cgroup) {

Nit: I'd remove these checks from this function and require the caller
to check that shrinker_id >= 0 and memcg != NULL or root_mem_cgroup.
See below how the call sites would look then.

> +		struct memcg_shrinker_map *map;
> +
> +		rcu_read_lock();
> +		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
> +		set_bit(shrinker_id, map->map);
> +		rcu_read_unlock();
> +	}
> +}
>  #else
>  #define for_each_memcg_cache_index(_idx)	\
>  	for (; NULL; )
> @@ -1297,6 +1309,8 @@ static inline void memcg_put_cache_ids(void)
>  {
>  }
>  
> +static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
> +					  int nid, int shrinker_id) { }
>  #endif /* CONFIG_MEMCG_KMEM */
>  
>  #endif /* _LINUX_MEMCONTROL_H */
> diff --git a/mm/list_lru.c b/mm/list_lru.c
> index cab8fad7f7e2..7df71ab0de1c 100644
> --- a/mm/list_lru.c
> +++ b/mm/list_lru.c
> @@ -31,6 +31,11 @@ static void list_lru_unregister(struct list_lru *lru)
>  	mutex_unlock(&list_lrus_mutex);
>  }
>  
> +static int lru_shrinker_id(struct list_lru *lru)
> +{
> +	return lru->shrinker_id;
> +}
> +
>  static inline bool list_lru_memcg_aware(struct list_lru *lru)
>  {
>  	/*
> @@ -94,6 +99,11 @@ static void list_lru_unregister(struct list_lru *lru)
>  {
>  }
>  
> +static int lru_shrinker_id(struct list_lru *lru)
> +{
> +	return -1;
> +}
> +
>  static inline bool list_lru_memcg_aware(struct list_lru *lru)
>  {
>  	return false;
> @@ -119,13 +129,17 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
>  {
>  	int nid = page_to_nid(virt_to_page(item));
>  	struct list_lru_node *nlru = &lru->node[nid];
> +	struct mem_cgroup *memcg;
>  	struct list_lru_one *l;
>  
>  	spin_lock(&nlru->lock);
>  	if (list_empty(item)) {
> -		l = list_lru_from_kmem(nlru, item, NULL);
> +		l = list_lru_from_kmem(nlru, item, &memcg);
>  		list_add_tail(item, &l->list);
> -		l->nr_items++;
> +		/* Set shrinker bit if the first element was added */
> +		if (!l->nr_items++)
> +			memcg_set_shrinker_bit(memcg, nid,
> +					       lru_shrinker_id(lru));

This would turn into

	if (!l->nr_items++ && memcg)
		memcg_set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));

Note, you don't need to check that lru_shrinker_id(lru) is >= 0 here as
the fact that memcg != NULL guarantees that. Also, memcg can't be
root_mem_cgroup here as kmem objects allocated for the root cgroup go
unaccounted.

>  		nlru->nr_items++;
>  		spin_unlock(&nlru->lock);
>  		return true;
> @@ -520,6 +534,7 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
>  	struct list_lru_node *nlru = &lru->node[nid];
>  	int dst_idx = dst_memcg->kmemcg_id;
>  	struct list_lru_one *src, *dst;
> +	bool set;
>  
>  	/*
>  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
> @@ -531,7 +546,10 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
>  	dst = list_lru_from_memcg_idx(nlru, dst_idx);
>  
>  	list_splice_init(&src->list, &dst->list);
> +	set = (!dst->nr_items && src->nr_items);
>  	dst->nr_items += src->nr_items;
> +	if (set)
> +		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));

This would turn into

	if (set && dst_idx >= 0)
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));

Again, the shrinker is guaranteed to be memcg aware in this function and
dst_memcg != NULL.

IMHO such a change would make the code a bit more straightforward.

>  	src->nr_items = 0;
>  
>  	spin_unlock_irq(&nlru->lock);
>
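
To make the suggestion concrete: with the checks moved to the callers,
the helper itself would reduce to the unconditional body below (a
sketch derived from the posted patch, not code from the thread; callers
would have to guarantee memcg != NULL, memcg != root_mem_cgroup and
shrinker_id >= 0):

	/*
	 * Sketch of memcg_set_shrinker_bit() under the review
	 * suggestion: the corner-case checks live at the call sites,
	 * so the helper sets the bit unconditionally.
	 */
	static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
						  int nid, int shrinker_id)
	{
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}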
Kirill Tkhai May 21, 2018, 9:31 a.m. UTC | #2
On 20.05.2018 10:55, Vladimir Davydov wrote:
> On Fri, May 18, 2018 at 11:43:42AM +0300, Kirill Tkhai wrote:
>> Introduce the memcg_set_shrinker_bit() function to set a
>> shrinker-related bit in the memcg shrinker bitmap. The bit is set after
>> the first item is added to a list_lru, and when reparenting a destroyed
>> memcg's items.
>>
>> This will allow the next patch to call shrinkers only if they have
>> charged objects at the moment, improving shrink_slab() performance.
>>
>> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
>> ---
>>  include/linux/memcontrol.h |   14 ++++++++++++++
>>  mm/list_lru.c              |   22 ++++++++++++++++++++--
>>  2 files changed, 34 insertions(+), 2 deletions(-)
>>
>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>> index e51c6e953d7a..7ae1b94becf3 100644
>> --- a/include/linux/memcontrol.h
>> +++ b/include/linux/memcontrol.h
>> @@ -1275,6 +1275,18 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
>>  
>>  extern int memcg_expand_shrinker_maps(int new_id);
>>  
>> +static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
>> +					  int nid, int shrinker_id)
>> +{
> 
>> +	if (shrinker_id >= 0 && memcg && memcg != root_mem_cgroup) {
> 
> Nit: I'd remove these checks from this function and require the caller
> to check that shrinker_id >= 0 and memcg != NULL or root_mem_cgroup.
> See below how the call sites would look then.
> 
>> +		struct memcg_shrinker_map *map;
>> +
>> +		rcu_read_lock();
>> +		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
>> +		set_bit(shrinker_id, map->map);
>> +		rcu_read_unlock();
>> +	}
>> +}
>>  #else
>>  #define for_each_memcg_cache_index(_idx)	\
>>  	for (; NULL; )
>> @@ -1297,6 +1309,8 @@ static inline void memcg_put_cache_ids(void)
>>  {
>>  }
>>  
>> +static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
>> +					  int nid, int shrinker_id) { }
>>  #endif /* CONFIG_MEMCG_KMEM */
>>  
>>  #endif /* _LINUX_MEMCONTROL_H */
>> diff --git a/mm/list_lru.c b/mm/list_lru.c
>> index cab8fad7f7e2..7df71ab0de1c 100644
>> --- a/mm/list_lru.c
>> +++ b/mm/list_lru.c
>> @@ -31,6 +31,11 @@ static void list_lru_unregister(struct list_lru *lru)
>>  	mutex_unlock(&list_lrus_mutex);
>>  }
>>  
>> +static int lru_shrinker_id(struct list_lru *lru)
>> +{
>> +	return lru->shrinker_id;
>> +}
>> +
>>  static inline bool list_lru_memcg_aware(struct list_lru *lru)
>>  {
>>  	/*
>> @@ -94,6 +99,11 @@ static void list_lru_unregister(struct list_lru *lru)
>>  {
>>  }
>>  
>> +static int lru_shrinker_id(struct list_lru *lru)
>> +{
>> +	return -1;
>> +}
>> +
>>  static inline bool list_lru_memcg_aware(struct list_lru *lru)
>>  {
>>  	return false;
>> @@ -119,13 +129,17 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
>>  {
>>  	int nid = page_to_nid(virt_to_page(item));
>>  	struct list_lru_node *nlru = &lru->node[nid];
>> +	struct mem_cgroup *memcg;
>>  	struct list_lru_one *l;
>>  
>>  	spin_lock(&nlru->lock);
>>  	if (list_empty(item)) {
>> -		l = list_lru_from_kmem(nlru, item, NULL);
>> +		l = list_lru_from_kmem(nlru, item, &memcg);
>>  		list_add_tail(item, &l->list);
>> -		l->nr_items++;
>> +		/* Set shrinker bit if the first element was added */
>> +		if (!l->nr_items++)
>> +			memcg_set_shrinker_bit(memcg, nid,
>> +					       lru_shrinker_id(lru));
> 
> This would turn into
> 
> 	if (!l->nr_items++ && memcg)
> 		memcg_set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
> 
> Note, you don't need to check that lru_shrinker_id(lru) is >= 0 here as
> the fact that memcg != NULL guarantees that. Also, memcg can't be
> root_mem_cgroup here as kmem objects allocated for the root cgroup go
> unaccounted.
> 
>>  		nlru->nr_items++;
>>  		spin_unlock(&nlru->lock);
>>  		return true;
>> @@ -520,6 +534,7 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
>>  	struct list_lru_node *nlru = &lru->node[nid];
>>  	int dst_idx = dst_memcg->kmemcg_id;
>>  	struct list_lru_one *src, *dst;
>> +	bool set;
>>  
>>  	/*
>>  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
>> @@ -531,7 +546,10 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
>>  	dst = list_lru_from_memcg_idx(nlru, dst_idx);
>>  
>>  	list_splice_init(&src->list, &dst->list);
>> +	set = (!dst->nr_items && src->nr_items);
>>  	dst->nr_items += src->nr_items;
>> +	if (set)
>> +		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
> 
> This would turn into
> 
> 	if (set && dst_idx >= 0)
> 		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
> 
> Again, the shrinker is guaranteed to be memcg aware in this function and
> dst_memcg != NULL.
> 
> IMHO such a change would make the code a bit more straightforward.

IMHO, this makes the code less readable. Using a single generic function
with a generic check is easier than using two different checks in
different places. Then a person who modifies the logic does not have to
think about the particulars of strange checks in list_lru_add() and
memcg_drain_list_lru_node() if he/she is not involved in changing the
maps logic. The memory cgroup code has already fallen into many corner
cases; let's not introduce them in new places.

>>  	src->nr_items = 0;
>>  
>>  	spin_unlock_irq(&nlru->lock);

Kirill
Vladimir Davydov May 21, 2018, 6:16 p.m. UTC | #3
On Mon, May 21, 2018 at 12:31:34PM +0300, Kirill Tkhai wrote:
> On 20.05.2018 10:55, Vladimir Davydov wrote:
> > On Fri, May 18, 2018 at 11:43:42AM +0300, Kirill Tkhai wrote:
> >> Introduce the memcg_set_shrinker_bit() function to set a
> >> shrinker-related bit in the memcg shrinker bitmap. The bit is set
> >> after the first item is added to a list_lru, and when reparenting a
> >> destroyed memcg's items.
> >>
> >> This will allow the next patch to call shrinkers only if they have
> >> charged objects at the moment, improving shrink_slab() performance.
> >>
> >> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
> >> ---
> >>  include/linux/memcontrol.h |   14 ++++++++++++++
> >>  mm/list_lru.c              |   22 ++++++++++++++++++++--
> >>  2 files changed, 34 insertions(+), 2 deletions(-)
> >>
> >> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> >> index e51c6e953d7a..7ae1b94becf3 100644
> >> --- a/include/linux/memcontrol.h
> >> +++ b/include/linux/memcontrol.h
> >> @@ -1275,6 +1275,18 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
> >>  
> >>  extern int memcg_expand_shrinker_maps(int new_id);
> >>  
> >> +static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
> >> +					  int nid, int shrinker_id)
> >> +{
> > 
> >> +	if (shrinker_id >= 0 && memcg && memcg != root_mem_cgroup) {
> > 
> > Nit: I'd remove these checks from this function and require the caller
> > to check that shrinker_id >= 0 and memcg != NULL or root_mem_cgroup.
> > See below how the call sites would look then.
> > 
> >> +		struct memcg_shrinker_map *map;
> >> +
> >> +		rcu_read_lock();
> >> +		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
> >> +		set_bit(shrinker_id, map->map);
> >> +		rcu_read_unlock();
> >> +	}
> >> +}
> >>  #else
> >>  #define for_each_memcg_cache_index(_idx)	\
> >>  	for (; NULL; )
> >> @@ -1297,6 +1309,8 @@ static inline void memcg_put_cache_ids(void)
> >>  {
> >>  }
> >>  
> >> +static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
> >> +					  int nid, int shrinker_id) { }
> >>  #endif /* CONFIG_MEMCG_KMEM */
> >>  
> >>  #endif /* _LINUX_MEMCONTROL_H */
> >> diff --git a/mm/list_lru.c b/mm/list_lru.c
> >> index cab8fad7f7e2..7df71ab0de1c 100644
> >> --- a/mm/list_lru.c
> >> +++ b/mm/list_lru.c
> >> @@ -31,6 +31,11 @@ static void list_lru_unregister(struct list_lru *lru)
> >>  	mutex_unlock(&list_lrus_mutex);
> >>  }
> >>  
> >> +static int lru_shrinker_id(struct list_lru *lru)
> >> +{
> >> +	return lru->shrinker_id;
> >> +}
> >> +
> >>  static inline bool list_lru_memcg_aware(struct list_lru *lru)
> >>  {
> >>  	/*
> >> @@ -94,6 +99,11 @@ static void list_lru_unregister(struct list_lru *lru)
> >>  {
> >>  }
> >>  
> >> +static int lru_shrinker_id(struct list_lru *lru)
> >> +{
> >> +	return -1;
> >> +}
> >> +
> >>  static inline bool list_lru_memcg_aware(struct list_lru *lru)
> >>  {
> >>  	return false;
> >> @@ -119,13 +129,17 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
> >>  {
> >>  	int nid = page_to_nid(virt_to_page(item));
> >>  	struct list_lru_node *nlru = &lru->node[nid];
> >> +	struct mem_cgroup *memcg;
> >>  	struct list_lru_one *l;
> >>  
> >>  	spin_lock(&nlru->lock);
> >>  	if (list_empty(item)) {
> >> -		l = list_lru_from_kmem(nlru, item, NULL);
> >> +		l = list_lru_from_kmem(nlru, item, &memcg);
> >>  		list_add_tail(item, &l->list);
> >> -		l->nr_items++;
> >> +		/* Set shrinker bit if the first element was added */
> >> +		if (!l->nr_items++)
> >> +			memcg_set_shrinker_bit(memcg, nid,
> >> +					       lru_shrinker_id(lru));
> > 
> > This would turn into
> > 
> > 	if (!l->nr_items++ && memcg)
> > 		memcg_set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
> > 
> > Note, you don't need to check that lru_shrinker_id(lru) is >= 0 here as
> > the fact that memcg != NULL guarantees that. Also, memcg can't be
> > root_mem_cgroup here as kmem objects allocated for the root cgroup go
> > unaccounted.
> > 
> >>  		nlru->nr_items++;
> >>  		spin_unlock(&nlru->lock);
> >>  		return true;
> >> @@ -520,6 +534,7 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
> >>  	struct list_lru_node *nlru = &lru->node[nid];
> >>  	int dst_idx = dst_memcg->kmemcg_id;
> >>  	struct list_lru_one *src, *dst;
> >> +	bool set;
> >>  
> >>  	/*
> >>  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
> >> @@ -531,7 +546,10 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
> >>  	dst = list_lru_from_memcg_idx(nlru, dst_idx);
> >>  
> >>  	list_splice_init(&src->list, &dst->list);
> >> +	set = (!dst->nr_items && src->nr_items);
> >>  	dst->nr_items += src->nr_items;
> >> +	if (set)
> >> +		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
> > 
> > This would turn into
> > 
> > 	if (set && dst_idx >= 0)
> > 		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
> > 
> > Again, the shrinker is guaranteed to be memcg aware in this function and
> > dst_memcg != NULL.
> > 
> > IMHO such a change would make the code a bit more straightforward.
> 
> IMHO, this makes the code less readable. Using a single generic function
> with a generic check is easier than using two different checks in
> different places. Then a person who modifies the logic does not have to
> think about the particulars of strange checks in list_lru_add() and
> memcg_drain_list_lru_node() if he/she

I'd prefer them to think through all corner cases before touching this
code :-)

> is not involved in changing the maps logic. The memory cgroup code has
> already fallen into many corner cases; let's not introduce them in new
> places.

The reason I'd rather move those checks from memcg_set_shrinker_bit()
to the call sites is that, looking at the function code now, I'm left
wondering why this function has to turn into a no-op if shrinker_id < 0
or memcg is NULL, and why these corner cases are even possible. To
understand that, I have to look at all the places where this function
is called, which are located in a different source file. This is rather
inconvenient IMO. But I guess it's bikeshedding, so I don't insist.

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e51c6e953d7a..7ae1b94becf3 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1275,6 +1275,18 @@  static inline int memcg_cache_id(struct mem_cgroup *memcg)
 
 extern int memcg_expand_shrinker_maps(int new_id);
 
+static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+					  int nid, int shrinker_id)
+{
+	if (shrinker_id >= 0 && memcg && memcg != root_mem_cgroup) {
+		struct memcg_shrinker_map *map;
+
+		rcu_read_lock();
+		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
+		set_bit(shrinker_id, map->map);
+		rcu_read_unlock();
+	}
+}
 #else
 #define for_each_memcg_cache_index(_idx)	\
 	for (; NULL; )
@@ -1297,6 +1309,8 @@  static inline void memcg_put_cache_ids(void)
 {
 }
 
+static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+					  int nid, int shrinker_id) { }
 #endif /* CONFIG_MEMCG_KMEM */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/list_lru.c b/mm/list_lru.c
index cab8fad7f7e2..7df71ab0de1c 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -31,6 +31,11 @@  static void list_lru_unregister(struct list_lru *lru)
 	mutex_unlock(&list_lrus_mutex);
 }
 
+static int lru_shrinker_id(struct list_lru *lru)
+{
+	return lru->shrinker_id;
+}
+
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
 	/*
@@ -94,6 +99,11 @@  static void list_lru_unregister(struct list_lru *lru)
 {
 }
 
+static int lru_shrinker_id(struct list_lru *lru)
+{
+	return -1;
+}
+
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
 	return false;
@@ -119,13 +129,17 @@  bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
 	int nid = page_to_nid(virt_to_page(item));
 	struct list_lru_node *nlru = &lru->node[nid];
+	struct mem_cgroup *memcg;
 	struct list_lru_one *l;
 
 	spin_lock(&nlru->lock);
 	if (list_empty(item)) {
-		l = list_lru_from_kmem(nlru, item, NULL);
+		l = list_lru_from_kmem(nlru, item, &memcg);
 		list_add_tail(item, &l->list);
-		l->nr_items++;
+		/* Set shrinker bit if the first element was added */
+		if (!l->nr_items++)
+			memcg_set_shrinker_bit(memcg, nid,
+					       lru_shrinker_id(lru));
 		nlru->nr_items++;
 		spin_unlock(&nlru->lock);
 		return true;
@@ -520,6 +534,7 @@  static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
 	struct list_lru_node *nlru = &lru->node[nid];
 	int dst_idx = dst_memcg->kmemcg_id;
 	struct list_lru_one *src, *dst;
+	bool set;
 
 	/*
 	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
@@ -531,7 +546,10 @@  static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
 	dst = list_lru_from_memcg_idx(nlru, dst_idx);
 
 	list_splice_init(&src->list, &dst->list);
+	set = (!dst->nr_items && src->nr_items);
 	dst->nr_items += src->nr_items;
+	if (set)
+		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
 	src->nr_items = 0;
 
 	spin_unlock_irq(&nlru->lock);