
[v4,08/11] mm: vmscan: use per memcg nr_deferred of shrinker

Message ID 20210121230621.654304-9-shy828301@gmail.com (mailing list archive)
State New
Headers show
Series Make shrinker's nr_deferred memcg aware

Commit Message

Yang Shi Jan. 21, 2021, 11:06 p.m. UTC
Use per memcg's nr_deferred for memcg aware shrinkers.  The shrinker's nr_deferred
will be used in the following cases:
    1. Non memcg aware shrinkers
    2. !CONFIG_MEMCG
    3. memcg is disabled by boot parameter
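
That is, the selection boils down to the following (a sketch mirroring
count_nr_deferred() in the diff below):

	if (sc->memcg && (shrinker->flags & SHRINKER_MEMCG_AWARE))
		nr = count_nr_deferred_memcg(nid, shrinker, sc->memcg); /* per-memcg nr_deferred */
	else
		nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);  /* cases 1-3 above */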

Signed-off-by: Yang Shi <shy828301@gmail.com>
---
 mm/vmscan.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 69 insertions(+), 12 deletions(-)

Comments

Kirill Tkhai Jan. 25, 2021, 9:35 a.m. UTC | #1
On 22.01.2021 02:06, Yang Shi wrote:
> Use per memcg's nr_deferred for memcg aware shrinkers.  The shrinker's nr_deferred
> will be used in the following cases:
>     1. Non memcg aware shrinkers
>     2. !CONFIG_MEMCG
>     3. memcg is disabled by boot parameter
> 
> Signed-off-by: Yang Shi <shy828301@gmail.com>
> ---
>  mm/vmscan.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 69 insertions(+), 12 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 722aa71b13b2..d8e77ea13815 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -359,6 +359,27 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
>  	up_write(&shrinker_rwsem);
>  }
>  
> +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> +				    struct mem_cgroup *memcg)
> +{
> +	struct shrinker_info *info;
> +
> +	info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> +					 true);

Since these rcu_dereference_protected() calls are now in separate functions and no
lock is taken near them, it seems it would be better to make the required lock
explicit with rcu_dereference_protected(..., lockdep_assert_held(lock_you_need_here_locked));


> +	return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
> +}
> +
> +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> +				  struct mem_cgroup *memcg)
> +{
> +	struct shrinker_info *info;
> +
> +	info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> +					 true);
> +
> +	return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
> +}
> +
>  static bool cgroup_reclaim(struct scan_control *sc)
>  {
>  	return sc->target_mem_cgroup;
> @@ -397,6 +418,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
>  {
>  }
>  
> +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> +				    struct mem_cgroup *memcg)
> +{
> +	return 0;
> +}
> +
> +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> +				  struct mem_cgroup *memcg)
> +{
> +	return 0;
> +}
> +
>  static bool cgroup_reclaim(struct scan_control *sc)
>  {
>  	return false;
> @@ -408,6 +441,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
>  }
>  #endif
>  
> +static long count_nr_deferred(struct shrinker *shrinker,
> +			      struct shrink_control *sc)
> +{
> +	int nid = sc->nid;
> +
> +	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> +		nid = 0;
> +
> +	if (sc->memcg &&
> +	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
> +		return count_nr_deferred_memcg(nid, shrinker,
> +					       sc->memcg);
> +
> +	return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> +}
> +
> +
> +static long set_nr_deferred(long nr, struct shrinker *shrinker,
> +			    struct shrink_control *sc)
> +{
> +	int nid = sc->nid;
> +
> +	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> +		nid = 0;
> +
> +	if (sc->memcg &&
> +	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
> +		return set_nr_deferred_memcg(nr, nid, shrinker,
> +					     sc->memcg);
> +
> +	return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
> +}
> +
>  /*
>   * This misses isolated pages which are not accounted for to save counters.
>   * As the data only determines if reclaim or compaction continues, it is
> @@ -544,14 +610,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>  	long freeable;
>  	long nr;
>  	long new_nr;
> -	int nid = shrinkctl->nid;
>  	long batch_size = shrinker->batch ? shrinker->batch
>  					  : SHRINK_BATCH;
>  	long scanned = 0, next_deferred;
>  
> -	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> -		nid = 0;
> -
>  	freeable = shrinker->count_objects(shrinker, shrinkctl);
>  	if (freeable == 0 || freeable == SHRINK_EMPTY)
>  		return freeable;
> @@ -561,7 +623,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>  	 * and zero it so that other concurrent shrinker invocations
>  	 * don't also do this scanning work.
>  	 */
> -	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> +	nr = count_nr_deferred(shrinker, shrinkctl);
>  
>  	total_scan = nr;
>  	if (shrinker->seeks) {
> @@ -652,14 +714,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>  		next_deferred = 0;
>  	/*
>  	 * move the unused scan count back into the shrinker in a
> -	 * manner that handles concurrent updates. If we exhausted the
> -	 * scan, there is no need to do an update.
> +	 * manner that handles concurrent updates.
>  	 */
> -	if (next_deferred > 0)
> -		new_nr = atomic_long_add_return(next_deferred,
> -						&shrinker->nr_deferred[nid]);
> -	else
> -		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
> +	new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
>  
>  	trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
>  	return freed;
>
Yang Shi Jan. 25, 2021, 9:16 p.m. UTC | #2
On Mon, Jan 25, 2021 at 1:35 AM Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>
> On 22.01.2021 02:06, Yang Shi wrote:
> > Use per memcg's nr_deferred for memcg aware shrinkers.  The shrinker's nr_deferred
> > will be used in the following cases:
> >     1. Non memcg aware shrinkers
> >     2. !CONFIG_MEMCG
> >     3. memcg is disabled by boot parameter
> >
> > Signed-off-by: Yang Shi <shy828301@gmail.com>
> > ---
> >  mm/vmscan.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--------
> >  1 file changed, 69 insertions(+), 12 deletions(-)
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 722aa71b13b2..d8e77ea13815 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -359,6 +359,27 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
> >       up_write(&shrinker_rwsem);
> >  }
> >
> > +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> > +                                 struct mem_cgroup *memcg)
> > +{
> > +     struct shrinker_info *info;
> > +
> > +     info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> > +                                      true);
>
> Since these rcu_dereference_protected() calls are now in separate functions and no
> lock is taken near them, it seems it would be better to make the required lock
> explicit with rcu_dereference_protected(..., lockdep_assert_held(lock_you_need_here_locked));

Sure, will incorporate in v5. BTW, I noticed that using lockdep_assert_held()
as the condition argument of rcu_dereference_protected() results in a
compilation failure with gcc 10.0.1 (shipped with Fedora 32), but builds fine
with gcc 8.3.1.

In file included from ./include/linux/rbtree.h:22,
                 from ./include/linux/mm_types.h:10,
                 from ./include/linux/mmzone.h:21,
                 from ./include/linux/gfp.h:6,
                 from ./include/linux/mm.h:10,
                 from mm/vmscan.c:15:
mm/vmscan.c: In function ‘shrinker_info_protected’:
./include/linux/lockdep.h:386:34: error: expected expression before ‘do’
  386 | #define lockdep_assert_held(l)   do { (void)(l); } while (0)
      |                                  ^~
./include/linux/rcupdate.h:337:52: note: in definition of macro ‘RCU_LOCKDEP_WARN’
  337 | #define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
      |                                                    ^
./include/linux/rcupdate.h:554:2: note: in expansion of macro ‘__rcu_dereference_protected’
  554 |  __rcu_dereference_protected((p), (c), __rcu)
      |  ^~~~~~~~~~~~~~~~~~~~~~~~~~~
mm/vmscan.c:389:9: note: in expansion of macro ‘rcu_dereference_protected’
  389 |  return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
      |         ^~~~~~~~~~~~~~~~~~~~~~~~~
mm/vmscan.c:390:7: note: in expansion of macro ‘lockdep_assert_held’
  390 |       lockdep_assert_held(&shrinker_rwsem));
      |       ^~~~~~~~~~~~~~~~~~~


I didn't dig into the root cause (the error suggests lockdep_assert_held()
expands to a do { ... } while (0) statement, which can't be used where an
expression is expected), so I'll just use lockdep_is_held() instead of
lockdep_assert_held().
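
The helper in v5 (shrinker_info_protected() in the build log above) would then
look roughly like this (just a sketch; the exact signature may differ):

	static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
							     int nid)
	{
		/* lockdep_is_held() is an expression, so it is valid as the
		 * rcu_dereference_protected() condition; with lockdep enabled
		 * it checks that the caller holds shrinker_rwsem. */
		return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
						 lockdep_is_held(&shrinker_rwsem));
	}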

>
>
> > +     return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
> > +}
> > +
> > +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> > +                               struct mem_cgroup *memcg)
> > +{
> > +     struct shrinker_info *info;
> > +
> > +     info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> > +                                      true);
> > +
> > +     return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
> > +}
> > +
> >  static bool cgroup_reclaim(struct scan_control *sc)
> >  {
> >       return sc->target_mem_cgroup;
> > @@ -397,6 +418,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
> >  {
> >  }
> >
> > +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> > +                                 struct mem_cgroup *memcg)
> > +{
> > +     return 0;
> > +}
> > +
> > +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> > +                               struct mem_cgroup *memcg)
> > +{
> > +     return 0;
> > +}
> > +
> >  static bool cgroup_reclaim(struct scan_control *sc)
> >  {
> >       return false;
> > @@ -408,6 +441,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
> >  }
> >  #endif
> >
> > +static long count_nr_deferred(struct shrinker *shrinker,
> > +                           struct shrink_control *sc)
> > +{
> > +     int nid = sc->nid;
> > +
> > +     if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> > +             nid = 0;
> > +
> > +     if (sc->memcg &&
> > +         (shrinker->flags & SHRINKER_MEMCG_AWARE))
> > +             return count_nr_deferred_memcg(nid, shrinker,
> > +                                            sc->memcg);
> > +
> > +     return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> > +}
> > +
> > +
> > +static long set_nr_deferred(long nr, struct shrinker *shrinker,
> > +                         struct shrink_control *sc)
> > +{
> > +     int nid = sc->nid;
> > +
> > +     if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> > +             nid = 0;
> > +
> > +     if (sc->memcg &&
> > +         (shrinker->flags & SHRINKER_MEMCG_AWARE))
> > +             return set_nr_deferred_memcg(nr, nid, shrinker,
> > +                                          sc->memcg);
> > +
> > +     return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
> > +}
> > +
> >  /*
> >   * This misses isolated pages which are not accounted for to save counters.
> >   * As the data only determines if reclaim or compaction continues, it is
> > @@ -544,14 +610,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> >       long freeable;
> >       long nr;
> >       long new_nr;
> > -     int nid = shrinkctl->nid;
> >       long batch_size = shrinker->batch ? shrinker->batch
> >                                         : SHRINK_BATCH;
> >       long scanned = 0, next_deferred;
> >
> > -     if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> > -             nid = 0;
> > -
> >       freeable = shrinker->count_objects(shrinker, shrinkctl);
> >       if (freeable == 0 || freeable == SHRINK_EMPTY)
> >               return freeable;
> > @@ -561,7 +623,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> >        * and zero it so that other concurrent shrinker invocations
> >        * don't also do this scanning work.
> >        */
> > -     nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> > +     nr = count_nr_deferred(shrinker, shrinkctl);
> >
> >       total_scan = nr;
> >       if (shrinker->seeks) {
> > @@ -652,14 +714,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> >               next_deferred = 0;
> >       /*
> >        * move the unused scan count back into the shrinker in a
> > -      * manner that handles concurrent updates. If we exhausted the
> > -      * scan, there is no need to do an update.
> > +      * manner that handles concurrent updates.
> >        */
> > -     if (next_deferred > 0)
> > -             new_nr = atomic_long_add_return(next_deferred,
> > -                                             &shrinker->nr_deferred[nid]);
> > -     else
> > -             new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
> > +     new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
> >
> >       trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
> >       return freed;
> >
>
>

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 722aa71b13b2..d8e77ea13815 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -359,6 +359,27 @@  static void unregister_memcg_shrinker(struct shrinker *shrinker)
 	up_write(&shrinker_rwsem);
 }
 
+static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+				    struct mem_cgroup *memcg)
+{
+	struct shrinker_info *info;
+
+	info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+					 true);
+	return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
+}
+
+static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+				  struct mem_cgroup *memcg)
+{
+	struct shrinker_info *info;
+
+	info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+					 true);
+
+	return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
 	return sc->target_mem_cgroup;
@@ -397,6 +418,18 @@  static void unregister_memcg_shrinker(struct shrinker *shrinker)
 {
 }
 
+static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+				    struct mem_cgroup *memcg)
+{
+	return 0;
+}
+
+static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+				  struct mem_cgroup *memcg)
+{
+	return 0;
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
 	return false;
@@ -408,6 +441,39 @@  static bool writeback_throttling_sane(struct scan_control *sc)
 }
 #endif
 
+static long count_nr_deferred(struct shrinker *shrinker,
+			      struct shrink_control *sc)
+{
+	int nid = sc->nid;
+
+	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+		nid = 0;
+
+	if (sc->memcg &&
+	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
+		return count_nr_deferred_memcg(nid, shrinker,
+					       sc->memcg);
+
+	return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+}
+
+
+static long set_nr_deferred(long nr, struct shrinker *shrinker,
+			    struct shrink_control *sc)
+{
+	int nid = sc->nid;
+
+	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+		nid = 0;
+
+	if (sc->memcg &&
+	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
+		return set_nr_deferred_memcg(nr, nid, shrinker,
+					     sc->memcg);
+
+	return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
+}
+
 /*
  * This misses isolated pages which are not accounted for to save counters.
  * As the data only determines if reclaim or compaction continues, it is
@@ -544,14 +610,10 @@  static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	long freeable;
 	long nr;
 	long new_nr;
-	int nid = shrinkctl->nid;
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 	long scanned = 0, next_deferred;
 
-	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
-		nid = 0;
-
 	freeable = shrinker->count_objects(shrinker, shrinkctl);
 	if (freeable == 0 || freeable == SHRINK_EMPTY)
 		return freeable;
@@ -561,7 +623,7 @@  static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	 * and zero it so that other concurrent shrinker invocations
 	 * don't also do this scanning work.
 	 */
-	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+	nr = count_nr_deferred(shrinker, shrinkctl);
 
 	total_scan = nr;
 	if (shrinker->seeks) {
@@ -652,14 +714,9 @@  static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 		next_deferred = 0;
 	/*
 	 * move the unused scan count back into the shrinker in a
-	 * manner that handles concurrent updates. If we exhausted the
-	 * scan, there is no need to do an update.
+	 * manner that handles concurrent updates.
 	 */
-	if (next_deferred > 0)
-		new_nr = atomic_long_add_return(next_deferred,
-						&shrinker->nr_deferred[nid]);
-	else
-		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
+	new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
 
 	trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
 	return freed;