Message ID | 20210127233345.339910-5-shy828301@gmail.com (mailing list archive)
---|---
State | New, archived
Series | Make shrinker's nr_deferred memcg aware
On 1/28/21 12:33 AM, Yang Shi wrote:
> Both memcg_shrinker_map_size and shrinker_nr_max is maintained, but actually the
> map size can be calculated via shrinker_nr_max, so it seems unnecessary to keep both.
> Remove memcg_shrinker_map_size since shrinker_nr_max is also used by iterating the
> bit map.
>
> Signed-off-by: Yang Shi <shy828301@gmail.com>
> ---
>  mm/vmscan.c | 18 ++++++++----------
>  1 file changed, 8 insertions(+), 10 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index d3f3701dfcd2..847369c19775 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list);
>  static DECLARE_RWSEM(shrinker_rwsem);
>
>  #ifdef CONFIG_MEMCG
> -
> -static int memcg_shrinker_map_size;
> +static int shrinker_nr_max;
>
>  static void free_shrinker_map_rcu(struct rcu_head *head)
>  {
> @@ -248,7 +247,7 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
>          return 0;
>
>      down_write(&shrinker_rwsem);
> -    size = memcg_shrinker_map_size;
> +    size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
>      for_each_node(nid) {
>          map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
>          if (!map) {
> @@ -266,12 +265,13 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
>  static int expand_shrinker_maps(int new_id)
>  {
>      int size, old_size, ret = 0;
> +    int new_nr_max = new_id + 1;
>      struct mem_cgroup *memcg;
>
> -    size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
> -    old_size = memcg_shrinker_map_size;
> +    size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> +    old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);

What's wrong with using DIV_ROUND_UP() here?

>      if (size <= old_size)
> -        return 0;
> +        goto out;

Can this even happen? Seems to me it can't, so just remove this?

>
>      if (!root_mem_cgroup)
>          goto out;
> @@ -286,9 +286,10 @@ static int expand_shrinker_maps(int new_id)
>              goto out;
>          }
>      } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
> +
>  out:
>      if (!ret)
> -        memcg_shrinker_map_size = size;
> +        shrinker_nr_max = new_nr_max;
>
>      return ret;
>  }
> @@ -321,7 +322,6 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
>  #define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
>
>  static DEFINE_IDR(shrinker_idr);
> -static int shrinker_nr_max;
>
>  static int prealloc_memcg_shrinker(struct shrinker *shrinker)
>  {
> @@ -338,8 +338,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
>              idr_remove(&shrinker_idr, id);
>              goto unlock;
>          }
> -
> -        shrinker_nr_max = id + 1;
>      }
>      shrinker->id = id;
>      ret = 0;
>
On Thu, Jan 28, 2021 at 8:53 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 1/28/21 12:33 AM, Yang Shi wrote:
> > Both memcg_shrinker_map_size and shrinker_nr_max is maintained, but actually the
> > map size can be calculated via shrinker_nr_max, so it seems unnecessary to keep both.
> > Remove memcg_shrinker_map_size since shrinker_nr_max is also used by iterating the
> > bit map.
> >
> > Signed-off-by: Yang Shi <shy828301@gmail.com>
> > ---
> >  mm/vmscan.c | 18 ++++++++----------
> >  1 file changed, 8 insertions(+), 10 deletions(-)
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index d3f3701dfcd2..847369c19775 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list);
> >  static DECLARE_RWSEM(shrinker_rwsem);
> >
> >  #ifdef CONFIG_MEMCG
> > -
> > -static int memcg_shrinker_map_size;
> > +static int shrinker_nr_max;
> >
> >  static void free_shrinker_map_rcu(struct rcu_head *head)
> >  {
> > @@ -248,7 +247,7 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
> >          return 0;
> >
> >      down_write(&shrinker_rwsem);
> > -    size = memcg_shrinker_map_size;
> > +    size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> >      for_each_node(nid) {
> >          map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
> >          if (!map) {
> > @@ -266,12 +265,13 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
> >  static int expand_shrinker_maps(int new_id)
> >  {
> >      int size, old_size, ret = 0;
> > +    int new_nr_max = new_id + 1;
> >      struct mem_cgroup *memcg;
> >
> > -    size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
> > -    old_size = memcg_shrinker_map_size;
> > +    size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > +    old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
>
> What's wrong with using DIV_ROUND_UP() here?

I don't think there is anything wrong with DIV_ROUND_UP. Should be
just different taste and result in shorter statement.

>
> >      if (size <= old_size)
> > -        return 0;
> > +        goto out;
>
> Can this even happen? Seems to me it can't, so just remove this?

Yes, it can. The maps use unsigned long value for bitmap, so any
shrinker ID < 31 would fall into the same unsigned long, so we may see
size <= old_size, but we need increase shrinker_nr_max since
expand_shrinker_maps() is called iff id >= shrinker_nr_max.

>
> >
> >      if (!root_mem_cgroup)
> >          goto out;
> > @@ -286,9 +286,10 @@ static int expand_shrinker_maps(int new_id)
> >              goto out;
> >          }
> >      } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
> > +
> >  out:
> >      if (!ret)
> > -        memcg_shrinker_map_size = size;
> > +        shrinker_nr_max = new_nr_max;
> >
> >      return ret;
> >  }
> > @@ -321,7 +322,6 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
> >  #define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
> >
> >  static DEFINE_IDR(shrinker_idr);
> > -static int shrinker_nr_max;
> >
> >  static int prealloc_memcg_shrinker(struct shrinker *shrinker)
> >  {
> > @@ -338,8 +338,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
> >              idr_remove(&shrinker_idr, id);
> >              goto unlock;
> >          }
> > -
> > -        shrinker_nr_max = id + 1;
> >      }
> >      shrinker->id = id;
> >      ret = 0;
> >
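To make the word-sharing argument above concrete, here is a small standalone illustration (plain userspace C, not kernel code; BITS_PER_LONG is derived from the host's sizeof(unsigned long)): ids that land in the same unsigned long word need the same map size, which is why size <= old_size can occur even though the maximum id keeps growing.

```c
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
	for (int id = 0; id < 70; id += 17) {
		size_t word = id / BITS_PER_LONG;  /* which unsigned long holds this id's bit */
		size_t bit  = id % BITS_PER_LONG;  /* bit offset inside that word */

		/* ids 0, 17, 34, 51 share word 0 on 64-bit, so the map size is unchanged. */
		printf("shrinker id %2d -> word %zu, bit %2zu\n", id, word, bit);
	}
	return 0;
}
```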
On 1/28/21 10:22 PM, Yang Shi wrote:
>> > @@ -266,12 +265,13 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
>> >  static int expand_shrinker_maps(int new_id)
>> >  {
>> >      int size, old_size, ret = 0;
>> > +    int new_nr_max = new_id + 1;
>> >      struct mem_cgroup *memcg;
>> >
>> > -    size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
>> > -    old_size = memcg_shrinker_map_size;
>> > +    size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
>> > +    old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
>>
>> What's wrong with using DIV_ROUND_UP() here?
>
> I don't think there is anything wrong with DIV_ROUND_UP. Should be
> just different taste and result in shorter statement.

IMHO it's not just taste. DIV_ROUND_UP() says what it does and you don't need to
guess it from the math expression. Also your expression is shorter as it simply
adds + 1, so if shrinker_nr_max is a multiple of BITS_PER_LONG, there's an extra
unsigned long that shouldn't be needed. People reading that code will wonder
whether there was some non-obvious intention behind that, and possibly send
cleanup patches.

>>
>> >      if (size <= old_size)
>> > -        return 0;
>> > +        goto out;
>>
>> Can this even happen? Seems to me it can't, so just remove this?
>
> Yes, it can. The maps use unsigned long value for bitmap, so any
> shrinker ID < 31 would fall into the same unsigned long, so we may see
> size <= old_size, but we need increase shrinker_nr_max since
> expand_shrinker_maps() is called iff id >= shrinker_nr_max.

Ah, good point.
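A quick standalone comparison of the two expressions (again plain C outside the kernel, assuming the host's unsigned long, typically 64-bit) shows the extra word Vlastimil is referring to whenever the count is an exact multiple of BITS_PER_LONG.

```c
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	for (int nr = 1; nr <= 128; nr *= 2) {
		size_t rounded  = DIV_ROUND_UP(nr, BITS_PER_LONG) * sizeof(unsigned long);
		size_t plus_one = (nr / BITS_PER_LONG + 1) * sizeof(unsigned long);

		/* At nr = 64 and nr = 128 the "+ 1" form allocates one extra word. */
		printf("nr=%3d  DIV_ROUND_UP=%2zu bytes  '/ BITS_PER_LONG + 1'=%2zu bytes\n",
		       nr, rounded, plus_one);
	}
	return 0;
}
```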
On Fri, Jan 29, 2021 at 3:22 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 1/28/21 10:22 PM, Yang Shi wrote:
> >> > @@ -266,12 +265,13 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
> >> >  static int expand_shrinker_maps(int new_id)
> >> >  {
> >> >      int size, old_size, ret = 0;
> >> > +    int new_nr_max = new_id + 1;
> >> >      struct mem_cgroup *memcg;
> >> >
> >> > -    size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
> >> > -    old_size = memcg_shrinker_map_size;
> >> > +    size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> >> > +    old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> >>
> >> What's wrong with using DIV_ROUND_UP() here?
> >
> > I don't think there is anything wrong with DIV_ROUND_UP. Should be
> > just different taste and result in shorter statement.
>
> IMHO it's not just taste. DIV_ROUND_UP() says what it does and you don't need to
> guess it from the math expression. Also your expression is shorter as it simply
> adds + 1, so if shrinker_nr_max is a multiple of BITS_PER_LONG, there's an extra
> unsigned long that shouldn't be needed. People reading that code will wonder
> whether there was some non-obvious intention behind that, and possibly send
> cleanup patches.

OK, will replace back to DIV_ROUND_UP(). And, a helper macro is
introduced in patch #6, will add that helper in this patch and use
DIV_ROUND_UP() in the helper.

>
> >>
> >> >      if (size <= old_size)
> >> > -        return 0;
> >> > +        goto out;
> >>
> >> Can this even happen? Seems to me it can't, so just remove this?
> >
> > Yes, it can. The maps use unsigned long value for bitmap, so any
> > shrinker ID < 31 would fall into the same unsigned long, so we may see
> > size <= old_size, but we need increase shrinker_nr_max since
> > expand_shrinker_maps() is called iff id >= shrinker_nr_max.
>
> Ah, good point.
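As a rough, hypothetical sketch of what such a size helper could look like (the actual name and form introduced later in the series may differ; shown as standalone C with local stand-ins for the kernel's BITS_PER_LONG and DIV_ROUND_UP):

```c
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Bytes needed for a per-memcg shrinker bitmap covering nr_items shrinker ids. */
static inline int shrinker_map_size(int nr_items)
{
	return DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long);
}

int main(void)
{
	/* e.g. 65 registered shrinkers need two unsigned longs = 16 bytes on 64-bit. */
	printf("%d\n", shrinker_map_size(65));
	return 0;
}
```

With a helper like this, both alloc_shrinker_maps() and expand_shrinker_maps() can derive the allocation size from a shrinker count in one place instead of repeating the expression.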
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d3f3701dfcd2..847369c19775 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
 #ifdef CONFIG_MEMCG
-
-static int memcg_shrinker_map_size;
+static int shrinker_nr_max;
 
 static void free_shrinker_map_rcu(struct rcu_head *head)
 {
@@ -248,7 +247,7 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
         return 0;
 
     down_write(&shrinker_rwsem);
-    size = memcg_shrinker_map_size;
+    size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
     for_each_node(nid) {
         map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
         if (!map) {
@@ -266,12 +265,13 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
 static int expand_shrinker_maps(int new_id)
 {
     int size, old_size, ret = 0;
+    int new_nr_max = new_id + 1;
     struct mem_cgroup *memcg;
 
-    size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
-    old_size = memcg_shrinker_map_size;
+    size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
+    old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
     if (size <= old_size)
-        return 0;
+        goto out;
 
     if (!root_mem_cgroup)
         goto out;
@@ -286,9 +286,10 @@ static int expand_shrinker_maps(int new_id)
             goto out;
         }
     } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
+
 out:
     if (!ret)
-        memcg_shrinker_map_size = size;
+        shrinker_nr_max = new_nr_max;
 
     return ret;
 }
@@ -321,7 +322,6 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
 #define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
 
 static DEFINE_IDR(shrinker_idr);
-static int shrinker_nr_max;
 
 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 {
@@ -338,8 +338,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
             idr_remove(&shrinker_idr, id);
             goto unlock;
         }
-
-        shrinker_nr_max = id + 1;
     }
     shrinker->id = id;
     ret = 0;
Both memcg_shrinker_map_size and shrinker_nr_max are maintained, but the
map size can actually be calculated from shrinker_nr_max, so it seems
unnecessary to keep both. Remove memcg_shrinker_map_size, since
shrinker_nr_max is also used when iterating the bitmap.

Signed-off-by: Yang Shi <shy828301@gmail.com>
---
 mm/vmscan.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)