Message ID | 20210105225817.1036378-5-shy828301@gmail.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Make shrinker's nr_deferred memcg aware | expand |
On 06.01.2021 01:58, Yang Shi wrote: > Both memcg_shrinker_map_size and shrinker_nr_max is maintained, but actually the > map size can be calculated via shrinker_nr_max, so it seems unnecessary to keep both. > Remove memcg_shrinker_map_size since shrinker_nr_max is also used by iterating the > bit map. > > Signed-off-by: Yang Shi <shy828301@gmail.com> > --- > mm/vmscan.c | 12 ++++-------- > 1 file changed, 4 insertions(+), 8 deletions(-) > > diff --git a/mm/vmscan.c b/mm/vmscan.c > index ddb9f972f856..8da765a85569 100644 > --- a/mm/vmscan.c > +++ b/mm/vmscan.c > @@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list); > static DECLARE_RWSEM(shrinker_rwsem); > > #ifdef CONFIG_MEMCG > - > -static int memcg_shrinker_map_size; > +static int shrinker_nr_max; > > static void memcg_free_shrinker_map_rcu(struct rcu_head *head) > { > @@ -248,7 +247,7 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) > return 0; > > down_read(&shrinker_rwsem); > - size = memcg_shrinker_map_size; > + size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); > for_each_node(nid) { > map = kvzalloc(sizeof(*map) + size, GFP_KERNEL); > if (!map) { > @@ -269,7 +268,7 @@ static int memcg_expand_shrinker_maps(int new_id) > struct mem_cgroup *memcg; > > size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); > - old_size = memcg_shrinker_map_size; > + old_size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); > if (size <= old_size) > return 0; These bunch of DIV_ROUND_UP() looks too complex. Since now all the shrinker maps allocation logic in the only file, can't we simplify this to look better? 
I mean something like below to merge in your patch: diff --git a/mm/vmscan.c b/mm/vmscan.c index b951c289ef3a..27b6371a1656 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -247,7 +247,7 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) return 0; down_read(&shrinker_rwsem); - size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); + size = shrinker_nr_max / BITS_PER_BYTE; for_each_node(nid) { map = kvzalloc(sizeof(*map) + size, GFP_KERNEL); if (!map) { @@ -264,13 +264,11 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) static int memcg_expand_shrinker_maps(int new_id) { - int size, old_size, ret = 0; + int size, old_size, new_nr_max, ret = 0; struct mem_cgroup *memcg; size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); - old_size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); - if (size <= old_size) - return 0; + new_nr_max = size * BITS_PER_BYTE; if (!root_mem_cgroup) goto out; @@ -287,6 +285,9 @@ static int memcg_expand_shrinker_maps(int new_id) } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); out: + if (ret == 0) + shrinker_nr_max = new_nr_max; + return ret; } @@ -334,8 +335,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker) idr_remove(&shrinker_idr, id); goto unlock; } - - shrinker_nr_max = id + 1; } shrinker->id = id; ret = 0;
On Wed, Jan 6, 2021 at 2:16 AM Kirill Tkhai <ktkhai@virtuozzo.com> wrote: > > On 06.01.2021 01:58, Yang Shi wrote: > > Both memcg_shrinker_map_size and shrinker_nr_max is maintained, but actually the > > map size can be calculated via shrinker_nr_max, so it seems unnecessary to keep both. > > Remove memcg_shrinker_map_size since shrinker_nr_max is also used by iterating the > > bit map. > > > > Signed-off-by: Yang Shi <shy828301@gmail.com> > > --- > > mm/vmscan.c | 12 ++++-------- > > 1 file changed, 4 insertions(+), 8 deletions(-) > > > > diff --git a/mm/vmscan.c b/mm/vmscan.c > > index ddb9f972f856..8da765a85569 100644 > > --- a/mm/vmscan.c > > +++ b/mm/vmscan.c > > @@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list); > > static DECLARE_RWSEM(shrinker_rwsem); > > > > #ifdef CONFIG_MEMCG > > - > > -static int memcg_shrinker_map_size; > > +static int shrinker_nr_max; > > > > static void memcg_free_shrinker_map_rcu(struct rcu_head *head) > > { > > @@ -248,7 +247,7 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) > > return 0; > > > > down_read(&shrinker_rwsem); > > - size = memcg_shrinker_map_size; > > + size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); > > for_each_node(nid) { > > map = kvzalloc(sizeof(*map) + size, GFP_KERNEL); > > if (!map) { > > @@ -269,7 +268,7 @@ static int memcg_expand_shrinker_maps(int new_id) > > struct mem_cgroup *memcg; > > > > size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); > > - old_size = memcg_shrinker_map_size; > > + old_size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); > > if (size <= old_size) > > return 0; > > These bunch of DIV_ROUND_UP() looks too complex. Since now all the shrinker maps allocation > logic in the only file, can't we simplify this to look better? I mean something like below > to merge in your patch: Thanks for the suggestion. Will incorporate in v4. 
> > diff --git a/mm/vmscan.c b/mm/vmscan.c > index b951c289ef3a..27b6371a1656 100644 > --- a/mm/vmscan.c > +++ b/mm/vmscan.c > @@ -247,7 +247,7 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) > return 0; > > down_read(&shrinker_rwsem); > - size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); > + size = shrinker_nr_max / BITS_PER_BYTE; > for_each_node(nid) { > map = kvzalloc(sizeof(*map) + size, GFP_KERNEL); > if (!map) { > @@ -264,13 +264,11 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) > > static int memcg_expand_shrinker_maps(int new_id) > { > - int size, old_size, ret = 0; > + int size, old_size, new_nr_max, ret = 0; > struct mem_cgroup *memcg; > > size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); > - old_size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); > - if (size <= old_size) > - return 0; BTW, it seems the above chunk needs to be kept. > + new_nr_max = size * BITS_PER_BYTE; > > if (!root_mem_cgroup) > goto out; > @@ -287,6 +285,9 @@ static int memcg_expand_shrinker_maps(int new_id) > } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); > > out: > + if (ret == 0) > + shrinker_nr_max = new_nr_max; > + > return ret; > } > > @@ -334,8 +335,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker) > idr_remove(&shrinker_idr, id); > goto unlock; > } > - > - shrinker_nr_max = id + 1; > } > shrinker->id = id; > ret = 0; >
On Wed, Jan 6, 2021 at 2:16 AM Kirill Tkhai <ktkhai@virtuozzo.com> wrote: > > On 06.01.2021 01:58, Yang Shi wrote: > > Both memcg_shrinker_map_size and shrinker_nr_max is maintained, but actually the > > map size can be calculated via shrinker_nr_max, so it seems unnecessary to keep both. > > Remove memcg_shrinker_map_size since shrinker_nr_max is also used by iterating the > > bit map. > > > > Signed-off-by: Yang Shi <shy828301@gmail.com> > > --- > > mm/vmscan.c | 12 ++++-------- > > 1 file changed, 4 insertions(+), 8 deletions(-) > > > > diff --git a/mm/vmscan.c b/mm/vmscan.c > > index ddb9f972f856..8da765a85569 100644 > > --- a/mm/vmscan.c > > +++ b/mm/vmscan.c > > @@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list); > > static DECLARE_RWSEM(shrinker_rwsem); > > > > #ifdef CONFIG_MEMCG > > - > > -static int memcg_shrinker_map_size; > > +static int shrinker_nr_max; > > > > static void memcg_free_shrinker_map_rcu(struct rcu_head *head) > > { > > @@ -248,7 +247,7 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) > > return 0; > > > > down_read(&shrinker_rwsem); > > - size = memcg_shrinker_map_size; > > + size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); > > for_each_node(nid) { > > map = kvzalloc(sizeof(*map) + size, GFP_KERNEL); > > if (!map) { > > @@ -269,7 +268,7 @@ static int memcg_expand_shrinker_maps(int new_id) > > struct mem_cgroup *memcg; > > > > size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); > > - old_size = memcg_shrinker_map_size; > > + old_size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); > > if (size <= old_size) > > return 0; > > These bunch of DIV_ROUND_UP() looks too complex. Since now all the shrinker maps allocation > logic in the only file, can't we simplify this to look better? 
I mean something like below > to merge in your patch: > > diff --git a/mm/vmscan.c b/mm/vmscan.c > index b951c289ef3a..27b6371a1656 100644 > --- a/mm/vmscan.c > +++ b/mm/vmscan.c > @@ -247,7 +247,7 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) > return 0; > > down_read(&shrinker_rwsem); > - size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); > + size = shrinker_nr_max / BITS_PER_BYTE; The type of shrinker_maps->map is "unsigned long *", I think we should do "(shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long)". And the "/ BITS_PER_BYTE" makes calculating the pointer of nr_deferred array harder in the following patch since the length of the map array may be not multiple of "unsigned long". Without the nr_deferred array, this change seems fine. > for_each_node(nid) { > map = kvzalloc(sizeof(*map) + size, GFP_KERNEL); > if (!map) { > @@ -264,13 +264,11 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) > > static int memcg_expand_shrinker_maps(int new_id) > { > - int size, old_size, ret = 0; > + int size, old_size, new_nr_max, ret = 0; > struct mem_cgroup *memcg; > > size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); > - old_size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); > - if (size <= old_size) > - return 0; > + new_nr_max = size * BITS_PER_BYTE; > > if (!root_mem_cgroup) > goto out; > @@ -287,6 +285,9 @@ static int memcg_expand_shrinker_maps(int new_id) > } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); > > out: > + if (ret == 0) > + shrinker_nr_max = new_nr_max; > + > return ret; > } > > @@ -334,8 +335,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker) > idr_remove(&shrinker_idr, id); > goto unlock; > } > - > - shrinker_nr_max = id + 1; > } > shrinker->id = id; > ret = 0; >
diff --git a/mm/vmscan.c b/mm/vmscan.c index ddb9f972f856..8da765a85569 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list); static DECLARE_RWSEM(shrinker_rwsem); #ifdef CONFIG_MEMCG - -static int memcg_shrinker_map_size; +static int shrinker_nr_max; static void memcg_free_shrinker_map_rcu(struct rcu_head *head) { @@ -248,7 +247,7 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) return 0; down_read(&shrinker_rwsem); - size = memcg_shrinker_map_size; + size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); for_each_node(nid) { map = kvzalloc(sizeof(*map) + size, GFP_KERNEL); if (!map) { @@ -269,7 +268,7 @@ static int memcg_expand_shrinker_maps(int new_id) struct mem_cgroup *memcg; size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); - old_size = memcg_shrinker_map_size; + old_size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long); if (size <= old_size) return 0; @@ -286,10 +285,8 @@ static int memcg_expand_shrinker_maps(int new_id) goto out; } } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); -out: - if (!ret) - memcg_shrinker_map_size = size; +out: return ret; } @@ -321,7 +318,6 @@ void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) #define SHRINKER_REGISTERING ((struct shrinker *)~0UL) static DEFINE_IDR(shrinker_idr); -static int shrinker_nr_max; static int prealloc_memcg_shrinker(struct shrinker *shrinker) {
Both memcg_shrinker_map_size and shrinker_nr_max are maintained, but actually the map size can be calculated via shrinker_nr_max, so it seems unnecessary to keep both. Remove memcg_shrinker_map_size since shrinker_nr_max is also used when iterating the bit map. Signed-off-by: Yang Shi <shy828301@gmail.com> --- mm/vmscan.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-)