[03/11] mm: vmscan: simplify lruvec_lru_size()

Message ID 20190603210746.15800-4-hannes@cmpxchg.org (mailing list archive)
State New, archived
Series: mm: fix page aging across multiple cgroups

Commit Message

Johannes Weiner June 3, 2019, 9:07 p.m. UTC
This function currently takes the node or lruvec size and subtracts
the zones that are excluded by the classzone index of the
allocation. It uses four different types of counters to do this.

Just add up the eligible zones.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/vmscan.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)
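
To make the change concrete, here is a standalone userspace model of the before/after logic. It is illustrative only, not kernel code: zone_lru_pages[] and managed[] are hypothetical stand-ins for the kernel's per-zone LRU counters and the managed_zone() check.

#include <stdio.h>

#define MAX_NR_ZONES 5

/* Per-zone LRU page counts and managed status for one node (made-up data). */
static const unsigned long zone_lru_pages[MAX_NR_ZONES] = { 100, 200, 0, 4000, 0 };
static const int managed[MAX_NR_ZONES] = { 1, 1, 0, 1, 0 };

/*
 * Old approach: start from a node-wide total kept in a separate
 * counter, then subtract the zones above the classzone index. The
 * min()-style clamp guards against the independently updated node
 * and zone counters transiently disagreeing.
 */
static unsigned long lru_size_old(int zone_idx)
{
	unsigned long lru_size = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)	/* node-wide total */
		lru_size += zone_lru_pages[zid];

	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
		unsigned long size;

		if (!managed[zid])
			continue;
		size = zone_lru_pages[zid];
		lru_size -= size < lru_size ? size : lru_size;
	}
	return lru_size;
}

/* New approach: just add up the eligible zones directly. */
static unsigned long lru_size_new(int zone_idx)
{
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= zone_idx; zid++) {
		if (!managed[zid])
			continue;
		size += zone_lru_pages[zid];
	}
	return size;
}

int main(void)
{
	int zone_idx;

	for (zone_idx = 0; zone_idx < MAX_NR_ZONES; zone_idx++)
		printf("classzone %d: old=%lu new=%lu\n", zone_idx,
		       lru_size_old(zone_idx), lru_size_new(zone_idx));
	return 0;
}

The four counter types in the old code were the memcg and plain node-level LRU counters (for the total) plus the memcg and plain zone-level counters (for the subtraction); the new code reads a single per-zone counter type, which also makes the min() clamp unnecessary.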

Comments

Shakeel Butt Nov. 7, 2019, 2:51 a.m. UTC | #1
On Mon, Jun 3, 2019 at 2:59 PM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> This function currently takes the node or lruvec size and subtracts
> the zones that are excluded by the classzone index of the
> allocation. It uses four different types of counters to do this.
>
> Just add up the eligible zones.
>
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

I think this became part of another series. Anyway:

Reviewed-by: Shakeel Butt <shakeelb@google.com>


> [...]

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 853be16ee5e2..69c4c82a9b5a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -342,30 +342,21 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
  */
 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
 {
-	unsigned long lru_size;
+	unsigned long size = 0;
 	int zid;
 
-	if (!mem_cgroup_disabled())
-		lru_size = lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
-	else
-		lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
-
-	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
+	for (zid = 0; zid <= zone_idx; zid++) {
 		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
-		unsigned long size;
 
 		if (!managed_zone(zone))
 			continue;
 
 		if (!mem_cgroup_disabled())
-			size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
+			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
 		else
-			size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
-				       NR_ZONE_LRU_BASE + lru);
-		lru_size -= min(size, lru_size);
+			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
 	}
-
-	return lru_size;
+	return size;
 
 }
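
For context, lruvec_lru_size() tells reclaim how many pages on an LRU list are usable for an allocation constrained to zones at or below zone_idx. A hypothetical call site, assuming a lruvec in scope (not from this patch):

	/* Pages on the inactive file LRU eligible for a GFP_KERNEL
	 * allocation, i.e. zones up to and including gfp_zone(GFP_KERNEL). */
	unsigned long eligible = lruvec_lru_size(lruvec, LRU_INACTIVE_FILE,
						 gfp_zone(GFP_KERNEL));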