diff --git a/drivers/base/node.c b/drivers/base/node.c
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -374,8 +374,8 @@ static ssize_t node_read_meminfo(struct device *dev,
unsigned long sreclaimable, sunreclaimable;
si_meminfo_node(&i, nid);
- sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
- sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
+ sreclaimable = node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B);
+ sunreclaimable = node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE_B);
len = sysfs_emit_at(buf, len,
"Node %d MemTotal: %8lu kB\n"
"Node %d MemFree: %8lu kB\n"
@@ -446,9 +446,9 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
nid, K(i.sharedram),
- nid, node_page_state(pgdat, NR_KERNEL_STACK_B) / SZ_1K,
+ nid, K(node_page_state(pgdat, NR_KERNEL_STACK_B)),
#ifdef CONFIG_SHADOW_CALL_STACK
- nid, node_page_state(pgdat, NR_KERNEL_SCS_B) / SZ_1K,
+ nid, K(node_page_state(pgdat, NR_KERNEL_SCS_B)),
#endif
nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
nid, 0UL,
@@ -517,7 +517,7 @@ static ssize_t node_read_vmstat(struct device *dev,
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
len += sysfs_emit_at(buf, len, "%s %lu\n",
node_stat_name(i),
- node_page_state_pages(pgdat, i));
+ node_page_state(pgdat, i));
return len;
}
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -52,8 +52,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
available = si_mem_available();
- sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B);
- sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B);
+ sreclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE_B);
+ sunreclaim = global_node_page_state(NR_SLAB_UNRECLAIMABLE_B);
show_val_kb(m, "MemTotal: ", i.totalram);
show_val_kb(m, "MemFree: ", i.freeram);
@@ -100,11 +100,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "Slab: ", sreclaimable + sunreclaim);
show_val_kb(m, "SReclaimable: ", sreclaimable);
show_val_kb(m, "SUnreclaim: ", sunreclaim);
- seq_printf(m, "KernelStack: %8lu kB\n",
- global_node_page_state(NR_KERNEL_STACK_B) / SZ_1K);
+ show_val_kb(m, "KernelStack: ",
+ global_node_page_state(NR_KERNEL_STACK_B));
#ifdef CONFIG_SHADOW_CALL_STACK
- seq_printf(m, "ShadowCallStack:%8lu kB\n",
- global_node_page_state(NR_KERNEL_SCS_B) / SZ_1K);
+ show_val_kb(m, "ShadowCallStack:",
+ global_node_page_state(NR_KERNEL_SCS_B));
#endif
show_val_kb(m, "PageTables: ",
global_zone_page_state(NR_PAGETABLE));
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -193,8 +193,7 @@ static inline unsigned long global_zone_page_state(enum zone_stat_item item)
return x;
}
-static inline
-unsigned long global_node_page_state_pages(enum node_stat_item item)
+static inline unsigned long global_node_page_state(enum node_stat_item item)
{
long x = atomic_long_read(&vm_node_stat[item]);
@@ -207,17 +206,6 @@ unsigned long global_node_page_state_pages(enum node_stat_item item)
return x;
}
-static inline unsigned long global_node_page_state(enum node_stat_item item)
-{
- long x = atomic_long_read(&vm_node_stat[item]);
-
-#ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-#endif
- return x;
-}
-
static inline unsigned long zone_page_state(struct zone *zone,
enum zone_stat_item item)
{
@@ -258,12 +246,9 @@ extern unsigned long sum_zone_node_page_state(int node,
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
enum node_stat_item item);
-extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
- enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
-#define node_page_state_pages(node, item) global_node_page_state_pages(item)
#endif /* CONFIG_NUMA */
#ifdef CONFIG_SMP
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1705,7 +1705,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
{
unsigned long size;
- size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
+ size = global_node_page_state(NR_SLAB_RECLAIMABLE_B)
+ global_node_page_state(NR_ACTIVE_ANON)
+ global_node_page_state(NR_INACTIVE_ANON)
+ global_node_page_state(NR_ACTIVE_FILE)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -188,7 +188,7 @@ static bool should_dump_unreclaim_slab(void)
global_node_page_state(NR_ISOLATED_FILE) +
global_node_page_state(NR_UNEVICTABLE);
- return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
+ return (global_node_page_state(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}
/**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5372,7 +5372,7 @@ long si_mem_available(void)
* items that are in use, and cannot be freed. Cap this estimate at the
* low watermark.
*/
- reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
+ reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE_B) +
global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
available += reclaimable - min(reclaimable / 2, wmark_low);
@@ -5516,8 +5516,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
global_node_page_state(NR_UNEVICTABLE),
global_node_page_state(NR_FILE_DIRTY),
global_node_page_state(NR_WRITEBACK),
- global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
- global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
+ global_node_page_state(NR_SLAB_RECLAIMABLE_B),
+ global_node_page_state(NR_SLAB_UNRECLAIMABLE_B),
global_node_page_state(NR_FILE_MAPPED),
global_node_page_state(NR_SHMEM),
global_zone_page_state(NR_PAGETABLE),
@@ -5572,9 +5572,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
- node_page_state(pgdat, NR_KERNEL_STACK_B) / SZ_1K,
+ K(node_page_state(pgdat, NR_KERNEL_STACK_B)),
#ifdef CONFIG_SHADOW_CALL_STACK
- node_page_state(pgdat, NR_KERNEL_SCS_B) / SZ_1K,
+ K(node_page_state(pgdat, NR_KERNEL_SCS_B)),
#endif
pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
"yes" : "no");
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4220,7 +4220,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
* unmapped file backed pages.
*/
if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
- node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
+ node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B) <=
pgdat->min_slab_pages)
return NODE_RECLAIM_FULL;
diff --git a/mm/vmstat.c b/mm/vmstat.c
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1000,22 +1000,9 @@ unsigned long sum_zone_numa_state(int node,
}
/*
- * Determine the per node value of a stat item.
+ * Determine the per node value of a stat item. This always returns
+ * values in pages.
*/
-unsigned long node_page_state_pages(struct pglist_data *pgdat,
- enum node_stat_item item)
-{
- long x = atomic_long_read(&pgdat->vm_stat[item]);
-
-#ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-#endif
- if (vmstat_item_in_bytes(item))
- x >>= PAGE_SHIFT;
- return x;
-}
-
unsigned long node_page_state(struct pglist_data *pgdat,
enum node_stat_item item)
{
@@ -1025,6 +1012,8 @@ unsigned long node_page_state(struct pglist_data *pgdat,
if (x < 0)
x = 0;
#endif
+ if (vmstat_item_in_bytes(item))
+ x >>= PAGE_SHIFT;
return x;
}
#endif
@@ -1626,7 +1615,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
seq_printf(m, "\n per-node stats");
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
seq_printf(m, "\n %-12s %lu", node_stat_name(i),
- node_page_state_pages(pgdat, i));
+ node_page_state(pgdat, i));
}
}
seq_printf(m,
@@ -1747,7 +1736,7 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
#endif
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
- v[i] = global_node_page_state_pages(i);
+ v[i] = global_node_page_state(i);
v += NR_VM_NODE_STAT_ITEMS;
global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
Now the unit of each vmstat counter is either pages or bytes. So we can
adjust node_page_state() to always return values in pages and remove
node_page_state_pages().

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 drivers/base/node.c     | 10 +++++-----
 fs/proc/meminfo.c       | 12 ++++++------
 include/linux/vmstat.h  | 17 +----------------
 kernel/power/snapshot.c |  2 +-
 mm/oom_kill.c           |  2 +-
 mm/page_alloc.c         | 10 +++++-----
 mm/vmscan.c             |  2 +-
 mm/vmstat.c             | 23 ++++++-----------------
 8 files changed, 26 insertions(+), 52 deletions(-)
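For reference, a sketch of the consolidated NUMA helper in mm/vmstat.c
as it reads with this patch applied (reassembled from the hunks above;
the elided atomic_long_read() of pgdat->vm_stat[item] is assumed
unchanged from the removed node_page_state_pages()):

unsigned long node_page_state(struct pglist_data *pgdat,
			      enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	/*
	 * Byte-based items such as NR_SLAB_RECLAIMABLE_B are converted
	 * to pages here, so every caller uniformly sees pages.
	 */
	if (vmstat_item_in_bytes(item))
		x >>= PAGE_SHIFT;
	return x;
}

Callers that print kB therefore switch from dividing a byte count by
SZ_1K to the existing page-to-kB K() macro, e.g.
K(node_page_state(pgdat, NR_KERNEL_STACK_B)) in drivers/base/node.c.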