@@ -353,6 +353,8 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(item == NR_KERNEL_STACK_KB))
+ t <<= PAGE_SHIFT;
if (unlikely(abs(x) > t)) {
node_page_state_add(x, pgdat, item);
@@ -573,6 +575,8 @@ static inline void mod_node_state(struct pglist_data *pgdat,
* for all cpus in a node.
*/
t = this_cpu_read(pcp->stat_threshold);
+ if (unlikely(item == NR_KERNEL_STACK_KB))
+ t <<= PAGE_SHIFT;
o = this_cpu_read(*p);
n = delta + o;
The kernel stack is accounted in KiB, not in pages, so the stat_threshold (which is in units of pages) should also be scaled to match. Signed-off-by: Muchun Song <songmuchun@bytedance.com> --- mm/vmstat.c | 4 ++++ 1 file changed, 4 insertions(+)