@@ -216,17 +216,12 @@ enum node_stat_item {
*/
static __always_inline bool vmstat_item_in_bytes(int idx)
{
- /*
- * Global and per-node slab counters track slab pages.
- * It's expected that changes are multiples of PAGE_SIZE.
- * Internally values are stored in pages.
- *
- * Per-memcg and per-lruvec counters track memory, consumed
- * by individual slab objects. These counters are actually
- * byte-precise.
- */
return (idx == NR_SLAB_RECLAIMABLE_B ||
- idx == NR_SLAB_UNRECLAIMABLE_B);
+ idx == NR_SLAB_UNRECLAIMABLE_B ||
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+ idx == NR_KERNEL_SCS_B ||
+#endif
+ idx == NR_KERNEL_STACK_B);
}
/*
@@ -340,7 +335,7 @@ struct per_cpu_pageset {
struct per_cpu_nodestat {
s8 stat_threshold;
- s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
+ s32 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};
#endif /* !__GENERATING_BOUNDS.H */
@@ -211,7 +211,6 @@ static inline unsigned long global_node_page_state(enum node_stat_item item)
{
long x = atomic_long_read(&vm_node_stat[item]);
- VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
#endif
@@ -341,13 +341,15 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
long delta)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
long x;
long t;
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
+ if (vmstat_item_in_bytes(item))
+ t <<= PAGE_SHIFT;
if (unlikely(abs(x) > t)) {
node_page_state_add(x, pgdat, item);
@@ -399,15 +401,15 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
- s8 v, t;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 v, t;
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
- s8 overstep = t >> 1;
+ s32 overstep = t >> 1;
node_page_state_add(v + overstep, pgdat, item);
__this_cpu_write(*p, -overstep);
@@ -445,8 +447,8 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
- s8 v, t;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 v, t;
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
@@ -546,7 +548,7 @@ static inline void mod_node_state(struct pglist_data *pgdat,
enum node_stat_item item, int delta, int overstep_mode)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ s32 __percpu *p = pcp->vm_node_stat_diff + item;
long o, n, t, z;
do {
@@ -563,6 +565,8 @@ static inline void mod_node_state(struct pglist_data *pgdat,
* for all cpus in a node.
*/
t = this_cpu_read(pcp->stat_threshold);
+ if (vmstat_item_in_bytes(item))
+ t <<= PAGE_SHIFT;
o = this_cpu_read(*p);
n = delta + o;
@@ -829,7 +833,7 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
- int v;
+ s32 v;
v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
if (v) {
@@ -899,7 +903,7 @@ void cpu_vm_stats_fold(int cpu)
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
if (p->vm_node_stat_diff[i]) {
- int v;
+ s32 v;
v = p->vm_node_stat_diff[i];
p->vm_node_stat_diff[i] = 0;
@@ -1017,8 +1021,6 @@ unsigned long node_page_state(struct pglist_data *pgdat,
{
long x = atomic_long_read(&pgdat->vm_stat[item]);
- VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
-
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
#endif

Some vmstat counters are accounted in bytes rather than pages, so the
stat_threshold should be scaled to bytes as well. The memcg vmstat
counters are already of type long (see struct lruvec_stat). The global
per-node per-cpu diffs could likewise be widened to long, but the
maximum vmstat threshold is 125 pages, so even scaled to bytes
(125 << PAGE_SHIFT) the per-cpu diff still fits comfortably in an s32,
which is why s32 is sufficient here.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/mmzone.h | 17 ++++++-----------
 include/linux/vmstat.h |  1 -
 mm/vmstat.c            | 24 +++++++++++++-----------
 3 files changed, 19 insertions(+), 23 deletions(-)
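
For reference, below is a minimal userspace sketch of the threshold
arithmetic this patch changes. It is not the kernel implementation:
the names (demo_mod_state, cpu_diff, global_counter) are hypothetical,
PAGE_SHIFT is assumed to be 12, and the overstep handling of the real
mod_node_state() is omitted. It only illustrates why the per-cpu diff
must be wider than s8 once byte-precise items scale the threshold by
PAGE_SHIFT: 125 << 12 = 512000, far beyond S8_MAX (127).

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>

#define PAGE_SHIFT	12		/* assume 4K pages for the demo */

static long global_counter;	/* stands in for pgdat->vm_stat[item] */
static int32_t cpu_diff;	/* stands in for vm_node_stat_diff[item] */

static void demo_mod_state(long delta, bool item_in_bytes)
{
	long x = delta + cpu_diff;
	long t = 125;			/* max stat_threshold, in pages */

	if (item_in_bytes)
		t <<= PAGE_SHIFT;	/* the scaling added by this patch */

	if (labs(x) > t) {
		global_counter += x;	/* fold into the global counter */
		cpu_diff = 0;
	} else {
		cpu_diff = (int32_t)x;	/* can reach 512000: s8 overflows */
	}
}

int main(void)
{
	demo_mod_state(200 << 10, true);	/* +200KiB, stays per-cpu */
	printf("diff=%d global=%ld\n", cpu_diff, global_counter);

	demo_mod_state(400 << 10, true);	/* +400KiB, crosses threshold */
	printf("diff=%d global=%ld\n", cpu_diff, global_counter);
	return 0;
}

Running the sketch, the first 200KiB update stays batched in the
per-cpu diff, while the second pushes the diff past 125 << PAGE_SHIFT
and folds it into the global counter, mirroring how
__mod_node_page_state() defers the atomic update until the (now
byte-scaled) threshold is crossed.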