--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -415,6 +415,9 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d AnonPages: %8lu kB\n"
"Node %d Shmem: %8lu kB\n"
"Node %d KernelStack: %8lu kB\n"
+#ifdef CONFIG_SHADOW_CALL_STACK
+ "Node %d ShadowCallStack:%8lu kB\n"
+#endif
"Node %d PageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
@@ -438,6 +441,9 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
nid, K(i.sharedram),
nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+ nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_BYTES) / 1024,
+#endif
nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -103,6 +103,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "SUnreclaim: ", sunreclaim);
seq_printf(m, "KernelStack: %8lu kB\n",
global_zone_page_state(NR_KERNEL_STACK_KB));
+#ifdef CONFIG_SHADOW_CALL_STACK
+ seq_printf(m, "ShadowCallStack:%8lu kB\n",
+ global_zone_page_state(NR_KERNEL_SCS_BYTES) / 1024);
+#endif
show_val_kb(m, "PageTables: ",
global_zone_page_state(NR_PAGETABLE));
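Note (not part of the patch): most fields in meminfo_proc_show() go through show_val_kb(), which takes a page count, but the new counter is kept in bytes, so the hunk above converts it to kB with a plain division by 1024. Upstream pads the neighbouring labels ("KernelStack:", "PageTables:", ...) to the same 16 characters as "ShadowCallStack:", which keeps the kB column aligned. A minimal sketch of an equivalent helper; the name show_bytes_kb() is illustrative and does not exist in the kernel:

/* Illustrative only: print a byte-granular counter in the usual kB column. */
static void show_bytes_kb(struct seq_file *m, const char *label,
			  unsigned long bytes)
{
	seq_printf(m, "%s%8lu kB\n", label, bytes / 1024);
}

/*
 * Hypothetical usage:
 *	show_bytes_kb(m, "ShadowCallStack:",
 *		      global_zone_page_state(NR_KERNEL_SCS_BYTES));
 */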
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -200,6 +200,9 @@ enum zone_stat_item {
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
NR_PAGETABLE, /* used for pagetables */
NR_KERNEL_STACK_KB, /* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+ NR_KERNEL_SCS_BYTES, /* measured in bytes */
+#endif
/* Second 128 byte cacheline */
NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
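Context (not part of the patch): entries in enum zone_stat_item are per-zone counters maintained through the existing vmstat helpers, and the remaining hunks only touch NR_KERNEL_SCS_BYTES via those helpers. A minimal sketch of the update and read calls involved, assuming a byte-granular delta as in scs_account() below:

#include <linux/mm.h>
#include <linux/vmstat.h>

/* Illustrative only: charge or uncharge @bytes against the zone backing
 * @page, then read the system-wide total back in kB. */
static void example_account(struct page *page, long bytes)
{
	mod_zone_page_state(page_zone(page), NR_KERNEL_SCS_BYTES, bytes);
}

static unsigned long example_total_kb(void)
{
	return global_zone_page_state(NR_KERNEL_SCS_BYTES) / 1024;
}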
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -12,6 +12,7 @@
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/vmstat.h>
#include <asm/scs.h>
static inline void *__scs_base(struct task_struct *tsk)
@@ -89,6 +90,11 @@ static void scs_free(void *s)
vfree_atomic(s);
}
+static struct page *__scs_page(struct task_struct *tsk)
+{
+ return vmalloc_to_page(__scs_base(tsk));
+}
+
static int scs_cleanup(unsigned int cpu)
{
int i;
@@ -135,6 +141,11 @@ static inline void scs_free(void *s)
kmem_cache_free(scs_cache, s);
}
+static struct page *__scs_page(struct task_struct *tsk)
+{
+ return virt_to_page(__scs_base(tsk));
+}
+
void __init scs_init(void)
{
scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, SCS_SIZE,
@@ -153,6 +164,12 @@ void scs_task_reset(struct task_struct *tsk)
task_set_scs(tsk, __scs_base(tsk));
}
+static void scs_account(struct task_struct *tsk, int account)
+{
+ mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_BYTES,
+ account * SCS_SIZE);
+}
+
int scs_prepare(struct task_struct *tsk, int node)
{
void *s;
@@ -162,6 +179,8 @@ int scs_prepare(struct task_struct *tsk, int node)
return -ENOMEM;
task_set_scs(tsk, s);
+ scs_account(tsk, 1);
+
return 0;
}
@@ -182,6 +201,7 @@ void scs_release(struct task_struct *tsk)
WARN_ON(scs_corrupted(tsk));
+ scs_account(tsk, -1);
task_set_scs(tsk, NULL);
scs_free(s);
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5338,6 +5338,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
" managed:%lukB"
" mlocked:%lukB"
" kernel_stack:%lukB"
+#ifdef CONFIG_SHADOW_CALL_STACK
+ " shadow_call_stack:%lukB"
+#endif
" pagetables:%lukB"
" bounce:%lukB"
" free_pcp:%lukB"
@@ -5360,6 +5363,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
K(zone_managed_pages(zone)),
K(zone_page_state(zone, NR_MLOCK)),
zone_page_state(zone, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+ zone_page_state(zone, NR_KERNEL_SCS_BYTES) / 1024,
+#endif
K(zone_page_state(zone, NR_PAGETABLE)),
K(zone_page_state(zone, NR_BOUNCE)),
K(free_pcp),
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1119,6 +1119,9 @@ const char * const vmstat_text[] = {
"nr_mlock",
"nr_page_table_pages",
"nr_kernel_stack",
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+ "nr_shadow_call_stack_bytes",
+#endif
"nr_bounce",
#if IS_ENABLED(CONFIG_ZSMALLOC)
"nr_zspages",