[RFC,2/3] mm/slub: sort objects in cache by frequency of stack trace

Message ID 20210521121127.24653-2-glittao@gmail.com
State New
Series [RFC,1/3] mm/slub: aggregate objects in cache by stack trace

Commit Message

Oliver Glitta May 21, 2021, 12:11 p.m. UTC
From: Oliver Glitta <glittao@gmail.com>

Sort objects in the slub cache by the frequency of the stack trace
recorded for each object location in the alloc_calls and free_calls
implementation in debugfs. The most frequently used stack traces come
first.

Signed-off-by: Oliver Glitta <glittao@gmail.com>
---
 mm/slub.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
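
For illustration, after this change a read of e.g. alloc_calls lists
the most frequent allocation sites first. The callers and counts below
are entirely made up, and the per-entry details (age/pid/cpus) are
elided:

	 4823 example_caller_a+0x4c/0xa0 ...
	  912 example_caller_b+0x18/0x60 ...
	   37 example_caller_c+0x2a/0x90 ...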

Comments

Vlastimil Babka May 26, 2021, 2:06 p.m. UTC | #1
On 5/21/21 2:11 PM, glittao@gmail.com wrote:
> From: Oliver Glitta <glittao@gmail.com>
> 
> Sort objects in the slub cache by the frequency of the stack trace
> recorded for each object location in the alloc_calls and free_calls
> implementation in debugfs. The most frequently used stack traces come
> first.

That will make it much more convenient.

> Signed-off-by: Oliver Glitta <glittao@gmail.com>

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

Patch

diff --git a/mm/slub.c b/mm/slub.c
index d5ed6ed7d68b..247983d647cd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -37,6 +37,7 @@
 #include <linux/memcontrol.h>
 #include <linux/random.h>
 #include <kunit/test.h>
+#include <linux/sort.h>
 
 #include <linux/debugfs.h>
 #include <trace/events/kmem.h>
@@ -5893,6 +5894,17 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
 	return NULL;
 }
 
+static int cmp_loc_by_count(const void *a, const void *b, const void *data)
+{
+	struct location *loc1 = (struct location *)a;
+	struct location *loc2 = (struct location *)b;
+
+	if (loc1->count > loc2->count)
+		return -1;
+	else
+		return 1;
+}
+
 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 {
 	struct kmem_cache_node *n;
@@ -5944,6 +5956,11 @@ static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 				process_slab(&t, s, page, alloc);
 			spin_unlock_irqrestore(&n->list_lock, flags);
 		}
+
+		/* Sort locations by count */
+		sort_r(t.loc, t.count, sizeof(struct location),
+				cmp_loc_by_count, NULL, NULL);
+
 	}
 
 	if (*ppos < t.count) {
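
A note on the comparator: as written, cmp_loc_by_count never returns 0,
so two locations with equal counts each compare as greater than the
other. The kernel's sort_r() (a heapsort, lib/sort.c) still terminates
and still yields the intended non-increasing order by count, since any
arrangement of equal-count entries satisfies that order; only the
relative placement of such entries is unspecified. A minimal sketch of
a fully three-way variant (hypothetical name, not part of the patch):

	/*
	 * Hypothetical three-way version of the patch's comparator: it
	 * reports equality instead of arbitrarily ordering equal counts.
	 */
	static int cmp_loc_by_count_3way(const void *a, const void *b,
					 const void *data)
	{
		const struct location *loc1 = a;
		const struct location *loc2 = b;

		if (loc1->count > loc2->count)
			return -1;	/* higher count sorts first */
		if (loc1->count < loc2->count)
			return 1;
		return 0;		/* equal counts: order does not matter */
	}

The two NULL arguments in the sort_r() call select the library's
built-in element swap and pass no private data through to the
comparator, which is why the data parameter goes unused.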