[v2,sl-b,3/5] mm: Make mem_dump_obj() handle vmalloc() memory

Message ID 20201209011303.32737-3-paulmck@kernel.org (mailing list archive)
State New, archived
Series [v2,sl-b,1/5] mm: Add mem_dump_obj() to print source of memory block

Commit Message

Paul E. McKenney Dec. 9, 2020, 1:13 a.m. UTC
From: "Paul E. McKenney" <paulmck@kernel.org>

This commit adds vmalloc() support to mem_dump_obj().  Note that the
vmalloc_dump_obj() function combines the checking and dumping, in
contrast with the split between kmem_valid_obj() and kmem_dump_obj().
The reason for the difference is that the checking in the vmalloc()
case involves acquiring a global lock, and redundant acquisitions of
global locks should be avoided, even on not-so-fast paths.
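
The resulting shape of mem_dump_obj() is sketched below; the comments
paraphrase the locking rationale and are not part of the patch itself:

	if (kmem_valid_obj(object)) {	/* Lockless page checks, so the   */
		kmem_dump_obj(object);	/* check/dump split costs little. */
		return;
	}
	/* find_vm_area() acquires the global vmap_area_lock, so checking
	 * and dumping are combined into this one call. */
	if (vmalloc_dump_obj(object))
		return;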

Note that this change causes on-stack variables to be reported as
vmalloc() storage from kernel_clone() or similar, depending on the degree
of inlining that your compiler does: with CONFIG_VMAP_STACK=y, kernel
stacks are themselves vmalloc() allocations.  This is likely more helpful
than the earlier "non-paged (local) memory" report.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: <linux-mm@kvack.org>
Reported-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/vmalloc.h |  6 ++++++
 mm/util.c               | 12 +++++++-----
 mm/vmalloc.c            | 12 ++++++++++++
 3 files changed, 25 insertions(+), 5 deletions(-)
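
For context, the expected consumer is a debugging path that has only an
address in hand.  A minimal usage sketch (the caller and its message are
hypothetical; only mem_dump_obj() comes from this series):

	static void report_suspect_object(void *addr)
	{
		/* mem_dump_obj() completes this line via pr_cont(). */
		pr_err("Suspect object at %px:", addr);
		mem_dump_obj(addr);
	}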

Comments

Vlastimil Babka Dec. 9, 2020, 5:51 p.m. UTC | #1
On 12/9/20 2:13 AM, paulmck@kernel.org wrote:
> From: "Paul E. McKenney" <paulmck@kernel.org>
> 
...

> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -3431,6 +3431,18 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
>  }
>  #endif	/* CONFIG_SMP */
>  
> +bool vmalloc_dump_obj(void *object)
> +{
> +	struct vm_struct *vm;
> +	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
> +
> +	vm = find_vm_area(objp);
> +	if (!vm)
> +		return false;
> +	pr_cont(" vmalloc allocated at %pS\n", vm->caller);

Would it be useful to print the vm area boundaries too?

> +	return true;
> +}
> +
>  #ifdef CONFIG_PROC_FS
>  static void *s_start(struct seq_file *m, loff_t *pos)
>  	__acquires(&vmap_purge_lock)
>
Uladzislau Rezki Dec. 9, 2020, 7:36 p.m. UTC | #2
On Tue, Dec 08, 2020 at 05:13:01PM -0800, paulmck@kernel.org wrote:
> From: "Paul E. McKenney" <paulmck@kernel.org>
> 
...
> 
> +bool vmalloc_dump_obj(void *object)
> +{
> +	struct vm_struct *vm;
> +	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
>
Paul, vmalloced addresses are already aligned to PAGE_SIZE, so that one
is odd.

--
Vlad Rezki
Uladzislau Rezki Dec. 9, 2020, 7:39 p.m. UTC | #3
On Wed, Dec 09, 2020 at 06:51:20PM +0100, Vlastimil Babka wrote:
> On 12/9/20 2:13 AM, paulmck@kernel.org wrote:
> ...
> 
> > +	pr_cont(" vmalloc allocated at %pS\n", vm->caller);
> 
> Would it be useful to print the vm area boundaries too?
> 
Do you mean va_start/va_end information?

--
Vlad Rezki
Paul E. McKenney Dec. 9, 2020, 7:42 p.m. UTC | #4
On Wed, Dec 09, 2020 at 08:36:37PM +0100, Uladzislau Rezki wrote:
> On Tue, Dec 08, 2020 at 05:13:01PM -0800, paulmck@kernel.org wrote:
> > ...
> > 
> > +bool vmalloc_dump_obj(void *object)
> > +{
> > +	struct vm_struct *vm;
> > +	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
> >
> Paul, vmalloced addresses are already aligned to PAGE_SIZE, so that one
> is odd.

They are, but this is to handle things like this:

	struct foo {
		int a;
		struct rcu_head rh;
	};

	void silly(struct foo *fp)
	{
		call_rcu(&fp->rh, my_rcu_cb);
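		/* Bug: duplicate call_rcu() on the same rcu_head. */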
		call_rcu(&fp->rh, my_other_rcu_cb);
	}

In kernels built with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, this would
result in a call to mem_dump_obj() and then to vmalloc_dump_obj()
with a non-page-aligned pointer.
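
To make the rounding concrete (hypothetical address, 4 KiB pages):
PAGE_ALIGN(x) is (x + PAGE_SIZE - 1) & PAGE_MASK, so the interior pointer
&fp->rh is rounded up to a page boundary before the find_vm_area() lookup:

	PAGE_ALIGN(0xffffc90000001008) == 0xffffc90000002000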

							Thanx, Paul
Uladzislau Rezki Dec. 9, 2020, 8:04 p.m. UTC | #5
On Wed, Dec 09, 2020 at 11:42:39AM -0800, Paul E. McKenney wrote:
> On Wed, Dec 09, 2020 at 08:36:37PM +0100, Uladzislau Rezki wrote:
> > ...
> > 
> > Paul, vmalloced addresses are already aligned to PAGE_SIZE, so that one
> > is odd.
> 
> They are, but this is to handle things like this:
> 
> ...
> 
> In kernels built with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, this would
> result in a call to mem_dump_obj() and then to vmalloc_dump_obj()
> with a non-page-aligned pointer.
> 
OK, I got it. I thought the function dealt with the original vmalloc
pointer. In fact it does not :)

--
Vlad Rezki
Paul E. McKenney Dec. 9, 2020, 11:23 p.m. UTC | #6
On Wed, Dec 09, 2020 at 06:51:20PM +0100, Vlastimil Babka wrote:
> On 12/9/20 2:13 AM, paulmck@kernel.org wrote:
> > ...
> > 
> > +	pr_cont(" vmalloc allocated at %pS\n", vm->caller);
> 
> Would it be useful to print the vm area boundaries too?

Like this?

I also considered instead using vm->size, but that always seems to include
an extra page, so a 4-page span is listed as having 20480 bytes and a
one-page span is 8192 bytes.  This might be more accurate in some sense,
but would be quite confusing to someone trying to compare this size with
that requested in the vmalloc() call.
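
That extra page is presumably vmalloc's guard page, which vm->size covers
but vm->nr_pages does not.  With 4 KiB pages:

	(4 + 1) * 4096 == 20480		/* vm->size for a 4-page span */
	(1 + 1) * 4096 ==  8192		/* vm->size for a 1-page span */
	vm->nr_pages * PAGE_SIZE	/* matches the size requested */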

							Thanx, Paul

------------------------------------------------------------------------

commit 33e0469c289c2f78e5f0d0c463c8ee3357d273c0
Author: Paul E. McKenney <paulmck@kernel.org>
Date:   Wed Dec 9 15:15:27 2020 -0800

    mm: Make mem_dump_obj() vmalloc() dumps include start and length
    
    This commit adds the starting address and number of pages to the vmalloc()
    information dumped by way of vmalloc_dump_obj().
    
    Cc: Andrew Morton <akpm@linux-foundation.org>
    Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
    Cc: <linux-mm@kvack.org>
    Reported-by: Andrii Nakryiko <andrii@kernel.org>
    Suggested-by: Vlastimil Babka <vbabka@suse.cz>
    Signed-off-by: Paul E. McKenney <paulmck@kernel.org>

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7421719..77b1100 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3439,7 +3439,8 @@ bool vmalloc_dump_obj(void *object)
 	vm = find_vm_area(objp);
 	if (!vm)
 		return false;
-	pr_cont(" vmalloc allocated at %pS\n", vm->caller);
+	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
+		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
 	return true;
 }
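
Filled in with invented values for illustration, the continued console
line would read something like:

	 4-page vmalloc region starting at 0xffffc90000005000 allocated at kernel_clone+0x7c/0x360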
Vlastimil Babka Dec. 10, 2020, 10:49 a.m. UTC | #7
On 12/10/20 12:23 AM, Paul E. McKenney wrote:
> On Wed, Dec 09, 2020 at 06:51:20PM +0100, Vlastimil Babka wrote:
>> On 12/9/20 2:13 AM, paulmck@kernel.org wrote:
>> ...
>> 
>> > +	pr_cont(" vmalloc allocated at %pS\n", vm->caller);
>> 
>> Would it be useful to print the vm area boundaries too?
> 
> Like this?

Yeah, thanks!

> I also considered instead using vm->size, but that always seems to include
> an extra page, so a 4-page span is listed as having 20480 bytes and a
> one-page span is 8192 bytes.  This might be more accurate in some sense,
> but would be quite confusing to someone trying to compare this size with
> that requested in the vmalloc() call.

Right.


Patch

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 938eaf9..c89c2be 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -248,4 +248,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 int register_vmap_purge_notifier(struct notifier_block *nb);
 int unregister_vmap_purge_notifier(struct notifier_block *nb);
 
+#ifdef CONFIG_MMU
+bool vmalloc_dump_obj(void *object);
+#else
+static inline bool vmalloc_dump_obj(void *object) { return false; }
+#endif
+
 #endif /* _LINUX_VMALLOC_H */
diff --git a/mm/util.c b/mm/util.c
index 8c2449f..ee99a0a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -984,6 +984,12 @@ int __weak memcmp_pages(struct page *page1, struct page *page2)
  */
 void mem_dump_obj(void *object)
 {
+	if (kmem_valid_obj(object)) {
+		kmem_dump_obj(object);
+		return;
+	}
+	if (vmalloc_dump_obj(object))
+		return;
 	if (!virt_addr_valid(object)) {
 		if (object == NULL)
 			pr_cont(" NULL pointer.\n");
@@ -993,10 +999,6 @@ void mem_dump_obj(void *object)
 			pr_cont(" non-paged (local) memory.\n");
 		return;
 	}
-	if (kmem_valid_obj(object)) {
-		kmem_dump_obj(object);
-		return;
-	}
-	pr_cont(" non-slab memory.\n");
+	pr_cont(" non-slab/vmalloc memory.\n");
 }
 EXPORT_SYMBOL_GPL(mem_dump_obj);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6ae491a..7421719 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3431,6 +3431,18 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 }
 #endif	/* CONFIG_SMP */
 
+bool vmalloc_dump_obj(void *object)
+{
+	struct vm_struct *vm;
+	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
+
+	vm = find_vm_area(objp);
+	if (!vm)
+		return false;
+	pr_cont(" vmalloc allocated at %pS\n", vm->caller);
+	return true;
+}
+
 #ifdef CONFIG_PROC_FS
 static void *s_start(struct seq_file *m, loff_t *pos)
 	__acquires(&vmap_purge_lock)