diff mbox series

mm:vmalloc add vm_struct for vm_map_ram

Message ID 1541675689-13363-1-git-send-email-huangzhaoyang@gmail.com (mailing list archive)
State New, archived
Headers show
Series mm:vmalloc add vm_struct for vm_map_ram | expand

Commit Message

Zhaoyang Huang Nov. 8, 2018, 11:14 a.m. UTC
From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>

/proc/vmallocinfo currently shows no caller or pages information for an
area created by vm_map_ram() when the page count exceeds VMAP_MAX_ALLOC.
Add that information in this commit.

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
 mm/vmalloc.c | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)

Comments

Andrew Morton Nov. 8, 2018, 10:52 p.m. UTC | #1
On Thu,  8 Nov 2018 19:14:49 +0800 Zhaoyang Huang <huangzhaoyang@gmail.com> wrote:

> There is no caller and pages information etc for the area which is
> created by vm_map_ram as well as the page count > VMAP_MAX_ALLOC.
> Add them on in this commit.

Well I can kind of see what this is doing - it increases the amount of
info in /proc/vmallocinfo for regions which were created by
vm_map_ram(), yes?

But I'd like to hear it in your words, please.  What problem are we
trying to solve?  Why is it actually a problem?  Why is the additional
information considered useful, etc?

It would help a lot if the changelog were to include before-and-after
examples from the /proc/vmallocinfo output.

Thanks.
Xishi Qiu Nov. 12, 2018, 3:24 a.m. UTC | #2
On 2018/11/8 19:14, Zhaoyang Huang wrote:
> From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> 
> There is no caller and pages information etc for the area which is
> created by vm_map_ram as well as the page count > VMAP_MAX_ALLOC.
> Add them on in this commit.
> 
> Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> ---
>  mm/vmalloc.c | 30 ++++++++++++++++++++----------
>  1 file changed, 20 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index cfea25b..819b690 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -45,7 +45,8 @@ struct vfree_deferred {
>  static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
>  
>  static void __vunmap(const void *, int);
> -
> +static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
> +			      unsigned long flags, const void *caller);
>  static void free_work(struct work_struct *w)
>  {
>  	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
> @@ -1138,6 +1139,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
>  	BUG_ON(!va);
>  	debug_check_no_locks_freed((void *)va->va_start,
>  				    (va->va_end - va->va_start));
> +	kfree(va->vm);
>  	free_unmap_vmap_area(va);
>  }
>  EXPORT_SYMBOL(vm_unmap_ram);
> @@ -1170,6 +1172,8 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
>  		addr = (unsigned long)mem;
>  	} else {
>  		struct vmap_area *va;
> +		struct vm_struct *area;
> +
>  		va = alloc_vmap_area(size, PAGE_SIZE,
>  				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
>  		if (IS_ERR(va))
> @@ -1177,11 +1181,17 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
>  
>  		addr = va->va_start;
>  		mem = (void *)addr;
> +		area = kzalloc_node(sizeof(*area), GFP_KERNEL, node);
> +		if (likely(area)) {
> +			setup_vmalloc_vm(area, va, 0, __builtin_return_address(0));
> +			va->flags &= ~VM_VM_AREA;
> +		}
Hi Zhaoyang,

I think if we set the flag VM_VM_AREA, that means we have some info,
so how about do not clear the flag after setup_vmalloc_vm, and just
update the print in s_show.

	...
	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");
+	if (v->flags & VM_MAP_RAM)  // add a new flag for vm_map_ram?
+		seq_puts(m, " vm_map_ram");
	...

Thanks,
Xishi Qiu
>  	}
>  	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
>  		vm_unmap_ram(mem, count);
>  		return NULL;
>  	}
> +
>  	return mem;
>  }
>  EXPORT_SYMBOL(vm_map_ram);
> @@ -2688,19 +2698,19 @@ static int s_show(struct seq_file *m, void *p)
>  	 * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
>  	 * behalf of vmap area is being tear down or vm_map_ram allocation.
>  	 */
> -	if (!(va->flags & VM_VM_AREA)) {
> -		seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
> -			(void *)va->va_start, (void *)va->va_end,
> -			va->va_end - va->va_start,
> -			va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
> -
> +	if (!(va->flags & VM_VM_AREA) && !va->vm)
>  		return 0;
> -	}
>  
>  	v = va->vm;
>  
> -	seq_printf(m, "0x%pK-0x%pK %7ld",
> -		v->addr, v->addr + v->size, v->size);
> +	if (!(va->flags & VM_VM_AREA))
> +		seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
> +				(void *)va->va_start, (void *)va->va_end,
> +				va->va_end - va->va_start,
> +				va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
> +	else
> +		seq_printf(m, "0x%pK-0x%pK %7ld",
> +				v->addr, v->addr + v->size, v->size);
>  
>  	if (v->caller)
>  		seq_printf(m, " %pS", v->caller);
>
diff mbox series

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index cfea25b..819b690 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -45,7 +45,8 @@  struct vfree_deferred {
 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
 
 static void __vunmap(const void *, int);
-
+static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+			      unsigned long flags, const void *caller);
 static void free_work(struct work_struct *w)
 {
 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
@@ -1138,6 +1139,7 @@  void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(!va);
 	debug_check_no_locks_freed((void *)va->va_start,
 				    (va->va_end - va->va_start));
+	kfree(va->vm);
 	free_unmap_vmap_area(va);
 }
 EXPORT_SYMBOL(vm_unmap_ram);
@@ -1170,6 +1172,8 @@  void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 		addr = (unsigned long)mem;
 	} else {
 		struct vmap_area *va;
+		struct vm_struct *area;
+
 		va = alloc_vmap_area(size, PAGE_SIZE,
 				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
 		if (IS_ERR(va))
@@ -1177,11 +1181,17 @@  void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 
 		addr = va->va_start;
 		mem = (void *)addr;
+		area = kzalloc_node(sizeof(*area), GFP_KERNEL, node);
+		if (likely(area)) {
+			setup_vmalloc_vm(area, va, 0, __builtin_return_address(0));
+			va->flags &= ~VM_VM_AREA;
+		}
 	}
 	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
 	}
+
 	return mem;
 }
 EXPORT_SYMBOL(vm_map_ram);
@@ -2688,19 +2698,19 @@  static int s_show(struct seq_file *m, void *p)
 	 * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
 	 * behalf of vmap area is being tear down or vm_map_ram allocation.
 	 */
-	if (!(va->flags & VM_VM_AREA)) {
-		seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
-			(void *)va->va_start, (void *)va->va_end,
-			va->va_end - va->va_start,
-			va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
-
+	if (!(va->flags & VM_VM_AREA) && !va->vm)
 		return 0;
-	}
 
 	v = va->vm;
 
-	seq_printf(m, "0x%pK-0x%pK %7ld",
-		v->addr, v->addr + v->size, v->size);
+	if (!(va->flags & VM_VM_AREA))
+		seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
+				(void *)va->va_start, (void *)va->va_end,
+				va->va_end - va->va_start,
+				va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
+	else
+		seq_printf(m, "0x%pK-0x%pK %7ld",
+				v->addr, v->addr + v->size, v->size);
 
 	if (v->caller)
 		seq_printf(m, " %pS", v->caller);