
mm/vmalloc.c: use helper function va_size()

Message ID 20240906102539.3537207-1-zhangpeng362@huawei.com
State New
Series mm/vmalloc.c: use helper function va_size()

Commit Message

Peng Zhang Sept. 6, 2024, 10:25 a.m. UTC
From: ZhangPeng <zhangpeng362@huawei.com>

Use the helper function va_size() to improve code readability. No functional
change intended.

Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
---
 mm/vmalloc.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
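
For context, va_size() is the existing helper in mm/vmalloc.c that this patch
switches to. As a rough sketch (an assumption for illustration, not a quote of
the in-tree definition, which may differ in detail), it simply wraps the
open-coded subtraction being replaced:

	/* Assumed sketch: length of the vmap_area [va_start, va_end) in bytes. */
	static __always_inline unsigned long va_size(struct vmap_area *va)
	{
		return (va->va_end - va->va_start);
	}

With such a helper, each open-coded va->va_end - va->va_start computation in
the hunks below collapses into a single va_size(va) call, which is what the
8 insertions / 9 deletions in the diffstat reflect.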

Comments

Uladzislau Rezki Sept. 6, 2024, 5:18 p.m. UTC | #1
On Fri, Sep 06, 2024 at 06:25:39PM +0800, Peng Zhang wrote:
> From: ZhangPeng <zhangpeng362@huawei.com>
> 
> Use the helper function va_size() to improve code readability. No functional
> change intended.
> 
> Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
> ---
>  mm/vmalloc.c | 17 ++++++++---------
>  1 file changed, 8 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 37b6e987234e..e4c8d0b0f70c 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
>  {
>  	vm->flags = flags;
>  	vm->addr = (void *)va->va_start;
> -	vm->size = va->va_end - va->va_start;
> +	vm->size = va_size(va);
>  	vm->caller = caller;
>  	va->vm = vm;
>  }
> @@ -2018,7 +2018,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  
>  	if (vm) {
>  		vm->addr = (void *)va->va_start;
> -		vm->size = va->va_end - va->va_start;
> +		vm->size = va_size(va);
>  		va->vm = vm;
>  	}
>  
> @@ -2215,7 +2215,7 @@ static void purge_vmap_node(struct work_struct *work)
>  	vn->nr_purged = 0;
>  
>  	list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
> -		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
> +		unsigned long nr = va_size(va) >> PAGE_SHIFT;
>  		unsigned int vn_id = decode_vn_id(va->flags);
>  
>  		list_del_init(&va->list);
> @@ -2355,8 +2355,8 @@ static void free_vmap_area_noflush(struct vmap_area *va)
>  	if (WARN_ON_ONCE(!list_empty(&va->list)))
>  		return;
>  
> -	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
> -				PAGE_SHIFT, &vmap_lazy_nr);
> +	nr_lazy = atomic_long_add_return(va_size(va) >> PAGE_SHIFT,
> +					 &vmap_lazy_nr);
>  
>  	/*
>  	 * If it was request by a certain node we would like to
> @@ -2952,8 +2952,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
>  	if (WARN_ON_ONCE(!va))
>  		return;
>  
> -	debug_check_no_locks_freed((void *)va->va_start,
> -				    (va->va_end - va->va_start));
> +	debug_check_no_locks_freed((void *)va->va_start, va_size(va));
>  	free_unmap_vmap_area(va);
>  }
>  EXPORT_SYMBOL(vm_unmap_ram);
> @@ -4953,7 +4952,7 @@ static void show_purge_info(struct seq_file *m)
>  		list_for_each_entry(va, &vn->lazy.head, list) {
>  			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
>  				(void *)va->va_start, (void *)va->va_end,
> -				va->va_end - va->va_start);
> +				va_size(va));
>  		}
>  		spin_unlock(&vn->lazy.lock);
>  	}
> @@ -4975,7 +4974,7 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
>  				if (va->flags & VMAP_RAM)
>  					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
>  						(void *)va->va_start, (void *)va->va_end,
> -						va->va_end - va->va_start);
> +						va_size(va));
>  
>  				continue;
>  			}
> -- 
> 2.25.1
> 
LGTM.

Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

--
Uladzislau Rezki

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 37b6e987234e..e4c8d0b0f70c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
 {
 	vm->flags = flags;
 	vm->addr = (void *)va->va_start;
-	vm->size = va->va_end - va->va_start;
+	vm->size = va_size(va);
 	vm->caller = caller;
 	va->vm = vm;
 }
@@ -2018,7 +2018,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 	if (vm) {
 		vm->addr = (void *)va->va_start;
-		vm->size = va->va_end - va->va_start;
+		vm->size = va_size(va);
 		va->vm = vm;
 	}
 
@@ -2215,7 +2215,7 @@ static void purge_vmap_node(struct work_struct *work)
 	vn->nr_purged = 0;
 
 	list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
-		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+		unsigned long nr = va_size(va) >> PAGE_SHIFT;
 		unsigned int vn_id = decode_vn_id(va->flags);
 
 		list_del_init(&va->list);
@@ -2355,8 +2355,8 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 	if (WARN_ON_ONCE(!list_empty(&va->list)))
 		return;
 
-	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
-				PAGE_SHIFT, &vmap_lazy_nr);
+	nr_lazy = atomic_long_add_return(va_size(va) >> PAGE_SHIFT,
+					 &vmap_lazy_nr);
 
 	/*
 	 * If it was request by a certain node we would like to
@@ -2952,8 +2952,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	if (WARN_ON_ONCE(!va))
 		return;
 
-	debug_check_no_locks_freed((void *)va->va_start,
-				    (va->va_end - va->va_start));
+	debug_check_no_locks_freed((void *)va->va_start, va_size(va));
 	free_unmap_vmap_area(va);
 }
 EXPORT_SYMBOL(vm_unmap_ram);
@@ -4953,7 +4952,7 @@ static void show_purge_info(struct seq_file *m)
 		list_for_each_entry(va, &vn->lazy.head, list) {
 			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
 				(void *)va->va_start, (void *)va->va_end,
-				va->va_end - va->va_start);
+				va_size(va));
 		}
 		spin_unlock(&vn->lazy.lock);
 	}
@@ -4975,7 +4974,7 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
 				if (va->flags & VMAP_RAM)
 					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
 						(void *)va->va_start, (void *)va->va_end,
-						va->va_end - va->va_start);
+						va_size(va));
 
 				continue;
 			}