[3/5] mm/vmalloc: fix typo in local variable name

Message ID 20220606083909.363350-4-bhe@redhat.com (mailing list archive)
State New
Series Cleanup patches of vmalloc

Commit Message

Baoquan He June 6, 2022, 8:39 a.m. UTC
In __purge_vmap_area_lazy(), rename local_pure_list to local_purge_list.

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/vmalloc.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

Comments

Uladzislau Rezki June 6, 2022, 8:47 p.m. UTC | #1
On Mon, Jun 06, 2022 at 04:39:07PM +0800, Baoquan He wrote:
> In __purge_vmap_area_lazy(), rename local_pure_list to local_purge_list.
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>
> ---
> [...]

Makes sense to me.

Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

--
Uladzislau Rezki

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c7e1634ff2b9..11dfc897de40 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1673,32 +1673,32 @@ static void purge_fragmented_blocks_allcpus(void);
 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 {
 	unsigned long resched_threshold;
-	struct list_head local_pure_list;
+	struct list_head local_purge_list;
 	struct vmap_area *va, *n_va;
 
 	lockdep_assert_held(&vmap_purge_lock);
 
 	spin_lock(&purge_vmap_area_lock);
 	purge_vmap_area_root = RB_ROOT;
-	list_replace_init(&purge_vmap_area_list, &local_pure_list);
+	list_replace_init(&purge_vmap_area_list, &local_purge_list);
 	spin_unlock(&purge_vmap_area_lock);
 
-	if (unlikely(list_empty(&local_pure_list)))
+	if (unlikely(list_empty(&local_purge_list)))
 		return false;
 
 	start = min(start,
-		list_first_entry(&local_pure_list,
+		list_first_entry(&local_purge_list,
 			struct vmap_area, list)->va_start);
 
 	end = max(end,
-		list_last_entry(&local_pure_list,
+		list_last_entry(&local_purge_list,
 			struct vmap_area, list)->va_end);
 
 	flush_tlb_kernel_range(start, end);
 	resched_threshold = lazy_max_pages() << 1;
 
 	spin_lock(&free_vmap_area_lock);
-	list_for_each_entry_safe(va, n_va, &local_pure_list, list) {
+	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 		unsigned long orig_start = va->va_start;
 		unsigned long orig_end = va->va_end;
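For context, the renamed variable is the local list head that
__purge_vmap_area_lazy() uses to detach the shared purge list while
holding purge_vmap_area_lock, so the vmap areas can then be walked and
freed without the lock. Below is a minimal userspace sketch of that
detach-then-walk pattern; the list helpers mirror the semantics of the
kernel's <linux/list.h> primitives, and struct item with its id field
is a made-up stand-in for struct vmap_area, not actual kernel code.

/*
 * Sketch of the pattern in __purge_vmap_area_lazy(): move every entry
 * from a shared list onto a local list head in one step (done under a
 * spinlock in the kernel), then iterate the private copy safely.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->prev = h->next = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/*
 * Same semantics as the kernel's list_replace_init(): @new takes over
 * all entries of @old, and @old is reinitialized to empty.
 */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
	INIT_LIST_HEAD(old);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item {			/* illustrative stand-in for struct vmap_area */
	int id;
	struct list_head list;
};

int main(void)
{
	struct list_head shared, local_purge_list;
	struct item items[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
	struct list_head *pos, *n;

	INIT_LIST_HEAD(&shared);
	for (int i = 0; i < 3; i++)
		list_add_tail(&items[i].list, &shared);

	/* In the kernel this step runs with purge_vmap_area_lock held. */
	list_replace_init(&shared, &local_purge_list);

	/*
	 * Walk the detached list; no lock is needed any more.  The
	 * kernel uses list_for_each_entry_safe() for the same loop.
	 */
	for (pos = local_purge_list.next; pos != &local_purge_list; pos = n) {
		n = pos->next;
		printf("purging item %d\n",
		       container_of(pos, struct item, list)->id);
	}
	return 0;
}

Detaching onto a local list head keeps the lock hold time short: the
shared list is emptied in O(1), and the potentially long per-entry work
happens on a list no other CPU can see.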