
[04/10] mm: move vmalloc_init and free_work down in vmalloc.c

Message ID 20230119100226.789506-5-hch@lst.de (mailing list archive)
State New
Series [01/10] vmalloc: reject vmap with VM_FLUSH_RESET_PERMS

Commit Message

Christoph Hellwig Jan. 19, 2023, 10:02 a.m. UTC
Move free_work() and vmalloc_init() further down in vmalloc.c to avoid forward declarations; free_work() is renamed to delayed_vfree_work() in the process.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 mm/vmalloc.c | 105 +++++++++++++++++++++++++--------------------------
 1 file changed, 52 insertions(+), 53 deletions(-)
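
As background (the sketch below is illustrative only, with made-up names, and is not from mm/vmalloc.c): a function needs to be declared before it is called, so defining callees ahead of their callers lets a file drop standalone prototypes such as the removed "static void __vunmap(const void *, int);" line. A minimal standalone example of the callee-first ordering:

	#include <stdio.h>

	/* Callee defined first: no "static void report(int);" prototype needed. */
	static void report(int v)
	{
		printf("value: %d\n", v);
	}

	/* Caller defined after the callee it uses. */
	static void run(void)
	{
		report(42);	/* definition is already visible here */
	}

	int main(void)
	{
		run();
		return 0;
	}

With the definitions in the opposite order, a forward declaration of report() would be required above run(). The patch applies the same ordering: delayed_vfree_work() is defined after __vunmap(), and vmalloc_init() moves to the end of the file, below everything it references.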

Comments

Uladzislau Rezki Jan. 19, 2023, 6:48 p.m. UTC | #1
On Thu, Jan 19, 2023 at 11:02:20AM +0100, Christoph Hellwig wrote:
> Move free_work() and vmalloc_init() further down in vmalloc.c to avoid forward declarations; free_work() is renamed to delayed_vfree_work() in the process.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  mm/vmalloc.c | 105 +++++++++++++++++++++++++--------------------------
>  1 file changed, 52 insertions(+), 53 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index fafb6227f4428f..daeb28b54663d5 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -89,17 +89,6 @@ struct vfree_deferred {
>  };
>  static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
>  
> -static void __vunmap(const void *, int);
> -
> -static void free_work(struct work_struct *w)
> -{
> -	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
> -	struct llist_node *t, *llnode;
> -
> -	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
> -		__vunmap((void *)llnode, 1);
> -}
> -
>  /*** Page table manipulation functions ***/
>  static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
>  			phys_addr_t phys_addr, pgprot_t prot,
> @@ -2449,48 +2438,6 @@ static void vmap_init_free_space(void)
>  	}
>  }
>  
> -void __init vmalloc_init(void)
> -{
> -	struct vmap_area *va;
> -	struct vm_struct *tmp;
> -	int i;
> -
> -	/*
> -	 * Create the cache for vmap_area objects.
> -	 */
> -	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
> -
> -	for_each_possible_cpu(i) {
> -		struct vmap_block_queue *vbq;
> -		struct vfree_deferred *p;
> -
> -		vbq = &per_cpu(vmap_block_queue, i);
> -		spin_lock_init(&vbq->lock);
> -		INIT_LIST_HEAD(&vbq->free);
> -		p = &per_cpu(vfree_deferred, i);
> -		init_llist_head(&p->list);
> -		INIT_WORK(&p->wq, free_work);
> -	}
> -
> -	/* Import existing vmlist entries. */
> -	for (tmp = vmlist; tmp; tmp = tmp->next) {
> -		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
> -		if (WARN_ON_ONCE(!va))
> -			continue;
> -
> -		va->va_start = (unsigned long)tmp->addr;
> -		va->va_end = va->va_start + tmp->size;
> -		va->vm = tmp;
> -		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
> -	}
> -
> -	/*
> -	 * Now we can initialize a free vmap space.
> -	 */
> -	vmap_init_free_space();
> -	vmap_initialized = true;
> -}
> -
>  static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
>  	struct vmap_area *va, unsigned long flags, const void *caller)
>  {
> @@ -2769,6 +2716,15 @@ static void __vunmap(const void *addr, int deallocate_pages)
>  	kfree(area);
>  }
>  
> +static void delayed_vfree_work(struct work_struct *w)
> +{
> +	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
> +	struct llist_node *t, *llnode;
> +
> +	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
> +		__vunmap((void *)llnode, 1);
> +}
> +
>  /**
>   * vfree_atomic - release memory allocated by vmalloc()
>   * @addr:	  memory base address
> @@ -4315,3 +4271,46 @@ static int __init proc_vmalloc_init(void)
>  module_init(proc_vmalloc_init);
>  
>  #endif
> +
> +void __init vmalloc_init(void)
> +{
> +	struct vmap_area *va;
> +	struct vm_struct *tmp;
> +	int i;
> +
> +	/*
> +	 * Create the cache for vmap_area objects.
> +	 */
> +	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
> +
> +	for_each_possible_cpu(i) {
> +		struct vmap_block_queue *vbq;
> +		struct vfree_deferred *p;
> +
> +		vbq = &per_cpu(vmap_block_queue, i);
> +		spin_lock_init(&vbq->lock);
> +		INIT_LIST_HEAD(&vbq->free);
> +		p = &per_cpu(vfree_deferred, i);
> +		init_llist_head(&p->list);
> +		INIT_WORK(&p->wq, delayed_vfree_work);
> +	}
> +
> +	/* Import existing vmlist entries. */
> +	for (tmp = vmlist; tmp; tmp = tmp->next) {
> +		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
> +		if (WARN_ON_ONCE(!va))
> +			continue;
> +
> +		va->va_start = (unsigned long)tmp->addr;
> +		va->va_end = va->va_start + tmp->size;
> +		va->vm = tmp;
> +		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
> +	}
> +
> +	/*
> +	 * Now we can initialize a free vmap space.
> +	 */
> +	vmap_init_free_space();
> +	vmap_initialized = true;
> +}
> +
> -- 
> 2.39.0
> 
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

--
Uladzislau Rezki

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fafb6227f4428f..daeb28b54663d5 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -89,17 +89,6 @@ struct vfree_deferred {
 };
 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
 
-static void __vunmap(const void *, int);
-
-static void free_work(struct work_struct *w)
-{
-	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
-	struct llist_node *t, *llnode;
-
-	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
-		__vunmap((void *)llnode, 1);
-}
-
 /*** Page table manipulation functions ***/
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
@@ -2449,48 +2438,6 @@ static void vmap_init_free_space(void)
 	}
 }
 
-void __init vmalloc_init(void)
-{
-	struct vmap_area *va;
-	struct vm_struct *tmp;
-	int i;
-
-	/*
-	 * Create the cache for vmap_area objects.
-	 */
-	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
-
-	for_each_possible_cpu(i) {
-		struct vmap_block_queue *vbq;
-		struct vfree_deferred *p;
-
-		vbq = &per_cpu(vmap_block_queue, i);
-		spin_lock_init(&vbq->lock);
-		INIT_LIST_HEAD(&vbq->free);
-		p = &per_cpu(vfree_deferred, i);
-		init_llist_head(&p->list);
-		INIT_WORK(&p->wq, free_work);
-	}
-
-	/* Import existing vmlist entries. */
-	for (tmp = vmlist; tmp; tmp = tmp->next) {
-		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
-		if (WARN_ON_ONCE(!va))
-			continue;
-
-		va->va_start = (unsigned long)tmp->addr;
-		va->va_end = va->va_start + tmp->size;
-		va->vm = tmp;
-		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
-	}
-
-	/*
-	 * Now we can initialize a free vmap space.
-	 */
-	vmap_init_free_space();
-	vmap_initialized = true;
-}
-
 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
 	struct vmap_area *va, unsigned long flags, const void *caller)
 {
@@ -2769,6 +2716,15 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	kfree(area);
 }
 
+static void delayed_vfree_work(struct work_struct *w)
+{
+	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+	struct llist_node *t, *llnode;
+
+	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
+		__vunmap((void *)llnode, 1);
+}
+
 /**
  * vfree_atomic - release memory allocated by vmalloc()
  * @addr:	  memory base address
@@ -4315,3 +4271,46 @@ static int __init proc_vmalloc_init(void)
 module_init(proc_vmalloc_init);
 
 #endif
+
+void __init vmalloc_init(void)
+{
+	struct vmap_area *va;
+	struct vm_struct *tmp;
+	int i;
+
+	/*
+	 * Create the cache for vmap_area objects.
+	 */
+	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
+
+	for_each_possible_cpu(i) {
+		struct vmap_block_queue *vbq;
+		struct vfree_deferred *p;
+
+		vbq = &per_cpu(vmap_block_queue, i);
+		spin_lock_init(&vbq->lock);
+		INIT_LIST_HEAD(&vbq->free);
+		p = &per_cpu(vfree_deferred, i);
+		init_llist_head(&p->list);
+		INIT_WORK(&p->wq, delayed_vfree_work);
+	}
+
+	/* Import existing vmlist entries. */
+	for (tmp = vmlist; tmp; tmp = tmp->next) {
+		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+		if (WARN_ON_ONCE(!va))
+			continue;
+
+		va->va_start = (unsigned long)tmp->addr;
+		va->va_end = va->va_start + tmp->size;
+		va->vm = tmp;
+		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+	}
+
+	/*
+	 * Now we can initialize a free vmap space.
+	 */
+	vmap_init_free_space();
+	vmap_initialized = true;
+}
+