
[09/10] mm: split __vunmap

Message ID 20230119100226.789506-10-hch@lst.de (mailing list archive)
State New
Series [01/10] vmalloc: reject vmap with VM_FLUSH_RESET_PERMS

Commit Message

Christoph Hellwig Jan. 19, 2023, 10:02 a.m. UTC
vunmap only needs to find and free the vmap_area and vm_struct, so open
code that there and merge the rest of the code into vfree.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 mm/vmalloc.c | 85 ++++++++++++++++++++++++++--------------------------
 1 file changed, 42 insertions(+), 43 deletions(-)
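
In outline, after the split both entry points open code the remove_vm_area()
lookup and then diverge: vfree() tears down the mappings and frees the backing
pages, while vunmap() only frees the metadata. A condensed sketch of the
resulting control flow (simplified from the diff below; NULL checks, memcg
accounting and cond_resched() are elided):

        void vfree(const void *addr)
        {
                struct vm_struct *vm = remove_vm_area(addr);
                int i;

                va_remove_mappings(vm, true);
                for (i = 0; i < vm->nr_pages; i++)
                        /* huge vmalloc allocations are pre-split to order 0 */
                        __free_pages(vm->pages[i], 0);
                atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
                kvfree(vm->pages);
                kfree(vm);
        }

        void vunmap(const void *addr)
        {
                struct vm_struct *vm = remove_vm_area(addr);

                WARN_ON_ONCE(vm->flags & VM_FLUSH_RESET_PERMS);
                kfree(vm);
        }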

Comments

Uladzislau Rezki Jan. 19, 2023, 6:50 p.m. UTC | #1
On Thu, Jan 19, 2023 at 11:02:25AM +0100, Christoph Hellwig wrote:
> vunmap only needs to find and free the vmap_area and vm_struct, so open
> code that there and merge the rest of the code into vfree.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  mm/vmalloc.c | 85 ++++++++++++++++++++++++++--------------------------
>  1 file changed, 42 insertions(+), 43 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 4cb189bdd51499..791d906d7e407c 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2666,45 +2666,6 @@ static void va_remove_mappings(struct vm_struct *area, int deallocate_pages)
>  	set_area_direct_map(area, set_direct_map_default_noflush);
>  }
>  
> -static void __vunmap(const void *addr, int deallocate_pages)
> -{
> -	struct vm_struct *area;
> -
> -	if (!addr)
> -		return;
> -
> -	area = remove_vm_area(addr);
> -	if (unlikely(!area)) {
> -		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
> -				addr);
> -		return;
> -	}
> -
> -	va_remove_mappings(area, deallocate_pages);
> -
> -	if (deallocate_pages) {
> -		int i;
> -
> -		for (i = 0; i < area->nr_pages; i++) {
> -			struct page *page = area->pages[i];
> -
> -			BUG_ON(!page);
> -			mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
> -			/*
> -			 * High-order allocs for huge vmallocs are split, so
> -			 * can be freed as an array of order-0 allocations
> -			 */
> -			__free_pages(page, 0);
> -			cond_resched();
> -		}
> -		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
> -
> -		kvfree(area->pages);
> -	}
> -
> -	kfree(area);
> -}
> -
>  static void delayed_vfree_work(struct work_struct *w)
>  {
>  	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
> @@ -2757,6 +2718,9 @@ void vfree_atomic(const void *addr)
>   */
>  void vfree(const void *addr)
>  {
> +	struct vm_struct *vm;
> +	int i;
> +
>  	if (unlikely(in_interrupt())) {
>  		vfree_atomic(addr);
>  		return;
> @@ -2766,8 +2730,32 @@ void vfree(const void *addr)
>  	kmemleak_free(addr);
>  	might_sleep();
>  
> -	if (addr)
> -		__vunmap(addr, 1);
> +	if (!addr)
> +		return;
> +
> +	vm = remove_vm_area(addr);
> +	if (unlikely(!vm)) {
> +		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
> +				addr);
> +		return;
> +	}
> +
> +	va_remove_mappings(vm, true);
> +	for (i = 0; i < vm->nr_pages; i++) {
> +		struct page *page = vm->pages[i];
> +
> +		BUG_ON(!page);
> +		mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
> +		/*
> +		 * High-order allocs for huge vmallocs are split, so
> +		 * can be freed as an array of order-0 allocations
> +		 */
> +		__free_pages(page, 0);
> +		cond_resched();
> +	}
> +	atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
> +	kvfree(vm->pages);
> +	kfree(vm);
>  }
>  EXPORT_SYMBOL(vfree);
>  
> @@ -2782,10 +2770,21 @@ EXPORT_SYMBOL(vfree);
>   */
>  void vunmap(const void *addr)
>  {
> +	struct vm_struct *vm;
> +
>  	BUG_ON(in_interrupt());
>  	might_sleep();
> -	if (addr)
> -		__vunmap(addr, 0);
> +
> +	if (!addr)
> +		return;
> +	vm = remove_vm_area(addr);
> +	if (unlikely(!vm)) {
> +		WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
> +				addr);
> +		return;
> +	}
> +	WARN_ON_ONCE(vm->flags & VM_FLUSH_RESET_PERMS);
> +	kfree(vm);
>  }
>  EXPORT_SYMBOL(vunmap);
>  
> -- 
> 2.39.0
> 
After this patch the same check at the end of vunmap() becomes odd,
because we already fail a vmap() call on entry if the
VM_FLUSH_RESET_PERMS flag is set. See patch [1] in this series.

Is there any reason for such duplication?

--
Uladzislau Rezki
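
For context, the two checks in question sit at opposite ends of a mapping's
lifetime: patch [01/10] rejects the flag when a mapping is created, and this
patch keeps a sanity check when it is torn down. Roughly (the vmap() fragment
is an assumed paraphrase of patch [01/10], not its literal text):

        /* allocation side, per patch [01/10] (assumed shape, in vmap()): */
        if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
                return NULL;

        /* teardown side, in vunmap() after this patch: */
        WARN_ON_ONCE(vm->flags & VM_FLUSH_RESET_PERMS);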
Christoph Hellwig Jan. 20, 2023, 7:42 a.m. UTC | #2
On Thu, Jan 19, 2023 at 07:50:09PM +0100, Uladzislau Rezki wrote:
> After this patch the same check at the end of vunmap() becomes odd,
> because we already fail a vmap() call on entry if the
> VM_FLUSH_RESET_PERMS flag is set. See patch [1] in this series.
> 
> Is there any reason for such duplication?

Mostly just documentation for me to explain why no flushing is needed.
But I can drop it.
Uladzislau Rezki Jan. 20, 2023, 11:32 a.m. UTC | #3
On Fri, Jan 20, 2023 at 08:42:17AM +0100, Christoph Hellwig wrote:
> On Thu, Jan 19, 2023 at 07:50:09PM +0100, Uladzislau Rezki wrote:
> > After this patch the same check at the end of vunmap() becomes odd,
> > because we already fail a vmap() call on entry if the
> > VM_FLUSH_RESET_PERMS flag is set. See patch [1] in this series.
> > 
> > Is there any reason for such duplication?
> 
> Mostly just documentation for me to explain why no flushing is needed.
> But I can drop it.
>
Thanks.

--
Uladzislau Rezki
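
The externally visible contract of vfree() and vunmap() is unchanged by this
patch; a minimal hypothetical caller (the size is illustrative only):

        void *buf = vmalloc(16 * PAGE_SIZE);

        if (buf) {
                memset(buf, 0, 16 * PAGE_SIZE);
                /* vfree() still defers to vfree_atomic() in interrupt context */
                vfree(buf);
        }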

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4cb189bdd51499..791d906d7e407c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2666,45 +2666,6 @@ static void va_remove_mappings(struct vm_struct *area, int deallocate_pages)
 	set_area_direct_map(area, set_direct_map_default_noflush);
 }
 
-static void __vunmap(const void *addr, int deallocate_pages)
-{
-	struct vm_struct *area;
-
-	if (!addr)
-		return;
-
-	area = remove_vm_area(addr);
-	if (unlikely(!area)) {
-		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
-				addr);
-		return;
-	}
-
-	va_remove_mappings(area, deallocate_pages);
-
-	if (deallocate_pages) {
-		int i;
-
-		for (i = 0; i < area->nr_pages; i++) {
-			struct page *page = area->pages[i];
-
-			BUG_ON(!page);
-			mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
-			/*
-			 * High-order allocs for huge vmallocs are split, so
-			 * can be freed as an array of order-0 allocations
-			 */
-			__free_pages(page, 0);
-			cond_resched();
-		}
-		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
-
-		kvfree(area->pages);
-	}
-
-	kfree(area);
-}
-
 static void delayed_vfree_work(struct work_struct *w)
 {
 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
@@ -2757,6 +2718,9 @@ void vfree_atomic(const void *addr)
  */
 void vfree(const void *addr)
 {
+	struct vm_struct *vm;
+	int i;
+
 	if (unlikely(in_interrupt())) {
 		vfree_atomic(addr);
 		return;
@@ -2766,8 +2730,32 @@ void vfree(const void *addr)
 	kmemleak_free(addr);
 	might_sleep();
 
-	if (addr)
-		__vunmap(addr, 1);
+	if (!addr)
+		return;
+
+	vm = remove_vm_area(addr);
+	if (unlikely(!vm)) {
+		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
+				addr);
+		return;
+	}
+
+	va_remove_mappings(vm, true);
+	for (i = 0; i < vm->nr_pages; i++) {
+		struct page *page = vm->pages[i];
+
+		BUG_ON(!page);
+		mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
+		/*
+		 * High-order allocs for huge vmallocs are split, so
+		 * can be freed as an array of order-0 allocations
+		 */
+		__free_pages(page, 0);
+		cond_resched();
+	}
+	atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
+	kvfree(vm->pages);
+	kfree(vm);
 }
 EXPORT_SYMBOL(vfree);
 
@@ -2782,10 +2770,21 @@ EXPORT_SYMBOL(vfree);
  */
 void vunmap(const void *addr)
 {
+	struct vm_struct *vm;
+
 	BUG_ON(in_interrupt());
 	might_sleep();
-	if (addr)
-		__vunmap(addr, 0);
+
+	if (!addr)
+		return;
+	vm = remove_vm_area(addr);
+	if (unlikely(!vm)) {
+		WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
+				addr);
+		return;
+	}
+	WARN_ON_ONCE(vm->flags & VM_FLUSH_RESET_PERMS);
+	kfree(vm);
 }
 EXPORT_SYMBOL(vunmap);