[03/10] mm: remove __vfree_deferred

Message ID: 20230119100226.789506-4-hch@lst.de
State: New
Series: [01/10] vmalloc: reject vmap with VM_FLUSH_RESET_PERMS

Commit Message

Christoph Hellwig Jan. 19, 2023, 10:02 a.m. UTC
Fold __vfree_deferred into vfree_atomic, and have vfree call
vfree_atomic early when invoked from interrupt context, so that the
extra low-level helper can be avoided.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 mm/vmalloc.c | 43 +++++++++++++++++--------------------------
 1 file changed, 17 insertions(+), 26 deletions(-)
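
For readers coming to this cold: the deferred-free machinery being
consolidated here queues the address on a per-CPU lockless list and
schedules a work item that performs the real unmap later, from process
context. A minimal sketch of the receiving side, matching the
free_work() helper already in mm/vmalloc.c (simplified for illustration,
not part of this patch):

	struct vfree_deferred {
		struct llist_head list;	/* addresses queued for freeing */
		struct work_struct wq;	/* drains the list in process context */
	};
	static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

	/* Work handler: grab everything queued on this CPU and free it. */
	static void free_work(struct work_struct *w)
	{
		struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
		struct llist_node *t, *llnode;

		llist_for_each_safe(llnode, t, llist_del_all(&p->list))
			__vunmap((void *)llnode, 1);
	}

Note that llist_add() returns true only when the list was empty
beforehand, which is why vfree_atomic() can use its return value to
schedule the work item exactly once per batch of queued frees.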

Comments

Uladzislau Rezki Jan. 19, 2023, 6:47 p.m. UTC | #1
On Thu, Jan 19, 2023 at 11:02:19AM +0100, Christoph Hellwig wrote:
> Fold __vfree_deferred into vfree_atomic, and have vfree call
> vfree_atomic early when invoked from interrupt context, so that the
> extra low-level helper can be avoided.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  mm/vmalloc.c | 43 +++++++++++++++++--------------------------
>  1 file changed, 17 insertions(+), 26 deletions(-)
> 
> [ full patch quoted; trimmed, see the Patch section below ]
> 
Such folding makes sense to me.

Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

--
Uladzislau Rezki
Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b989828b45109a..fafb6227f4428f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2769,20 +2769,6 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	kfree(area);
 }
 
-static inline void __vfree_deferred(const void *addr)
-{
-	/*
-	 * Use raw_cpu_ptr() because this can be called from preemptible
-	 * context. Preemption is absolutely fine here, because the llist_add()
-	 * implementation is lockless, so it works even if we are adding to
-	 * another cpu's list. schedule_work() should be fine with this too.
-	 */
-	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
-
-	if (llist_add((struct llist_node *)addr, &p->list))
-		schedule_work(&p->wq);
-}
-
 /**
  * vfree_atomic - release memory allocated by vmalloc()
  * @addr:	  memory base address
@@ -2792,13 +2778,19 @@ static inline void __vfree_deferred(const void *addr)
  */
 void vfree_atomic(const void *addr)
 {
-	BUG_ON(in_nmi());
+	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
 
+	BUG_ON(in_nmi());
 	kmemleak_free(addr);
 
-	if (!addr)
-		return;
-	__vfree_deferred(addr);
+	/*
+	 * Use raw_cpu_ptr() because this can be called from preemptible
+	 * context. Preemption is absolutely fine here, because the llist_add()
+	 * implementation is lockless, so it works even if we are adding to
+	 * another cpu's list. schedule_work() should be fine with this too.
+	 */
+	if (addr && llist_add((struct llist_node *)addr, &p->list))
+		schedule_work(&p->wq);
 }
 
 /**
@@ -2820,17 +2812,16 @@ void vfree_atomic(const void *addr)
  */
 void vfree(const void *addr)
 {
-	BUG_ON(in_nmi());
+	if (unlikely(in_interrupt())) {
+		vfree_atomic(addr);
+		return;
+	}
 
+	BUG_ON(in_nmi());
 	kmemleak_free(addr);
+	might_sleep();
 
-	might_sleep_if(!in_interrupt());
-
-	if (!addr)
-		return;
-	if (unlikely(in_interrupt()))
-		__vfree_deferred(addr);
-	else
+	if (addr)
 		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
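
With the helper gone, the dispatch decision lives entirely in vfree():
a caller in hard or soft interrupt context is transparently routed to
vfree_atomic(). Callers that merely cannot sleep (e.g. under a spinlock,
where in_interrupt() is false) still have to call vfree_atomic()
themselves, since vfree() now runs might_sleep() unconditionally outside
interrupt context. A hypothetical caller, for illustration only:

	/* buf is a made-up example buffer, not from this series */
	void *buf = vmalloc(4 * PAGE_SIZE);
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */

	/*
	 * Safe from any non-NMI context after this patch: vfree()
	 * defers to vfree_atomic() when in_interrupt() is true, and
	 * the deferred work frees the mapping later.
	 */
	vfree(buf);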