
[3/5] mm/vmalloc.c: optimize code in decay_va_pool_node() a little bit

Message ID 20250415023952.27850-4-bhe@redhat.com (mailing list archive)
State New
Series: mm/vmalloc.c: code cleanup and improvements

Commit Message

Baoquan He April 15, 2025, 2:39 a.m. UTC
When purging lazily freed vmap areas, the VAs stored in vn->pool[] are
also taken away into the free vmap tree, partially or completely; that
is done in decay_va_pool_node(). When doing that, for each pool of a
node, the whole list is detached from the pool for handling. At this
time, that pool is empty, so it's not necessary to update the pool size
each time one VA is removed and added into the free vmap tree.

Here, change the code to update the pool size once when attaching the
pool back.

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/vmalloc.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)
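
For readers skimming the thread, a minimal userspace sketch of the
pattern the patch applies: detach the whole list under the lock, decay
part of it lock-free, then write the surviving length back once when
the remainder is reattached, instead of decrementing the shared counter
per removed entry. This is an illustration only, not the kernel code;
the pool/node types here are simplified stand-ins and free() stands in
for merge_or_add_vmap_area().

<snip>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

struct pool {
	pthread_mutex_t lock;
	struct node *head;
	unsigned long len;
};

static void decay_pool(struct pool *p, bool full_decay)
{
	struct node *list, *n;
	unsigned long len, n_decay;

	/* Detach the whole list; the pool is empty from here on. */
	pthread_mutex_lock(&p->lock);
	list = p->head;
	p->head = NULL;
	len = n_decay = p->len;
	p->len = 0;
	pthread_mutex_unlock(&p->lock);

	/* Decay ~25% of the entries, or all of them on full decay. */
	if (!full_decay)
		n_decay >>= 2;
	len -= n_decay;

	/* Release decayed entries without touching the shared counter. */
	while (n_decay-- && list) {
		n = list;
		list = n->next;
		free(n);	/* stands in for merge_or_add_vmap_area() */
	}

	/* Reattach the remainder with a single length update. */
	if (list) {
		pthread_mutex_lock(&p->lock);
		p->head = list;
		p->len = len;
		pthread_mutex_unlock(&p->lock);
	}
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	for (int i = 0; i < 8; i++) {
		struct node *n = malloc(sizeof(*n));
		n->next = p.head;
		p.head = n;
		p.len++;
	}

	decay_pool(&p, false);
	printf("len after ~25%% decay: %lu\n", p.len);	/* 6 */
	decay_pool(&p, true);
	printf("len after full decay: %lu\n", p.len);	/* 0 */
	return 0;
}
<snip>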

Comments

Shivank Garg April 15, 2025, 10:29 a.m. UTC | #1
On 4/15/2025 8:09 AM, Baoquan He wrote:
> When purging lazily freed vmap areas, the VAs stored in vn->pool[] are
> also taken away into the free vmap tree, partially or completely; that
> is done in decay_va_pool_node(). When doing that, for each pool of a
> node, the whole list is detached from the pool for handling. At this
> time, that pool is empty, so it's not necessary to update the pool size
> each time one VA is removed and added into the free vmap tree.
> 
> Here, change the code to update the pool size once when attaching the
> pool back.
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>
> ---
>  mm/vmalloc.c | 23 +++++++++++------------
>  1 file changed, 11 insertions(+), 12 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 488d69b56765..bf735c890878 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2150,7 +2150,7 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
>  	LIST_HEAD(decay_list);
>  	struct rb_root decay_root = RB_ROOT;
>  	struct vmap_area *va, *nva;
> -	unsigned long n_decay;
> +	unsigned long n_decay, len;
>  	int i;
>  
>  	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
> @@ -2164,22 +2164,20 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
>  		list_replace_init(&vn->pool[i].head, &tmp_list);
>  		spin_unlock(&vn->pool_lock);
>  
> -		if (full_decay)
> -			WRITE_ONCE(vn->pool[i].len, 0);
> +		len = n_decay = vn->pool[i].len;
> +		WRITE_ONCE(vn->pool[i].len, 0);
>  
>  		/* Decay a pool by ~25% out of left objects. */
> -		n_decay = vn->pool[i].len >> 2;
> +		if (!full_decay)
> +			n_decay >>= 2;
> +		len -= n_decay;
>  
>  		list_for_each_entry_safe(va, nva, &tmp_list, list) {
> +			if (!n_decay)
> +				break;
>  			list_del_init(&va->list);
>  			merge_or_add_vmap_area(va, &decay_root, &decay_list);
> -
> -			if (!full_decay) {
> -				WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
> -
> -				if (!--n_decay)
> -					break;
> -			}
> +			n_decay--;
>  		}
>  
>  		/*
> @@ -2188,9 +2186,10 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
>  		 * can populate the pool therefore a simple list replace
>  		 * operation takes place here.
>  		 */
> -		if (!full_decay && !list_empty(&tmp_list)) {
> +		if (!list_empty(&tmp_list)) {
>  			spin_lock(&vn->pool_lock);
>  			list_replace_init(&tmp_list, &vn->pool[i].head);
> +			vn->pool[i].len = len;

The current logic uses WRITE_ONCE() to update vn->pool[i].len.
Could this plain assignment lead to consistency issues?

>  			spin_unlock(&vn->pool_lock);
>  		}
>  	}
Baoquan He April 15, 2025, 2:05 p.m. UTC | #2
On 04/15/25 at 03:59pm, Shivank Garg wrote:
> On 4/15/2025 8:09 AM, Baoquan He wrote:
> > When purging lazily freed vmap areas, the VAs stored in vn->pool[] are
> > also taken away into the free vmap tree, partially or completely; that
> > is done in decay_va_pool_node(). When doing that, for each pool of a
> > node, the whole list is detached from the pool for handling. At this
> > time, that pool is empty, so it's not necessary to update the pool size
> > each time one VA is removed and added into the free vmap tree.
> > 
> > Here, change the code to update the pool size once when attaching the
> > pool back.
> > 
> > Signed-off-by: Baoquan He <bhe@redhat.com>
> > ---
> >  mm/vmalloc.c | 23 +++++++++++------------
> >  1 file changed, 11 insertions(+), 12 deletions(-)
> > 
> > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > index 488d69b56765..bf735c890878 100644
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -2150,7 +2150,7 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
> >  	LIST_HEAD(decay_list);
> >  	struct rb_root decay_root = RB_ROOT;
> >  	struct vmap_area *va, *nva;
> > -	unsigned long n_decay;
> > +	unsigned long n_decay, len;
> >  	int i;
> >  
> >  	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
> > @@ -2164,22 +2164,20 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
> >  		list_replace_init(&vn->pool[i].head, &tmp_list);
> >  		spin_unlock(&vn->pool_lock);
> >  
> > -		if (full_decay)
> > -			WRITE_ONCE(vn->pool[i].len, 0);
> > +		len = n_decay = vn->pool[i].len;
> > +		WRITE_ONCE(vn->pool[i].len, 0);
> >  
> >  		/* Decay a pool by ~25% out of left objects. */
> > -		n_decay = vn->pool[i].len >> 2;
> > +		if (!full_decay)
> > +			n_decay >>= 2;
> > +		len -= n_decay;
> >  
> >  		list_for_each_entry_safe(va, nva, &tmp_list, list) {
> > +			if (!n_decay)
> > +				break;
> >  			list_del_init(&va->list);
> >  			merge_or_add_vmap_area(va, &decay_root, &decay_list);
> > -
> > -			if (!full_decay) {
> > -				WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
> > -
> > -				if (!--n_decay)
> > -					break;
> > -			}
> > +			n_decay--;
> >  		}
> >  
> >  		/*
> > @@ -2188,9 +2186,10 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
> >  		 * can populate the pool therefore a simple list replace
> >  		 * operation takes place here.
> >  		 */
> > -		if (!full_decay && !list_empty(&tmp_list)) {
> > +		if (!list_empty(&tmp_list)) {
> >  			spin_lock(&vn->pool_lock);
> >  			list_replace_init(&tmp_list, &vn->pool[i].head);
> > +			vn->pool[i].len = len;
> 
> The current logic uses WRITE_ONCE() to update vn->pool[i].len.
> Could this plain assignment lead to consistency issues?

It seems unnecessary to use WRITE_ONCE() here, but I can change it back
to WRITE_ONCE() just in case. Currently, the field is only updated in
node_alloc(), decay_va_pool_node() and purge_vmap_node(), and the
latter two are inside the vmap_purge_lock area.

> 
> >  			spin_unlock(&vn->pool_lock);
> >  		}
> >  	}
>
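
(A side note on the WRITE_ONCE() point, with simplified userspace
stand-ins for the real kernel macros: READ_ONCE()/WRITE_ONCE() only
keep the compiler from tearing, fusing or re-fetching the marked
access, which is what a lockless reader of vn->pool[i].len would need;
they provide no mutual exclusion between writers - that comes from the
locks named above.)

<snip>
#include <stdio.h>

/* Simplified stand-ins; the kernel versions add type checking and
 * other details. A volatile access forces one untorn load/store. */
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))

static unsigned long pool_len = 8;

int main(void)
{
	/* Writer side: serialized by a lock in the vmalloc case. */
	WRITE_ONCE(pool_len, 0);

	/* A lockless reader sees the old or the new value, never a
	 * torn intermediate one. */
	printf("%lu\n", READ_ONCE(pool_len));
	return 0;
}
<snip>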
Shivank Garg April 15, 2025, 7:02 p.m. UTC | #3
On 4/15/2025 7:35 PM, Baoquan He wrote:
> On 04/15/25 at 03:59pm, Shivank Garg wrote:
>> On 4/15/2025 8:09 AM, Baoquan He wrote:
>>> When purging lazily freed vmap areas, the VAs stored in vn->pool[] are
>>> also taken away into the free vmap tree, partially or completely; that
>>> is done in decay_va_pool_node(). When doing that, for each pool of a
>>> node, the whole list is detached from the pool for handling. At this
>>> time, that pool is empty, so it's not necessary to update the pool size
>>> each time one VA is removed and added into the free vmap tree.
>>>
>>> Here, change the code to update the pool size once when attaching the
>>> pool back.
>>>
>>> Signed-off-by: Baoquan He <bhe@redhat.com>
>>> ---
>>>  mm/vmalloc.c | 23 +++++++++++------------
>>>  1 file changed, 11 insertions(+), 12 deletions(-)
>>>
>>> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
>>> index 488d69b56765..bf735c890878 100644
>>> --- a/mm/vmalloc.c
>>> +++ b/mm/vmalloc.c
>>> @@ -2150,7 +2150,7 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
>>>  	LIST_HEAD(decay_list);
>>>  	struct rb_root decay_root = RB_ROOT;
>>>  	struct vmap_area *va, *nva;
>>> -	unsigned long n_decay;
>>> +	unsigned long n_decay, len;
>>>  	int i;
>>>  
>>>  	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
>>> @@ -2164,22 +2164,20 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
>>>  		list_replace_init(&vn->pool[i].head, &tmp_list);
>>>  		spin_unlock(&vn->pool_lock);
>>>  
>>> -		if (full_decay)
>>> -			WRITE_ONCE(vn->pool[i].len, 0);
>>> +		len = n_decay = vn->pool[i].len;
>>> +		WRITE_ONCE(vn->pool[i].len, 0);
>>>  
>>>  		/* Decay a pool by ~25% out of left objects. */
>>> -		n_decay = vn->pool[i].len >> 2;
>>> +		if (!full_decay)
>>> +			n_decay >>= 2;
>>> +		len -= n_decay;
>>>  
>>>  		list_for_each_entry_safe(va, nva, &tmp_list, list) {
>>> +			if (!n_decay)
>>> +				break;
>>>  			list_del_init(&va->list);
>>>  			merge_or_add_vmap_area(va, &decay_root, &decay_list);
>>> -
>>> -			if (!full_decay) {
>>> -				WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
>>> -
>>> -				if (!--n_decay)
>>> -					break;
>>> -			}
>>> +			n_decay--;
>>>  		}
>>>  
>>>  		/*
>>> @@ -2188,9 +2186,10 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
>>>  		 * can populate the pool therefore a simple list replace
>>>  		 * operation takes place here.
>>>  		 */
>>> -		if (!full_decay && !list_empty(&tmp_list)) {
>>> +		if (!list_empty(&tmp_list)) {
>>>  			spin_lock(&vn->pool_lock);
>>>  			list_replace_init(&tmp_list, &vn->pool[i].head);
>>> +			vn->pool[i].len = len;
>>
>> The current logic uses WRITE_ONCE() to update vn->pool[i].len.
>> Could this plain assignment lead to consistency issues?
> 
> It seems unnecessary to use WRITE_ONCE() here, but I can change it back
> to WRITE_ONCE() just in case. Currently, the field is only updated in
> node_alloc(), decay_va_pool_node() and purge_vmap_node(), and the
> latter two are inside the vmap_purge_lock area.
> 

Okay.

Reviewed-by: Shivank Garg <shivankg@amd.com>
Tested-by: Shivank Garg <shivankg@amd.com>

Best,
Shivank

>>
>>>  			spin_unlock(&vn->pool_lock);
>>>  		}
>>>  	}
>>
>
Uladzislau Rezki April 16, 2025, 1:50 p.m. UTC | #4
On Tue, Apr 15, 2025 at 10:39:50AM +0800, Baoquan He wrote:
> When purging lazily freed vmap areas, the VAs stored in vn->pool[] are
> also taken away into the free vmap tree, partially or completely; that
> is done in decay_va_pool_node(). When doing that, for each pool of a
> node, the whole list is detached from the pool for handling. At this
> time, that pool is empty, so it's not necessary to update the pool size
> each time one VA is removed and added into the free vmap tree.
> 
> Here, change the code to update the pool size once when attaching the
> pool back.
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>
> ---
>  mm/vmalloc.c | 23 +++++++++++------------
>  1 file changed, 11 insertions(+), 12 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 488d69b56765..bf735c890878 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2150,7 +2150,7 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
>  	LIST_HEAD(decay_list);
>  	struct rb_root decay_root = RB_ROOT;
>  	struct vmap_area *va, *nva;
> -	unsigned long n_decay;
> +	unsigned long n_decay, len;
>  	int i;
>  
>  	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
> @@ -2164,22 +2164,20 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
>  		list_replace_init(&vn->pool[i].head, &tmp_list);
>  		spin_unlock(&vn->pool_lock);
>  
> -		if (full_decay)
> -			WRITE_ONCE(vn->pool[i].len, 0);
> +		len = n_decay = vn->pool[i].len;
> +		WRITE_ONCE(vn->pool[i].len, 0);
>  
>  		/* Decay a pool by ~25% out of left objects. */
> -		n_decay = vn->pool[i].len >> 2;
> +		if (!full_decay)
> +			n_decay >>= 2;
> +		len -= n_decay;
>  
>  		list_for_each_entry_safe(va, nva, &tmp_list, list) {
> +			if (!n_decay)
> +				break;
>  			list_del_init(&va->list);
>  			merge_or_add_vmap_area(va, &decay_root, &decay_list);
> -
> -			if (!full_decay) {
> -				WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
> -
> -				if (!--n_decay)
> -					break;
> -			}
> +			n_decay--;
>  		}
>  
>  		/*
> @@ -2188,9 +2186,10 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
>  		 * can populate the pool therefore a simple list replace
>  		 * operation takes place here.
>  		 */
> -		if (!full_decay && !list_empty(&tmp_list)) {
> +		if (!list_empty(&tmp_list)) {
>  			spin_lock(&vn->pool_lock);
>  			list_replace_init(&tmp_list, &vn->pool[i].head);
> +			vn->pool[i].len = len;
>  			spin_unlock(&vn->pool_lock);
>  		}
>  	}
> -- 
> 2.41.0
> 
Which Linux version is this patch based on? I cannot apply it.

Small nits:

<snip>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c909b8fea6eb..0ae53c997219 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2150,7 +2150,7 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
 	LIST_HEAD(decay_list);
 	struct rb_root decay_root = RB_ROOT;
 	struct vmap_area *va, *nva;
-	unsigned long n_decay, len;
+	unsigned long n_decay, pool_len;
 	int i;
 
 	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
@@ -2164,21 +2164,20 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
 		list_replace_init(&vn->pool[i].head, &tmp_list);
 		spin_unlock(&vn->pool_lock);
 
-		len = n_decay = vn->pool[i].len;
+		pool_len = n_decay = vn->pool[i].len;
 		WRITE_ONCE(vn->pool[i].len, 0);
 
 		/* Decay a pool by ~25% out of left objects. */
 		if (!full_decay)
 			n_decay >>= 2;
-		len -= n_decay;
+		pool_len -= n_decay;
 
 		list_for_each_entry_safe(va, nva, &tmp_list, list) {
-			if (!n_decay)
+			if (!n_decay--)
 				break;
 
 			list_del_init(&va->list);
 			merge_or_add_vmap_area(va, &decay_root, &decay_list);
-			n_decay--;
 		}
 
 		/*
@@ -2190,7 +2189,7 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
 		if (!list_empty(&tmp_list)) {
 			spin_lock(&vn->pool_lock);
 			list_replace_init(&tmp_list, &vn->pool[i].head);
-			vn->pool[i].len = len;
+			vn->pool[i].len = pool_len;
 			spin_unlock(&vn->pool_lock);
 		}
 	}
<snip>

How about this on top?

a) do the decrement in the "if" statement, no extra line needed;
b) rename "len" to something more obvious - pool_len.

--
Uladzislau Rezki
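
(A note on the "if (!n_decay--)" form in the nit above, illustration
only: the post-decrement tests the old value first, so the loop still
moves exactly n_decay entries; n_decay wraps to ULONG_MAX on the
breaking pass, which is harmless since it is not used after the loop.
A quick standalone check:)

<snip>
#include <stdio.h>

int main(void)
{
	unsigned long n_decay = 2, moved = 0;

	/* Five candidates, but only n_decay of them move. */
	for (int i = 0; i < 5; i++) {
		if (!n_decay--)	/* tests 2, 1, then 0 -> break */
			break;
		moved++;
	}
	printf("moved %lu\n", moved);	/* moved 2 */
	return 0;
}
<snip>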

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 488d69b56765..bf735c890878 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2150,7 +2150,7 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
 	LIST_HEAD(decay_list);
 	struct rb_root decay_root = RB_ROOT;
 	struct vmap_area *va, *nva;
-	unsigned long n_decay;
+	unsigned long n_decay, len;
 	int i;
 
 	for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
@@ -2164,22 +2164,20 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
 		list_replace_init(&vn->pool[i].head, &tmp_list);
 		spin_unlock(&vn->pool_lock);
 
-		if (full_decay)
-			WRITE_ONCE(vn->pool[i].len, 0);
+		len = n_decay = vn->pool[i].len;
+		WRITE_ONCE(vn->pool[i].len, 0);
 
 		/* Decay a pool by ~25% out of left objects. */
-		n_decay = vn->pool[i].len >> 2;
+		if (!full_decay)
+			n_decay >>= 2;
+		len -= n_decay;
 
 		list_for_each_entry_safe(va, nva, &tmp_list, list) {
+			if (!n_decay)
+				break;
 			list_del_init(&va->list);
 			merge_or_add_vmap_area(va, &decay_root, &decay_list);
-
-			if (!full_decay) {
-				WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
-
-				if (!--n_decay)
-					break;
-			}
+			n_decay--;
 		}
 
 		/*
@@ -2188,9 +2186,10 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
 		 * can populate the pool therefore a simple list replace
 		 * operation takes place here.
 		 */
-		if (!full_decay && !list_empty(&tmp_list)) {
+		if (!list_empty(&tmp_list)) {
 			spin_lock(&vn->pool_lock);
 			list_replace_init(&tmp_list, &vn->pool[i].head);
+			vn->pool[i].len = len;
 			spin_unlock(&vn->pool_lock);
 		}
 	}