
[2/9] mm: swap: factor out the actual swap entry freeing logic to new helper

Message ID 20250313210515.9920-3-shikemeng@huaweicloud.com (mailing list archive)
State New
Series Minor cleanups and improvements to swap freeing code

Commit Message

Kemeng Shi March 13, 2025, 9:05 p.m. UTC
Factor out the actual swap entry freeing logic into a new helper,
__swap_entries_free().
This allows us to further simplify other swap entry freeing code by
leveraging the __swap_entries_free() helper.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 mm/swapfile.c | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)

Comments

Kairui Song March 13, 2025, 5:42 p.m. UTC | #1
On Thu, Mar 13, 2025 at 8:09 PM Kemeng Shi <shikemeng@huaweicloud.com> wrote:
>
> Factor out the actual swap entry freeing logic into a new helper,
> __swap_entries_free().
> This allows us to further simplify other swap entry freeing code by
> leveraging the __swap_entries_free() helper.
>
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
> ---
>  mm/swapfile.c | 30 ++++++++++++++++++++----------
>  1 file changed, 20 insertions(+), 10 deletions(-)
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 5a775456e26c..7c886f9dd6f9 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1347,6 +1347,25 @@ static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
>         return NULL;
>  }
>
> +static inline void __swap_entries_free(struct swap_info_struct *si,
> +                                      struct swap_cluster_info *ci,
> +                                      swp_entry_t entry, unsigned int nr_pages)
> +{
> +       unsigned long offset = swp_offset(entry);
> +
> +       VM_BUG_ON(cluster_is_empty(ci));
> +       VM_BUG_ON(ci->count < nr_pages);
> +
> +       ci->count -= nr_pages;
> +       mem_cgroup_uncharge_swap(entry, nr_pages);
> +       swap_range_free(si, offset, nr_pages);
> +
> +       if (!ci->count)
> +               free_cluster(si, ci);
> +       else
> +               partial_free_cluster(si, ci);
> +}
> +
>  static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
>                                            unsigned long offset,
>                                            unsigned char usage)
> @@ -1525,22 +1544,13 @@ static void swap_entry_range_free(struct swap_info_struct *si,
>
>         /* It should never free entries across different clusters */
>         VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1));
> -       VM_BUG_ON(cluster_is_empty(ci));
> -       VM_BUG_ON(ci->count < nr_pages);
>
> -       ci->count -= nr_pages;
>         do {
>                 VM_BUG_ON(*map != SWAP_HAS_CACHE);
>                 *map = 0;
>         } while (++map < map_end);
>
> -       mem_cgroup_uncharge_swap(entry, nr_pages);
> -       swap_range_free(si, offset, nr_pages);
> -
> -       if (!ci->count)
> -               free_cluster(si, ci);
> -       else
> -               partial_free_cluster(si, ci);
> +       __swap_entries_free(si, ci, entry, nr_pages);
>  }
>
>  static void cluster_swap_free_nr(struct swap_info_struct *si,
> --
> 2.30.0
>
>

Hi Kemeng,

This patch is a bit too trivial to be a standalone one; you can easily
fold it into the later one. Also, you may want to carry over the
VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1)); check
as well.

But that is basically just renaming swap_entry_range_free(); you may
just remove the HAS_CACHE check as you rename it, and that way the
changes should be cleaner.
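
Something like below is what I mean (untested sketch, just to
illustrate; the map/map_end setup is copied from the current
swap_entry_range_free, and the name is only an example):

static void swap_entries_free(struct swap_info_struct *si,
			      struct swap_cluster_info *ci,
			      swp_entry_t entry, unsigned int nr_pages)
{
	unsigned long offset = swp_offset(entry);
	unsigned char *map = si->swap_map + offset;
	unsigned char *map_end = map + nr_pages;

	/* It should never free entries across different clusters */
	VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1));
	VM_BUG_ON(cluster_is_empty(ci));
	VM_BUG_ON(ci->count < nr_pages);

	/* No SWAP_HAS_CACHE check here, so callers need not hold caches */
	ci->count -= nr_pages;
	do {
		*map = 0;
	} while (++map < map_end);

	mem_cgroup_uncharge_swap(entry, nr_pages);
	swap_range_free(si, offset, nr_pages);

	if (!ci->count)
		free_cluster(si, ci);
	else
		partial_free_cluster(si, ci);
}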
Kemeng Shi March 14, 2025, 7:32 a.m. UTC | #2
on 3/14/2025 1:42 AM, Kairui Song wrote:
> On Thu, Mar 13, 2025 at 8:09 PM Kemeng Shi <shikemeng@huaweicloud.com> wrote:
>> [...]
> 
> Hi Kemeng,
Hello Kairui,

Thanks for the feedback.
> 
> This patch is a bit too trivial to be a standalone one; you can easily
> fold it into the later one. Also, you may want to carry over the
> VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1)); check
> as well.
Sure, I will do this in the next version.
> 
> But that is basically just renaming swap_entry_range_free(); you may
> just remove the HAS_CACHE check as you rename it, and that way the
> changes should be cleaner.
> 
Sorry, I don't quite follow. Are you suggesting that I should fold this
patch into the later one which removes the HAS_CACHE check and renames
swap_entry_range_free()?

Thanks,
Kemeng
Kairui Song March 14, 2025, 7:47 a.m. UTC | #3
On Fri, Mar 14, 2025 at 3:32 PM Kemeng Shi <shikemeng@huaweicloud.com> wrote:
>
> on 3/14/2025 1:42 AM, Kairui Song wrote:
> > On Thu, Mar 13, 2025 at 8:09 PM Kemeng Shi <shikemeng@huaweicloud.com> wrote:
> >> [...]
> >
> > Hi Kemeng,
> Hello Kairui,
>
> Thanks for the feedback.
> >
> > This patch is a bit too trivial to be a standalone one; you can easily
> > fold it into the later one. Also, you may want to carry over the
> > VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1)); check
> > as well.
> Sure, I will do this in the next version.
> >
> > But that is basically just renaming swap_entry_range_free(); you may
> > just remove the HAS_CACHE check as you rename it, and that way the
> > changes should be cleaner.
> >
> Sorry, I don't quite follow. Are you suggesting that I should fold this
> patch into the later one which removes the HAS_CACHE check and renames
> swap_entry_range_free()?

Hi,

Just some of my nitpicks :)

After you move these parts out of swap_entry_put_locked, there is
almost nothing left in swap_entry_put_locked except an "open coded
memset". And in your next patch (also after the whole series), all
callers of __swap_entries_free will have to do an "open coded
memset" anyway, so these changes seem redundant and could be improved.

BTW your next patch has a typo in the commit message:
s/__swap_entriy_free/__swap_entries_free/g.

>
> Thanks,
> Kemeng
>
Kemeng Shi March 14, 2025, 8:39 a.m. UTC | #4
on 3/14/2025 3:47 PM, Kairui Song wrote:
> On Fri, Mar 14, 2025 at 3:32 PM Kemeng Shi <shikemeng@huaweicloud.com> wrote:
>>
>> on 3/14/2025 1:42 AM, Kairui Song wrote:
>>> On Thu, Mar 13, 2025 at 8:09 PM Kemeng Shi <shikemeng@huaweicloud.com> wrote:
>>>> [...]
>>>
>>> Hi Kemeng,
>> Hello Kairui,
>>
>> Thanks for the feedback.
>>>
>>> This patch is a bit too trivial to be a standalone one; you can easily
>>> fold it into the later one. Also, you may want to carry over the
>>> VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1)); check
>>> as well.
>> Sure, I will do this in the next version.
>>>
>>> But that is basically just renaming swap_entry_range_free(); you may
>>> just remove the HAS_CACHE check as you rename it, and that way the
>>> changes should be cleaner.
>>>
>> Sorry, I don't quite follow. Are you suggesting that I should fold this
>> patch into the later one which removes the HAS_CACHE check and renames
>> swap_entry_range_free()?
> 
> Hi,
> 
> Just some of my nitpicks :)
> 
> After you move these parts out of swap_entry_put_locked, there is
> almost nothing left in swap_entry_put_locked except an "open coded
> memset". And in your next patch (also after the whole series), all
> callers of __swap_entries_free will have to do an "open coded
> memset" anyway, so these changes seem redundant and could be improved.
Right, we can simply use swap_entries_free() instead of __swap_entries_free()
in swap_entry_put_locked() after the whole series, so this change seems
unnecessary. Will improve this in the next version.
> 
> BTW your next patch has a typo in the commit message:
> s/__swap_entriy_free/__swap_entries_free/g.
Will fix this in the next version.

Thanks,
Kemeng
> 
>>
>> Thanks,
>> Kemeng
>>
>

Patch

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5a775456e26c..7c886f9dd6f9 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1347,6 +1347,25 @@  static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
 	return NULL;
 }
 
+static inline void __swap_entries_free(struct swap_info_struct *si,
+				       struct swap_cluster_info *ci,
+				       swp_entry_t entry, unsigned int nr_pages)
+{
+	unsigned long offset = swp_offset(entry);
+
+	VM_BUG_ON(cluster_is_empty(ci));
+	VM_BUG_ON(ci->count < nr_pages);
+
+	ci->count -= nr_pages;
+	mem_cgroup_uncharge_swap(entry, nr_pages);
+	swap_range_free(si, offset, nr_pages);
+
+	if (!ci->count)
+		free_cluster(si, ci);
+	else
+		partial_free_cluster(si, ci);
+}
+
 static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
 					   unsigned long offset,
 					   unsigned char usage)
@@ -1525,22 +1544,13 @@  static void swap_entry_range_free(struct swap_info_struct *si,
 
 	/* It should never free entries across different clusters */
 	VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1));
-	VM_BUG_ON(cluster_is_empty(ci));
-	VM_BUG_ON(ci->count < nr_pages);
 
-	ci->count -= nr_pages;
 	do {
 		VM_BUG_ON(*map != SWAP_HAS_CACHE);
 		*map = 0;
 	} while (++map < map_end);
 
-	mem_cgroup_uncharge_swap(entry, nr_pages);
-	swap_range_free(si, offset, nr_pages);
-
-	if (!ci->count)
-		free_cluster(si, ci);
-	else
-		partial_free_cluster(si, ci);
+	__swap_entries_free(si, ci, entry, nr_pages);
 }
 
 static void cluster_swap_free_nr(struct swap_info_struct *si,