diff mbox series

[4/8] mm/compaction: remove stale fast_find_block flag in isolate_migratepages

Message ID 20230728171037.2219226-5-shikemeng@huaweicloud.com (mailing list archive)
State New
Headers show
Series Fixes and cleanups to compaction | expand

Commit Message

Kemeng Shi July 28, 2023, 5:10 p.m. UTC
In the old code, we set the skip flag on the pageblock found in
fast_find_migrateblock(). So we used fast_find_block to avoid skipping
the pageblock found by fast_find_migrateblock().
In commit 90ed667c03fe5 ("Revert "Revert "mm/compaction: fix set skip in
fast_find_migrateblock"""), the skip set in fast_find_migrateblock() was
removed, so fast_find_block is now useless.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 mm/compaction.c | 12 +-----------
 1 file changed, 1 insertion(+), 11 deletions(-)

Comments

Baolin Wang Aug. 1, 2023, 2:42 a.m. UTC | #1
On 7/29/2023 1:10 AM, Kemeng Shi wrote:
> In old code, we set skip to found page block in fast_find_migrateblock. So
> we use fast_find_block to avoid skip found page block from
> fast_find_migrateblock.
> In 90ed667c03fe5 ("Revert "Revert "mm/compaction: fix set skip in
> fast_find_migrateblock"""), we remove skip set in fast_find_migrateblock,
> then fast_find_block is useless.
> 
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
> ---
>   mm/compaction.c | 12 +-----------
>   1 file changed, 1 insertion(+), 11 deletions(-)
> 
> diff --git a/mm/compaction.c b/mm/compaction.c
> index ad535f880c70..09c36251c613 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -1949,7 +1949,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>   	const isolate_mode_t isolate_mode =
>   		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
>   		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
> -	bool fast_find_block;
>   
>   	/*
>   	 * Start at where we last stopped, or beginning of the zone as
> @@ -1961,13 +1960,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>   	if (block_start_pfn < cc->zone->zone_start_pfn)
>   		block_start_pfn = cc->zone->zone_start_pfn;
>   
> -	/*
> -	 * fast_find_migrateblock marks a pageblock skipped so to avoid
> -	 * the isolation_suitable check below, check whether the fast
> -	 * search was successful.
> -	 */
> -	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
> -
>   	/* Only scan within a pageblock boundary */
>   	block_end_pfn = pageblock_end_pfn(low_pfn);
>   
> @@ -1976,7 +1968,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>   	 * Do not cross the free scanner.
>   	 */
>   	for (; block_end_pfn <= cc->free_pfn;
> -			fast_find_block = false,
>   			cc->migrate_pfn = low_pfn = block_end_pfn,
>   			block_start_pfn = block_end_pfn,
>   			block_end_pfn += pageblock_nr_pages) {
> @@ -2007,8 +1998,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>   		 * before making it "skip" so other compaction instances do
>   		 * not scan the same block.
>   		 */
> -		if (pageblock_aligned(low_pfn) &&
> -		    !fast_find_block && !isolation_suitable(cc, page))
> +		if (pageblock_aligned(low_pfn) && !isolation_suitable(cc, page))

I do not think so. If the pageblock is found by 
fast_find_migrateblock(), that means it definitely has not had the 
skip flag set, so there is no need to call isolation_suitable() if 
fast_find_block is true, right?
Kemeng Shi Aug. 1, 2023, 3:24 a.m. UTC | #2
on 8/1/2023 10:42 AM, Baolin Wang wrote:
> 
> 
> On 7/29/2023 1:10 AM, Kemeng Shi wrote:
>> In old code, we set skip to found page block in fast_find_migrateblock. So
>> we use fast_find_block to avoid skip found page block from
>> fast_find_migrateblock.
>> In 90ed667c03fe5 ("Revert "Revert "mm/compaction: fix set skip in
>> fast_find_migrateblock"""), we remove skip set in fast_find_migrateblock,
>> then fast_find_block is useless.
>>
>> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
>> ---
>>   mm/compaction.c | 12 +-----------
>>   1 file changed, 1 insertion(+), 11 deletions(-)
>>
>> diff --git a/mm/compaction.c b/mm/compaction.c
>> index ad535f880c70..09c36251c613 100644
>> --- a/mm/compaction.c
>> +++ b/mm/compaction.c
>> @@ -1949,7 +1949,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>       const isolate_mode_t isolate_mode =
>>           (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
>>           (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
>> -    bool fast_find_block;
>>         /*
>>        * Start at where we last stopped, or beginning of the zone as
>> @@ -1961,13 +1960,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>       if (block_start_pfn < cc->zone->zone_start_pfn)
>>           block_start_pfn = cc->zone->zone_start_pfn;
>>   -    /*
>> -     * fast_find_migrateblock marks a pageblock skipped so to avoid
>> -     * the isolation_suitable check below, check whether the fast
>> -     * search was successful.
>> -     */
>> -    fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
>> -
>>       /* Only scan within a pageblock boundary */
>>       block_end_pfn = pageblock_end_pfn(low_pfn);
>>   @@ -1976,7 +1968,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>        * Do not cross the free scanner.
>>        */
>>       for (; block_end_pfn <= cc->free_pfn;
>> -            fast_find_block = false,
>>               cc->migrate_pfn = low_pfn = block_end_pfn,
>>               block_start_pfn = block_end_pfn,
>>               block_end_pfn += pageblock_nr_pages) {
>> @@ -2007,8 +1998,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>            * before making it "skip" so other compaction instances do
>>            * not scan the same block.
>>            */
>> -        if (pageblock_aligned(low_pfn) &&
>> -            !fast_find_block && !isolation_suitable(cc, page))
>> +        if (pageblock_aligned(low_pfn) && !isolation_suitable(cc, page))
> 
> I do not think so. If the pageblock is found by fast_find_migrateblock(), that means it definitely has not been set the skip flag, so there is not need to call isolation_suitable() if fast_find_block is true, right?
> 
> 
Actually, the found pageblock could be set skip because:
1. another compactor could mark this pageblock as skip after the zone lock is released
in fast_find_migrateblock.
2. fast_find_migrateblock may use a pfn from reinit_migrate_pfn which was previously found
and scanned. It could be fully scanned and marked skip after its first return from
fast_find_migrateblock, and it should be skipped.
Thanks!
Baolin Wang Aug. 1, 2023, 3:34 a.m. UTC | #3
On 8/1/2023 11:24 AM, Kemeng Shi wrote:
> 
> 
> on 8/1/2023 10:42 AM, Baolin Wang wrote:
>>
>>
>> On 7/29/2023 1:10 AM, Kemeng Shi wrote:
>>> In old code, we set skip to found page block in fast_find_migrateblock. So
>>> we use fast_find_block to avoid skip found page block from
>>> fast_find_migrateblock.
>>> In 90ed667c03fe5 ("Revert "Revert "mm/compaction: fix set skip in
>>> fast_find_migrateblock"""), we remove skip set in fast_find_migrateblock,
>>> then fast_find_block is useless.
>>>
>>> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
>>> ---
>>>    mm/compaction.c | 12 +-----------
>>>    1 file changed, 1 insertion(+), 11 deletions(-)
>>>
>>> diff --git a/mm/compaction.c b/mm/compaction.c
>>> index ad535f880c70..09c36251c613 100644
>>> --- a/mm/compaction.c
>>> +++ b/mm/compaction.c
>>> @@ -1949,7 +1949,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>        const isolate_mode_t isolate_mode =
>>>            (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
>>>            (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
>>> -    bool fast_find_block;
>>>          /*
>>>         * Start at where we last stopped, or beginning of the zone as
>>> @@ -1961,13 +1960,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>        if (block_start_pfn < cc->zone->zone_start_pfn)
>>>            block_start_pfn = cc->zone->zone_start_pfn;
>>>    -    /*
>>> -     * fast_find_migrateblock marks a pageblock skipped so to avoid
>>> -     * the isolation_suitable check below, check whether the fast
>>> -     * search was successful.
>>> -     */
>>> -    fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
>>> -
>>>        /* Only scan within a pageblock boundary */
>>>        block_end_pfn = pageblock_end_pfn(low_pfn);
>>>    @@ -1976,7 +1968,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>         * Do not cross the free scanner.
>>>         */
>>>        for (; block_end_pfn <= cc->free_pfn;
>>> -            fast_find_block = false,
>>>                cc->migrate_pfn = low_pfn = block_end_pfn,
>>>                block_start_pfn = block_end_pfn,
>>>                block_end_pfn += pageblock_nr_pages) {
>>> @@ -2007,8 +1998,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>             * before making it "skip" so other compaction instances do
>>>             * not scan the same block.
>>>             */
>>> -        if (pageblock_aligned(low_pfn) &&
>>> -            !fast_find_block && !isolation_suitable(cc, page))
>>> +        if (pageblock_aligned(low_pfn) && !isolation_suitable(cc, page))
>>
>> I do not think so. If the pageblock is found by fast_find_migrateblock(), that means it definitely has not been set the skip flag, so there is not need to call isolation_suitable() if fast_find_block is true, right?
>>
>>
> Actually, found pageblock could be set skip as:
> 1. other compactor could mark this pageblock as skip after zone lock is realeased
> in fast_find_migrateblock.

Yes, but your patch also cannot close this race window; that means the 
skip flag can also be set by other compactors after the 
isolation_suitable() validation.

> 2. fast_find_migrateblock may uses pfn from reinit_migrate_pfn which is previously found
> and sacnned. It could be fully sacnned and marked skip after it's first return from

Right, but now the 'fast_find_block' is false, and we will call 
isolation_suitable() to validate the skip flag.

> fast_find_migrateblock and it should be skipped.
> Thanks!
Kemeng Shi Aug. 1, 2023, 3:48 a.m. UTC | #4
on 8/1/2023 11:34 AM, Baolin Wang wrote:
> 
> 
> On 8/1/2023 11:24 AM, Kemeng Shi wrote:
>>
>>
>> on 8/1/2023 10:42 AM, Baolin Wang wrote:
>>>
>>>
>>> On 7/29/2023 1:10 AM, Kemeng Shi wrote:
>>>> In old code, we set skip to found page block in fast_find_migrateblock. So
>>>> we use fast_find_block to avoid skip found page block from
>>>> fast_find_migrateblock.
>>>> In 90ed667c03fe5 ("Revert "Revert "mm/compaction: fix set skip in
>>>> fast_find_migrateblock"""), we remove skip set in fast_find_migrateblock,
>>>> then fast_find_block is useless.
>>>>
>>>> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
>>>> ---
>>>>    mm/compaction.c | 12 +-----------
>>>>    1 file changed, 1 insertion(+), 11 deletions(-)
>>>>
>>>> diff --git a/mm/compaction.c b/mm/compaction.c
>>>> index ad535f880c70..09c36251c613 100644
>>>> --- a/mm/compaction.c
>>>> +++ b/mm/compaction.c
>>>> @@ -1949,7 +1949,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>>        const isolate_mode_t isolate_mode =
>>>>            (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
>>>>            (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
>>>> -    bool fast_find_block;
>>>>          /*
>>>>         * Start at where we last stopped, or beginning of the zone as
>>>> @@ -1961,13 +1960,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>>        if (block_start_pfn < cc->zone->zone_start_pfn)
>>>>            block_start_pfn = cc->zone->zone_start_pfn;
>>>>    -    /*
>>>> -     * fast_find_migrateblock marks a pageblock skipped so to avoid
>>>> -     * the isolation_suitable check below, check whether the fast
>>>> -     * search was successful.
>>>> -     */
>>>> -    fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
>>>> -
>>>>        /* Only scan within a pageblock boundary */
>>>>        block_end_pfn = pageblock_end_pfn(low_pfn);
>>>>    @@ -1976,7 +1968,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>>         * Do not cross the free scanner.
>>>>         */
>>>>        for (; block_end_pfn <= cc->free_pfn;
>>>> -            fast_find_block = false,
>>>>                cc->migrate_pfn = low_pfn = block_end_pfn,
>>>>                block_start_pfn = block_end_pfn,
>>>>                block_end_pfn += pageblock_nr_pages) {
>>>> @@ -2007,8 +1998,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>>             * before making it "skip" so other compaction instances do
>>>>             * not scan the same block.
>>>>             */
>>>> -        if (pageblock_aligned(low_pfn) &&
>>>> -            !fast_find_block && !isolation_suitable(cc, page))
>>>> +        if (pageblock_aligned(low_pfn) && !isolation_suitable(cc, page))
>>>
>>> I do not think so. If the pageblock is found by fast_find_migrateblock(), that means it definitely has not been set the skip flag, so there is not need to call isolation_suitable() if fast_find_block is true, right?
>>>
>>>
>> Actually, found pageblock could be set skip as:
>> 1. other compactor could mark this pageblock as skip after zone lock is realeased
>> in fast_find_migrateblock.
> 
> Yes, but your patch also can not close this race window, that means it can also be set skip flag after the isolation_suitable() validation by other compactors.
> 
Yes, I think it's still worth removing a lot of the fast_find_block-related checks to reduce
code complexity, at the cost of one redundant isolation_suitable() call which may, with luck, skip some blocks.
>> 2. fast_find_migrateblock may uses pfn from reinit_migrate_pfn which is previously found
>> and sacnned. It could be fully sacnned and marked skip after it's first return from
> 
> Right, but now the 'fast_find_block' is false, and we will call isolation_suitable() to validate the skip flag.
> 
Right, sorry for missing that.

But it's ok to keep the fast_find_block if you insist, and I will just correct the stale
comment that "fast_find_migrateblock marks a pageblock skipped ..." in the next version.
Thanks!
>> fast_find_migrateblock and it should be skipped.
>> Thanks!
>
Baolin Wang Aug. 1, 2023, 8:15 a.m. UTC | #5
On 8/1/2023 11:48 AM, Kemeng Shi wrote:
> 
> 
> on 8/1/2023 11:34 AM, Baolin Wang wrote:
>>
>>
>> On 8/1/2023 11:24 AM, Kemeng Shi wrote:
>>>
>>>
>>> on 8/1/2023 10:42 AM, Baolin Wang wrote:
>>>>
>>>>
>>>> On 7/29/2023 1:10 AM, Kemeng Shi wrote:
>>>>> In old code, we set skip to found page block in fast_find_migrateblock. So
>>>>> we use fast_find_block to avoid skip found page block from
>>>>> fast_find_migrateblock.
>>>>> In 90ed667c03fe5 ("Revert "Revert "mm/compaction: fix set skip in
>>>>> fast_find_migrateblock"""), we remove skip set in fast_find_migrateblock,
>>>>> then fast_find_block is useless.
>>>>>
>>>>> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
>>>>> ---
>>>>>     mm/compaction.c | 12 +-----------
>>>>>     1 file changed, 1 insertion(+), 11 deletions(-)
>>>>>
>>>>> diff --git a/mm/compaction.c b/mm/compaction.c
>>>>> index ad535f880c70..09c36251c613 100644
>>>>> --- a/mm/compaction.c
>>>>> +++ b/mm/compaction.c
>>>>> @@ -1949,7 +1949,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>>>         const isolate_mode_t isolate_mode =
>>>>>             (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
>>>>>             (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
>>>>> -    bool fast_find_block;
>>>>>           /*
>>>>>          * Start at where we last stopped, or beginning of the zone as
>>>>> @@ -1961,13 +1960,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>>>         if (block_start_pfn < cc->zone->zone_start_pfn)
>>>>>             block_start_pfn = cc->zone->zone_start_pfn;
>>>>>     -    /*
>>>>> -     * fast_find_migrateblock marks a pageblock skipped so to avoid
>>>>> -     * the isolation_suitable check below, check whether the fast
>>>>> -     * search was successful.
>>>>> -     */
>>>>> -    fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
>>>>> -
>>>>>         /* Only scan within a pageblock boundary */
>>>>>         block_end_pfn = pageblock_end_pfn(low_pfn);
>>>>>     @@ -1976,7 +1968,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>>>          * Do not cross the free scanner.
>>>>>          */
>>>>>         for (; block_end_pfn <= cc->free_pfn;
>>>>> -            fast_find_block = false,
>>>>>                 cc->migrate_pfn = low_pfn = block_end_pfn,
>>>>>                 block_start_pfn = block_end_pfn,
>>>>>                 block_end_pfn += pageblock_nr_pages) {
>>>>> @@ -2007,8 +1998,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
>>>>>              * before making it "skip" so other compaction instances do
>>>>>              * not scan the same block.
>>>>>              */
>>>>> -        if (pageblock_aligned(low_pfn) &&
>>>>> -            !fast_find_block && !isolation_suitable(cc, page))
>>>>> +        if (pageblock_aligned(low_pfn) && !isolation_suitable(cc, page))
>>>>
>>>> I do not think so. If the pageblock is found by fast_find_migrateblock(), that means it definitely has not been set the skip flag, so there is not need to call isolation_suitable() if fast_find_block is true, right?
>>>>
>>>>
>>> Actually, found pageblock could be set skip as:
>>> 1. other compactor could mark this pageblock as skip after zone lock is realeased
>>> in fast_find_migrateblock.
>>
>> Yes, but your patch also can not close this race window, that means it can also be set skip flag after the isolation_suitable() validation by other compactors.
>>
> Yes, I think it's still worth to remove a lot of fast_find_block relevant check and reduce
> code complexity with one redundant isolation_suitable which may skip some block with luck.
>>> 2. fast_find_migrateblock may uses pfn from reinit_migrate_pfn which is previously found
>>> and sacnned. It could be fully sacnned and marked skip after it's first return from
>>
>> Right, but now the 'fast_find_block' is false, and we will call isolation_suitable() to validate the skip flag.
>>
> Right, sorry for missing that.
> 
> But it's ok to keep the fast_find_block if you insist and I will just correct the stale

Yes, I still prefer to keep the fast_find_block, since I did not see 
that this patch fixes any real issue, and it might have a side effect 
on fast-find-pageblock(?).

> comment that "fast_find_migrateblock marks a pageblock skipped ..." in next version.

Sure, please do it.
diff mbox series

Patch

diff --git a/mm/compaction.c b/mm/compaction.c
index ad535f880c70..09c36251c613 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1949,7 +1949,6 @@  static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 	const isolate_mode_t isolate_mode =
 		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
 		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
-	bool fast_find_block;
 
 	/*
 	 * Start at where we last stopped, or beginning of the zone as
@@ -1961,13 +1960,6 @@  static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 	if (block_start_pfn < cc->zone->zone_start_pfn)
 		block_start_pfn = cc->zone->zone_start_pfn;
 
-	/*
-	 * fast_find_migrateblock marks a pageblock skipped so to avoid
-	 * the isolation_suitable check below, check whether the fast
-	 * search was successful.
-	 */
-	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
-
 	/* Only scan within a pageblock boundary */
 	block_end_pfn = pageblock_end_pfn(low_pfn);
 
@@ -1976,7 +1968,6 @@  static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 	 * Do not cross the free scanner.
 	 */
 	for (; block_end_pfn <= cc->free_pfn;
-			fast_find_block = false,
 			cc->migrate_pfn = low_pfn = block_end_pfn,
 			block_start_pfn = block_end_pfn,
 			block_end_pfn += pageblock_nr_pages) {
@@ -2007,8 +1998,7 @@  static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 		 * before making it "skip" so other compaction instances do
 		 * not scan the same block.
 		 */
-		if (pageblock_aligned(low_pfn) &&
-		    !fast_find_block && !isolation_suitable(cc, page))
+		if (pageblock_aligned(low_pfn) && !isolation_suitable(cc, page))
 			continue;
 
 		/*