
[4/5] mm/compaction: remove unnecessary cursor page in isolate_freepages_block

Message ID: 20230729174354.2239980-5-shikemeng@huaweicloud.com
State: New
Series: Fixes and cleanups to compaction

Commit Message

Kemeng Shi July 29, 2023, 5:43 p.m. UTC
The cursor is currently only used to move the page pointer forward. Advance
the page pointer directly and remove the unnecessary cursor.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 mm/compaction.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
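
For illustration only, a minimal stand-alone C sketch of the pattern being
simplified (the scan_block() helper and values[] array are hypothetical, not
kernel code): when a cursor pointer merely mirrors the loop index, it can be
dropped by advancing the element pointer itself in the loop header, which is
what the patch does with page and blockpfn in isolate_freepages_block().

#include <stdio.h>

/*
 * Hypothetical example: walk a block of items with a stride, in the same
 * shape as the loop in isolate_freepages_block().  The element pointer is
 * advanced directly in the loop header, so no separate cursor variable is
 * needed.
 */
static void scan_block(const int *items, unsigned long start,
		       unsigned long end, unsigned long stride)
{
	const int *item = &items[start];
	unsigned long idx;

	for (idx = start; idx < end; idx += stride, item += stride)
		printf("index %lu -> value %d\n", idx, *item);
}

int main(void)
{
	int values[8] = { 10, 20, 30, 40, 50, 60, 70, 80 };

	/* Scan every second element of the hypothetical block. */
	scan_block(values, 0, 8, 2);
	return 0;
}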

Comments

Baolin Wang Aug. 2, 2023, 2:59 a.m. UTC | #1
On 7/30/2023 1:43 AM, Kemeng Shi wrote:
> The cursor is currently only used to move the page pointer forward. Advance
> the page pointer directly and remove the unnecessary cursor.
> 
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>

LGTM.
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>

> ---
>   mm/compaction.c | 11 +++++------
>   1 file changed, 5 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 65791a74c5e8..cfb661f4ce23 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -589,7 +589,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
>   				bool strict)
>   {
>   	int nr_scanned = 0, total_isolated = 0;
> -	struct page *cursor;
> +	struct page *page;
>   	unsigned long flags = 0;
>   	spinlock_t *locked = NULL;
>   	unsigned long blockpfn = *start_pfn;
> @@ -599,12 +599,11 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
>   	if (strict)
>   		stride = 1;
>   
> -	cursor = pfn_to_page(blockpfn);
> +	page = pfn_to_page(blockpfn);
>   
>   	/* Isolate free pages. */
> -	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
> +	for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
>   		int isolated;
> -		struct page *page = cursor;
>   
>   		/*
>   		 * Periodically drop the lock (if held) regardless of its
> @@ -628,7 +627,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
>   
>   			if (likely(order <= MAX_ORDER)) {
>   				blockpfn += (1UL << order) - 1;
> -				cursor += (1UL << order) - 1;
> +				page += (1UL << order) - 1;
>   				nr_scanned += (1UL << order) - 1;
>   			}
>   			goto isolate_fail;
> @@ -665,7 +664,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
>   		}
>   		/* Advance to the end of split page */
>   		blockpfn += isolated - 1;
> -		cursor += isolated - 1;
> +		page += isolated - 1;
>   		continue;
>   
>   isolate_fail:

Patch

diff --git a/mm/compaction.c b/mm/compaction.c
index 65791a74c5e8..cfb661f4ce23 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -589,7 +589,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 				bool strict)
 {
 	int nr_scanned = 0, total_isolated = 0;
-	struct page *cursor;
+	struct page *page;
 	unsigned long flags = 0;
 	spinlock_t *locked = NULL;
 	unsigned long blockpfn = *start_pfn;
@@ -599,12 +599,11 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	if (strict)
 		stride = 1;
 
-	cursor = pfn_to_page(blockpfn);
+	page = pfn_to_page(blockpfn);
 
 	/* Isolate free pages. */
-	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
+	for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
 		int isolated;
-		struct page *page = cursor;
 
 		/*
 		 * Periodically drop the lock (if held) regardless of its
@@ -628,7 +627,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
 			if (likely(order <= MAX_ORDER)) {
 				blockpfn += (1UL << order) - 1;
-				cursor += (1UL << order) - 1;
+				page += (1UL << order) - 1;
 				nr_scanned += (1UL << order) - 1;
 			}
 			goto isolate_fail;
@@ -665,7 +664,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		}
 		/* Advance to the end of split page */
 		blockpfn += isolated - 1;
-		cursor += isolated - 1;
+		page += isolated - 1;
 		continue;
 
 isolate_fail: