
mm-remove-zone_lru_lock-function-access-lru_lock-directly-fix

Message ID 20190301121651.7741-1-aryabinin@virtuozzo.com (mailing list archive)
State New, archived
Series mm-remove-zone_lru_lock-function-access-lru_lock-directly-fix

Commit Message

Andrey Ryabinin March 1, 2019, 12:16 p.m. UTC
A slightly better version of __split_huge_page(): look up the page's pg_data_t
once via page_pgdat(head) and reuse it for both the mem_cgroup_page_lruvec()
lookup and the lru_lock unlock, instead of going through
page_zone(head)->zone_pgdat for the former and calling page_pgdat(head) again
for the latter.

Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
---
 mm/huge_memory.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
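
For reference, the resulting pattern, condensed from the diff below into a
fragment (not a standalone build unit; all identifiers and the flags argument
are taken from the patch itself, and the elided splitting work is summarized
in comments):

	struct page *head = compound_head(page);
	pg_data_t *pgdat = page_pgdat(head);	/* look up the node once */
	struct lruvec *lruvec;

	/* lruvec and the lock below now come from the same cached pgdat */
	lruvec = mem_cgroup_page_lruvec(head, pgdat);

	/* ... tail pages are split and added to the LRU under the caller's pgdat->lru_lock ... */

	spin_unlock_irqrestore(&pgdat->lru_lock, flags);

This keeps the lruvec lookup and the unlock on the same pgdat, in line with
the series' theme of dropping the zone indirection and accessing the node's
->lru_lock directly.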

Comments

Vlastimil Babka March 1, 2019, 12:30 p.m. UTC | #1
On 3/1/19 1:16 PM, Andrey Ryabinin wrote:
> A slightly better version of __split_huge_page();
> 
> Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>

Ack.


Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4ccac6b32d49..fcf657886b4b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2440,11 +2440,11 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 		pgoff_t end, unsigned long flags)
 {
 	struct page *head = compound_head(page);
-	struct zone *zone = page_zone(head);
+	pg_data_t *pgdat = page_pgdat(head);
 	struct lruvec *lruvec;
 	int i;
 
-	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
+	lruvec = mem_cgroup_page_lruvec(head, pgdat);
 
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(head);
@@ -2475,7 +2475,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 		xa_unlock(&head->mapping->i_pages);
 	}
 
-	spin_unlock_irqrestore(&page_pgdat(head)->lru_lock, flags);
+	spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 
 	remap_page(head);