[mm-unstable,v1,4/4] mm/huge_memory: work on folio->swap instead of page->private when splitting folio

Message ID 20230821160849.531668-5-david@redhat.com (mailing list archive)
State New
Series mm/swap: stop using page->private on tail pages for THP_SWAP + cleanups

Commit Message

David Hildenbrand Aug. 21, 2023, 4:08 p.m. UTC
Let's work on folio->swap instead. While at it, use folio_test_anon() and
folio_test_swapcache() -- the original folio remains valid even after
splitting (but is then an order-0 folio).

We can probably convert a lot more to folios in that code; let's focus
on folio->swap handling only for now.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/huge_memory.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)
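
Background for the conversion: earlier in this series, struct folio gained a "swap" field that shares storage with the head page's "private" field, so folio->swap reads the same word that page_private(head) used to read. A minimal userspace sketch of that aliasing follows; the layouts are drastically simplified stand-ins, not the kernel's actual struct definitions.

#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

/* Simplified stand-ins; the real structs have many more fields. */
struct page {
	unsigned long flags;
	unsigned long private;		/* swap entry used to live here */
};

struct folio {
	union {
		struct {
			unsigned long flags;
			swp_entry_t swap;	/* overlays page->private */
		};
		struct page page;
	};
};

int main(void)
{
	struct folio folio = { .swap = { .val = 0x1234 } };

	/* Both names refer to the same storage in the head page. */
	printf("folio.swap.val = %#lx\n", folio.swap.val);
	printf("page.private   = %#lx\n", folio.page.private);
	return 0;
}

The point is only that swap and private alias on the head page; after the earlier patches in this series, tail pages no longer carry per-page copies of the entry, which is why the split code below must hand each new order-0 folio its own entry.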

Comments

Chris Li Aug. 23, 2023, 1:16 p.m. UTC | #1
Hi David,

Reviewed-by: Chris Li <chrisl@kernel.org>

Chris

On Mon, Aug 21, 2023 at 9:09 AM David Hildenbrand <david@redhat.com> wrote:
> [...]

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c04702ae71d2..4465915711c3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2401,10 +2401,16 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
 	}
 }
 
-static void __split_huge_page_tail(struct page *head, int tail,
+static void __split_huge_page_tail(struct folio *folio, int tail,
 		struct lruvec *lruvec, struct list_head *list)
 {
+	struct page *head = &folio->page;
 	struct page *page_tail = head + tail;
+	/*
+	 * Careful: new_folio is not a "real" folio before we cleared PageTail.
+	 * Don't pass it around before clear_compound_head().
+	 */
+	struct folio *new_folio = (struct folio *)page_tail;
 
 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
 
@@ -2453,8 +2459,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
 		VM_WARN_ON_ONCE_PAGE(true, page_tail);
 		page_tail->private = 0;
 	}
-	if (PageSwapCache(head))
-		set_page_private(page_tail, (unsigned long)head->private + tail);
+	if (folio_test_swapcache(folio))
+		new_folio->swap.val = folio->swap.val + tail;
 
 	/* Page flags must be visible before we make the page non-compound. */
 	smp_wmb();
@@ -2500,11 +2506,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	/* complete memcg works before add pages to LRU */
 	split_page_memcg(head, nr);
 
-	if (PageAnon(head) && PageSwapCache(head)) {
-		swp_entry_t entry = { .val = page_private(head) };
-
-		offset = swp_offset(entry);
-		swap_cache = swap_address_space(entry);
+	if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
+		offset = swp_offset(folio->swap);
+		swap_cache = swap_address_space(folio->swap);
 		xa_lock(&swap_cache->i_pages);
 	}
 
@@ -2514,7 +2518,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	ClearPageHasHWPoisoned(head);
 
 	for (i = nr - 1; i >= 1; i--) {
-		__split_huge_page_tail(head, i, lruvec, list);
+		__split_huge_page_tail(folio, i, lruvec, list);
 		/* Some pages can be beyond EOF: drop them from page cache */
 		if (head[i].index >= end) {
 			struct folio *tail = page_folio(head + i);
@@ -2559,11 +2563,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
 	remap_page(folio, nr);
 
-	if (PageSwapCache(head)) {
-		swp_entry_t entry = { .val = page_private(head) };
-
-		split_swap_cluster(entry);
-	}
+	if (folio_test_swapcache(folio))
+		split_swap_cluster(folio->swap);
 
 	for (i = 0; i < nr; i++) {
 		struct page *subpage = head + i;
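
One detail of the tail loop worth spelling out: new_folio->swap.val = folio->swap.val + tail relies on a swapcache THP occupying a contiguous run of swap slots (allocated as one cluster on a single device), so the tail's entry is the head's entry with the offset bumped by the tail index. A sketch of that arithmetic, with a hypothetical helper name and a simplified swp_entry_t:

#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

/*
 * Hypothetical helper mirroring the split logic: after the split,
 * the order-0 folio at offset `tail` inherits the head entry + tail.
 */
static swp_entry_t tail_swap_entry(swp_entry_t head_entry, int tail)
{
	return (swp_entry_t){ .val = head_entry.val + tail };
}

int main(void)
{
	swp_entry_t head = { .val = 0x800 };	/* made-up head entry */
	int nr = 4;				/* pretend order-2 folio */

	for (int i = 1; i < nr; i++)
		printf("tail %d -> entry %#lx\n", i,
		       tail_swap_entry(head, i).val);
	return 0;
}

The same contiguity is what lets the split_swap_cluster() call above take only the head's entry.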