
[v3,08/14] mm/ksm: Convert chain series funcs to use folio

Message ID 20240325124904.398913-9-alexs@kernel.org (mailing list archive)
State New
Series transfer page to folio in KSM

Commit Message

alexs@kernel.org March 25, 2024, 12:48 p.m. UTC
From: "Alex Shi (tencent)" <alexs@kernel.org>

In the KSM stable tree all pages are single (order-0), so convert the
code to use folios. Changing the return type to 'void *' is ugly, but
for this series of functions it is still a bit simpler than adding new
functions, and they will be converted to return 'struct folio *' soon.

Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Chris Wright <chrisw@sous-sol.org>
---
 mm/ksm.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
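For readers skimming the series: the point of the intermediate 'void *'
return type is that C converts void * implicitly to any object pointer
type, so callers that still expect a struct page * and callers already
converted to struct folio * both compile unchanged, without casts, while
the conversion is in flight. Below is a minimal, userspace-compilable
sketch of that pattern; the struct definitions and lookup_stable_folio()
are stand-ins for illustration only, not the actual mm/ksm.c code.

/*
 * Illustrative sketch of the transitional "void *" return type.
 * The types and lookup_stable_folio() are stand-ins, not mm/ksm.c.
 */
#include <stdio.h>

struct page  { unsigned long flags; };
struct folio { struct page page; };	/* a folio wraps its head page */

/* Works on folios internally, but returns void * so that both
 * unconverted (page-based) and converted (folio-based) callers
 * compile without casts during the transition. */
static void *lookup_stable_folio(struct folio *stored)
{
	return stored;
}

int main(void)
{
	struct folio f = { { 0 } };

	/* Unconverted caller: void * silently becomes struct page *. */
	struct page *tree_page = lookup_stable_folio(&f);

	/* Converted caller: the same return value is now a folio. */
	struct folio *tree_folio = lookup_stable_folio(&f);

	printf("%p %p\n", (void *)tree_page, (void *)tree_folio);
	return 0;
}

Once every caller assigns the result to a struct folio *, the void *
return can be tightened to struct folio * with no further churn, which
is what the commit message above says happens later in the series.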

Comments

David Hildenbrand April 5, 2024, 7:36 a.m. UTC | #1
On 25.03.24 13:48, alexs@kernel.org wrote:
> From: "Alex Shi (tencent)" <alexs@kernel.org>
> 
> In the KSM stable tree all pages are single (order-0), so convert the
> code to use folios. Changing the return type to 'void *' is ugly, but
> for this series of functions it is still a bit simpler than adding new
> functions, and they will be converted to return 'struct folio *' soon.
> 
> Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
> Cc: Izik Eidus <izik.eidus@ravellosystems.com>
> Cc: Matthew Wilcox <willy@infradead.org>
> Cc: Andrea Arcangeli <aarcange@redhat.com>
> Cc: Hugh Dickins <hughd@google.com>
> Cc: Chris Wright <chrisw@sous-sol.org>
> ---

Why not simply squash 8,9,10 and avoid this completely? There are not 
that many relevant calls that need conversion.

Patch

diff --git a/mm/ksm.c b/mm/ksm.c
index 5d1f62e7462a..7188997437d3 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1777,7 +1777,7 @@  static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stabl
  * function and will be overwritten in all cases, the caller doesn't
  * need to initialize it.
  */
-static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
+static void *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
 					struct ksm_stable_node **_stable_node,
 					struct rb_root *root,
 					bool prune_stale_stable_nodes)
@@ -1799,24 +1799,24 @@  static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_du
 			       prune_stale_stable_nodes);
 }
 
-static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d,
+static __always_inline void *chain_prune(struct ksm_stable_node **s_n_d,
 						struct ksm_stable_node **s_n,
 						struct rb_root *root)
 {
 	return __stable_node_chain(s_n_d, s_n, root, true);
 }
 
-static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
+static __always_inline void *chain(struct ksm_stable_node **s_n_d,
 					  struct ksm_stable_node *s_n,
 					  struct rb_root *root)
 {
 	struct ksm_stable_node *old_stable_node = s_n;
-	struct page *tree_page;
+	struct folio *tree_folio;
 
-	tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
+	tree_folio = __stable_node_chain(s_n_d, &s_n, root, false);
 	/* not pruning dups so s_n cannot have changed */
 	VM_BUG_ON(s_n != old_stable_node);
-	return tree_page;
+	return tree_folio;
 }
 
 /*