
[3/4] mm: mlock: update the interface to use folios

Message ID 74874865335885dabe0751c1140a6d198dea333d.1671738120.git.lstoakes@gmail.com (mailing list archive)
State New
Series update mlock to use folios

Commit Message

Lorenzo Stoakes Dec. 22, 2022, 7:48 p.m. UTC
This patch updates the mlock interface to accept folios rather than pages,
bringing the interface in line with the internal implementation.

munlock_vma_page() still requires a page_folio() conversion; however, this
is consistent with the existing mlock_vma_page() implementation and is a
product of rmap still dealing in pages rather than folios.
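
For example, after this change a folio-native user such as
mlock_pte_range() can pass folios straight through rather than
converting at each call site (condensed from the hunk below):

		folio = vm_normal_folio(vma, addr, *pte);
		if (!folio || folio_is_zone_device(folio))
			continue;
		if (folio_test_large(folio))
			continue;
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);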

Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
---
 mm/internal.h | 26 ++++++++++++++++----------
 mm/mlock.c    | 32 +++++++++++++++-----------------
 mm/swap.c     |  2 +-
 3 files changed, 32 insertions(+), 28 deletions(-)

Comments

kernel test robot Dec. 26, 2022, 1:05 a.m. UTC | #1
Hi Lorenzo,

Thank you for the patch! Here is something to improve:

[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on vfs-idmapping/for-next linus/master v6.1 next-20221220]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Lorenzo-Stoakes/mm-pagevec-add-folio_batch_reinit/20221223-035645
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/74874865335885dabe0751c1140a6d198dea333d.1671738120.git.lstoakes%40gmail.com
patch subject: [PATCH 3/4] mm: mlock: update the interface to use folios
config: m68k-mvme147_defconfig
compiler: m68k-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/1977ecfaf773324c88d542d3056f278d359debe9
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Lorenzo-Stoakes/mm-pagevec-add-folio_batch_reinit/20221223-035645
        git checkout 1977ecfaf773324c88d542d3056f278d359debe9
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=m68k olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=m68k SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   In file included from include/linux/mmzone.h:22,
                    from include/linux/gfp.h:7,
                    from include/linux/mm.h:7,
                    from include/linux/mman.h:5,
                    from mm/mlock.c:10:
   mm/mlock.c: In function 'mlock_pte_range':
>> include/linux/page-flags.h:273:43: error: '_Generic' selector of type 'void *' is not compatible with any association
     273 | #define page_folio(p)           (_Generic((p),                          \
         |                                           ^
   mm/mlock.c:323:25: note: in expansion of macro 'page_folio'
     323 |                 folio = page_folio(pmd_page(*pmd));
         |                         ^~~~~~~~~~


vim +273 include/linux/page-flags.h

0f2317e34e2c7b Matthew Wilcox (Oracle  2021-06-28  259) 
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  260) /**
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  261)  * page_folio - Converts from page to folio.
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  262)  * @p: The page.
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  263)  *
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  264)  * Every page is part of a folio.  This function cannot be called on a
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  265)  * NULL pointer.
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  266)  *
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  267)  * Context: No reference, nor lock is required on @page.  If the caller
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  268)  * does not hold a reference, this call may race with a folio split, so
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  269)  * it should re-check the folio still contains this page after gaining
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  270)  * a reference on the folio.
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  271)  * Return: The folio which contains this page.
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  272)  */
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06 @273) #define page_folio(p)		(_Generic((p),				\
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  274) 	const struct page *:	(const struct folio *)_compound_head(p), \
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  275) 	struct page *:		(struct folio *)_compound_head(p)))
7b230db3b8d373 Matthew Wilcox (Oracle  2020-12-06  276)
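
The race described in the comment above is typically handled by
re-checking after taking a reference. A minimal sketch of that pattern
(get_stable_folio() is a hypothetical name used for illustration;
page_folio(), folio_try_get() and folio_put() are the real helpers):

	static struct folio *get_stable_folio(struct page *page)
	{
		struct folio *folio = page_folio(page);

		if (!folio_try_get(folio))
			return NULL;
		/* The folio may have split; re-check the page still belongs. */
		if (unlikely(folio != page_folio(page))) {
			folio_put(folio);
			return NULL;
		}
		return folio;
	}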
Lorenzo Stoakes Dec. 26, 2022, 6:53 a.m. UTC | #2
On Mon, Dec 26, 2022 at 09:05:42AM +0800, kernel test robot wrote:
>    mm/mlock.c: In function 'mlock_pte_range':
> >> include/linux/page-flags.h:273:43: error: '_Generic' selector of type 'void *' is not compatible with any association
>      273 | #define page_folio(p)           (_Generic((p),                          \
>          |                                           ^
>    mm/mlock.c:323:25: note: in expansion of macro 'page_folio'
>      323 |                 folio = page_folio(pmd_page(*pmd));
>          |                         ^~~~~~~~~~
>

OK, so this is a product of motorola_pgtable.h defining pmd_page() as NULL and
the type magic in page_folio() getting confused. Easy fix; will spin a v2.
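
To see why the _Generic dispatch breaks: NULL expands to ((void *)0), and
_Generic selects on the exact type of its controlling expression, so a
void * selector matches neither the struct page * nor the
const struct page * association. A minimal userspace sketch of the same
dispatch shape (not kernel code; _compound_head() replaced by a plain cast):

	#include <stdio.h>

	struct page { int _; };
	struct folio { int _; };

	/* Same dispatch shape as page_folio(); head lookup elided. */
	#define page_folio(p)	(_Generic((p),				\
		const struct page *:	(const struct folio *)(p),	\
		struct page *:		(struct folio *)(p)))

	int main(void)
	{
		struct page pg;
		struct folio *folio = page_folio(&pg); /* OK: struct page * */

		/*
		 * Uncommenting the next line reproduces the robot's error:
		 * NULL is void *, which hits no association.
		 *
		 *	page_folio(NULL);
		 */
		printf("folio at %p\n", (void *)folio);
		return 0;
	}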

Patch

diff --git a/mm/internal.h b/mm/internal.h
index 1d6f4e168510..8a6e83315369 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -515,10 +515,9 @@  extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
  * should be called with vma's mmap_lock held for read or write,
  * under page table lock for the pte/pmd being added or removed.
  *
- * mlock is usually called at the end of page_add_*_rmap(),
- * munlock at the end of page_remove_rmap(); but new anon
- * pages are managed by lru_cache_add_inactive_or_unevictable()
- * calling mlock_new_page().
+ * mlock is usually called at the end of page_add_*_rmap(), munlock at
+ * the end of page_remove_rmap(); but new anon folios are managed by
+ * folio_add_lru_vma() calling mlock_new_folio().
  *
  * @compound is used to include pmd mappings of THPs, but filter out
  * pte mappings of THPs, which cannot be consistently counted: a pte
@@ -547,15 +546,22 @@  static inline void mlock_vma_page(struct page *page,
 	mlock_vma_folio(page_folio(page), vma, compound);
 }
 
-void munlock_page(struct page *page);
-static inline void munlock_vma_page(struct page *page,
+void munlock_folio(struct folio *folio);
+
+static inline void munlock_vma_folio(struct folio *folio,
 			struct vm_area_struct *vma, bool compound)
 {
 	if (unlikely(vma->vm_flags & VM_LOCKED) &&
-	    (compound || !PageTransCompound(page)))
-		munlock_page(page);
+	    (compound || !folio_test_large(folio)))
+		munlock_folio(folio);
+}
+
+static inline void munlock_vma_page(struct page *page,
+			struct vm_area_struct *vma, bool compound)
+{
+	munlock_vma_folio(page_folio(page), vma, compound);
 }
-void mlock_new_page(struct page *page);
+void mlock_new_folio(struct folio *folio);
 bool need_mlock_page_drain(int cpu);
 void mlock_page_drain_local(void);
 void mlock_page_drain_remote(int cpu);
@@ -647,7 +653,7 @@  static inline void mlock_vma_page(struct page *page,
 			struct vm_area_struct *vma, bool compound) { }
 static inline void munlock_vma_page(struct page *page,
 			struct vm_area_struct *vma, bool compound) { }
-static inline void mlock_new_page(struct page *page) { }
+static inline void mlock_new_folio(struct folio *folio) { }
 static inline bool need_mlock_page_drain(int cpu) { return false; }
 static inline void mlock_page_drain_local(void) { }
 static inline void mlock_page_drain_remote(int cpu) { }
diff --git a/mm/mlock.c b/mm/mlock.c
index e9ba47fe67ed..3982ef4d1632 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -262,13 +262,12 @@  void mlock_folio(struct folio *folio)
 }
 
 /**
- * mlock_new_page - mlock a newly allocated page not yet on LRU
- * @page: page to be mlocked, either a normal page or a THP head.
+ * mlock_new_folio - mlock a newly allocated folio not yet on LRU
+ * @folio: folio to be mlocked, either normal or a THP head.
  */
-void mlock_new_page(struct page *page)
+void mlock_new_folio(struct folio *folio)
 {
 	struct folio_batch *fbatch;
-	struct folio *folio = page_folio(page);
 	int nr_pages = folio_nr_pages(folio);
 
 	local_lock(&mlock_fbatch.lock);
@@ -286,13 +285,12 @@  void mlock_new_page(struct page *page)
 }
 
 /**
- * munlock_page - munlock a page
- * @page: page to be munlocked, either a normal page or a THP head.
+ * munlock_folio - munlock a folio
+ * @folio: folio to be munlocked, either normal or a THP head.
  */
-void munlock_page(struct page *page)
+void munlock_folio(struct folio *folio)
 {
 	struct folio_batch *fbatch;
-	struct folio *folio = page_folio(page);
 
 	local_lock(&mlock_fbatch.lock);
 	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
@@ -314,7 +312,7 @@  static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	spinlock_t *ptl;
 	pte_t *start_pte, *pte;
-	struct page *page;
+	struct folio *folio;
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
@@ -322,11 +320,11 @@  static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 			goto out;
 		if (is_huge_zero_pmd(*pmd))
 			goto out;
-		page = pmd_page(*pmd);
+		folio = page_folio(pmd_page(*pmd));
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_folio(page_folio(page));
+			mlock_folio(folio);
 		else
-			munlock_page(page);
+			munlock_folio(folio);
 		goto out;
 	}
 
@@ -334,15 +332,15 @@  static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;
-		page = vm_normal_page(vma, addr, *pte);
-		if (!page || is_zone_device_page(page))
+		folio = vm_normal_folio(vma, addr, *pte);
+		if (!folio || folio_is_zone_device(folio))
 			continue;
-		if (PageTransCompound(page))
+		if (folio_test_large(folio))
 			continue;
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_folio(page_folio(page));
+			mlock_folio(folio);
 		else
-			munlock_page(page);
+			munlock_folio(folio);
 	}
 	pte_unmap(start_pte);
 out:
diff --git a/mm/swap.c b/mm/swap.c
index e54e2a252e27..7df297b143f9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -562,7 +562,7 @@  void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
 	if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
-		mlock_new_page(&folio->page);
+		mlock_new_folio(folio);
 	else
 		folio_add_lru(folio);
 }