
[v5,24/27] mm/filemap: Convert wait_on_page_bit to wait_on_folio_bit

Message ID 20210320054104.1300774-25-willy@infradead.org (mailing list archive)
State New, archived
Headers show
Series Memory Folios

Commit Message

Matthew Wilcox March 20, 2021, 5:41 a.m. UTC
We must always wait on the folio, otherwise we won't be woken up.

This commit shrinks the kernel by 691 bytes, mostly due to moving
the page waitqueue lookup into wait_on_folio_bit_common().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/netfs.h   |  2 +-
 include/linux/pagemap.h | 10 ++++----
 mm/filemap.c            | 56 ++++++++++++++++++-----------------------
 mm/page-writeback.c     |  4 +--
 4 files changed, 33 insertions(+), 39 deletions(-)
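
As a minimal sketch (not part of the patch; the helper name is hypothetical),
the caller-side pattern this conversion enables mirrors the netfs.h hunk
below: resolve the page to its folio first, then wait on the folio, so the
waiter hashes to the same page waitqueue the waker will kick for the head page.

	/* Sketch only: always wait on the folio, never on a tail page. */
	static inline void wait_on_page_fscache_example(struct page *page)
	{
		if (PageFsCache(page))
			wait_on_folio_bit(page_folio(page), PG_fscache);
	}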

Comments

kernel test robot March 21, 2021, 7:10 a.m. UTC | #1
Hi "Matthew,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on next-20210319]
[cannot apply to linux/master linus/master hnaz-linux-mm/master v5.12-rc3 v5.12-rc2 v5.12-rc1]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Matthew-Wilcox-Oracle/Memory-Folios/20210320-134732
base:    f00397ee41c79b6155b9b44abd0055b2c0621349
config: x86_64-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/39199d654ac6a6bbaba1620337574ec74adee8fe
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Matthew-Wilcox-Oracle/Memory-Folios/20210320-134732
        git checkout 39199d654ac6a6bbaba1620337574ec74adee8fe
        # save the attached .config to linux build tree
        make W=1 ARCH=x86_64 

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   fs/afs/write.c: In function 'afs_page_mkwrite':
>> fs/afs/write.c:849:6: error: implicit declaration of function 'wait_on_page_bit_killable'; did you mean 'wait_on_folio_bit_killable'? [-Werror=implicit-function-declaration]
     849 |      wait_on_page_bit_killable(page, PG_fscache) < 0)
         |      ^~~~~~~~~~~~~~~~~~~~~~~~~
         |      wait_on_folio_bit_killable
   cc1: some warnings being treated as errors


vim +849 fs/afs/write.c

9b3f26c9110dce David Howells           2009-04-03  827  
9b3f26c9110dce David Howells           2009-04-03  828  /*
9b3f26c9110dce David Howells           2009-04-03  829   * notification that a previously read-only page is about to become writable
9b3f26c9110dce David Howells           2009-04-03  830   * - if it returns an error, the caller will deliver a bus error signal
9b3f26c9110dce David Howells           2009-04-03  831   */
0722f186205976 Souptick Joarder        2018-08-23  832  vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
9b3f26c9110dce David Howells           2009-04-03  833  {
721597fd1aa668 David Howells           2020-10-20  834  	struct page *page = thp_head(vmf->page);
1cf7a1518aefa6 David Howells           2017-11-02  835  	struct file *file = vmf->vma->vm_file;
1cf7a1518aefa6 David Howells           2017-11-02  836  	struct inode *inode = file_inode(file);
1cf7a1518aefa6 David Howells           2017-11-02  837  	struct afs_vnode *vnode = AFS_FS_I(inode);
1cf7a1518aefa6 David Howells           2017-11-02  838  	unsigned long priv;
9b3f26c9110dce David Howells           2009-04-03  839  
721597fd1aa668 David Howells           2020-10-20  840  	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
9b3f26c9110dce David Howells           2009-04-03  841  
1cf7a1518aefa6 David Howells           2017-11-02  842  	sb_start_pagefault(inode->i_sb);
9b3f26c9110dce David Howells           2009-04-03  843  
1cf7a1518aefa6 David Howells           2017-11-02  844  	/* Wait for the page to be written to the cache before we allow it to
1cf7a1518aefa6 David Howells           2017-11-02  845  	 * be modified.  We then assume the entire page will need writing back.
1cf7a1518aefa6 David Howells           2017-11-02  846  	 */
77837f50249aa4 David Howells           2020-02-06  847  #ifdef CONFIG_AFS_FSCACHE
721597fd1aa668 David Howells           2020-10-20  848  	if (PageFsCache(page) &&
721597fd1aa668 David Howells           2020-10-20 @849  	    wait_on_page_bit_killable(page, PG_fscache) < 0)
77837f50249aa4 David Howells           2020-02-06  850  		return VM_FAULT_RETRY;
77837f50249aa4 David Howells           2020-02-06  851  #endif
9b3f26c9110dce David Howells           2009-04-03  852  
5dc1af598f0274 Matthew Wilcox (Oracle  2021-03-20  853) 	if (wait_on_folio_writeback_killable(page_folio(page)))
1cf7a1518aefa6 David Howells           2017-11-02  854  		return VM_FAULT_RETRY;
1cf7a1518aefa6 David Howells           2017-11-02  855  
721597fd1aa668 David Howells           2020-10-20  856  	if (lock_page_killable(page) < 0)
1cf7a1518aefa6 David Howells           2017-11-02  857  		return VM_FAULT_RETRY;
1cf7a1518aefa6 David Howells           2017-11-02  858  
1cf7a1518aefa6 David Howells           2017-11-02  859  	/* We mustn't change page->private until writeback is complete as that
1cf7a1518aefa6 David Howells           2017-11-02  860  	 * details the portion of the page we need to write back and we might
1cf7a1518aefa6 David Howells           2017-11-02  861  	 * need to redirty the page if there's a problem.
1cf7a1518aefa6 David Howells           2017-11-02  862  	 */
721597fd1aa668 David Howells           2020-10-20  863  	wait_on_page_writeback(page);
1cf7a1518aefa6 David Howells           2017-11-02  864  
721597fd1aa668 David Howells           2020-10-20  865  	priv = afs_page_dirty(page, 0, thp_size(page));
f86726a69dec5d David Howells           2020-10-22  866  	priv = afs_page_dirty_mmapped(priv);
721597fd1aa668 David Howells           2020-10-20  867  	if (PagePrivate(page)) {
721597fd1aa668 David Howells           2020-10-20  868  		set_page_private(page, priv);
721597fd1aa668 David Howells           2020-10-20  869  		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page);
721597fd1aa668 David Howells           2020-10-20  870  	} else {
721597fd1aa668 David Howells           2020-10-20  871  		attach_page_private(page, (void *)priv);
721597fd1aa668 David Howells           2020-10-20  872  		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page);
721597fd1aa668 David Howells           2020-10-20  873  	}
bb413489288e4e David Howells           2020-06-12  874  	file_update_time(file);
1cf7a1518aefa6 David Howells           2017-11-02  875  
1cf7a1518aefa6 David Howells           2017-11-02  876  	sb_end_pagefault(inode->i_sb);
1cf7a1518aefa6 David Howells           2017-11-02  877  	return VM_FAULT_LOCKED;
9b3f26c9110dce David Howells           2009-04-03  878  }
4343d00872e1de David Howells           2017-11-02  879  

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
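
A plausible fixup for the call site reported above (a sketch only, not part of
this series) would convert fs/afs/write.c to the folio helper introduced by
this patch, resolving the page with page_folio() before waiting:

/* Sketch: wait killably on the folio's fscache bit instead of the removed
 * page helper; wait_on_folio_bit_killable() returns 0 or -EINTR. */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    wait_on_folio_bit_killable(page_folio(page), PG_fscache) < 0)
		return VM_FAULT_RETRY;
#endif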

Patch

diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 9d3fbed4e30a..f44142dca767 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -54,7 +54,7 @@  static inline void unlock_page_fscache(struct page *page)
 static inline void wait_on_page_fscache(struct page *page)
 {
 	if (PageFsCache(page))
-		wait_on_page_bit(compound_head(page), PG_fscache);
+		wait_on_folio_bit(page_folio(page), PG_fscache);
 }
 
 enum netfs_read_source {
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c92782b77d98..7ddaabbd1ddb 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -770,11 +770,11 @@  static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
 }
 
 /*
- * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
+ * This is exported only for wait_on_folio_locked/wait_on_folio_writeback, etc.,
  * and should not be used directly.
  */
-extern void wait_on_page_bit(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+extern void wait_on_folio_bit(struct folio *folio, int bit_nr);
+extern int wait_on_folio_bit_killable(struct folio *folio, int bit_nr);
 
 /* 
  * Wait for a folio to be unlocked.
@@ -786,14 +786,14 @@  extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
 static inline void wait_on_folio_locked(struct folio *folio)
 {
 	if (FolioLocked(folio))
-		wait_on_page_bit(&folio->page, PG_locked);
+		wait_on_folio_bit(folio, PG_locked);
 }
 
 static inline int wait_on_folio_locked_killable(struct folio *folio)
 {
 	if (!FolioLocked(folio))
 		return 0;
-	return wait_on_page_bit_killable(&folio->page, PG_locked);
+	return wait_on_folio_bit_killable(folio, PG_locked);
 }
 
 static inline void wait_on_page_locked(struct page *page)
diff --git a/mm/filemap.c b/mm/filemap.c
index dc7deb8c36ee..f8746c149562 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1102,7 +1102,7 @@  static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
 	 *
 	 * So update the flags atomically, and wake up the waiter
 	 * afterwards to avoid any races. This store-release pairs
-	 * with the load-acquire in wait_on_page_bit_common().
+	 * with the load-acquire in wait_on_folio_bit_common().
 	 */
 	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
 	wake_up_state(wait->private, mode);
@@ -1183,7 +1183,7 @@  static void wake_up_folio(struct folio *folio, int bit)
 }
 
 /*
- * A choice of three behaviors for wait_on_page_bit_common():
+ * A choice of three behaviors for wait_on_folio_bit_common():
  */
 enum behavior {
 	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
@@ -1217,9 +1217,10 @@  static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
 /* How many times do we accept lock stealing from under a waiter? */
 int sysctl_page_lock_unfairness = 5;
 
-static inline int wait_on_page_bit_common(wait_queue_head_t *q,
-	struct page *page, int bit_nr, int state, enum behavior behavior)
+static inline int wait_on_folio_bit_common(struct folio *folio, int bit_nr,
+		int state, enum behavior behavior)
 {
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
 	int unfairness = sysctl_page_lock_unfairness;
 	struct wait_page_queue wait_page;
 	wait_queue_entry_t *wait = &wait_page.wait;
@@ -1228,8 +1229,8 @@  static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	unsigned long pflags;
 
 	if (bit_nr == PG_locked &&
-	    !PageUptodate(page) && PageWorkingset(page)) {
-		if (!PageSwapBacked(page)) {
+	    !FolioUptodate(folio) && FolioWorkingset(folio)) {
+		if (!FolioSwapBacked(folio)) {
 			delayacct_thrashing_start();
 			delayacct = true;
 		}
@@ -1239,7 +1240,7 @@  static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 
 	init_wait(wait);
 	wait->func = wake_page_function;
-	wait_page.page = page;
+	wait_page.page = &folio->page;
 	wait_page.bit_nr = bit_nr;
 
 repeat:
@@ -1254,7 +1255,7 @@  static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	 * Do one last check whether we can get the
 	 * page bit synchronously.
 	 *
-	 * Do the SetPageWaiters() marking before that
+	 * Do the SetFolioWaiters() marking before that
 	 * to let any waker we _just_ missed know they
 	 * need to wake us up (otherwise they'll never
 	 * even go to the slow case that looks at the
@@ -1265,8 +1266,8 @@  static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	 * lock to avoid races.
 	 */
 	spin_lock_irq(&q->lock);
-	SetPageWaiters(page);
-	if (!trylock_page_bit_common(page, bit_nr, wait))
+	SetFolioWaiters(folio);
+	if (!trylock_page_bit_common(&folio->page, bit_nr, wait))
 		__add_wait_queue_entry_tail(q, wait);
 	spin_unlock_irq(&q->lock);
 
@@ -1276,10 +1277,10 @@  static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	 * see whether the page bit testing has already
 	 * been done by the wake function.
 	 *
-	 * We can drop our reference to the page.
+	 * We can drop our reference to the folio.
 	 */
 	if (behavior == DROP)
-		put_page(page);
+		put_folio(folio);
 
 	/*
 	 * Note that until the "finish_wait()", or until
@@ -1316,7 +1317,7 @@  static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 		 *
 		 * And if that fails, we'll have to retry this all.
 		 */
-		if (unlikely(test_and_set_bit(bit_nr, &page->flags)))
+		if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
 			goto repeat;
 
 		wait->flags |= WQ_FLAG_DONE;
@@ -1325,7 +1326,7 @@  static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 
 	/*
 	 * If a signal happened, this 'finish_wait()' may remove the last
-	 * waiter from the wait-queues, but the PageWaiters bit will remain
+	 * waiter from the wait-queues, but the FolioWaiters bit will remain
 	 * set. That's ok. The next wakeup will take care of it, and trying
 	 * to do it here would be difficult and prone to races.
 	 */
@@ -1356,19 +1357,17 @@  static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
 }
 
-void wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_folio_bit(struct folio *folio, int bit_nr)
 {
-	wait_queue_head_t *q = page_waitqueue(page);
-	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
+	wait_on_folio_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
 }
-EXPORT_SYMBOL(wait_on_page_bit);
+EXPORT_SYMBOL(wait_on_folio_bit);
 
-int wait_on_page_bit_killable(struct page *page, int bit_nr)
+int wait_on_folio_bit_killable(struct folio *folio, int bit_nr)
 {
-	wait_queue_head_t *q = page_waitqueue(page);
-	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
+	return wait_on_folio_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
 }
-EXPORT_SYMBOL(wait_on_page_bit_killable);
+EXPORT_SYMBOL(wait_on_folio_bit_killable);
 
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
@@ -1385,11 +1384,8 @@  EXPORT_SYMBOL(wait_on_page_bit_killable);
  */
 int put_and_wait_on_page_locked(struct page *page, int state)
 {
-	wait_queue_head_t *q;
-
-	page = compound_head(page);
-	q = page_waitqueue(page);
-	return wait_on_page_bit_common(q, page, PG_locked, state, DROP);
+	return wait_on_folio_bit_common(page_folio(page), PG_locked, state,
+			DROP);
 }
 
 /**
@@ -1540,16 +1536,14 @@  EXPORT_SYMBOL_GPL(page_endio);
  */
 void __lock_folio(struct folio *folio)
 {
-	wait_queue_head_t *q = page_waitqueue(&folio->page);
-	wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_UNINTERRUPTIBLE,
+	wait_on_folio_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
 				EXCLUSIVE);
 }
 EXPORT_SYMBOL(__lock_folio);
 
 int __lock_folio_killable(struct folio *folio)
 {
-	wait_queue_head_t *q = page_waitqueue(&folio->page);
-	return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
+	return wait_on_folio_bit_common(folio, PG_locked, TASK_KILLABLE,
 					EXCLUSIVE);
 }
 EXPORT_SYMBOL_GPL(__lock_folio_killable);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c222f88cf06b..b29737cd8049 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2832,7 +2832,7 @@  void wait_on_folio_writeback(struct folio *folio)
 {
 	while (FolioWriteback(folio)) {
 		trace_wait_on_page_writeback(&folio->page, folio_mapping(folio));
-		wait_on_page_bit(&folio->page, PG_writeback);
+		wait_on_folio_bit(folio, PG_writeback);
 	}
 }
 EXPORT_SYMBOL_GPL(wait_on_folio_writeback);
@@ -2853,7 +2853,7 @@  int wait_on_folio_writeback_killable(struct folio *folio)
 {
 	while (FolioWriteback(folio)) {
 		trace_wait_on_page_writeback(&folio->page, folio_mapping(folio));
-		if (wait_on_page_bit_killable(&folio->page, PG_writeback))
+		if (wait_on_folio_bit_killable(folio, PG_writeback))
 			return -EINTR;
 	}