[7/7] mm/list_lru: Simplify the list_lru walk callback function

Message ID 20240624175313.47329-8-ryncsn@gmail.com
State New
Series Split list_lru lock into per-cgroup scope

Commit Message

Kairui Song June 24, 2024, 5:53 p.m. UTC
From: Kairui Song <kasong@tencent.com>

Now that isolation no longer takes the global list_lru node lock and uses
the per-cgroup lock instead, and since that lock lives inside the
list_lru_one being walked, there is no longer any need to pass the lock
to the walk callback explicitly.

Signed-off-by: Kairui Song <kasong@tencent.com>
---
 drivers/android/binder_alloc.c |  5 ++---
 drivers/android/binder_alloc.h |  2 +-
 fs/dcache.c                    |  4 ++--
 fs/gfs2/quota.c                |  2 +-
 fs/inode.c                     |  4 ++--
 fs/nfs/nfs42xattr.c            |  4 ++--
 fs/nfsd/filecache.c            |  5 +----
 fs/xfs/xfs_buf.c               |  2 --
 fs/xfs/xfs_qm.c                |  5 ++---
 include/linux/list_lru.h       |  2 +-
 mm/list_lru.c                  |  2 +-
 mm/workingset.c                | 15 +++++++--------
 mm/zswap.c                     |  4 ++--
 13 files changed, 24 insertions(+), 32 deletions(-)
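
For reference, a minimal sketch of a walk callback under the new
signature (the function and dispose-list names here are hypothetical,
for illustration only):

#include <linux/list_lru.h>

static enum lru_status example_isolate(struct list_head *item,
				       struct list_lru_one *lru,
				       void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	/*
	 * The per-cgroup lock is reachable through the list_lru_one
	 * itself, so no separate spinlock_t parameter is needed. A
	 * callback that must block can drop &lru->lock and return
	 * LRU_REMOVED_RETRY or LRU_RETRY instead.
	 */
	list_lru_isolate_move(lru, item, dispose);
	return LRU_REMOVED;
}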

Comments

kernel test robot June 27, 2024, 7:58 p.m. UTC | #1
Hi Kairui,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on staging/staging-testing staging/staging-next staging/staging-linus xfs-linux/for-next linus/master v6.10-rc5 next-20240626]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Kairui-Song/mm-swap-workingset-make-anon-workingset-nodes-memcg-aware/20240625-231015
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20240624175313.47329-8-ryncsn%40gmail.com
patch subject: [PATCH 7/7] mm/list_lru: Simplify the list_lru walk callback function
config: i386-buildonly-randconfig-001-20240628 (https://download.01.org/0day-ci/archive/20240628/202406280355.Qwjjjiug-lkp@intel.com/config)
compiler: gcc-10 (Ubuntu 10.5.0-1ubuntu1) 10.5.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240628/202406280355.Qwjjjiug-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202406280355.Qwjjjiug-lkp@intel.com/

All warnings (new ones prefixed by >>):

   drivers/android/binder_alloc.c:1060: warning: Function parameter or struct member 'lru' not described in 'binder_alloc_free_page'
>> drivers/android/binder_alloc.c:1060: warning: Excess function parameter 'lock' description in 'binder_alloc_free_page'


vim +1060 drivers/android/binder_alloc.c

0c972a05cde66e Todd Kjos         2017-06-29  1046  
f2517eb76f1f2f Sherry Yang       2017-08-23  1047  /**
f2517eb76f1f2f Sherry Yang       2017-08-23  1048   * binder_alloc_free_page() - shrinker callback to free pages
f2517eb76f1f2f Sherry Yang       2017-08-23  1049   * @item:   item to free
f2517eb76f1f2f Sherry Yang       2017-08-23  1050   * @lock:   lock protecting the item
f2517eb76f1f2f Sherry Yang       2017-08-23  1051   * @cb_arg: callback argument
f2517eb76f1f2f Sherry Yang       2017-08-23  1052   *
f2517eb76f1f2f Sherry Yang       2017-08-23  1053   * Called from list_lru_walk() in binder_shrink_scan() to free
f2517eb76f1f2f Sherry Yang       2017-08-23  1054   * up pages when the system is under memory pressure.
f2517eb76f1f2f Sherry Yang       2017-08-23  1055   */
f2517eb76f1f2f Sherry Yang       2017-08-23  1056  enum lru_status binder_alloc_free_page(struct list_head *item,
f2517eb76f1f2f Sherry Yang       2017-08-23  1057  				       struct list_lru_one *lru,
f2517eb76f1f2f Sherry Yang       2017-08-23  1058  				       void *cb_arg)
5672291f7d5dc5 Kairui Song       2024-06-25  1059  	__must_hold(&lru->lock)
f2517eb76f1f2f Sherry Yang       2017-08-23 @1060  {
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1061  	struct binder_lru_page *page = container_of(item, typeof(*page), lru);
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1062  	struct binder_alloc *alloc = page->alloc;
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1063  	struct mm_struct *mm = alloc->mm;
a1b2289cef92ef Sherry Yang       2017-10-03  1064  	struct vm_area_struct *vma;
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1065  	struct page *page_to_free;
df9aabead791d7 Carlos Llamas     2023-12-01  1066  	unsigned long page_addr;
f2517eb76f1f2f Sherry Yang       2017-08-23  1067  	size_t index;
f2517eb76f1f2f Sherry Yang       2017-08-23  1068  
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1069  	if (!mmget_not_zero(mm))
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1070  		goto err_mmget;
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1071  	if (!mmap_read_trylock(mm))
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1072  		goto err_mmap_read_lock_failed;
7710e2cca32e7f Carlos Llamas     2023-12-01  1073  	if (!spin_trylock(&alloc->lock))
7710e2cca32e7f Carlos Llamas     2023-12-01  1074  		goto err_get_alloc_lock_failed;
f2517eb76f1f2f Sherry Yang       2017-08-23  1075  	if (!page->page_ptr)
f2517eb76f1f2f Sherry Yang       2017-08-23  1076  		goto err_page_already_freed;
f2517eb76f1f2f Sherry Yang       2017-08-23  1077  
f2517eb76f1f2f Sherry Yang       2017-08-23  1078  	index = page - alloc->pages;
df9aabead791d7 Carlos Llamas     2023-12-01  1079  	page_addr = alloc->buffer + index * PAGE_SIZE;
5cec2d2e5839f9 Todd Kjos         2019-03-01  1080  
3f489c2067c582 Carlos Llamas     2023-12-01  1081  	vma = vma_lookup(mm, page_addr);
3f489c2067c582 Carlos Llamas     2023-12-01  1082  	if (vma && vma != binder_alloc_get_vma(alloc))
3f489c2067c582 Carlos Llamas     2023-12-01  1083  		goto err_invalid_vma;
a1b2289cef92ef Sherry Yang       2017-10-03  1084  
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1085  	trace_binder_unmap_kernel_start(alloc, index);
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1086  
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1087  	page_to_free = page->page_ptr;
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1088  	page->page_ptr = NULL;
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1089  
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1090  	trace_binder_unmap_kernel_end(alloc, index);
a1b2289cef92ef Sherry Yang       2017-10-03  1091  
a1b2289cef92ef Sherry Yang       2017-10-03  1092  	list_lru_isolate(lru, item);
7710e2cca32e7f Carlos Llamas     2023-12-01  1093  	spin_unlock(&alloc->lock);
5672291f7d5dc5 Kairui Song       2024-06-25  1094  	spin_unlock(&lru->lock);
f2517eb76f1f2f Sherry Yang       2017-08-23  1095  
a1b2289cef92ef Sherry Yang       2017-10-03  1096  	if (vma) {
e41e164c3cdff6 Sherry Yang       2017-08-23  1097  		trace_binder_unmap_user_start(alloc, index);
e41e164c3cdff6 Sherry Yang       2017-08-23  1098  
e9adcfecf572fc Mike Kravetz      2023-01-03  1099  		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
f2517eb76f1f2f Sherry Yang       2017-08-23  1100  
e41e164c3cdff6 Sherry Yang       2017-08-23  1101  		trace_binder_unmap_user_end(alloc, index);
f2517eb76f1f2f Sherry Yang       2017-08-23  1102  	}
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1103  
d8ed45c5dcd455 Michel Lespinasse 2020-06-08  1104  	mmap_read_unlock(mm);
f867c771f98891 Tetsuo Handa      2020-07-17  1105  	mmput_async(mm);
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1106  	__free_page(page_to_free);
e41e164c3cdff6 Sherry Yang       2017-08-23  1107  
a1b2289cef92ef Sherry Yang       2017-10-03  1108  	return LRU_REMOVED_RETRY;
f2517eb76f1f2f Sherry Yang       2017-08-23  1109  
3f489c2067c582 Carlos Llamas     2023-12-01  1110  err_invalid_vma:
e50f4e6cc9bfac Carlos Llamas     2023-12-01  1111  err_page_already_freed:
7710e2cca32e7f Carlos Llamas     2023-12-01  1112  	spin_unlock(&alloc->lock);
7710e2cca32e7f Carlos Llamas     2023-12-01  1113  err_get_alloc_lock_failed:
3f489c2067c582 Carlos Llamas     2023-12-01  1114  	mmap_read_unlock(mm);
3e4e28c5a8f01e Michel Lespinasse 2020-06-08  1115  err_mmap_read_lock_failed:
a1b2289cef92ef Sherry Yang       2017-10-03  1116  	mmput_async(mm);
a0c2baaf81bd53 Sherry Yang       2017-10-20  1117  err_mmget:
f2517eb76f1f2f Sherry Yang       2017-08-23  1118  	return LRU_SKIP;
f2517eb76f1f2f Sherry Yang       2017-08-23  1119  }
f2517eb76f1f2f Sherry Yang       2017-08-23  1120
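
The fix for the kerneldoc warnings above is to drop the stale @lock
line and describe @lru instead; a sketch of the corrected comment (the
@lru wording is a suggestion, not taken from the patch):

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance holding @item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */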
Patch

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index dd47d621e561..c55cce54f20c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1055,9 +1055,8 @@  void binder_alloc_vma_close(struct binder_alloc *alloc)
  */
 enum lru_status binder_alloc_free_page(struct list_head *item,
 				       struct list_lru_one *lru,
-				       spinlock_t *lock,
 				       void *cb_arg)
-	__must_hold(lock)
+	__must_hold(&lru->lock)
 {
 	struct binder_lru_page *page = container_of(item, typeof(*page), lru);
 	struct binder_alloc *alloc = page->alloc;
@@ -1092,7 +1091,7 @@  enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	list_lru_isolate(lru, item);
 	spin_unlock(&alloc->lock);
-	spin_unlock(lock);
+	spin_unlock(&lru->lock);
 
 	if (vma) {
 		trace_binder_unmap_user_start(alloc, index);
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 70387234477e..c02c8ebcb466 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -118,7 +118,7 @@  static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
 #endif
 enum lru_status binder_alloc_free_page(struct list_head *item,
 				       struct list_lru_one *lru,
-				       spinlock_t *lock, void *cb_arg);
+				       void *cb_arg);
 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 					   size_t data_size,
 					   size_t offsets_size,
diff --git a/fs/dcache.c b/fs/dcache.c
index 407095188f83..4e5f8382ee3f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1077,7 +1077,7 @@  void shrink_dentry_list(struct list_head *list)
 }
 
 static enum lru_status dentry_lru_isolate(struct list_head *item,
-		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+		struct list_lru_one *lru, void *arg)
 {
 	struct list_head *freeable = arg;
 	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
@@ -1158,7 +1158,7 @@  long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
 }
 
 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
-		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+		struct list_lru_one *lru, void *arg)
 {
 	struct list_head *freeable = arg;
 	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index aa9cf0102848..31aece125a75 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -152,7 +152,7 @@  static void gfs2_qd_list_dispose(struct list_head *list)
 
 
 static enum lru_status gfs2_qd_isolate(struct list_head *item,
-		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+		struct list_lru_one *lru, void *arg)
 {
 	struct list_head *dispose = arg;
 	struct gfs2_quota_data *qd =
diff --git a/fs/inode.c b/fs/inode.c
index 35da4e54e365..1fb52253a843 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -803,7 +803,7 @@  void invalidate_inodes(struct super_block *sb)
  * with this flag set because they are the inodes that are out of order.
  */
 static enum lru_status inode_lru_isolate(struct list_head *item,
-		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+		struct list_lru_one *lru, void *arg)
 {
 	struct list_head *freeable = arg;
 	struct inode	*inode = container_of(item, struct inode, i_lru);
@@ -845,7 +845,7 @@  static enum lru_status inode_lru_isolate(struct list_head *item,
 	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
 		__iget(inode);
 		spin_unlock(&inode->i_lock);
-		spin_unlock(lru_lock);
+		spin_unlock(&lru->lock);
 		if (remove_inode_buffers(inode)) {
 			unsigned long reap;
 			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
index b6e3d8f77b91..37d79400e5f4 100644
--- a/fs/nfs/nfs42xattr.c
+++ b/fs/nfs/nfs42xattr.c
@@ -802,7 +802,7 @@  static struct shrinker *nfs4_xattr_large_entry_shrinker;
 
 static enum lru_status
 cache_lru_isolate(struct list_head *item,
-	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+	struct list_lru_one *lru, void *arg)
 {
 	struct list_head *dispose = arg;
 	struct inode *inode;
@@ -867,7 +867,7 @@  nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 
 static enum lru_status
 entry_lru_isolate(struct list_head *item,
-	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+	struct list_lru_one *lru, void *arg)
 {
 	struct list_head *dispose = arg;
 	struct nfs4_xattr_bucket *bucket;
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index ad9083ca144b..f68c4a1c529f 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -456,7 +456,6 @@  void nfsd_file_net_dispose(struct nfsd_net *nn)
  * nfsd_file_lru_cb - Examine an entry on the LRU list
  * @item: LRU entry to examine
  * @lru: controlling LRU
- * @lock: LRU list lock (unused)
  * @arg: dispose list
  *
  * Return values:
@@ -466,9 +465,7 @@  void nfsd_file_net_dispose(struct nfsd_net *nn)
  */
 static enum lru_status
 nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
-		 spinlock_t *lock, void *arg)
-	__releases(lock)
-	__acquires(lock)
+		 void *arg)
 {
 	struct list_head *head = arg;
 	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index aa4dbda7b536..43b914c1f621 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1857,7 +1857,6 @@  static enum lru_status
 xfs_buftarg_drain_rele(
 	struct list_head	*item,
 	struct list_lru_one	*lru,
-	spinlock_t		*lru_lock,
 	void			*arg)
 
 {
@@ -1956,7 +1955,6 @@  static enum lru_status
 xfs_buftarg_isolate(
 	struct list_head	*item,
 	struct list_lru_one	*lru,
-	spinlock_t		*lru_lock,
 	void			*arg)
 {
 	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 8d17099765ae..f1b6e73c0e68 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -412,9 +412,8 @@  static enum lru_status
 xfs_qm_dquot_isolate(
 	struct list_head	*item,
 	struct list_lru_one	*lru,
-	spinlock_t		*lru_lock,
 	void			*arg)
-		__releases(lru_lock) __acquires(lru_lock)
+		__releases(&lru->lock) __acquires(&lru->lock)
 {
 	struct xfs_dquot	*dqp = container_of(item,
 						struct xfs_dquot, q_lru);
@@ -460,7 +459,7 @@  xfs_qm_dquot_isolate(
 		trace_xfs_dqreclaim_dirty(dqp);
 
 		/* we have to drop the LRU lock to flush the dquot */
-		spin_unlock(lru_lock);
+		spin_unlock(&lru->lock);
 
 		error = xfs_qm_dqflush(dqp, &bp);
 		if (error)
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index b84483ef93a7..df6b9374ca68 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -184,7 +184,7 @@  void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
 			   struct list_head *head);
 
 typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
-		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+		struct list_lru_one *list, void *cb_arg);
 
 /**
  * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
diff --git a/mm/list_lru.c b/mm/list_lru.c
index c503921cbb13..d8d653317c2c 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -279,7 +279,7 @@  __list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
 			break;
 		--*nr_to_walk;
 
-		ret = isolate(item, l, &l->lock, cb_arg);
+		ret = isolate(item, l, cb_arg);
 		switch (ret) {
 		/*
 		 * LRU_RETRY and LRU_REMOVED_RETRY will drop the lru lock,
diff --git a/mm/workingset.c b/mm/workingset.c
index 947423c3e719..e3552e7318a5 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -704,8 +704,7 @@  static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 
 static enum lru_status shadow_lru_isolate(struct list_head *item,
 					  struct list_lru_one *lru,
-					  spinlock_t *lru_lock,
-					  void *arg) __must_hold(lru_lock)
+					  void *arg) __must_hold(lru->lock)
 {
 	struct xa_node *node = container_of(item, struct xa_node, private_list);
 	struct address_space *mapping;
@@ -714,20 +713,20 @@  static enum lru_status shadow_lru_isolate(struct list_head *item,
 	/*
 	 * Page cache insertions and deletions synchronously maintain
 	 * the shadow node LRU under the i_pages lock and the
-	 * lru_lock.  Because the page cache tree is emptied before
-	 * the inode can be destroyed, holding the lru_lock pins any
+	 * &lru->lock. Because the page cache tree is emptied before
+	 * the inode can be destroyed, holding the &lru->lock pins any
 	 * address_space that has nodes on the LRU.
 	 *
 	 * We can then safely transition to the i_pages lock to
 	 * pin only the address_space of the particular node we want
-	 * to reclaim, take the node off-LRU, and drop the lru_lock.
+	 * to reclaim, take the node off-LRU, and drop the &lru->lock.
 	 */
 
 	mapping = container_of(node->array, struct address_space, i_pages);
 
 	/* Coming from the list, invert the lock order */
 	if (!xa_trylock(&mapping->i_pages)) {
-		spin_unlock_irq(lru_lock);
+		spin_unlock_irq(&lru->lock);
 		ret = LRU_RETRY;
 		goto out;
 	}
@@ -736,7 +735,7 @@  static enum lru_status shadow_lru_isolate(struct list_head *item,
 	if (mapping->host != NULL) {
 		if (!spin_trylock(&mapping->host->i_lock)) {
 			xa_unlock(&mapping->i_pages);
-			spin_unlock_irq(lru_lock);
+			spin_unlock_irq(&lru->lock);
 			ret = LRU_RETRY;
 			goto out;
 		}
@@ -745,7 +744,7 @@  static enum lru_status shadow_lru_isolate(struct list_head *item,
 	list_lru_isolate(lru, item);
 	__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);
 
-	spin_unlock(lru_lock);
+	spin_unlock(&lru->lock);
 
 	/*
 	 * The nodes should only contain one or more shadow entries,
diff --git a/mm/zswap.c b/mm/zswap.c
index f7a2afaeea53..24e1e0c87172 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1097,7 +1097,7 @@  static int zswap_writeback_entry(struct zswap_entry *entry,
 * shrinker functions
 **********************************/
 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
-				       spinlock_t *lock, void *arg)
+				       void *arg)
 {
 	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
 	bool *encountered_page_in_swapcache = (bool *)arg;
@@ -1143,7 +1143,7 @@  static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
 	 * It's safe to drop the lock here because we return either
 	 * LRU_REMOVED_RETRY or LRU_RETRY.
 	 */
-	spin_unlock(lock);
+	spin_unlock(&l->lock);
 
 	writeback_result = zswap_writeback_entry(entry, swpentry);