[mm-unstable,v1,4/4] mm: clean up mem_cgroup_iter()

Message ID 20240724190214.1108049-5-kinseyho@google.com (mailing list archive)
State New
Series Improve mem_cgroup_iter()

Commit Message

Kinsey Ho July 24, 2024, 7:02 p.m. UTC
A cleanup to make variable names clearer and to improve code
readability.

No functional change.

Signed-off-by: Kinsey Ho <kinseyho@google.com>
---
 mm/memcontrol.c | 36 ++++++++++++++----------------------
 1 file changed, 14 insertions(+), 22 deletions(-)

Comments

kernel test robot July 25, 2024, 6:15 p.m. UTC | #1
Hi Kinsey,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on linus/master v6.10 next-20240725]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Kinsey-Ho/mm-don-t-hold-css-refcnt-during-traversal/20240725-030750
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20240724190214.1108049-5-kinseyho%40google.com
patch subject: [PATCH mm-unstable v1 4/4] mm: clean up mem_cgroup_iter()
config: x86_64-randconfig-121-20240725 (https://download.01.org/0day-ci/archive/20240726/202407260248.CBU1JMb1-lkp@intel.com/config)
compiler: gcc-7 (Ubuntu 7.5.0-6ubuntu2) 7.5.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240726/202407260248.CBU1JMb1-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202407260248.CBU1JMb1-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
   mm/memcontrol.c:1049:21: sparse: sparse: incorrect type in initializer (different address spaces) @@     expected struct mem_cgroup [noderef] __rcu *__old @@     got struct mem_cgroup *[assigned] pos @@
   mm/memcontrol.c:1049:21: sparse:     expected struct mem_cgroup [noderef] __rcu *__old
   mm/memcontrol.c:1049:21: sparse:     got struct mem_cgroup *[assigned] pos
>> mm/memcontrol.c:1049:21: sparse: sparse: incorrect type in initializer (different address spaces) @@     expected struct mem_cgroup [noderef] __rcu *__new @@     got struct mem_cgroup *[assigned] next @@
   mm/memcontrol.c:1049:21: sparse:     expected struct mem_cgroup [noderef] __rcu *__new
   mm/memcontrol.c:1049:21: sparse:     got struct mem_cgroup *[assigned] next
   mm/memcontrol.c:1049:57: sparse: sparse: incompatible types in comparison expression (different address spaces):
   mm/memcontrol.c:1049:57: sparse:    struct mem_cgroup [noderef] __rcu *
   mm/memcontrol.c:1049:57: sparse:    struct mem_cgroup *
   mm/memcontrol.c:1101:17: sparse: sparse: incorrect type in initializer (different address spaces) @@     expected struct mem_cgroup [noderef] __rcu *__old @@     got struct mem_cgroup *dead_memcg @@
   mm/memcontrol.c:1101:17: sparse:     expected struct mem_cgroup [noderef] __rcu *__old
   mm/memcontrol.c:1101:17: sparse:     got struct mem_cgroup *dead_memcg
   mm/memcontrol.c: note: in included file (through include/linux/cgroup-defs.h):
   include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
   mm/memcontrol.c: note: in included file:
   include/linux/memcontrol.h:747:9: sparse: sparse: context imbalance in 'folio_lruvec_lock' - wrong count at exit
   include/linux/memcontrol.h:747:9: sparse: sparse: context imbalance in 'folio_lruvec_lock_irq' - wrong count at exit
   include/linux/memcontrol.h:747:9: sparse: sparse: context imbalance in 'folio_lruvec_lock_irqsave' - wrong count at exit
   mm/memcontrol.c: note: in included file (through include/linux/cgroup-defs.h):
   include/linux/list.h:83:21: sparse: sparse: self-comparison always evaluates to true
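
The new warning at mm/memcontrol.c:1049 comes from the cmpxchg() on
iter->position: sparse sees the field as __rcu-annotated (per the earlier
patch in this series), while 'pos' and 'next' are plain pointers, hence the
"different address spaces" complaint. As a rough illustration only (not the
posted patch), one common way to keep the annotation consistent is to cast
explicitly with RCU_INITIALIZER()/unrcu_pointer():

	/*
	 * Hypothetical sketch: make the cmpxchg() operate entirely in the
	 * __rcu address space, then strip the annotation from the result.
	 */
	if (unrcu_pointer(cmpxchg(&iter->position,
				  RCU_INITIALIZER(pos),
				  RCU_INITIALIZER(next))) != pos) {
		if (css && css != &root->css)
			css_put(css);
		goto restart;
	}

Whether that, dropping the __rcu qualifier, or a different accessor is the
right resolution is up to the author; the sketch only shows the class of fix
sparse is asking for.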

vim +1049 mm/memcontrol.c

   974	
   975	/**
   976	 * mem_cgroup_iter - iterate over memory cgroup hierarchy
   977	 * @root: hierarchy root
   978	 * @prev: previously returned memcg, NULL on first invocation
   979	 * @reclaim: cookie for shared reclaim walks, NULL for full walks
   980	 *
   981	 * Returns references to children of the hierarchy below @root, or
   982	 * @root itself, or %NULL after a full round-trip.
   983	 *
   984	 * Caller must pass the return value in @prev on subsequent
   985	 * invocations for reference counting, or use mem_cgroup_iter_break()
   986	 * to cancel a hierarchy walk before the round-trip is complete.
   987	 *
   988	 * Reclaimers can specify a node in @reclaim to divide up the memcgs
   989	 * in the hierarchy among all concurrent reclaimers operating on the
   990	 * same node.
   991	 */
   992	struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
   993					   struct mem_cgroup *prev,
   994					   struct mem_cgroup_reclaim_cookie *reclaim)
   995	{
   996		struct mem_cgroup_reclaim_iter *iter;
   997		struct cgroup_subsys_state *css;
   998		struct mem_cgroup *pos;
   999		struct mem_cgroup *next = NULL;
  1000	
  1001		if (mem_cgroup_disabled())
  1002			return NULL;
  1003	
  1004		if (!root)
  1005			root = root_mem_cgroup;
  1006	
  1007		rcu_read_lock();
  1008	restart:
  1009		if (reclaim) {
  1010			int gen;
  1011			int nid = reclaim->pgdat->node_id;
  1012	
  1013			iter = &root->nodeinfo[nid]->iter;
  1014			gen = atomic_read(&iter->generation);
  1015	
  1016			/*
  1017			 * On start, join the current reclaim iteration cycle.
  1018			 * Exit when a concurrent walker completes it.
  1019			 */
  1020			if (!prev)
  1021				reclaim->generation = gen;
  1022			else if (reclaim->generation != gen)
  1023				goto out_unlock;
  1024	
  1025			pos = rcu_dereference(iter->position);
  1026		} else
  1027			pos = prev;
  1028	
  1029		css = pos ? &pos->css : NULL;
  1030	
  1031		while ((css = css_next_descendant_pre(css, &root->css))) {
  1032			/*
  1033			 * Verify the css and acquire a reference.  The root
  1034			 * is provided by the caller, so we know it's alive
  1035			 * and kicking, and don't take an extra reference.
  1036			 */
  1037			if (css == &root->css || css_tryget(css))
  1038				break;
  1039		}
  1040	
  1041		next = mem_cgroup_from_css(css);
  1042	
  1043		if (reclaim) {
  1044			/*
  1045			 * The position could have already been updated by a competing
  1046			 * thread, so check that the value hasn't changed since we read
  1047			 * it to avoid reclaiming from the same cgroup twice.
  1048			 */
> 1049			if (cmpxchg(&iter->position, pos, next) != pos) {
  1050				if (css && css != &root->css)
  1051					css_put(css);
  1052				goto restart;
  1053			}
  1054	
  1055			if (!next) {
  1056				atomic_inc(&iter->generation);
  1057	
  1058				/*
  1059				 * Reclaimers share the hierarchy walk, and a
  1060				 * new one might jump in right at the end of
  1061				 * the hierarchy - make sure they see at least
  1062				 * one group and restart from the beginning.
  1063				 */
  1064				if (!prev)
  1065					goto restart;
  1066			}
  1067		}
  1068	
  1069	out_unlock:
  1070		rcu_read_unlock();
  1071		if (prev && prev != root)
  1072			css_put(&prev->css);
  1073	
  1074		return next;
  1075	}
  1076
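
For reference, the kernel-doc above implies the usual calling pattern: the
memcg returned by one call is passed back as @prev so the iterator can drop
its css reference, and mem_cgroup_iter_break() releases that reference when
a walk is abandoned early. A minimal sketch of a full-hierarchy walk
('should_stop' is a made-up placeholder for the caller's exit condition):

	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(root, NULL, NULL);
	do {
		/* work on memcg here */
		if (should_stop) {
			/* drops the reference held on memcg */
			mem_cgroup_iter_break(root, memcg);
			break;
		}
		/* passing memcg back lets the iterator put its css ref */
	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));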

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4314a2b8848d..7e3e95c62122 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -992,9 +992,9 @@  struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 				   struct mem_cgroup_reclaim_cookie *reclaim)
 {
 	struct mem_cgroup_reclaim_iter *iter;
-	struct cgroup_subsys_state *css = NULL;
-	struct mem_cgroup *memcg = NULL;
-	struct mem_cgroup *pos = NULL;
+	struct cgroup_subsys_state *css;
+	struct mem_cgroup *pos;
+	struct mem_cgroup *next = NULL;
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -1006,10 +1006,9 @@  struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 restart:
 	if (reclaim) {
 		int gen;
-		struct mem_cgroup_per_node *mz;
+		int nid = reclaim->pgdat->node_id;
 
-		mz = root->nodeinfo[reclaim->pgdat->node_id];
-		iter = &mz->iter;
+		iter = &root->nodeinfo[nid]->iter;
 		gen = atomic_read(&iter->generation);
 
 		/*
@@ -1022,43 +1021,36 @@  struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 			goto out_unlock;
 
 		pos = rcu_dereference(iter->position);
-	} else if (prev) {
+	} else
 		pos = prev;
-	}
 
-	if (pos)
-		css = &pos->css;
-
-	for (;;) {
-		css = css_next_descendant_pre(css, &root->css);
-		if (!css) {
-			break;
-		}
+	css = pos ? &pos->css : NULL;
 
+	while ((css = css_next_descendant_pre(css, &root->css))) {
 		/*
 		 * Verify the css and acquire a reference.  The root
 		 * is provided by the caller, so we know it's alive
 		 * and kicking, and don't take an extra reference.
 		 */
-		if (css == &root->css || css_tryget(css)) {
-			memcg = mem_cgroup_from_css(css);
+		if (css == &root->css || css_tryget(css))
 			break;
-		}
 	}
 
+	next = mem_cgroup_from_css(css);
+
 	if (reclaim) {
 		/*
 		 * The position could have already been updated by a competing
 		 * thread, so check that the value hasn't changed since we read
 		 * it to avoid reclaiming from the same cgroup twice.
 		 */
-		if (cmpxchg(&iter->position, pos, memcg) != pos) {
+		if (cmpxchg(&iter->position, pos, next) != pos) {
 			if (css && css != &root->css)
 				css_put(css);
 			goto restart;
 		}
 
-		if (!memcg) {
+		if (!next) {
 			atomic_inc(&iter->generation);
 
 			/*
@@ -1077,7 +1069,7 @@  struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 	if (prev && prev != root)
 		css_put(&prev->css);
 
-	return memcg;
+	return next;
 }
 
 /**