
[1/3] mm, memcg: Don't put offlined memcg into local stock

Message ID 20211001190938.14050-2-longman@redhat.com (mailing list archive)
State New
Series mm, memcg: Miscellaneous cleanups

Commit Message

Waiman Long Oct. 1, 2021, 7:09 p.m. UTC
When freeing a page associated with an offlined memcg, refill_stock()
will put it into the local stock, delaying the memcg's demise until
another memcg comes in to take its place in the stock. To avoid that,
check for an offlined memcg and in that case go directly to the slowpath
for the uncharge via the repurposed cancel_charge() function.

Signed-off-by: Waiman Long <longman@redhat.com>
---
 mm/memcontrol.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

Comments

kernel test robot Oct. 1, 2021, 9:17 p.m. UTC | #1
Hi Waiman,

I love your patch! Yet something to improve:

[auto build test ERROR on hnaz-mm/master]

url:    https://github.com/0day-ci/linux/commits/Waiman-Long/mm-memcg-Miscellaneous-cleanups/20211002-031125
base:   https://github.com/hnaz/linux-mm master
config: nios2-randconfig-r024-20211001 (attached as .config)
compiler: nios2-linux-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/321484dcb4f16ca7bd626adf390222913d188ecc
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Waiman-Long/mm-memcg-Miscellaneous-cleanups/20211002-031125
        git checkout 321484dcb4f16ca7bd626adf390222913d188ecc
        # save the attached .config to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross O=build_dir ARCH=nios2 SHELL=/bin/bash

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   In file included from include/asm-generic/percpu.h:5,
                    from ./arch/nios2/include/generated/asm/percpu.h:1,
                    from include/linux/irqflags.h:17,
                    from include/asm-generic/cmpxchg.h:15,
                    from ./arch/nios2/include/generated/asm/cmpxchg.h:1,
                    from include/asm-generic/atomic.h:12,
                    from ./arch/nios2/include/generated/asm/atomic.h:1,
                    from include/linux/atomic.h:7,
                    from include/linux/page_counter.h:5,
                    from mm/memcontrol.c:28:
   mm/memcontrol.c: In function 'refill_stock':
>> mm/memcontrol.c:2225:27: error: 'struct mem_cgroup' has no member named 'kmem_state'
    2225 |         if (unlikely(memcg->kmem_state != KMEM_ONLINE)) {
         |                           ^~
   include/linux/compiler.h:78:45: note: in definition of macro 'unlikely'
      78 | # define unlikely(x)    __builtin_expect(!!(x), 0)
         |                                             ^


vim +2225 mm/memcontrol.c

  2212	
  2213	/*
  2214	 * Cache charges(val) to local per_cpu area.
  2215	 * This will be consumed by consume_stock() function, later.
  2216	 */
  2217	static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
  2218	{
  2219		struct memcg_stock_pcp *stock;
  2220		unsigned long flags;
  2221	
  2222		/*
  2223		 * An offlined memcg shouldn't be put into stock.
  2224		 */
> 2225		if (unlikely(memcg->kmem_state != KMEM_ONLINE)) {
  2226			cancel_charge(memcg, nr_pages);
  2227			return;
  2228		}
  2229	
  2230		local_irq_save(flags);
  2231	
  2232		stock = this_cpu_ptr(&memcg_stock);
  2233		if (stock->cached != memcg) { /* reset if necessary */
  2234			drain_stock(stock);
  2235			css_get(&memcg->css);
  2236			stock->cached = memcg;
  2237		}
  2238		stock->nr_pages += nr_pages;
  2239	
  2240		if (stock->nr_pages > MEMCG_CHARGE_BATCH)
  2241			drain_stock(stock);
  2242	
  2243		local_irq_restore(flags);
  2244	}
  2245	
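
Note: 'kmem_state' is only present in struct mem_cgroup when
CONFIG_MEMCG_KMEM is enabled, which this randconfig presumably leaves
unset, hence the error. A config-independent guard -- a sketch only, not
the posted patch -- could use the mem_cgroup_online() helper from
include/linux/memcontrol.h, which does not depend on CONFIG_MEMCG_KMEM:

	/*
	 * Sketch, not the posted patch: mem_cgroup_online() tests
	 * CSS_ONLINE on memcg->css and builds regardless of
	 * CONFIG_MEMCG_KMEM, avoiding the error above.
	 */
	if (unlikely(!mem_cgroup_online(memcg))) {
		cancel_charge(memcg, nr_pages);
		return;
	}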

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
kernel test robot Oct. 1, 2021, 11:06 p.m. UTC | #2
Hi Waiman,

I love your patch! Yet something to improve:

[auto build test ERROR on hnaz-mm/master]

url:    https://github.com/0day-ci/linux/commits/Waiman-Long/mm-memcg-Miscellaneous-cleanups/20211002-031125
base:   https://github.com/hnaz/linux-mm master
config: x86_64-randconfig-a016-20211001 (attached as .config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project 962e503cc8bc411f7523cc393acae8aae425b1c4)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/321484dcb4f16ca7bd626adf390222913d188ecc
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Waiman-Long/mm-memcg-Miscellaneous-cleanups/20211002-031125
        git checkout 321484dcb4f16ca7bd626adf390222913d188ecc
        # save the attached .config to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

>> mm/memcontrol.c:2225:22: error: no member named 'kmem_state' in 'struct mem_cgroup'
           if (unlikely(memcg->kmem_state != KMEM_ONLINE)) {
                        ~~~~~  ^
   include/linux/compiler.h:78:42: note: expanded from macro 'unlikely'
   # define unlikely(x)    __builtin_expect(!!(x), 0)
                                               ^
   1 error generated.


vim +2225 mm/memcontrol.c

  2212	
  2213	/*
  2214	 * Cache charges(val) to local per_cpu area.
  2215	 * This will be consumed by consume_stock() function, later.
  2216	 */
  2217	static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
  2218	{
  2219		struct memcg_stock_pcp *stock;
  2220		unsigned long flags;
  2221	
  2222		/*
  2223		 * An offlined memcg shouldn't be put into stock.
  2224		 */
> 2225		if (unlikely(memcg->kmem_state != KMEM_ONLINE)) {
  2226			cancel_charge(memcg, nr_pages);
  2227			return;
  2228		}
  2229	
  2230		local_irq_save(flags);
  2231	
  2232		stock = this_cpu_ptr(&memcg_stock);
  2233		if (stock->cached != memcg) { /* reset if necessary */
  2234			drain_stock(stock);
  2235			css_get(&memcg->css);
  2236			stock->cached = memcg;
  2237		}
  2238		stock->nr_pages += nr_pages;
  2239	
  2240		if (stock->nr_pages > MEMCG_CHARGE_BATCH)
  2241			drain_stock(stock);
  2242	
  2243		local_irq_restore(flags);
  2244	}
  2245	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Roman Gushchin Oct. 1, 2021, 11:51 p.m. UTC | #3
On Fri, Oct 01, 2021 at 03:09:36PM -0400, Waiman Long wrote:
> When freeing a page associated with an offlined memcg, refill_stock()
> will put it into local stock delaying its demise until another memcg
> comes in to take its place in the stock. To avoid that, we now check
> for offlined memcg and go directly in this case to the slowpath for
> the uncharge via the repurposed cancel_charge() function.

Hi Waiman!

I'm afraid it can make a cleanup of a dying cgroup slower: for every
released page we'll potentially traverse the whole cgroup tree and
decrease atomic page counters.

I'm not sure I understand what benefit we get from this change that
would justify the slowdown on the cleanup path.

Thanks!
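
For reference, the slowpath cost being weighed here is the hierarchical
walk in page_counter_uncharge(): one atomic update per ancestor for every
uncharge that bypasses the stock. Simplified from mm/page_counter.c of
that era (an illustrative excerpt, not part of this patch):

	/* One atomic_long_sub_return() per level of the cgroup hierarchy. */
	void page_counter_uncharge(struct page_counter *counter,
				   unsigned long nr_pages)
	{
		struct page_counter *c;

		for (c = counter; c; c = c->parent)
			page_counter_cancel(c, nr_pages);
	}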

> 
> Signed-off-by: Waiman Long <longman@redhat.com>
> ---
>  mm/memcontrol.c | 16 +++++++++++-----
>  1 file changed, 11 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 4b32896d87a2..4568363062c1 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2167,6 +2167,8 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>  	return ret;
>  }
>  
> +static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
> +
>  /*
>   * Returns stocks cached in percpu and reset cached information.
>   */
> @@ -2178,9 +2180,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
>  		return;
>  
>  	if (stock->nr_pages) {
> -		page_counter_uncharge(&old->memory, stock->nr_pages);
> -		if (do_memsw_account())
> -			page_counter_uncharge(&old->memsw, stock->nr_pages);
> +		cancel_charge(old, stock->nr_pages);
>  		stock->nr_pages = 0;
>  	}
>  
> @@ -2219,6 +2219,14 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>  	struct memcg_stock_pcp *stock;
>  	unsigned long flags;
>  
> +	/*
> +	 * An offlined memcg shouldn't be put into stock.
> +	 */
> +	if (unlikely(memcg->kmem_state != KMEM_ONLINE)) {
> +		cancel_charge(memcg, nr_pages);
> +		return;
> +	}
> +
>  	local_irq_save(flags);
>  
>  	stock = this_cpu_ptr(&memcg_stock);
> @@ -2732,7 +2740,6 @@ static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
>  	return try_charge_memcg(memcg, gfp_mask, nr_pages);
>  }
>  
> -#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
>  static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
>  {
>  	if (mem_cgroup_is_root(memcg))
> @@ -2742,7 +2749,6 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
>  	if (do_memsw_account())
>  		page_counter_uncharge(&memcg->memsw, nr_pages);
>  }
> -#endif
>  
>  static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
>  {
> -- 
> 2.18.1
>
Waiman Long Oct. 2, 2021, 1:54 a.m. UTC | #4
On 10/1/21 7:51 PM, Roman Gushchin wrote:
> On Fri, Oct 01, 2021 at 03:09:36PM -0400, Waiman Long wrote:
>> When freeing a page associated with an offlined memcg, refill_stock()
>> will put it into local stock delaying its demise until another memcg
>> comes in to take its place in the stock. To avoid that, we now check
>> for offlined memcg and go directly in this case to the slowpath for
>> the uncharge via the repurposed cancel_charge() function.
> Hi Waiman!
>
> I'm afraid it can make a cleanup of a dying cgroup slower: for every
> released page we'll potentially traverse the whole cgroup tree and
> decrease atomic page counters.
>
> I'm not sure I understand the benefits we get from this change which
> do justify the slowdown on the cleanup path.

I am debugging a problem where some dying memcgs somehow stay around for
a long time, leading to a gradual increase in memory consumption over
time. I see the per-cpu stock as one of the places where a reference to a
dying memcg may be present. Anyway, I agree that it may not help much. I
will drop it if you think it is not a good idea.
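
The reference in question is taken on the stock fast path: refill_stock()
pins the cached memcg with css_get(), and the matching css_put() only
happens when drain_stock() runs on that CPU, e.g. when a different memcg
refills the stock. From the unpatched code shown in the build reports
above:

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) {	/* reset if necessary */
		drain_stock(stock);	/* css_put() on the old memcg happens in here */
		css_get(&memcg->css);	/* a dying memcg stays pinned until then */
		stock->cached = memcg;
	}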

Cheers,
Longman
Waiman Long Jan. 31, 2022, 3:55 a.m. UTC | #5
On 10/1/21 19:51, Roman Gushchin wrote:
> On Fri, Oct 01, 2021 at 03:09:36PM -0400, Waiman Long wrote:
>> When freeing a page associated with an offlined memcg, refill_stock()
>> will put it into local stock delaying its demise until another memcg
>> comes in to take its place in the stock. To avoid that, we now check
>> for offlined memcg and go directly in this case to the slowpath for
>> the uncharge via the repurposed cancel_charge() function.
> Hi Waiman!
>
> I'm afraid it can make a cleanup of a dying cgroup slower: for every
> released page we'll potentially traverse the whole cgroup tree and
> decrease atomic page counters.
>
> I'm not sure I understand the benefits we get from this change which
> do justify the slowdown on the cleanup path.
>
> Thanks!

I was notified of a lockdep splat that this patch may help to prevent.

[18073.102101] ======================================================
[18073.102101] WARNING: possible circular locking dependency detected
[18073.102101] 5.14.0-42.el9.x86_64+debug #1 Not tainted
[18073.102101] ------------------------------------------------------
[18073.102101] bz1567074_bin/420270 is trying to acquire lock:
[18073.102101] ffffffff9bdfc478 (css_set_lock){..-.}-{2:2}, at: obj_cgroup_release+0x79/0x210
[18073.102101]
[18073.102101] but task is already holding lock:
[18073.102101] ffff88806ba4ef18 (&sighand->siglock){-...}-{2:2}, at: force_sig_info_to_task+0x6c/0x370
[18073.102101]
[18073.102101] which lock already depends on the new lock.
[18073.102101]
[18073.102101]
[18073.102101] the existing dependency chain (in reverse order) is:
[18073.102101]
[18073.102101] -> #1 (&sighand->siglock){-...}-{2:2}:
[18073.102101]        __lock_acquire+0xb72/0x1870
[18073.102101]        lock_acquire.part.0+0x117/0x340
[18073.102101]        _raw_spin_lock_irqsave+0x43/0x90
[18073.102101]        __lock_task_sighand+0xa0/0x210
[18073.102101]        cgroup_freeze_task+0x6f/0x150
[18073.102101]        cgroup_migrate_execute+0x25f/0xf90
[18073.102101]        cgroup_update_dfl_csses+0x417/0x4f0
[18073.102101]        cgroup_subtree_control_write+0x67b/0xa10
[18073.102101]        cgroup_file_write+0x1ef/0x6a0
[18073.102101]        kernfs_fop_write_iter+0x2c7/0x460
[18073.102101]        new_sync_write+0x36f/0x610
[18073.102101]        vfs_write+0x5c6/0x890
[18073.102101]        ksys_write+0xf9/0x1d0
[18073.102101]        do_syscall_64+0x3b/0x90
[18073.102101]        entry_SYSCALL_64_after_hwframe+0x44/0xae
[18073.102101]
[18073.102101] -> #0 (css_set_lock){..-.}-{2:2}:
[18073.102101]        check_prev_add+0x15e/0x20f0
[18073.102101]        validate_chain+0xac6/0xde0
[18073.102101]        __lock_acquire+0xb72/0x1870
[18073.102101]        lock_acquire.part.0+0x117/0x340
[18073.102101]        _raw_spin_lock_irqsave+0x43/0x90
[18073.102101]        obj_cgroup_release+0x79/0x210
[18073.102101]        percpu_ref_put_many.constprop.0+0x16b/0x1a0
[18073.102101]        drain_obj_stock+0x1a8/0x310
[18073.102101]        refill_obj_stock+0xa4/0x480
[18073.102101]        obj_cgroup_charge+0x104/0x240
[18073.102101]        kmem_cache_alloc+0x94/0x400
[18073.102101]        __sigqueue_alloc+0x1b9/0x460
[18073.102101]        __send_signal+0x4b2/0xf60
[18073.102101]        force_sig_info_to_task+0x226/0x370
[18073.102101]        force_sig_fault+0xb0/0xf0
[18073.102101]        noist_exc_debug+0xec/0x110
[18073.102101]        asm_exc_debug+0x2b/0x30
[18073.102101]
[18073.102101] other info that might help us debug this:
[18073.102101]
[18073.102101]  Possible unsafe locking scenario:
[18073.102101]
[18073.102101]        CPU0                    CPU1
[18073.102101]        ----                    ----
[18073.102101]   lock(&sighand->siglock);
[18073.102101]                                lock(css_set_lock);
[18073.102101]                                lock(&sighand->siglock);
[18073.102101]   lock(css_set_lock);
[18073.102101]
[18073.102101]  *** DEADLOCK ***
[18073.102101]
[18073.102101] 2 locks held by bz1567074_bin/420270:
[18073.102101]  #0: ffff88806ba4ef18 (&sighand->siglock){-...}-{2:2}, at: force_sig_info_to_task+0x6c/0x370
[18073.102101]  #1: ffffffff9bd0ea00 (rcu_read_lock){....}-{1:2}, at: percpu_ref_put_many.constprop.0+0x0/0x1a0
[18073.102101]
[18073.102101] stack backtrace:
[18073.102101] CPU: 0 PID: 420270 Comm: bz1567074_bin Kdump: loaded Not tainted 5.14.0-42.el9.x86_64+debug #1
[18073.102101] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
[18073.102101] Call Trace:
[18073.102101]  dump_stack_lvl+0x57/0x7d
[18073.102101]  check_noncircular+0x26a/0x310
[18073.102101]  ? pvclock_clocksource_read+0x2b8/0x520
[18073.102101]  ? print_circular_bug+0x1f0/0x1f0
[18073.102101]  ? alloc_chain_hlocks+0x1de/0x530
[18073.102101]  check_prev_add+0x15e/0x20f0
[18073.102101]  validate_chain+0xac6/0xde0
[18073.102101]  ? check_prev_add+0x20f0/0x20f0
[18073.102101]  __lock_acquire+0xb72/0x1870
[18073.102101]  ? __lock_acquire+0xb72/0x1870
[18073.102101]  lock_acquire.part.0+0x117/0x340
[18073.102101]  ? obj_cgroup_release+0x79/0x210
[18073.102101]  ? rcu_read_unlock+0x40/0x40
[18073.102101]  ? rcu_read_lock_sched_held+0x3f/0x70
[18073.102101]  ? lock_acquire+0x224/0x2d0
[18073.102101]  ? obj_cgroup_release+0x79/0x210
[18073.102101]  _raw_spin_lock_irqsave+0x43/0x90
[18073.102101]  ? obj_cgroup_release+0x79/0x210
[18073.102101]  obj_cgroup_release+0x79/0x210
[18073.102101]  percpu_ref_put_many.constprop.0+0x16b/0x1a0
[18073.102101]  drain_obj_stock+0x1a8/0x310
[18073.102101]  refill_obj_stock+0xa4/0x480
[18073.102101]  obj_cgroup_charge+0x104/0x240
[18073.102101]  ? __sigqueue_alloc+0x1b9/0x460
[18073.102101]  kmem_cache_alloc+0x94/0x400
[18073.102101]  ? __sigqueue_alloc+0x129/0x460
[18073.102101]  __sigqueue_alloc+0x1b9/0x460
[18073.102101]  __send_signal+0x4b2/0xf60
[18073.102101]  ? send_signal+0x9f/0x580
[18073.102101]  force_sig_info_to_task+0x226/0x370
[18073.102101]  force_sig_fault+0xb0/0xf0
[18073.102101]  ? force_sig_fault_to_task+0xe0/0xe0
[18073.102101]  ? asm_exc_debug+0x23/0x30
[18073.102101]  ? notify_die+0x88/0x100
[18073.102101]  ? asm_exc_debug+0x23/0x30
[18073.102101]  noist_exc_debug+0xec/0x110
[18073.102101]  asm_exc_debug+0x2b/0x30

The &sighand->siglock => css_set_lock locking sequence arises when a task
holding sighand->siglock calls kmem_cache_alloc(GFP_ATOMIC) and the
release of an obj_cgroup originally from an offlined memcg in the percpu
stock leads to a call of obj_cgroup_release(), which takes css_set_lock.
The chance of hitting that is very small, but it can still happen. So do
you think addressing this possible deadlock scenario is worth the
possibly slower release of an offlined memcg?
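
For context, the css_set_lock acquisition at the bottom of the chain is
the objcg list removal in obj_cgroup_release(); roughly, from
mm/memcontrol.c of that kernel (a trimmed, illustrative excerpt):

	static void obj_cgroup_release(struct percpu_ref *ref)
	{
		struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup,
							refcnt);
		unsigned long flags;
		...
		/* the lock taken while siglock is already held in the splat */
		spin_lock_irqsave(&css_set_lock, flags);
		list_del(&objcg->list);
		spin_unlock_irqrestore(&css_set_lock, flags);

		percpu_ref_exit(ref);
		kfree_rcu(objcg, rcu);
	}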

Cheers,
Longman

>> Signed-off-by: Waiman Long <longman@redhat.com>
>> ---
>>   mm/memcontrol.c | 16 +++++++++++-----
>>   1 file changed, 11 insertions(+), 5 deletions(-)
>>
>> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
>> index 4b32896d87a2..4568363062c1 100644
>> --- a/mm/memcontrol.c
>> +++ b/mm/memcontrol.c
>> @@ -2167,6 +2167,8 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>>   	return ret;
>>   }
>>   
>> +static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
>> +
>>   /*
>>    * Returns stocks cached in percpu and reset cached information.
>>    */
>> @@ -2178,9 +2180,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
>>   		return;
>>   
>>   	if (stock->nr_pages) {
>> -		page_counter_uncharge(&old->memory, stock->nr_pages);
>> -		if (do_memsw_account())
>> -			page_counter_uncharge(&old->memsw, stock->nr_pages);
>> +		cancel_charge(old, stock->nr_pages);
>>   		stock->nr_pages = 0;
>>   	}
>>   
>> @@ -2219,6 +2219,14 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>>   	struct memcg_stock_pcp *stock;
>>   	unsigned long flags;
>>   
>> +	/*
>> +	 * An offlined memcg shouldn't be put into stock.
>> +	 */
>> +	if (unlikely(memcg->kmem_state != KMEM_ONLINE)) {
>> +		cancel_charge(memcg, nr_pages);
>> +		return;
>> +	}
>> +
>>   	local_irq_save(flags);
>>   
>>   	stock = this_cpu_ptr(&memcg_stock);
>> @@ -2732,7 +2740,6 @@ static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
>>   	return try_charge_memcg(memcg, gfp_mask, nr_pages);
>>   }
>>   
>> -#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
>>   static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
>>   {
>>   	if (mem_cgroup_is_root(memcg))
>> @@ -2742,7 +2749,6 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
>>   	if (do_memsw_account())
>>   		page_counter_uncharge(&memcg->memsw, nr_pages);
>>   }
>> -#endif
>>   
>>   static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
>>   {
>> -- 
>> 2.18.1
>>
Roman Gushchin Jan. 31, 2022, 5:01 p.m. UTC | #6
On Sun, Jan 30, 2022 at 10:55:56PM -0500, Waiman Long wrote:
> On 10/1/21 19:51, Roman Gushchin wrote:
> > On Fri, Oct 01, 2021 at 03:09:36PM -0400, Waiman Long wrote:
> > > When freeing a page associated with an offlined memcg, refill_stock()
> > > will put it into local stock delaying its demise until another memcg
> > > comes in to take its place in the stock. To avoid that, we now check
> > > for offlined memcg and go directly in this case to the slowpath for
> > > the uncharge via the repurposed cancel_charge() function.
> > Hi Waiman!
> > 
> > I'm afraid it can make a cleanup of a dying cgroup slower: for every
> > released page we'll potentially traverse the whole cgroup tree and
> > decrease atomic page counters.
> > 
> > I'm not sure I understand the benefits we get from this change which
> > do justify the slowdown on the cleanup path.
> > 
> > Thanks!
> 
> I was notified of a lockdep splat that this patch may help to prevent.

Would you mind testing this patch:
https://www.spinics.net/lists/cgroups/msg31244.html ?

It should address this dependency.

Thanks!
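
The linked patch takes a different route than this series: rather than
refusing to stock pages of an offlined memcg, it moves the objcg list
synchronization off css_set_lock onto a dedicated spinlock, taking
css_set_lock out of the chain entirely. A rough sketch of that shape (an
assumption based on the linked post; see it for the actual change):

	/* Dedicated lock for objcg lists, replacing css_set_lock here. */
	static DEFINE_SPINLOCK(objcg_lock);

	static void obj_cgroup_release(struct percpu_ref *ref)
	{
		...
		spin_lock_irqsave(&objcg_lock, flags);	/* was css_set_lock */
		list_del(&objcg->list);
		spin_unlock_irqrestore(&objcg_lock, flags);
		...
	}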
Waiman Long Jan. 31, 2022, 5:09 p.m. UTC | #7
On 1/31/22 12:01, Roman Gushchin wrote:
> On Sun, Jan 30, 2022 at 10:55:56PM -0500, Waiman Long wrote:
>> On 10/1/21 19:51, Roman Gushchin wrote:
>>> On Fri, Oct 01, 2021 at 03:09:36PM -0400, Waiman Long wrote:
>>>> When freeing a page associated with an offlined memcg, refill_stock()
>>>> will put it into local stock delaying its demise until another memcg
>>>> comes in to take its place in the stock. To avoid that, we now check
>>>> for offlined memcg and go directly in this case to the slowpath for
>>>> the uncharge via the repurposed cancel_charge() function.
>>> Hi Waiman!
>>>
>>> I'm afraid it can make a cleanup of a dying cgroup slower: for every
>>> released page we'll potentially traverse the whole cgroup tree and
>>> decrease atomic page counters.
>>>
>>> I'm not sure I understand the benefits we get from this change which
>>> do justify the slowdown on the cleanup path.
>>>
>>> Thanks!
>> I was notified of a lockdep splat that this patch may help to prevent.
> Would you mind to test this patch:
> https://www.spinics.net/lists/cgroups/msg31244.html ?
>
> It should address this dependency.

Thanks for the pointer. I believe that your patch should be able to 
address this circular locking dependency.

Feel free to add my

Reviewed-by: Waiman Long <longman@redhat.com>

Cheers,
Longman
Waiman Long Jan. 31, 2022, 5:15 p.m. UTC | #8
On 1/31/22 12:09, Waiman Long wrote:
> On 1/31/22 12:01, Roman Gushchin wrote:
>> On Sun, Jan 30, 2022 at 10:55:56PM -0500, Waiman Long wrote:
>>> On 10/1/21 19:51, Roman Gushchin wrote:
>>>> On Fri, Oct 01, 2021 at 03:09:36PM -0400, Waiman Long wrote:
>>>>> When freeing a page associated with an offlined memcg, refill_stock()
>>>>> will put it into local stock delaying its demise until another memcg
>>>>> comes in to take its place in the stock. To avoid that, we now check
>>>>> for offlined memcg and go directly in this case to the slowpath for
>>>>> the uncharge via the repurposed cancel_charge() function.
>>>> Hi Waiman!
>>>>
>>>> I'm afraid it can make a cleanup of a dying cgroup slower: for every
>>>> released page we'll potentially traverse the whole cgroup tree and
>>>> decrease atomic page counters.
>>>>
>>>> I'm not sure I understand the benefits we get from this change which
>>>> do justify the slowdown on the cleanup path.
>>>>
>>>> Thanks!
>>> I was notified of a lockdep splat that this patch may help to prevent.
>> Would you mind to test this patch:
>> https://www.spinics.net/lists/cgroups/msg31244.html ?
>>
>> It should address this dependency.
>
> Thanks for the pointer. I believe that your patch should be able to 
> address this circular locking dependency.
>
> Feel free to add my
>
> Reviewed-by: Waiman Long <longman@redhat.com>

BTW, have you posted it to lkml? If not, would you mind doing so?

Thanks,
Longman
Roman Gushchin Jan. 31, 2022, 5:15 p.m. UTC | #9
On Mon, Jan 31, 2022 at 12:09:09PM -0500, Waiman Long wrote:
> On 1/31/22 12:01, Roman Gushchin wrote:
> > On Sun, Jan 30, 2022 at 10:55:56PM -0500, Waiman Long wrote:
> > > On 10/1/21 19:51, Roman Gushchin wrote:
> > > > On Fri, Oct 01, 2021 at 03:09:36PM -0400, Waiman Long wrote:
> > > > > When freeing a page associated with an offlined memcg, refill_stock()
> > > > > will put it into local stock delaying its demise until another memcg
> > > > > comes in to take its place in the stock. To avoid that, we now check
> > > > > for offlined memcg and go directly in this case to the slowpath for
> > > > > the uncharge via the repurposed cancel_charge() function.
> > > > Hi Waiman!
> > > > 
> > > > I'm afraid it can make a cleanup of a dying cgroup slower: for every
> > > > released page we'll potentially traverse the whole cgroup tree and
> > > > decrease atomic page counters.
> > > > 
> > > > I'm not sure I understand the benefits we get from this change which
> > > > do justify the slowdown on the cleanup path.
> > > > 
> > > > Thanks!
> > > I was notified of a lockdep splat that this patch may help to prevent.
> > Would you mind to test this patch:
> > https://www.spinics.net/lists/cgroups/msg31244.html  ?
> > 
> > It should address this dependency.
> 
> Thanks for the pointer. I believe that your patch should be able to address
> this circular locking dependency.
> 
> Feel free to add my
> 
> Reviewed-by: Waiman Long <longman@redhat.com>

Thank you!
Roman Gushchin Jan. 31, 2022, 5:19 p.m. UTC | #10
On Mon, Jan 31, 2022 at 12:15:19PM -0500, Waiman Long wrote:
> On 1/31/22 12:09, Waiman Long wrote:
> > On 1/31/22 12:01, Roman Gushchin wrote:
> > > On Sun, Jan 30, 2022 at 10:55:56PM -0500, Waiman Long wrote:
> > > > On 10/1/21 19:51, Roman Gushchin wrote:
> > > > > On Fri, Oct 01, 2021 at 03:09:36PM -0400, Waiman Long wrote:
> > > > > > When freeing a page associated with an offlined memcg, refill_stock()
> > > > > > will put it into local stock delaying its demise until another memcg
> > > > > > comes in to take its place in the stock. To avoid that, we now check
> > > > > > for offlined memcg and go directly in this case to the slowpath for
> > > > > > the uncharge via the repurposed cancel_charge() function.
> > > > > Hi Waiman!
> > > > > 
> > > > > I'm afraid it can make a cleanup of a dying cgroup slower: for every
> > > > > released page we'll potentially traverse the whole cgroup tree and
> > > > > decrease atomic page counters.
> > > > > 
> > > > > I'm not sure I understand the benefits we get from this change which
> > > > > do justify the slowdown on the cleanup path.
> > > > > 
> > > > > Thanks!
> > > > I was notified of a lockdep splat that this patch may help to prevent.
> > > Would you mind to test this patch:
> > > https://www.spinics.net/lists/cgroups/msg31244.html  ?
> > > 
> > > It should address this dependency.
> > 
> > Thanks for the pointer. I believe that your patch should be able to
> > address this circular locking dependency.
> > 
> > Feel free to add my
> > 
> > Reviewed-by: Waiman Long <longman@redhat.com>
> 
> BTW, have you posted it to lkml? If not, would you mind doing so?

Not yet.

I was waiting for Alexander to confirm that it resolves the originally
reported issue. I just pinged him; I'll wait until tomorrow and then post
the patch in any case.

Thanks!
Waiman Long Jan. 31, 2022, 5:25 p.m. UTC | #11
On 1/31/22 12:19, Roman Gushchin wrote:
> On Mon, Jan 31, 2022 at 12:15:19PM -0500, Waiman Long wrote:
>> On 1/31/22 12:09, Waiman Long wrote:
>>> On 1/31/22 12:01, Roman Gushchin wrote:
>>>> On Sun, Jan 30, 2022 at 10:55:56PM -0500, Waiman Long wrote:
>>>>> On 10/1/21 19:51, Roman Gushchin wrote:
>>>>>> On Fri, Oct 01, 2021 at 03:09:36PM -0400, Waiman Long wrote:
>>>>>>> When freeing a page associated with an offlined memcg, refill_stock()
>>>>>>> will put it into local stock delaying its demise until another memcg
>>>>>>> comes in to take its place in the stock. To avoid that, we now check
>>>>>>> for offlined memcg and go directly in this case to the slowpath for
>>>>>>> the uncharge via the repurposed cancel_charge() function.
>>>>>> Hi Waiman!
>>>>>>
>>>>>> I'm afraid it can make a cleanup of a dying cgroup slower: for every
>>>>>> released page we'll potentially traverse the whole cgroup tree and
>>>>>> decrease atomic page counters.
>>>>>>
>>>>>> I'm not sure I understand the benefits we get from this change which
>>>>>> do justify the slowdown on the cleanup path.
>>>>>>
>>>>>> Thanks!
>>>>> I was notified of a lockdep splat that this patch may help to prevent.
>>>> Would you mind to test this patch:
>>>> https://www.spinics.net/lists/cgroups/msg31244.html  ?
>>>>
>>>> It should address this dependency.
>>> Thanks for the pointer. I believe that your patch should be able to
>>> address this circular locking dependency.
>>>
>>> Feel free to add my
>>>
>>> Reviewed-by: Waiman Long <longman@redhat.com>
>> BTW, have you posted it to lkml? If not, would you mind doing so?
> Not yet.
>
> I was waiting for Alexander to confirm that it resolves the originally reported
> issue. I just pinged him, will wait for tomorrow and post the patch in any case.
>
> Thanks!

I see. This is not a problem that is easily reproducible. You need to 
hit the right timing for the lockdep splat to appear.

Regards,
Longman
Shakeel Butt Jan. 31, 2022, 6 p.m. UTC | #12
On Mon, Jan 31, 2022 at 9:25 AM Waiman Long <longman@redhat.com> wrote:
>
> On 1/31/22 12:19, Roman Gushchin wrote:
> > On Mon, Jan 31, 2022 at 12:15:19PM -0500, Waiman Long wrote:
> >> On 1/31/22 12:09, Waiman Long wrote:
> >>> On 1/31/22 12:01, Roman Gushchin wrote:
> >>>> On Sun, Jan 30, 2022 at 10:55:56PM -0500, Waiman Long wrote:
> >>>>> On 10/1/21 19:51, Roman Gushchin wrote:
> >>>>>> On Fri, Oct 01, 2021 at 03:09:36PM -0400, Waiman Long wrote:
> >>>>>>> When freeing a page associated with an offlined memcg, refill_stock()
> >>>>>>> will put it into local stock delaying its demise until another memcg
> >>>>>>> comes in to take its place in the stock. To avoid that, we now check
> >>>>>>> for offlined memcg and go directly in this case to the slowpath for
> >>>>>>> the uncharge via the repurposed cancel_charge() function.
> >>>>>> Hi Waiman!
> >>>>>>
> >>>>>> I'm afraid it can make a cleanup of a dying cgroup slower: for every
> >>>>>> released page we'll potentially traverse the whole cgroup tree and
> >>>>>> decrease atomic page counters.
> >>>>>>
> >>>>>> I'm not sure I understand the benefits we get from this change which
> >>>>>> do justify the slowdown on the cleanup path.
> >>>>>>
> >>>>>> Thanks!
> >>>>> I was notified of a lockdep splat that this patch may help to prevent.
> >>>> Would you mind to test this patch:
> >>>> https://www.spinics.net/lists/cgroups/msg31244.html  ?
> >>>>
> >>>> It should address this dependency.
> >>> Thanks for the pointer. I believe that your patch should be able to
> >>> address this circular locking dependency.
> >>>
> >>> Feel free to add my
> >>>
> >>> Reviewed-by: Waiman Long <longman@redhat.com>
> >> BTW, have you posted it to lkml? If not, would you mind doing so?
> > Not yet.
> >
> > I was waiting for Alexander to confirm that it resolves the originally reported
> > issue. I just pinged him, will wait for tomorrow and post the patch in any case.
> >
> > Thanks!
>
> I see. This is not a problem that is easily reproducible. You need to
> hit the right timing for the lockdep splat to appear.

I agree here. The patch on its own has merit, as it reduces the
dependency on an unrelated lock.

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4b32896d87a2..4568363062c1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2167,6 +2167,8 @@  static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	return ret;
 }
 
+static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
+
 /*
  * Returns stocks cached in percpu and reset cached information.
  */
@@ -2178,9 +2180,7 @@  static void drain_stock(struct memcg_stock_pcp *stock)
 		return;
 
 	if (stock->nr_pages) {
-		page_counter_uncharge(&old->memory, stock->nr_pages);
-		if (do_memsw_account())
-			page_counter_uncharge(&old->memsw, stock->nr_pages);
+		cancel_charge(old, stock->nr_pages);
 		stock->nr_pages = 0;
 	}
 
@@ -2219,6 +2219,14 @@  static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
+	/*
+	 * An offlined memcg shouldn't be put into stock.
+	 */
+	if (unlikely(memcg->kmem_state != KMEM_ONLINE)) {
+		cancel_charge(memcg, nr_pages);
+		return;
+	}
+
 	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
@@ -2732,7 +2740,6 @@  static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	return try_charge_memcg(memcg, gfp_mask, nr_pages);
 }
 
-#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	if (mem_cgroup_is_root(memcg))
@@ -2742,7 +2749,6 @@  static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (do_memsw_account())
 		page_counter_uncharge(&memcg->memsw, nr_pages);
 }
-#endif
 
 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 {