
[next,-v1,4/5] memcg: factor out the __refill_obj_stock function

Message ID 20241206013512.2883617-5-chenridong@huaweicloud.com (mailing list archive)
State New
Series Some cleanup for memcg

Commit Message

Chen Ridong Dec. 6, 2024, 1:35 a.m. UTC
From: Chen Ridong <chenridong@huawei.com>

Factor out the '__refill_obj_stock' function to make the code more
cohesive.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
---
 mm/memcontrol.c | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)

Comments

Shakeel Butt Dec. 17, 2024, 6:19 p.m. UTC | #1
On Fri, Dec 06, 2024 at 01:35:11AM +0000, Chen Ridong wrote:
> From: Chen Ridong <chenridong@huawei.com>
> 
> Factor out the '__refill_obj_stock' function to make the code more
> cohesive.
> 
> Signed-off-by: Chen Ridong <chenridong@huawei.com>
> ---
>  mm/memcontrol.c | 31 ++++++++++++++++++-------------
>  1 file changed, 18 insertions(+), 13 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index f977e0be1c04..0c9331d7b606 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2697,6 +2697,21 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
>  	obj_cgroup_put(objcg);
>  }
>  
> +/* If the cached_objcg was refilled, return true; otherwise, return false */
> +static bool __refill_obj_stock(struct memcg_stock_pcp *stock,
> +		struct obj_cgroup *objcg, struct obj_cgroup **old_objcg)
> +{
> +	if (READ_ONCE(stock->cached_objcg) != objcg) {

Keep the above check in the calling functions and make this a void
function. Also I think we need a better name.

> +		*old_objcg = drain_obj_stock(stock);
> +		obj_cgroup_get(objcg);
> +		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
> +				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
> +		WRITE_ONCE(stock->cached_objcg, objcg);
> +		return true;
> +	}
> +	return false;
> +}
> +
>  static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
>  		     enum node_stat_item idx, int nr)
>  {
> @@ -2713,12 +2728,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
>  	 * accumulating over a page of vmstat data or when pgdat or idx
>  	 * changes.
>  	 */
> -	if (READ_ONCE(stock->cached_objcg) != objcg) {
> -		old = drain_obj_stock(stock);
> -		obj_cgroup_get(objcg);
> -		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
> -				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
> -		WRITE_ONCE(stock->cached_objcg, objcg);
> +	if (__refill_obj_stock(stock, objcg, &old)) {
>  		stock->cached_pgdat = pgdat;
>  	} else if (stock->cached_pgdat != pgdat) {
>  		/* Flush the existing cached vmstat data */
> @@ -2871,14 +2881,9 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
>  	local_lock_irqsave(&memcg_stock.stock_lock, flags);
>  
>  	stock = this_cpu_ptr(&memcg_stock);
> -	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
> -		old = drain_obj_stock(stock);
> -		obj_cgroup_get(objcg);
> -		WRITE_ONCE(stock->cached_objcg, objcg);
> -		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
> -				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
> +	if (__refill_obj_stock(stock, objcg, &old))
>  		allow_uncharge = true;	/* Allow uncharge when objcg changes */
> -	}
> +
>  	stock->nr_bytes += nr_bytes;
>  
>  	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
> -- 
> 2.34.1
>
Chen Ridong Dec. 19, 2024, 1:54 a.m. UTC | #2
On 2024/12/18 2:19, Shakeel Butt wrote:
> On Fri, Dec 06, 2024 at 01:35:11AM +0000, Chen Ridong wrote:
>> From: Chen Ridong <chenridong@huawei.com>
>>
>> Factor out the '__refill_obj_stock' function to make the code more
>> cohesive.
>>
>> Signed-off-by: Chen Ridong <chenridong@huawei.com>
>> ---
>>  mm/memcontrol.c | 31 ++++++++++++++++++-------------
>>  1 file changed, 18 insertions(+), 13 deletions(-)
>>
>> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
>> index f977e0be1c04..0c9331d7b606 100644
>> --- a/mm/memcontrol.c
>> +++ b/mm/memcontrol.c
>> @@ -2697,6 +2697,21 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
>>  	obj_cgroup_put(objcg);
>>  }
>>  
>> +/* If the cached_objcg was refilled, return true; otherwise, return false */
>> +static bool __refill_obj_stock(struct memcg_stock_pcp *stock,
>> +		struct obj_cgroup *objcg, struct obj_cgroup **old_objcg)
>> +{
>> +	if (READ_ONCE(stock->cached_objcg) != objcg) {
> 
> Keep the above check in the calling functions and make this a void
> function. Also I think we need a better name.
> 

Thank you for your review.

How about keeping the check in the calling functions and renaming it like this:

/* Replace the stock objcg with objcg, return the old objcg */
static struct obj_cgroup *replace_stock_objcg(struct memcg_stock_pcp *stock,
		struct obj_cgroup *objcg)

Best regards,
Ridong

>> +		*old_objcg = drain_obj_stock(stock);
>> +		obj_cgroup_get(objcg);
>> +		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
>> +				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
>> +		WRITE_ONCE(stock->cached_objcg, objcg);
>> +		return true;
>> +	}
>> +	return false;
>> +}
>> +
>>  static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
>>  		     enum node_stat_item idx, int nr)
>>  {
>> @@ -2713,12 +2728,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
>>  	 * accumulating over a page of vmstat data or when pgdat or idx
>>  	 * changes.
>>  	 */
>> -	if (READ_ONCE(stock->cached_objcg) != objcg) {
>> -		old = drain_obj_stock(stock);
>> -		obj_cgroup_get(objcg);
>> -		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
>> -				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
>> -		WRITE_ONCE(stock->cached_objcg, objcg);
>> +	if (__refill_obj_stock(stock, objcg, &old)) {
>>  		stock->cached_pgdat = pgdat;
>>  	} else if (stock->cached_pgdat != pgdat) {
>>  		/* Flush the existing cached vmstat data */
>> @@ -2871,14 +2881,9 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
>>  	local_lock_irqsave(&memcg_stock.stock_lock, flags);
>>  
>>  	stock = this_cpu_ptr(&memcg_stock);
>> -	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
>> -		old = drain_obj_stock(stock);
>> -		obj_cgroup_get(objcg);
>> -		WRITE_ONCE(stock->cached_objcg, objcg);
>> -		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
>> -				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
>> +	if (__refill_obj_stock(stock, objcg, &old))
>>  		allow_uncharge = true;	/* Allow uncharge when objcg changes */
>> -	}
>> +
>>  	stock->nr_bytes += nr_bytes;
>>  
>>  	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
>> -- 
>> 2.34.1
>>
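For readers following the thread, here is a minimal sketch of how the helper and
one call site could look if the cached_objcg check stays in the callers and the
helper is renamed to replace_stock_objcg, as proposed above. This is a
hypothetical illustration based on the discussion, not the posted patch; the
final name and shape may differ.

/* Replace the cached objcg with objcg and return the old one (sketch) */
static struct obj_cgroup *replace_stock_objcg(struct memcg_stock_pcp *stock,
		struct obj_cgroup *objcg)
{
	struct obj_cgroup *old = drain_obj_stock(stock);

	obj_cgroup_get(objcg);
	/* Fold any pre-charged bytes from the objcg into the stock */
	stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
			? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
	WRITE_ONCE(stock->cached_objcg, objcg);
	return old;
}

/* Call site in refill_obj_stock(), with the check kept in the caller */
	stock = this_cpu_ptr(&memcg_stock);
	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
		old = replace_stock_objcg(stock, objcg);
		allow_uncharge = true;	/* Allow uncharge when objcg changes */
	}
	stock->nr_bytes += nr_bytes;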

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f977e0be1c04..0c9331d7b606 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2697,6 +2697,21 @@  void __memcg_kmem_uncharge_page(struct page *page, int order)
 	obj_cgroup_put(objcg);
 }
 
+/* If the cached_objcg was refilled, return true; otherwise, return false */
+static bool __refill_obj_stock(struct memcg_stock_pcp *stock,
+		struct obj_cgroup *objcg, struct obj_cgroup **old_objcg)
+{
+	if (READ_ONCE(stock->cached_objcg) != objcg) {
+		*old_objcg = drain_obj_stock(stock);
+		obj_cgroup_get(objcg);
+		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
+				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
+		WRITE_ONCE(stock->cached_objcg, objcg);
+		return true;
+	}
+	return false;
+}
+
 static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 		     enum node_stat_item idx, int nr)
 {
@@ -2713,12 +2728,7 @@  static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 	 * accumulating over a page of vmstat data or when pgdat or idx
 	 * changes.
 	 */
-	if (READ_ONCE(stock->cached_objcg) != objcg) {
-		old = drain_obj_stock(stock);
-		obj_cgroup_get(objcg);
-		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
-				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
-		WRITE_ONCE(stock->cached_objcg, objcg);
+	if (__refill_obj_stock(stock, objcg, &old)) {
 		stock->cached_pgdat = pgdat;
 	} else if (stock->cached_pgdat != pgdat) {
 		/* Flush the existing cached vmstat data */
@@ -2871,14 +2881,9 @@  static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
-	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
-		old = drain_obj_stock(stock);
-		obj_cgroup_get(objcg);
-		WRITE_ONCE(stock->cached_objcg, objcg);
-		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
-				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
+	if (__refill_obj_stock(stock, objcg, &old))
 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
-	}
+
 	stock->nr_bytes += nr_bytes;
 
 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {