
[v2,11/46] mm/memcg: Remove 'page' parameter to mem_cgroup_charge_statistics()

Message ID: 20210622121551.3398730-12-willy@infradead.org (mailing list archive)
State: New, archived
Series: Folio-enabling the page cache

Commit Message

Matthew Wilcox June 22, 2021, 12:15 p.m. UTC
The last use of 'page' was removed by commit 468c398233da ("mm:
memcontrol: switch to native NR_ANON_THPS counter"), so we can now remove
the parameter from the function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memcontrol.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
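
After this change the function body uses only the memcg and the page count; nothing in it needs the struct page. A rough sketch of how mem_cgroup_charge_statistics() reads with the parameter removed follows. The signature and the first comment line are taken from the hunk below; the __count_memcg_events()/PGPGIN/PGPGOUT calls and the nr_page_events per-CPU counter are recalled from the surrounding mm/memcontrol.c of this period and are not part of this diff, so treat the body as an approximation rather than the exact upstream code:

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

Callers correspondingly pass just the memcg and nr_pages, as in the hunks below.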

Comments

Christoph Hellwig June 23, 2021, 8:09 a.m. UTC | #1
On Tue, Jun 22, 2021 at 01:15:16PM +0100, Matthew Wilcox (Oracle) wrote:
> The last use of 'page' was removed by commit 468c398233da ("mm:
> memcontrol: switch to native NR_ANON_THPS counter"), so we can now remove
> the parameter from the function.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>
Michal Hocko June 25, 2021, 7:58 a.m. UTC | #2
On Tue 22-06-21 13:15:16, Matthew Wilcox wrote:
> The last use of 'page' was removed by commit 468c398233da ("mm:
> memcontrol: switch to native NR_ANON_THPS counter"), so we can now remove
> the parameter from the function.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Michal Hocko <mhocko@suse.com>
Thanks!

> ---
>  mm/memcontrol.c | 11 +++++------
>  1 file changed, 5 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 64ada9e650a5..1204c6a0c671 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -814,7 +814,6 @@ static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
>  }
>  
>  static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
> -					 struct page *page,
>  					 int nr_pages)
>  {
>  	/* pagein of a big page is an event. So, ignore page size */
> @@ -5504,9 +5503,9 @@ static int mem_cgroup_move_account(struct page *page,
>  	ret = 0;
>  
>  	local_irq_disable();
> -	mem_cgroup_charge_statistics(to, page, nr_pages);
> +	mem_cgroup_charge_statistics(to, nr_pages);
>  	memcg_check_events(to, page);
> -	mem_cgroup_charge_statistics(from, page, -nr_pages);
> +	mem_cgroup_charge_statistics(from, -nr_pages);
>  	memcg_check_events(from, page);
>  	local_irq_enable();
>  out_unlock:
> @@ -6527,7 +6526,7 @@ static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
>  	commit_charge(page, memcg);
>  
>  	local_irq_disable();
> -	mem_cgroup_charge_statistics(memcg, page, nr_pages);
> +	mem_cgroup_charge_statistics(memcg, nr_pages);
>  	memcg_check_events(memcg, page);
>  	local_irq_enable();
>  out:
> @@ -6814,7 +6813,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
>  	commit_charge(newpage, memcg);
>  
>  	local_irq_save(flags);
> -	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
> +	mem_cgroup_charge_statistics(memcg, nr_pages);
>  	memcg_check_events(memcg, newpage);
>  	local_irq_restore(flags);
>  }
> @@ -7044,7 +7043,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
>  	 * only synchronisation we have for updating the per-CPU variables.
>  	 */
>  	VM_BUG_ON(!irqs_disabled());
> -	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
> +	mem_cgroup_charge_statistics(memcg, -nr_entries);
>  	memcg_check_events(memcg, page);
>  
>  	css_put(&memcg->css);
> -- 
> 2.30.2

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 64ada9e650a5..1204c6a0c671 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -814,7 +814,6 @@ static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
-					 struct page *page,
 					 int nr_pages)
 {
 	/* pagein of a big page is an event. So, ignore page size */
@@ -5504,9 +5503,9 @@ static int mem_cgroup_move_account(struct page *page,
 	ret = 0;
 
 	local_irq_disable();
-	mem_cgroup_charge_statistics(to, page, nr_pages);
+	mem_cgroup_charge_statistics(to, nr_pages);
 	memcg_check_events(to, page);
-	mem_cgroup_charge_statistics(from, page, -nr_pages);
+	mem_cgroup_charge_statistics(from, -nr_pages);
 	memcg_check_events(from, page);
 	local_irq_enable();
 out_unlock:
@@ -6527,7 +6526,7 @@ static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
 	commit_charge(page, memcg);
 
 	local_irq_disable();
-	mem_cgroup_charge_statistics(memcg, page, nr_pages);
+	mem_cgroup_charge_statistics(memcg, nr_pages);
 	memcg_check_events(memcg, page);
 	local_irq_enable();
 out:
@@ -6814,7 +6813,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 	commit_charge(newpage, memcg);
 
 	local_irq_save(flags);
-	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
+	mem_cgroup_charge_statistics(memcg, nr_pages);
 	memcg_check_events(memcg, newpage);
 	local_irq_restore(flags);
 }
@@ -7044,7 +7043,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	 * only synchronisation we have for updating the per-CPU variables.
 	 */
 	VM_BUG_ON(!irqs_disabled());
-	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
+	mem_cgroup_charge_statistics(memcg, -nr_entries);
 	memcg_check_events(memcg, page);
 
 	css_put(&memcg->css);