[v2,13/46] mm/memcg: Convert commit_charge() to take a folio

Message ID 20210622121551.3398730-14-willy@infradead.org (mailing list archive)
State New, archived
Series Folio-enabling the page cache

Commit Message

Matthew Wilcox June 22, 2021, 12:15 p.m. UTC
The memcg_data is only set on the head page, so enforce that by
typing it as a folio.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memcontrol.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)
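
To see what the stricter typing buys, here is a simplified, self-contained
model of the idea (the struct layouts, the main() harness and the plain
unsigned long "memcg" stand-in below are illustrative only, not the real
kernel definitions): a struct page pointer may refer to any page of a
compound allocation, but page_folio() always resolves to the head page,
so a commit_charge() that takes a folio can only ever write memcg_data
on the head.

	/* Simplified stand-ins; not the kernel's struct page/folio. */
	#include <stdio.h>

	struct page {
		unsigned long compound_head;	/* bit 0 set: tail page, rest points at head */
		unsigned long memcg_data;
	};

	/* A folio is a page that is guaranteed not to be a tail page. */
	struct folio {
		struct page page;
	};

	/* Model of page_folio(): always resolve to the head page. */
	static struct folio *page_folio(struct page *page)
	{
		if (page->compound_head & 1)
			page = (struct page *)(page->compound_head - 1);
		return (struct folio *)page;
	}

	/* Taking a folio means memcg_data can only land on the head page. */
	static void commit_charge(struct folio *folio, unsigned long memcg)
	{
		folio->page.memcg_data = memcg;
	}

	int main(void)
	{
		static struct page pages[4];	/* pages[0] is the head */
		int i;

		for (i = 1; i < 4; i++)
			pages[i].compound_head = (unsigned long)&pages[0] | 1;

		/* Even starting from a tail page, the charge hits the head. */
		commit_charge(page_folio(&pages[2]), 0xdead);
		printf("head memcg_data: %lx\n", pages[0].memcg_data);
		return 0;
	}

In the real kernel the same guarantee comes from page_folio() resolving
through compound_head(), so callers such as __mem_cgroup_charge() and
mem_cgroup_migrate() in the patch below convert once at the top and pass
the folio down.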

Comments

Christoph Hellwig June 23, 2021, 8:13 a.m. UTC | #1
On Tue, Jun 22, 2021 at 01:15:18PM +0100, Matthew Wilcox (Oracle) wrote:
> The memcg_data is only set on the head page, so enforce that by
> typing it as a folio.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>

Michal Hocko June 25, 2021, 8:11 a.m. UTC | #2
On Tue 22-06-21 13:15:18, Matthew Wilcox wrote:
> The memcg_data is only set on the head page, so enforce that by
> typing it as a folio.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Michal Hocko <mhocko@suse.com>
Thanks!

> ---
>  mm/memcontrol.c | 27 +++++++++++++--------------
>  1 file changed, 13 insertions(+), 14 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 7423cb11eb88..7939e4e9118d 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2700,9 +2700,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
>  }
>  #endif
>  
> -static void commit_charge(struct page *page, struct mem_cgroup *memcg)
> +static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
>  {
> -	VM_BUG_ON_PAGE(page_memcg(page), page);
> +	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
>  	/*
>  	 * Any of the following ensures page's memcg stability:
>  	 *
> @@ -2711,7 +2711,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg)
>  	 * - lock_page_memcg()
>  	 * - exclusive reference
>  	 */
> -	page->memcg_data = (unsigned long)memcg;
> +	folio->memcg_data = (unsigned long)memcg;
>  }
>  
>  static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
> @@ -6506,7 +6506,8 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
>  static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
>  			       gfp_t gfp)
>  {
> -	unsigned int nr_pages = thp_nr_pages(page);
> +	struct folio *folio = page_folio(page);
> +	unsigned int nr_pages = folio_nr_pages(folio);
>  	int ret;
>  
>  	ret = try_charge(memcg, gfp, nr_pages);
> @@ -6514,7 +6515,7 @@ static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
>  		goto out;
>  
>  	css_get(&memcg->css);
> -	commit_charge(page, memcg);
> +	commit_charge(folio, memcg);
>  
>  	local_irq_disable();
>  	mem_cgroup_charge_statistics(memcg, nr_pages);
> @@ -6771,21 +6772,21 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
>   */
>  void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
>  {
> +	struct folio *newfolio = page_folio(newpage);
>  	struct mem_cgroup *memcg;
> -	unsigned int nr_pages;
> +	unsigned int nr_pages = folio_nr_pages(newfolio);
>  	unsigned long flags;
>  
>  	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
> -	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
> -	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
> -	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
> -		       newpage);
> +	VM_BUG_ON_FOLIO(!folio_locked(newfolio), newfolio);
> +	VM_BUG_ON_FOLIO(PageAnon(oldpage) != folio_anon(newfolio), newfolio);
> +	VM_BUG_ON_FOLIO(compound_nr(oldpage) != nr_pages, newfolio);
>  
>  	if (mem_cgroup_disabled())
>  		return;
>  
>  	/* Page cache replacement: new page already charged? */
> -	if (page_memcg(newpage))
> +	if (folio_memcg(newfolio))
>  		return;
>  
>  	memcg = page_memcg(oldpage);
> @@ -6794,14 +6795,12 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
>  		return;
>  
>  	/* Force-charge the new page. The old one will be freed soon */
> -	nr_pages = thp_nr_pages(newpage);
> -
>  	page_counter_charge(&memcg->memory, nr_pages);
>  	if (do_memsw_account())
>  		page_counter_charge(&memcg->memsw, nr_pages);
>  
>  	css_get(&memcg->css);
> -	commit_charge(newpage, memcg);
> +	commit_charge(newfolio, memcg);
>  
>  	local_irq_save(flags);
>  	mem_cgroup_charge_statistics(memcg, nr_pages);
> -- 
> 2.30.2

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7423cb11eb88..7939e4e9118d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2700,9 +2700,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 }
 #endif
 
-static void commit_charge(struct page *page, struct mem_cgroup *memcg)
+static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 {
-	VM_BUG_ON_PAGE(page_memcg(page), page);
+	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
 	/*
 	 * Any of the following ensures page's memcg stability:
 	 *
@@ -2711,7 +2711,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg)
 	 * - lock_page_memcg()
 	 * - exclusive reference
 	 */
-	page->memcg_data = (unsigned long)memcg;
+	folio->memcg_data = (unsigned long)memcg;
 }
 
 static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
@@ -6506,7 +6506,8 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
 static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
 			       gfp_t gfp)
 {
-	unsigned int nr_pages = thp_nr_pages(page);
+	struct folio *folio = page_folio(page);
+	unsigned int nr_pages = folio_nr_pages(folio);
 	int ret;
 
 	ret = try_charge(memcg, gfp, nr_pages);
@@ -6514,7 +6515,7 @@ static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
 		goto out;
 
 	css_get(&memcg->css);
-	commit_charge(page, memcg);
+	commit_charge(folio, memcg);
 
 	local_irq_disable();
 	mem_cgroup_charge_statistics(memcg, nr_pages);
@@ -6771,21 +6772,21 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
  */
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 {
+	struct folio *newfolio = page_folio(newpage);
 	struct mem_cgroup *memcg;
-	unsigned int nr_pages;
+	unsigned int nr_pages = folio_nr_pages(newfolio);
 	unsigned long flags;
 
 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
-	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
-	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
-	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
-		       newpage);
+	VM_BUG_ON_FOLIO(!folio_locked(newfolio), newfolio);
+	VM_BUG_ON_FOLIO(PageAnon(oldpage) != folio_anon(newfolio), newfolio);
+	VM_BUG_ON_FOLIO(compound_nr(oldpage) != nr_pages, newfolio);
 
 	if (mem_cgroup_disabled())
 		return;
 
 	/* Page cache replacement: new page already charged? */
-	if (page_memcg(newpage))
+	if (folio_memcg(newfolio))
 		return;
 
 	memcg = page_memcg(oldpage);
@@ -6794,14 +6795,12 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 		return;
 
 	/* Force-charge the new page. The old one will be freed soon */
-	nr_pages = thp_nr_pages(newpage);
-
 	page_counter_charge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
 		page_counter_charge(&memcg->memsw, nr_pages);
 
 	css_get(&memcg->css);
-	commit_charge(newpage, memcg);
+	commit_charge(newfolio, memcg);
 
 	local_irq_save(flags);
 	mem_cgroup_charge_statistics(memcg, nr_pages);