
[v2,29/46] mm/writeback: Add folio_account_cleaned()

Message ID 20210622121551.3398730-30-willy@infradead.org (mailing list archive)
State New, archived
Series Folio-enabling the page cache

Commit Message

Matthew Wilcox June 22, 2021, 12:15 p.m. UTC
Get the statistics right; compound pages were being accounted as a
single page.  Also move the declaration to pagemap.h since this is
part of the page cache.  Add a wrapper for account_page_cleaned().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/mm.h      |  3 ---
 include/linux/pagemap.h |  7 +++++++
 mm/page-writeback.c     | 11 ++++++-----
 3 files changed, 13 insertions(+), 8 deletions(-)
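
A minimal usage sketch, not part of the patch, assuming mapping and wb
have been set up as in the existing caller (e.g. cancel_dirty_page()):

	/* Sketch: a page-based call site is unchanged -- the wrapper
	 * resolves the folio with page_folio(), so the whole compound
	 * page is accounted in one call.  lock_page_memcg() is still
	 * required, as the kerneldoc states. */
	lock_page_memcg(page);
	if (TestClearPageDirty(page))
		account_page_cleaned(page, mapping, wb);
	unlock_page_memcg(page);

	/* A folio-aware caller can pass the folio directly: */
	folio_account_cleaned(folio, mapping, wb);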

Comments

Christoph Hellwig June 23, 2021, 9:36 a.m. UTC | #1
On Tue, Jun 22, 2021 at 01:15:34PM +0100, Matthew Wilcox (Oracle) wrote:
> Get the statistics right; compound pages were being accounted as a
> single page.

Maybe reword this a little to document the existing function that got
it wrong, and why it did not matter before.

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

Matthew Wilcox June 24, 2021, 8:06 p.m. UTC | #2
On Wed, Jun 23, 2021 at 11:36:14AM +0200, Christoph Hellwig wrote:
> On Tue, Jun 22, 2021 at 01:15:34PM +0100, Matthew Wilcox (Oracle) wrote:
> > Get the statistics right; compound pages were being accounted as a
> > single page.
> 
> Maybe reword this a little to document the existing function that got
> it wrong, and why it did not matter before.

Get the statistics right; compound pages were being accounted as a
single page.  This didn't matter before now as no filesystem which
supported compound pages did writeback.  Also move the declaration
to pagemap.h since this is part of the page cache.  Add a wrapper for
account_page_cleaned().
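
Concretely (a worked example, not from the thread): with 4KB base pages,
a 2MB compound page has folio_nr_pages() == 512 and folio_size() == 2MB,
so cleaning it now adjusts every counter by the folio's true size:

	/*                          old (per page)    new (per folio)
	 * NR_FILE_DIRTY                 -1               -512
	 * NR_ZONE_WRITE_PENDING         -1               -512
	 * WB_RECLAIMABLE                -1               -512
	 * cancelled write bytes        4KB                2MB
	 */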

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3c8dfcb56fa5..2ccf294afc3e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -39,7 +39,6 @@  struct anon_vma_chain;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct bdi_writeback;
 struct pt_regs;
 
 extern int sysctl_page_lock_unfairness;
@@ -1986,8 +1985,6 @@  extern void do_invalidatepage(struct page *page, unsigned int offset,
 
 int redirty_page_for_writepage(struct writeback_control *wbc,
 				struct page *page);
-void account_page_cleaned(struct page *page, struct address_space *mapping,
-			  struct bdi_writeback *wb);
 bool folio_mark_dirty(struct folio *folio);
 bool set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e6a9756293aa..084fca551e60 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -778,6 +778,13 @@  static inline void __set_page_dirty(struct page *page,
 {
 	__folio_mark_dirty((struct folio *)page, mapping, warn);
 }
+void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
+			  struct bdi_writeback *wb);
+static inline void account_page_cleaned(struct page *page,
+		struct address_space *mapping, struct bdi_writeback *wb)
+{
+	return folio_account_cleaned(page_folio(page), mapping, wb);
+}
 
 int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 64b989eff9f5..cf48ac5b85f6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2449,14 +2449,15 @@  static void folio_account_dirtied(struct folio *folio,
  *
  * Caller must hold lock_page_memcg().
  */
-void account_page_cleaned(struct page *page, struct address_space *mapping,
+void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
 			  struct bdi_writeback *wb)
 {
 	if (mapping_can_writeback(mapping)) {
-		dec_lruvec_page_state(page, NR_FILE_DIRTY);
-		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-		dec_wb_stat(wb, WB_RECLAIMABLE);
-		task_io_account_cancelled_write(PAGE_SIZE);
+		long nr = folio_nr_pages(folio);
+		lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
+		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+		wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
+		task_io_account_cancelled_write(folio_size(folio));
 	}
 }