@@ -99,6 +99,7 @@ static ssize_t node_read_meminfo(struct device *dev,
#endif
n += sprintf(buf + n,
"Node %d Dirty: %8lu kB\n"
+ "Node %d MetadataDirty: %8lu kB\n"
"Node %d Writeback: %8lu kB\n"
"Node %d FilePages: %8lu kB\n"
"Node %d Mapped: %8lu kB\n"
@@ -119,6 +120,7 @@ static ssize_t node_read_meminfo(struct device *dev,
#endif
,
nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
+ nid, K(node_page_state(pgdat, NR_METADATA_DIRTY)),
nid, K(node_page_state(pgdat, NR_WRITEBACK)),
nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
@@ -1822,6 +1822,7 @@ static unsigned long get_nr_dirty_pages(void)
{
return global_node_page_state(NR_FILE_DIRTY) +
global_node_page_state(NR_UNSTABLE_NFS) +
+ global_node_page_state(NR_METADATA_DIRTY) +
get_nr_dirty_inodes();
}
@@ -98,6 +98,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "SwapFree: ", i.freeswap);
show_val_kb(m, "Dirty: ",
global_node_page_state(NR_FILE_DIRTY));
+ seq_printf(m, "MetadataDirty: %8lu kB\n",
+ global_node_page_state(NR_METADATA_DIRTY));
show_val_kb(m, "Writeback: ",
global_node_page_state(NR_WRITEBACK));
show_val_kb(m, "AnonPages: ",
@@ -36,6 +36,7 @@ typedef int (congested_fn)(void *, int);
enum wb_stat_item {
WB_RECLAIMABLE,
WB_WRITEBACK,
+ WB_METADATA_DIRTY,
WB_DIRTIED,
WB_WRITTEN,
NR_WB_STAT_ITEMS
@@ -32,6 +32,7 @@ struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;
+struct backing_dev_info;
void init_mm_internals(void);
@@ -1428,6 +1429,12 @@ int redirty_page_for_writepage(struct writeback_control *wbc,
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
struct bdi_writeback *wb);
+void account_metadata_dirtied(struct page *page, struct backing_dev_info *bdi);
+void account_metadata_cleaned(struct page *page, struct backing_dev_info *bdi);
+void account_metadata_writeback(struct page *page,
+ struct backing_dev_info *bdi);
+void account_metadata_end_writeback(struct page *page,
+ struct backing_dev_info *bdi);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void cancel_dirty_page(struct page *page);
@@ -179,6 +179,7 @@ enum node_stat_item {
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
+ NR_METADATA_DIRTY, /* Metadata dirty pages */
NR_VM_NODE_STAT_ITEMS
};
@@ -402,6 +402,7 @@ TRACE_EVENT(global_dirty_state,
TP_STRUCT__entry(
__field(unsigned long, nr_dirty)
+ __field(unsigned long, nr_metadata_dirty)
__field(unsigned long, nr_writeback)
__field(unsigned long, nr_unstable)
__field(unsigned long, background_thresh)
@@ -413,6 +414,7 @@ TRACE_EVENT(global_dirty_state,
TP_fast_assign(
__entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY);
+ __entry->nr_metadata_dirty = global_node_page_state(NR_METADATA_DIRTY);
__entry->nr_writeback = global_node_page_state(NR_WRITEBACK);
__entry->nr_unstable = global_node_page_state(NR_UNSTABLE_NFS);
__entry->nr_dirtied = global_node_page_state(NR_DIRTIED);
@@ -424,7 +426,7 @@ TRACE_EVENT(global_dirty_state,
TP_printk("dirty=%lu writeback=%lu unstable=%lu "
"bg_thresh=%lu thresh=%lu limit=%lu "
- "dirtied=%lu written=%lu",
+ "dirtied=%lu written=%lu metadata_dirty=%lu",
__entry->nr_dirty,
__entry->nr_writeback,
__entry->nr_unstable,
@@ -432,7 +434,8 @@ TRACE_EVENT(global_dirty_state,
__entry->dirty_thresh,
__entry->dirty_limit,
__entry->nr_dirtied,
- __entry->nr_written
+ __entry->nr_written,
+ __entry->nr_metadata_dirty
)
);
@@ -76,6 +76,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
"BackgroundThresh: %10lu kB\n"
"BdiDirtied: %10lu kB\n"
"BdiWritten: %10lu kB\n"
+ "BdiMetadataDirty: %10lu kB\n"
"BdiWriteBandwidth: %10lu kBps\n"
"b_dirty: %10lu\n"
"b_io: %10lu\n"
@@ -90,6 +91,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
K(background_thresh),
(unsigned long) K(wb_stat(wb, WB_DIRTIED)),
(unsigned long) K(wb_stat(wb, WB_WRITTEN)),
+ (unsigned long) K(wb_stat(wb, WB_METADATA_DIRTY)),
(unsigned long) K(wb->write_bandwidth),
nr_dirty,
nr_io,
@@ -507,6 +507,7 @@ bool node_dirty_ok(struct pglist_data *pgdat)
nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
nr_pages += node_page_state(pgdat, NR_UNSTABLE_NFS);
nr_pages += node_page_state(pgdat, NR_WRITEBACK);
+ nr_pages += node_page_state(pgdat, NR_METADATA_DIRTY);
return nr_pages <= limit;
}
@@ -1595,7 +1596,8 @@ static void balance_dirty_pages(struct bdi_writeback *wb,
* been flushed to permanent storage.
*/
nr_reclaimable = global_node_page_state(NR_FILE_DIRTY) +
- global_node_page_state(NR_UNSTABLE_NFS);
+ global_node_page_state(NR_UNSTABLE_NFS) +
+ global_node_page_state(NR_METADATA_DIRTY);
gdtc->avail = global_dirtyable_memory();
gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
@@ -1936,7 +1938,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
*/
gdtc->avail = global_dirtyable_memory();
gdtc->dirty = global_node_page_state(NR_FILE_DIRTY) +
- global_node_page_state(NR_UNSTABLE_NFS);
+ global_node_page_state(NR_UNSTABLE_NFS) +
+ global_node_page_state(NR_METADATA_DIRTY);
domain_dirty_limits(gdtc);
if (gdtc->dirty > gdtc->bg_thresh)
@@ -1980,7 +1983,8 @@ void laptop_mode_timer_fn(unsigned long data)
{
struct request_queue *q = (struct request_queue *)data;
int nr_pages = global_node_page_state(NR_FILE_DIRTY) +
- global_node_page_state(NR_UNSTABLE_NFS);
+ global_node_page_state(NR_UNSTABLE_NFS) +
+ global_node_page_state(NR_METADATA_DIRTY);
struct bdi_writeback *wb;
/*
@@ -2444,6 +2448,96 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
EXPORT_SYMBOL(account_page_dirtied);
/*
+ * account_metadata_dirtied
+ * @page - the page being dirtied
+ * @bdi - the bdi that owns this page
+ *
+ * Do the dirty page accounting for metadata pages that aren't backed by an
+ * address_space.
+ */
+void account_metadata_dirtied(struct page *page, struct backing_dev_info *bdi)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __inc_node_page_state(page, NR_METADATA_DIRTY);
+ __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
+ __inc_node_page_state(page, NR_DIRTIED);
+ inc_wb_stat(&bdi->wb, WB_RECLAIMABLE);
+ inc_wb_stat(&bdi->wb, WB_DIRTIED);
+ inc_wb_stat(&bdi->wb, WB_METADATA_DIRTY);
+ current->nr_dirtied++;
+ task_io_account_write(PAGE_SIZE);
+ this_cpu_inc(bdp_ratelimits);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(account_metadata_dirtied);
+
+/*
+ * account_metadata_cleaned
+ * @page - the page being cleaned
+ * @bdi - the bdi that owns this page
+ *
+ * Called on a metadata page that is no longer dirty.
+ */
+void account_metadata_cleaned(struct page *page, struct backing_dev_info *bdi)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __dec_node_page_state(page, NR_METADATA_DIRTY);
+ __dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
+ dec_wb_stat(&bdi->wb, WB_RECLAIMABLE);
+ dec_wb_stat(&bdi->wb, WB_METADATA_DIRTY);
+ task_io_account_cancelled_write(PAGE_SIZE);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(account_metadata_cleaned);
+
+/*
+ * account_metadata_writeback
+ * @page - the page being marked as writeback
+ * @bdi - the bdi that owns this page
+ *
+ * Called on a metadata page that has been marked writeback.
+ */
+void account_metadata_writeback(struct page *page,
+ struct backing_dev_info *bdi)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ inc_wb_stat(&bdi->wb, WB_WRITEBACK);
+ __inc_node_page_state(page, NR_WRITEBACK);
+ __dec_node_page_state(page, NR_METADATA_DIRTY);
+ dec_wb_stat(&bdi->wb, WB_METADATA_DIRTY);
+ dec_wb_stat(&bdi->wb, WB_RECLAIMABLE);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(account_metadata_writeback);
+
+/*
+ * account_metadata_end_writeback
+ * @page - the page we are ending writeback on
+ * @bdi - the bdi that owns this page
+ *
+ * Called on a metadata page that has completed writeback.
+ */
+void account_metadata_end_writeback(struct page *page,
+ struct backing_dev_info *bdi)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ dec_wb_stat(&bdi->wb, WB_WRITEBACK);
+ __dec_node_page_state(page, NR_WRITEBACK);
+ __dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
+ __inc_node_page_state(page, NR_WRITTEN);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(account_metadata_end_writeback);
+
+/*
* Helper function for deaccounting dirty page without writeback.
*
* Caller must hold lock_page_memcg().
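
For orientation, a minimal caller sketch (not part of the patch): a filesystem that keeps metadata pages outside an address_space would pair these helpers with its own dirty/writeback state handling. The fs_* wrappers, their flag handling, and the submission path are hypothetical; only the account_metadata_*() helpers and struct backing_dev_info come from the hunks above.

#include <linux/mm.h>
#include <linux/backing-dev.h>

/* Hypothetical wrappers; only the account_metadata_*() calls are real. */
static void fs_mark_metadata_dirty(struct page *page,
				   struct backing_dev_info *bdi)
{
	/* fs-specific: set the page's dirty state exactly once, then account */
	account_metadata_dirtied(page, bdi);
}

static void fs_start_metadata_writeback(struct page *page,
					struct backing_dev_info *bdi)
{
	/* fs-specific: clear dirty, mark writeback, account, then submit I/O */
	account_metadata_writeback(page, bdi);
	/* ...submit the page for I/O... */
}

static void fs_end_metadata_writeback(struct page *page,
				      struct backing_dev_info *bdi)
{
	/* I/O completion: clear the writeback state, then account */
	account_metadata_end_writeback(page, bdi);
}

static void fs_drop_dirty_metadata(struct page *page,
				   struct backing_dev_info *bdi)
{
	/* dropping a dirty page without writing it back (e.g. invalidation) */
	account_metadata_cleaned(page, bdi);
}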
@@ -4694,8 +4694,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
- " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
- " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
+ " unevictable:%lu dirty:%lu metadata_dirty:%lu writeback:%lu\n"
+ " unstable:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
" free:%lu free_pcp:%lu free_cma:%lu\n",
global_node_page_state(NR_ACTIVE_ANON),
@@ -4706,6 +4706,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
global_node_page_state(NR_ISOLATED_FILE),
global_node_page_state(NR_UNEVICTABLE),
global_node_page_state(NR_FILE_DIRTY),
+ global_node_page_state(NR_METADATA_DIRTY),
global_node_page_state(NR_WRITEBACK),
global_node_page_state(NR_UNSTABLE_NFS),
global_node_page_state(NR_SLAB_RECLAIMABLE),
@@ -4732,6 +4733,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
" isolated(file):%lukB"
" mapped:%lukB"
" dirty:%lukB"
+ " metadata_dirty:%lukB"
" writeback:%lukB"
" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -4753,6 +4755,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
K(node_page_state(pgdat, NR_ISOLATED_FILE)),
K(node_page_state(pgdat, NR_FILE_MAPPED)),
K(node_page_state(pgdat, NR_FILE_DIRTY)),
+ K(node_page_state(pgdat, NR_METADATA_DIRTY)),
K(node_page_state(pgdat, NR_WRITEBACK)),
K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE