@@ -198,6 +198,34 @@ void inode_wb_list_del(struct inode *inode)
}
/*
+ * Mark an inode as under writeback on the given bdi.
+ */
+void bdi_mark_inode_writeback(struct backing_dev_info *bdi, struct inode *inode)
+{
+ WARN_ON_ONCE(bdi != inode_to_bdi(inode));
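+ /*
+ * The unlocked list_empty() check is an optimisation: inodes are only
+ * removed from the list lazily, so most calls find the inode already
+ * listed and can avoid taking list_lock. The check is racy, hence it
+ * is repeated under the lock before the list_add_tail().
+ */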
+ if (list_empty(&inode->i_wb_list)) {
+ spin_lock(&bdi->wb.list_lock);
+ if (list_empty(&inode->i_wb_list))
+ list_add_tail(&inode->i_wb_list, &bdi->wb.b_writeback);
+ spin_unlock(&bdi->wb.list_lock);
+ }
+}
+
+/*
+ * Clear the under-writeback state of an inode on the given bdi.
+ */
+static void bdi_clear_inode_writeback(struct backing_dev_info *bdi,
+ struct inode *inode)
+{
+ WARN_ON_ONCE(bdi != inode_to_bdi(inode));
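+ /*
+ * The unlocked list_empty() check is only an optimisation:
+ * list_del_init() is a no-op on an already-detached entry, so no
+ * re-check is needed once the lock is held.
+ */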
+ if (!list_empty(&inode->i_wb_list)) {
+ spin_lock(&bdi->wb.list_lock);
+ list_del_init(&inode->i_wb_list);
+ spin_unlock(&bdi->wb.list_lock);
+ }
+}
+
+/*
* Redirty an inode: set its when-it-was dirtied timestamp and move it to the
* furthest end of its superblock's dirty-inode list.
*
@@ -371,13 +399,23 @@ static void __inode_wait_for_writeback(struct inode *inode)
}
/*
- * Wait for writeback on an inode to complete. Caller must have inode pinned.
+ * Wait for writeback on an inode to complete during eviction. Caller must have
+ * inode pinned.
*/
void inode_wait_for_writeback(struct inode *inode)
{
+ BUG_ON(!(inode->i_state & I_FREEING));
+
spin_lock(&inode->i_lock);
__inode_wait_for_writeback(inode);
spin_unlock(&inode->i_lock);
+
+ /*
+ * bd_inodes will already have had their bdev freed, so don't bother
+ * calling bdi_clear_inode_writeback() for them.
+ */
+ if (!sb_is_blkdev_sb(inode->i_sb))
+ bdi_clear_inode_writeback(inode_to_bdi(inode), inode);
}
/*
@@ -1299,7 +1337,9 @@ EXPORT_SYMBOL(__mark_inode_dirty);
*/
static void wait_sb_inodes(struct super_block *sb)
{
- struct inode *inode, *old_inode = NULL;
+ struct backing_dev_info *bdi = sb->s_bdi;
+ struct inode *old_inode = NULL;
+ LIST_HEAD(sync_list);
/*
* We need to be protected against the filesystem going from
@@ -1307,28 +1347,59 @@ static void wait_sb_inodes(struct super_block *sb)
*/
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- mutex_lock(&sb->s_sync_lock);
- spin_lock(&sb->s_inode_list_lock);
-
/*
- * Data integrity sync. Must wait for all pages under writeback,
- * because there may have been pages dirtied before our sync
- * call, but which had writeout started before we write it out.
- * In which case, the inode may not be on the dirty list, but
- * we still have to wait for that writeout.
+ * Data integrity sync. Must wait for all pages under writeback, because
+ * there may have been pages dirtied before our sync call, but which had
+ * writeout started before we write it out. In which case, the inode
+ * may not be on the dirty list, but we still have to wait for that
+ * writeout.
+ *
+ * To avoid waiting on inodes put under IO after we have started here,
+ * splice the b_writeback list onto a temporary list head and walk
+ * that. Inodes that start writeback after this point go onto the (now
+ * empty) primary list, so we won't wait for them. This is safe because
+ * we are serialised by s_sync_lock, so we'll finish walking the whole
+ * spliced list before the next sync operation repeats the
+ * splice-and-walk process.
+ *
+ * Inodes that have pages under writeback after we've finished the wait
+ * may or may not be on the primary list. They had pages put under IO
+ * after we started our wait, so we need to make sure the next sync or
+ * IO completion treats them correctly. Move them back to the primary
+ * list and continue the walk.
+ *
+ * Inodes that are clean after we have waited for them don't belong on
+ * any list, so simply remove them from the sync list and move onwards.
*/
- list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ mutex_lock(&sb->s_sync_lock);
+ spin_lock(&bdi->wb.list_lock);
+ list_splice_init(&bdi->wb.b_writeback, &sync_list);
+
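+ /*
+ * Re-fetch the first entry on every pass round the loop: list_lock is
+ * dropped below (cond_resched_lock(), the writeback wait itself), and
+ * other inodes can be removed from sync_list in the meantime, e.g. by
+ * bdi_clear_inode_writeback() during eviction.
+ */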
+ while (!list_empty(&sync_list)) {
+ struct inode *inode = list_first_entry(&sync_list, struct inode,
+ i_wb_list);
struct address_space *mapping = inode->i_mapping;
+ /*
+ * We are lazy on IO completion and don't remove inodes from the
+ * list when they are clean. Detect that immediately and skip
+ * inodes we don't have to wait on.
+ */
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
+ list_del_init(&inode->i_wb_list);
+ cond_resched_lock(&bdi->wb.list_lock);
+ continue;
+ }
+
spin_lock(&inode->i_lock);
- if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
- (mapping->nrpages == 0)) {
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
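+ /*
+ * We can't pin an inode that is being freed, so put it back
+ * on the primary list; evict() will wait for its writeback
+ * and remove it via inode_wait_for_writeback().
+ */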
+ list_move(&inode->i_wb_list, &bdi->wb.b_writeback);
spin_unlock(&inode->i_lock);
+ cond_resched_lock(&bdi->wb.list_lock);
continue;
}
__iget(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(&sb->s_inode_list_lock);
+ spin_unlock(&bdi->wb.list_lock);
/*
* We hold a reference to 'inode' so it couldn't have been
@@ -1345,9 +1416,21 @@ static void wait_sb_inodes(struct super_block *sb)
cond_resched();
- spin_lock(&sb->s_inode_list_lock);
+ /*
+ * The inode has been written back now, so check whether we
+ * still have pages under IO and move it back to the primary
+ * list if necessary.
+ */
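+ /*
+ * mapping->tree_lock nests outside wb.list_lock here, matching
+ * __test_set_page_writeback(), which adds the inode to the
+ * writeback list while holding tree_lock. With tree_lock held
+ * the writeback tag cannot change under us, so the tag check
+ * and the list manipulation below are atomic with respect to
+ * IO submission and completion.
+ */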
+ spin_lock_irq(&mapping->tree_lock);
+ spin_lock(&bdi->wb.list_lock);
+ if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
+ WARN_ON(list_empty(&inode->i_wb_list));
+ list_move(&inode->i_wb_list, &bdi->wb.b_writeback);
+ } else {
+ list_del_init(&inode->i_wb_list);
+ }
+ spin_unlock_irq(&mapping->tree_lock);
}
- spin_unlock(&sb->s_inode_list_lock);
+ spin_unlock(&bdi->wb.list_lock);
iput(old_inode);
mutex_unlock(&sb->s_sync_lock);
}
@@ -356,6 +356,7 @@ void inode_init_once(struct inode *inode)
INIT_HLIST_NODE(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_devices);
INIT_LIST_HEAD(&inode->i_io_list);
+ INIT_LIST_HEAD(&inode->i_wb_list);
INIT_LIST_HEAD(&inode->i_lru);
address_space_init_once(&inode->i_data);
i_size_ordered_init(inode);
@@ -56,6 +56,7 @@ struct bdi_writeback {
struct list_head b_io; /* parked for writeback */
struct list_head b_more_io; /* parked for more writeback */
struct list_head b_dirty_time; /* time stamps are dirty */
+ struct list_head b_writeback; /* inodes under writeback */
spinlock_t list_lock; /* protects the b_* lists */
};
@@ -124,6 +125,8 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi);
void bdi_writeback_workfn(struct work_struct *work);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
+void bdi_mark_inode_writeback(struct backing_dev_info *bdi,
+ struct inode *inode);
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
@@ -610,6 +610,7 @@ struct inode {
struct list_head i_io_list; /* backing dev IO list */
struct list_head i_lru; /* inode LRU list */
struct list_head i_sb_list;
+ struct list_head i_wb_list; /* backing dev writeback list */
union {
struct hlist_head i_dentry;
struct rcu_head i_rcu;
@@ -386,6 +386,7 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
INIT_LIST_HEAD(&wb->b_io);
INIT_LIST_HEAD(&wb->b_more_io);
INIT_LIST_HEAD(&wb->b_dirty_time);
+ INIT_LIST_HEAD(&wb->b_writeback);
spin_lock_init(&wb->list_lock);
INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
}
@@ -2359,11 +2359,25 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
spin_lock_irqsave(&mapping->tree_lock, flags);
ret = TestSetPageWriteback(page);
if (!ret) {
+ bool on_wblist;
+
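+ /*
+ * Sample the tag before setting it below: the inode only
+ * needs to be added to the writeback list when its first
+ * page goes under writeback.
+ */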
+ on_wblist = mapping_tagged(mapping,
+ PAGECACHE_TAG_WRITEBACK);
+
radix_tree_tag_set(&mapping->page_tree,
page_index(page),
PAGECACHE_TAG_WRITEBACK);
if (bdi_cap_account_writeback(bdi))
__inc_bdi_stat(bdi, BDI_WRITEBACK);
+
+ /*
+ * We can come through here when swapping anonymous
+ * pages, so we don't necessarily have an inode to
+ * track for sync. Inodes are removed lazily, so there
+ * is no equivalent call in test_clear_page_writeback().
+ */
+ if (!on_wblist && mapping->host)
+ bdi_mark_inode_writeback(bdi, mapping->host);
}
if (!PageDirty(page))
radix_tree_tag_clear(&mapping->page_tree,