--- a/fs/inode.c
+++ b/fs/inode.c
@@ -637,6 +637,7 @@ void evict_inodes(struct super_block *sb)
dispose_list(&dispose);
}
+EXPORT_SYMBOL_GPL(evict_inodes);
/**
* invalidate_inodes - attempt to free all inodes on a superblock
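The hunk above exports evict_inodes() so that a modular filesystem (XFS in this series) can call it directly. For readers without fs/inode.c at hand, the helper walks the superblock's inode list and disposes of every inode that no longer holds a reference. The sketch below is a heavily condensed illustration under a hypothetical name, evict_inodes_sketch; the real function additionally takes s_inode_list_lock and the per-inode i_lock, skips inodes marked I_NEW, I_FREEING or I_WILL_FREE, pulls each victim off the LRU before queueing it, and periodically drops the lock to avoid stalls.

/*
 * Illustrative sketch only (hypothetical name); see evict_inodes() in
 * fs/inode.c for the real locking, state checks and LRU handling.
 */
void evict_inodes_sketch(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		/* Anything still referenced is left alone. */
		if (atomic_read(&inode->i_count))
			continue;

		/* Claim the inode and queue it for eviction. */
		inode->i_state |= I_FREEING;
		list_add(&inode->i_lru, &dispose);
	}

	/* Evict each queued inode: truncate the page cache, call ->evict_inode(). */
	dispose_list(&dispose);
}
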
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -136,7 +136,6 @@ extern bool atime_needs_update_rcu(const struct path *, struct inode *);
extern void inode_io_list_del(struct inode *inode);
extern long get_nr_dirty_inodes(void);
-extern void evict_inodes(struct super_block *);
extern int invalidate_inodes(struct super_block *, bool);
/*
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -761,12 +761,24 @@ xfs_log_mount_finish(
* inodes. Turn it off immediately after recovery finishes
* so that we don't leak the quota inodes if subsequent mount
* activities fail.
+ *
+ * We let all inodes involved in redo item processing end up on
+ * the LRU instead of being evicted immediately so that if we do
+ * something to an unlinked inode, the irele won't cause
+ * premature truncation and freeing of the inode, which results
+ * in log recovery failure. We have to evict the unreferenced
+ * lru inodes after clearing MS_ACTIVE because we don't
+ * otherwise clean up the lru if there's a subsequent failure in
+ * xfs_mountfs, which leads to us leaking the inodes if nothing
+ * else (e.g. quotacheck) references the inodes before the
+ * mount failure occurs.
*/
mp->m_super->s_flags |= MS_ACTIVE;
error = xlog_recover_finish(mp->m_log);
if (!error)
xfs_log_work_queue(mp);
mp->m_super->s_flags &= ~MS_ACTIVE;
+ evict_inodes(mp->m_super);
if (readonly)
mp->m_flags |= XFS_MOUNT_RDONLY;
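The LRU behaviour the comment above relies on comes from the final iput(): while MS_ACTIVE is set on the superblock, dropping the last reference parks an otherwise-unreferenced inode on the LRU rather than evicting it, and merely clearing MS_ACTIVE later does nothing to inodes already parked there. That is why the failure path needs the explicit evict_inodes() call added above. Below is a condensed sketch of the relevant branch, modelled on iput_final() in fs/inode.c; the name iput_final_sketch is hypothetical, and locking plus the ->drop_inode superblock callback are omitted.

/* Condensed sketch (hypothetical name) of the branch of iput_final() at issue. */
static void iput_final_sketch(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int drop = generic_drop_inode(inode);	/* unhashed or nlink == 0? */

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		/* Filesystem is "live": keep the unreferenced inode cached on the LRU. */
		inode_add_lru(inode);
		return;
	}

	/* Otherwise evict immediately: write back, truncate and free the inode. */
	inode->i_state |= I_FREEING;
	evict(inode);
}
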
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2760,6 +2760,7 @@ static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
+extern void evict_inodes(struct super_block *sb);
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);