@@ -120,6 +120,7 @@ static bool inode_io_list_move_locked(struct inode *inode,
struct list_head *head)
{
assert_spin_locked(&wb->list_lock);
+ assert_spin_locked(&inode->i_lock);
list_move(&inode->i_io_list, head);
@@ -293,8 +294,8 @@ static void inode_cgwb_move_to_attached(struct inode *inode,
* @inode: inode of interest with i_lock held
*
* Returns @inode's wb with its list_lock held. @inode->i_lock must be
- * held on entry and is released on return. The returned wb is guaranteed
- * to stay @inode's associated wb until its list_lock is released.
+ * held on entry and remains held on return. The returned wb is guaranteed
+ * to stay @inode's associated wb until its list_lock is released.
*/
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
@@ -317,6 +318,7 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
/* i_wb may have changed inbetween, can't use inode_to_wb() */
if (likely(wb == inode->i_wb)) {
wb_put(wb); /* @inode already has ref */
+ spin_lock(&inode->i_lock);
return wb;
}
@@ -1141,7 +1143,6 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
{
struct bdi_writeback *wb = inode_to_wb(inode);
- spin_unlock(&inode->i_lock);
spin_lock(&wb->list_lock);
return wb;
}
@@ -1152,6 +1153,7 @@ static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
struct bdi_writeback *wb = inode_to_wb(inode);
spin_lock(&wb->list_lock);
+ spin_lock(&inode->i_lock);
return wb;
}
@@ -1233,7 +1235,6 @@ void inode_io_list_del(struct inode *inode)
struct bdi_writeback *wb;
wb = inode_to_wb_and_lock_list(inode);
- spin_lock(&inode->i_lock);
inode->i_state &= ~I_SYNC_QUEUED;
list_del_init(&inode->i_io_list);
@@ -1704,7 +1705,6 @@ static int writeback_single_inode(struct inode *inode,
wbc_detach_inode(wbc);
wb = inode_to_wb_and_lock_list(inode);
- spin_lock(&inode->i_lock);
/*
* If the inode is now fully clean, then it can be safely removed from
* its writeback list (if any). Otherwise the flusher threads are
@@ -1875,7 +1875,6 @@ static long writeback_sb_inodes(struct super_block *sb,
* have been switched to another wb in the meantime.
*/
tmp_wb = inode_to_wb_and_lock_list(inode);
- spin_lock(&inode->i_lock);
if (!(inode->i_state & I_DIRTY_ALL))
wrote++;
requeue_inode(inode, tmp_wb, &wbc);
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -27,7 +27,7 @@
* Inode locking rules:
*
* inode->i_lock protects:
- * inode->i_state, inode->i_hash, __iget()
+ * inode->i_state, inode->i_hash, __iget(), inode->i_io_list
* Inode LRU list locks protect:
* inode->i_sb->s_inode_lru, inode->i_lru
* inode->i_sb->s_inode_list_lock protects:
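
For context, the locking protocol the hunks above converge on can be summarized
in a short sketch. This is an illustrative reconstruction, not part of the
patch: example_requeue() is a hypothetical caller written as if it sat next to
the static helpers in fs/fs-writeback.c, and only the rule it demonstrates
(wb->list_lock nests outside inode->i_lock, and both are held while touching
inode->i_io_list) is taken from the diff.

/* Hypothetical caller, assuming the post-patch helper semantics. */
static void example_requeue(struct inode *inode, struct list_head *head)
{
	struct bdi_writeback *wb;

	/*
	 * With this patch, inode_to_wb_and_lock_list() returns with both
	 * wb->list_lock and inode->i_lock held, so the caller no longer
	 * takes i_lock itself.
	 */
	wb = inode_to_wb_and_lock_list(inode);

	/* Both asserts in inode_io_list_move_locked() are now satisfied. */
	inode_io_list_move_locked(inode, wb, head);

	/* Unlock in reverse order: the inner i_lock first, then list_lock. */
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
}

The spin_lock(&inode->i_lock) calls removed from inode_io_list_del(),
writeback_single_inode() and writeback_sb_inodes() follow the same pattern:
the lock is not dropped, it is simply taken earlier, inside the helper.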