@@ -1361,6 +1361,15 @@ out_unlock_inode:
}
EXPORT_SYMBOL(__mark_inode_dirty);
+/*
+ * The @s_sync_lock is used to serialise concurrent sync operations
+ * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
+ * Concurrent callers will block on the s_sync_lock rather than doing contending
+ * walks. The queueing maintains the behaviour required by sync(2): all the IO
+ * issued up to the time this function is entered is guaranteed to be completed
+ * by the time we have gained the lock and waited for all IO in progress,
+ * regardless of the order in which callers are granted the lock.
+ */
static void wait_sb_inodes(struct super_block *sb)
{
struct inode *inode, *old_inode = NULL;
@@ -1371,6 +1380,7 @@ static void wait_sb_inodes(struct super_block *sb)
*/
WARN_ON(!rwsem_is_locked(&sb->s_umount));
+ mutex_lock(&sb->s_sync_lock);
spin_lock(&sb->s_inode_list_lock);
/*
@@ -1412,6 +1422,7 @@ static void wait_sb_inodes(struct super_block *sb)
}
spin_unlock(&sb->s_inode_list_lock);
iput(old_inode);
+ mutex_unlock(&sb->s_sync_lock);
}
/**
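Taken together, the fs-writeback.c hunks give wait_sb_inodes() the
following shape (a sketch assembled from the hunks above, not the full
function; the inode walk in the body is elided):

static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/* s_umount must be held across the walk */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	mutex_lock(&sb->s_sync_lock);	/* serialise concurrent syncers */
	spin_lock(&sb->s_inode_list_lock);

	/* ... walk sb->s_inodes, waiting on each inode's in-flight IO ... */

	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
	mutex_unlock(&sb->s_sync_lock);
}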
@@ -190,6 +190,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
s->s_flags = flags;
INIT_HLIST_NODE(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_anon);
+ mutex_init(&s->s_sync_lock);
INIT_LIST_HEAD(&s->s_inodes);
spin_lock_init(&s->s_inode_list_lock);
@@ -1346,6 +1346,8 @@ struct super_block {
struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
struct rcu_head rcu;
+ struct mutex s_sync_lock; /* sync serialisation lock */
+
/*
* Indicates how deep in a filesystem stack this SB is
*/
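For illustration only, a minimal userspace analogue (pthreads; every name
below is hypothetical, none of it is kernel API) of the argument in the
comment above: concurrent waiters queue on a single mutex rather than
contending, and because each waits for all in-flight work while holding
the lock, anything issued before a caller entered has completed by the
time that caller returns, whichever order the lock is granted in.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sync_lock  = PTHREAD_MUTEX_INITIALIZER; /* plays s_sync_lock */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  all_done   = PTHREAD_COND_INITIALIZER;
static int in_flight;	/* "IO" issued but not yet completed */

static void issue_io(void)
{
	pthread_mutex_lock(&state_lock);
	in_flight++;
	pthread_mutex_unlock(&state_lock);
}

static void complete_io(void)
{
	pthread_mutex_lock(&state_lock);
	if (--in_flight == 0)
		pthread_cond_broadcast(&all_done);
	pthread_mutex_unlock(&state_lock);
}

/*
 * Analogue of wait_sb_inodes(): queue on sync_lock, then wait for all
 * in-flight work to drain before returning.
 */
static void wait_all(void)
{
	pthread_mutex_lock(&sync_lock);
	pthread_mutex_lock(&state_lock);
	while (in_flight > 0)
		pthread_cond_wait(&all_done, &state_lock);
	pthread_mutex_unlock(&state_lock);
	pthread_mutex_unlock(&sync_lock);
}

static void *completer(void *arg)
{
	complete_io();
	return NULL;
}

int main(void)
{
	pthread_t t;

	issue_io();
	pthread_create(&t, NULL, completer, NULL);
	wait_all();	/* blocks until the completer has run */
	pthread_join(t, NULL);
	printf("all issued IO completed\n");
	return 0;
}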