@@ -142,12 +142,12 @@ xfs_ilock_attr_map_shared(
*
* Basic locking order:
*
- * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
+ * s_dax_sem -> i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
*
* mmap_sem locking order:
*
* i_rwsem -> page lock -> mmap_sem
- * mmap_sem -> i_mmap_lock -> page_lock
+ * s_dax_sem -> mmap_sem -> i_mmap_lock -> page_lock
*
* The difference in mmap_sem locking order means that we cannot hold the
* i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
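A purely illustrative sketch of the order documented above, not part of this patch: the helper name below is made up, and real callers take only the locks they actually need. As the xfs_ilock() hunk further down shows, XFS_DAX_EXCL is handled before the other lock classes, so the new semaphore stays outermost whether the flags are passed separately, as here, or as one combined mask.

/*
 * Illustrative only: acquire the inode locks in the documented order.
 * The page lock sits between i_mmap_lock and i_ilock and is taken by
 * the page cache code, not through xfs_ilock().
 */
static void
xfs_example_lock_in_order(
	struct xfs_inode	*ip)
{
	xfs_ilock(ip, XFS_DAX_EXCL);		/* s_dax_sem, outermost */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);		/* i_rwsem */
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);	/* i_mmap_lock */
	xfs_ilock(ip, XFS_ILOCK_EXCL);		/* i_ilock, innermost */
}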
@@ -182,6 +182,9 @@ xfs_ilock(
(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
+ if (lock_flags & XFS_DAX_EXCL)
+ inode_dax_state_down_write(VFS_I(ip));
+
if (lock_flags & XFS_IOLOCK_EXCL) {
down_write_nested(&VFS_I(ip)->i_rwsem,
XFS_IOLOCK_DEP(lock_flags));
@@ -224,6 +227,8 @@ xfs_ilock_nowait(
* You can't set both SHARED and EXCL for the same lock,
* and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
* and XFS_ILOCK_EXCL are valid values to set in lock_flags.
+ *
+ * XFS_DAX_* is not allowed
*/
ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
@@ -232,6 +237,7 @@ xfs_ilock_nowait(
ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
+ ASSERT((lock_flags & XFS_DAX_EXCL) == 0);
if (lock_flags & XFS_IOLOCK_EXCL) {
if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
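Because xfs_ilock_nowait() now refuses XFS_DAX_EXCL, a caller that wants both the DAX state and a trylock on an inner lock would presumably take the DAX state with the blocking xfs_ilock() first (it is the outermost lock in the hierarchy) and only trylock the rest. A hypothetical sketch, not taken from the patchset:

/*
 * Illustrative only: on success both locks are held and it is the
 * caller's job to drop them with xfs_iunlock().
 */
static bool
xfs_example_trylock_with_dax(
	struct xfs_inode	*ip)
{
	xfs_ilock(ip, XFS_DAX_EXCL);
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
		xfs_iunlock(ip, XFS_DAX_EXCL);
		return false;
	}
	return true;
}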
@@ -318,6 +324,9 @@ xfs_iunlock(
else if (lock_flags & XFS_ILOCK_SHARED)
mrunlock_shared(&ip->i_lock);
+ if (lock_flags & XFS_DAX_EXCL)
+ inode_dax_state_up_write(VFS_I(ip));
+
trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
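Taken together with the xfs_ilock() change, this makes XFS_DAX_EXCL look like the other lock classes from a caller's point of view: the same mask is passed to both functions, and the DAX state is acquired first and released last. A hypothetical call site, sketched only to show the pairing (the helper name and the work done under the locks are assumptions, not part of this patch):

/*
 * Illustrative only: what the patch guarantees is that XFS_DAX_EXCL in
 * lock_flags maps to inode_dax_state_down_write() in xfs_ilock() and
 * inode_dax_state_up_write() in xfs_iunlock().
 */
static void
xfs_example_change_dax_state(
	struct xfs_inode	*ip)
{
	uint	lock_flags = XFS_DAX_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL;

	xfs_ilock(ip, lock_flags);
	/* ... change the inode's DAX state here ... */
	xfs_iunlock(ip, lock_flags);
}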
@@ -333,6 +342,8 @@ xfs_ilock_demote(
ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
ASSERT((lock_flags &
~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
+ /* XFS_DAX_* is not allowed */
+ ASSERT((lock_flags & XFS_DAX_EXCL) == 0);
if (lock_flags & XFS_ILOCK_EXCL)
mrdemote(&ip->i_lock);
@@ -465,6 +476,9 @@ xfs_lock_inodes(
ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
+ /* XFS_DAX_* is not allowed */
+ ASSERT((lock_mode & XFS_DAX_EXCL) == 0);
+
if (lock_mode & XFS_IOLOCK_EXCL) {
ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
} else if (lock_mode & XFS_MMAPLOCK_EXCL)
@@ -566,6 +580,10 @@ xfs_lock_two_inodes(
ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
!(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+ /* XFS_DAX_* is not allowed */
+ ASSERT((ip0_mode & XFS_DAX_EXCL) == 0);
+ ASSERT((ip1_mode & XFS_DAX_EXCL) == 0);
+
ASSERT(ip0->i_ino != ip1->i_ino);
if (ip0->i_ino > ip1->i_ino) {
@@ -278,10 +278,12 @@ static inline void xfs_ifunlock(struct xfs_inode *ip)
#define XFS_ILOCK_SHARED (1<<3)
#define XFS_MMAPLOCK_EXCL (1<<4)
#define XFS_MMAPLOCK_SHARED (1<<5)
+#define XFS_DAX_EXCL (1<<6)
#define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
| XFS_ILOCK_EXCL | XFS_ILOCK_SHARED \
- | XFS_MMAPLOCK_EXCL | XFS_MMAPLOCK_SHARED)
+ | XFS_MMAPLOCK_EXCL | XFS_MMAPLOCK_SHARED \
+ | XFS_DAX_EXCL)
#define XFS_LOCK_FLAGS \
{ XFS_IOLOCK_EXCL, "IOLOCK_EXCL" }, \
@@ -289,7 +291,8 @@ static inline void xfs_ifunlock(struct xfs_inode *ip)
{ XFS_ILOCK_EXCL, "ILOCK_EXCL" }, \
{ XFS_ILOCK_SHARED, "ILOCK_SHARED" }, \
{ XFS_MMAPLOCK_EXCL, "MMAPLOCK_EXCL" }, \
- { XFS_MMAPLOCK_SHARED, "MMAPLOCK_SHARED" }
+ { XFS_MMAPLOCK_SHARED, "MMAPLOCK_SHARED" }, \
+ { XFS_DAX_EXCL, "DAX_EXCL" }
/*
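XFS_LOCK_FLAGS is the symbolic-name side of these flag bits, consumed by the lock tracepoints through __print_flags(); with the new entry, a lock_flags value containing bit 6 decodes as "DAX_EXCL" in trace output. A sketch of that decoding follows; the event shape is only illustrative, the real class lives in fs/xfs/xfs_trace.h.

/* Illustrative tracepoint sketch, not copied from xfs_trace.h. */
TRACE_EVENT(xfs_example_lock,
	TP_PROTO(struct xfs_inode *ip, unsigned int lock_flags),
	TP_ARGS(ip, lock_flags),
	TP_STRUCT__entry(
		__field(xfs_ino_t, ino)
		__field(unsigned int, lock_flags)
	),
	TP_fast_assign(
		__entry->ino = ip->i_ino;
		__entry->lock_flags = lock_flags;
	),
	/* "DAX_EXCL" now appears in %s when bit 6 is set */
	TP_printk("ino 0x%llx flags %s",
		  __entry->ino,
		  __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS))
);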