@@ -489,10 +489,15 @@ static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
sector_t *span)
{
struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
+ int rc;
*span = sis->pages;
- return rpc_clnt_swap_activate(clnt);
+ rc = rpc_clnt_swap_activate(clnt);
+ if (rc)
+ return rc;
+ sis->flags |= SWP_FILE;
+ return add_swap_extent(sis, 0, sis->max, 0);
}
static void nfs_swap_deactivate(struct file *file)
@@ -4483,7 +4483,8 @@ xfs_bmapi_write(
/* fail any attempts to mutate data extents */
if (IS_IOMAP_IMMUTABLE(VFS_I(ip))
- && !(flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ATTRFORK)))
+ && !(flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ATTRFORK
+ | XFS_BMAPI_FORCE)))
return -ETXTBSY;
ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -113,6 +113,15 @@ struct xfs_extent_free_item
/* Only convert delalloc space, don't allocate entirely new extents */
#define XFS_BMAPI_DELALLOC 0x400
+/*
+ * Permit extent manipulations even if S_IOMAP_IMMUTABLE is set on the
+ * inode. This is only expected to be used in the swapfile activation
+ * case where we want to mark all swap space as unwritten so that reads
+ * return zero and writes fail with ETXTBSY. Storage access in this
+ * state can only occur via swap operations.
+ */
+#define XFS_BMAPI_FORCE 0x800
+
#define XFS_BMAPI_FLAGS \
{ XFS_BMAPI_ENTIRE, "ENTIRE" }, \
{ XFS_BMAPI_METADATA, "METADATA" }, \
@@ -124,7 +133,8 @@ struct xfs_extent_free_item
{ XFS_BMAPI_ZERO, "ZERO" }, \
{ XFS_BMAPI_REMAP, "REMAP" }, \
{ XFS_BMAPI_COWFORK, "COWFORK" }, \
- { XFS_BMAPI_DELALLOC, "DELALLOC" }
+ { XFS_BMAPI_DELALLOC, "DELALLOC" }, \
+ { XFS_BMAPI_FORCE, "FORCE" }
static inline int xfs_bmapi_aflag(int w)
@@ -1418,6 +1418,58 @@ xfs_vm_set_page_dirty(
return newly_dirty;
}
+STATIC void
+xfs_vm_swap_deactivate(
+ struct file *file)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct xfs_inode *ip = XFS_I(inode);
+ int error = 0;
+
+ xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ if (IS_IOMAP_IMMUTABLE(inode))
+ error = xfs_alloc_file_space(ip, PAGE_SIZE,
+ i_size_read(inode) - PAGE_SIZE,
+ XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO
+ | XFS_BMAPI_FORCE);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+
+ WARN(error, "%s failed to restore block map (%d)\n", __func__, error);
+}
+
+STATIC int
+xfs_vm_swap_activate(
+ struct swap_info_struct *sis,
+ struct file *file,
+ sector_t *span)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct xfs_inode *ip = XFS_I(inode);
+ int error = 0, nr_extents;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+
+ nr_extents = generic_swapfile_activate(sis, file, span);
+ if (nr_extents < 0)
+ return nr_extents;
+
+ /*
+ * If the file is already immutable take this opportunity to
+ * mark all extents as unwritten. This arranges for all reads
+ * to return 0 and all writes to fail with ETXTBSY since they
+ * would attempt extent conversion to the 'written' state. The
+ * swap header (PAGE_SIZE) is left alone.
+ */
+ if (IS_IOMAP_IMMUTABLE(inode))
+ error = xfs_alloc_file_space(ip, PAGE_SIZE,
+ i_size_read(inode) - PAGE_SIZE,
+ XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT
+ | XFS_BMAPI_FORCE);
+ if (error)
+ nr_extents = error;
+ return nr_extents;
+}
+
const struct address_space_operations xfs_address_space_operations = {
.readpage = xfs_vm_readpage,
.readpages = xfs_vm_readpages,
@@ -1427,6 +1479,8 @@ const struct address_space_operations xfs_address_space_operations = {
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
.bmap = xfs_vm_bmap,
+ .swap_activate = xfs_vm_swap_activate,
+ .swap_deactivate = xfs_vm_swap_deactivate,
.direct_IO = xfs_vm_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
@@ -231,6 +231,7 @@ int generic_swapfile_activate(struct swap_info_struct *sis,
ret = -EINVAL;
goto out;
}
+EXPORT_SYMBOL_GPL(generic_swapfile_activate);
/*
* We may have stale swap cache pages in memory: notice
@@ -2105,6 +2105,9 @@ sector_t map_swap_page(struct page *page, struct block_device **bdev)
  */
 static void destroy_swap_extents(struct swap_info_struct *sis)
 {
+	struct file *swap_file = sis->swap_file;
+	struct address_space *mapping = swap_file ? swap_file->f_mapping : NULL;
+
 	while (!list_empty(&sis->first_swap_extent.list)) {
 		struct swap_extent *se;
 
@@ -2114,10 +2117,7 @@ static void destroy_swap_extents(struct swap_info_struct *sis)
 		kfree(se);
 	}
 
-	if (sis->flags & SWP_FILE) {
-		struct file *swap_file = sis->swap_file;
-		struct address_space *mapping = swap_file->f_mapping;
-
+	if (mapping && mapping->a_ops->swap_deactivate) {
 		sis->flags &= ~SWP_FILE;
 		mapping->a_ops->swap_deactivate(swap_file);
 	}
@@ -2168,6 +2168,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
list_add_tail(&new_se->list, &sis->first_swap_extent.list);
return 1;
}
+EXPORT_SYMBOL_GPL(add_swap_extent);
/*
* A `swap extent' is a simple thing which maps a contiguous range of pages
@@ -2213,15 +2214,8 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
return ret;
}
- if (mapping->a_ops->swap_activate) {
- ret = mapping->a_ops->swap_activate(sis, swap_file, span);
- if (!ret) {
- sis->flags |= SWP_FILE;
- ret = add_swap_extent(sis, 0, sis->max, 0);
- *span = sis->pages;
- }
- return ret;
- }
+ if (mapping->a_ops->swap_activate)
+ return mapping->a_ops->swap_activate(sis, swap_file, span);
return generic_swapfile_activate(sis, swap_file, span);
}