diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -786,7 +786,7 @@ static int write_folio_nounlock(struct folio *folio,
 	ceph_fscache_write_to_cache(inode, page_off, len, caching);
 
 	if (IS_ENCRYPTED(inode)) {
-		bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
+		bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
							       CEPH_FSCRYPT_BLOCK_SIZE, 0,
							       GFP_NOFS);
 		if (IS_ERR(bounce_page)) {
@@ -1248,7 +1248,7 @@ static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
 	gfp_t gfp_flags = ceph_wbc->locked_pages ? GFP_NOWAIT : GFP_NOFS;
 
 	if (IS_ENCRYPTED(inode)) {
-		pages[index] = fscrypt_encrypt_pagecache_blocks(&folio->page,
+		pages[index] = fscrypt_encrypt_pagecache_blocks(folio,
								PAGE_SIZE,
								0,
								gfp_flags);
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -153,8 +153,8 @@ int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
 }
 
 /**
- * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache page
- * @page: the locked pagecache page containing the data to encrypt
+ * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache folio
+ * @folio: the locked pagecache folio containing the data to encrypt
  * @len: size of the data to encrypt, in bytes
- * @offs: offset within @page of the data to encrypt, in bytes
+ * @offs: offset within @folio of the data to encrypt, in bytes
  * @gfp_flags: memory allocation flags; see details below
@@ -177,23 +177,21 @@ int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
  *
  * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
  */
-struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
-					      unsigned int len,
-					      unsigned int offs,
-					      gfp_t gfp_flags)
-
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+		size_t len, size_t offs, gfp_t gfp_flags)
 {
-	const struct inode *inode = page->mapping->host;
+	const struct inode *inode = folio->mapping->host;
 	const struct fscrypt_inode_info *ci = inode->i_crypt_info;
 	const unsigned int du_bits = ci->ci_data_unit_bits;
 	const unsigned int du_size = 1U << du_bits;
 	struct page *ciphertext_page;
-	u64 index = ((u64)page->index << (PAGE_SHIFT - du_bits)) +
+	u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
 		    (offs >> du_bits);
 	unsigned int i;
 	int err;
 
-	if (WARN_ON_ONCE(!PageLocked(page)))
+	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+	if (WARN_ON_ONCE(!folio_test_locked(folio)))
 		return ERR_PTR(-EINVAL);
 
 	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
@@ -205,7 +203,7 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
 
 	for (i = offs; i < offs + len; i += du_size, index++) {
 		err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, index,
-					      page, ciphertext_page,
+					      &folio->page, ciphertext_page,
 					      du_size, i, gfp_flags);
 		if (err) {
 			fscrypt_free_bounce_page(ciphertext_page);
@@ -213,7 +211,7 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
 		}
 	}
 	SetPagePrivate(ciphertext_page);
-	set_page_private(ciphertext_page, (unsigned long)page);
+	set_page_private(ciphertext_page, (unsigned long)folio);
 	return ciphertext_page;
 }
 EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -522,7 +522,7 @@ int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
 		if (io->io_bio)
 			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
 	retry_encrypt:
-		bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
+		bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
					enc_bytes, 0, gfp_flags);
 		if (IS_ERR(bounce_page)) {
 			ret = PTR_ERR(bounce_page);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2504,7 +2504,7 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
 		return 0;
 
 retry_encrypt:
-	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
+	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page_folio(page),
					PAGE_SIZE, 0, gfp_flags);
 	if (IS_ERR(fio->encrypted_page)) {
 		/* flush pending IOs and wait for a while in the ENOMEM case */
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -310,10 +310,8 @@ static inline void fscrypt_prepare_dentry(struct dentry *dentry,
 
 /* crypto.c */
 void fscrypt_enqueue_decrypt_work(struct work_struct *);
-struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
-					      unsigned int len,
-					      unsigned int offs,
-					      gfp_t gfp_flags);
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+		size_t len, size_t offs, gfp_t gfp_flags);
 int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
 				  unsigned int len, unsigned int offs,
 				  u64 lblk_num, gfp_t gfp_flags);
@@ -480,10 +478,8 @@ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
 {
 }
 
-static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
-							     unsigned int len,
-							     unsigned int offs,
-							     gfp_t gfp_flags)
+static inline struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+		size_t len, size_t offs, gfp_t gfp_flags)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
ext4 and ceph already have a folio to pass; f2fs needs to be properly
converted, but this will do for now.  This removes references to
page->index and page->mapping, as well as a call to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
This is against next-20250304 and will have conflicts with the ceph tree
if applied to mainline.  It might be easiest for Christian to carry it?

 fs/ceph/addr.c          |  4 ++--
 fs/crypto/crypto.c      | 22 ++++++++++------------
 fs/ext4/page-io.c       |  2 +-
 fs/f2fs/data.c          |  2 +-
 include/linux/fscrypt.h | 12 ++++--------
 5 files changed, 18 insertions(+), 24 deletions(-)
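
For readers following along, here is a minimal sketch of how a writeback
path would drive the folio-based API after this conversion, together with
the matching completion side.  It is illustrative only: the function names
my_encrypt_for_writeback() and my_write_endio() are hypothetical, not code
from this patch or from any of the filesystems touched above, and it
assumes a locked, order-0 folio, as the new VM_BUG_ON_FOLIO() check
requires.

/*
 * Hypothetical example (not part of this patch): encrypt a locked,
 * order-0 pagecache folio for writeback using the folio-based API.
 */
static int my_encrypt_for_writeback(struct folio *folio,
				    struct page **bounce_page_ret)
{
	struct page *bounce_page;

	/* The helper WARNs and returns -EINVAL if the folio is unlocked. */
	bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
						       folio_size(folio), 0,
						       GFP_NOFS);
	if (IS_ERR(bounce_page))
		return PTR_ERR(bounce_page);

	/* Submit bounce_page for I/O in place of the pagecache folio. */
	*bounce_page_ret = bounce_page;
	return 0;
}

/*
 * Hypothetical completion side: after this patch the bounce page's
 * ->private holds the folio pointer stored by set_page_private() above,
 * so the pagecache folio can be recovered before the bounce page is
 * freed with fscrypt_free_bounce_page().
 */
static void my_write_endio(struct page *bounce_page)
{
	struct folio *folio = (struct folio *)page_private(bounce_page);

	folio_end_writeback(folio);
	fscrypt_free_bounce_page(bounce_page);
}

Since the patch only permits order-0 folios, the folio pointer and the
page pointer coincide, which is presumably why storing the folio in
->private does not disturb existing users of that field such as
fscrypt_pagecache_page().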