@@ -432,7 +432,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le
else
btrfs_folio_clear_uptodate(fs_info, folio, start, len);
- if (!btrfs_is_subpage(fs_info, folio->mapping))
+ if (!btrfs_is_subpage(fs_info, folio))
folio_unlock(folio);
else
btrfs_folio_end_lock(fs_info, folio, start, len);
@@ -488,7 +488,7 @@ static void end_bbio_data_write(struct btrfs_bio *bbio)
static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
{
ASSERT(folio_test_locked(folio));
- if (!btrfs_is_subpage(fs_info, folio->mapping))
+ if (!btrfs_is_subpage(fs_info, folio))
return;
ASSERT(folio_test_private(folio));
@@ -870,7 +870,7 @@ int set_folio_extent_mapped(struct folio *folio)
fs_info = folio_to_fs_info(folio);
- if (btrfs_is_subpage(fs_info, folio->mapping))
+ if (btrfs_is_subpage(fs_info, folio))
return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
@@ -887,7 +887,7 @@ void clear_folio_extent_mapped(struct folio *folio)
return;
fs_info = folio_to_fs_info(folio);
- if (btrfs_is_subpage(fs_info, folio->mapping))
+ if (btrfs_is_subpage(fs_info, folio))
return btrfs_detach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
folio_detach_private(folio);
@@ -1331,7 +1331,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
struct writeback_control *wbc = bio_ctrl->wbc;
- const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
+ const bool is_subpage = btrfs_is_subpage(fs_info, folio);
const u64 page_start = folio_pos(folio);
const u64 page_end = page_start + folio_size(folio) - 1;
const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
@@ -1359,7 +1359,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
int bit;
/* Save the dirty bitmap as our submission bitmap will be a subset of it. */
- if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
+ if (btrfs_is_subpage(fs_info, folio)) {
ASSERT(blocks_per_folio > 1);
btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
} else {
@@ -2411,7 +2411,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
* regular submission.
*/
if (wbc->sync_mode != WB_SYNC_NONE ||
- btrfs_is_subpage(inode_to_fs_info(inode), mapping)) {
+ btrfs_is_subpage(inode_to_fs_info(inode), folio)) {
if (folio_test_writeback(folio))
submit_write_bio(bio_ctrl, 0);
folio_wait_writeback(folio);
@@ -7283,7 +7283,7 @@ static void wait_subpage_spinlock(struct folio *folio)
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
struct btrfs_subpage *subpage;
- if (!btrfs_is_subpage(fs_info, folio->mapping))
+ if (!btrfs_is_subpage(fs_info, folio))
return;
ASSERT(folio_test_private(folio) && folio_get_private(folio));
@@ -83,7 +83,7 @@ int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
return 0;
if (type == BTRFS_SUBPAGE_METADATA && !btrfs_meta_is_subpage(fs_info))
return 0;
- if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio->mapping))
+ if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio))
return 0;
subpage = btrfs_alloc_subpage(fs_info, type);
@@ -104,7 +104,7 @@ void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *fol
return;
if (type == BTRFS_SUBPAGE_METADATA && !btrfs_meta_is_subpage(fs_info))
return;
- if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio->mapping))
+ if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio))
return;
subpage = folio_detach_private(folio);
@@ -286,7 +286,7 @@ void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
ASSERT(folio_test_locked(folio));
- if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
+ if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio)) {
folio_unlock(folio);
return;
}
@@ -320,7 +320,7 @@ void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
int cleared = 0;
int bit;
- if (!btrfs_is_subpage(fs_info, folio->mapping)) {
+ if (!btrfs_is_subpage(fs_info, folio)) {
folio_unlock(folio);
return;
}
@@ -572,7 +572,7 @@ void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) { \
+ !btrfs_is_subpage(fs_info, folio)) { \
folio_set_func(folio); \
return; \
} \
@@ -582,7 +582,7 @@ void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) { \
+ !btrfs_is_subpage(fs_info, folio)) { \
folio_clear_func(folio); \
return; \
} \
@@ -592,7 +592,7 @@ bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) \
+ !btrfs_is_subpage(fs_info, folio)) \
return folio_test_func(folio); \
return btrfs_subpage_test_##name(fs_info, folio, start, len); \
} \
@@ -600,7 +600,7 @@ void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) { \
+ !btrfs_is_subpage(fs_info, folio)) { \
folio_set_func(folio); \
return; \
} \
@@ -611,7 +611,7 @@ void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) { \
+ !btrfs_is_subpage(fs_info, folio)) { \
folio_clear_func(folio); \
return; \
} \
@@ -622,7 +622,7 @@ bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
struct folio *folio, u64 start, u32 len) \
{ \
if (unlikely(!fs_info) || \
- !btrfs_is_subpage(fs_info, folio->mapping)) \
+ !btrfs_is_subpage(fs_info, folio)) \
return folio_test_func(folio); \
btrfs_subpage_clamp_range(folio, &start, &len); \
return btrfs_subpage_test_##name(fs_info, folio, start, len); \
@@ -700,7 +700,7 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
return;
- if (!btrfs_is_subpage(fs_info, folio->mapping)) {
+ if (!btrfs_is_subpage(fs_info, folio)) {
ASSERT(!folio_test_dirty(folio));
return;
}
@@ -735,7 +735,7 @@ void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
int ret;
ASSERT(folio_test_locked(folio));
- if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping))
+ if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio))
return;
subpage = folio_get_private(folio);
@@ -85,10 +85,10 @@ static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
return fs_info->nodesize < PAGE_SIZE;
}
static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
- struct address_space *mapping)
+ struct folio *folio)
{
- if (mapping && mapping->host)
- ASSERT(is_data_inode(BTRFS_I(mapping->host)));
+ if (folio->mapping && folio->mapping->host)
+ ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
return fs_info->sectorsize < PAGE_SIZE;
}
#else
@@ -97,10 +97,10 @@ static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
return false;
}
static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
- struct address_space *mapping)
+ struct folio *folio)
{
- if (mapping && mapping->host)
- ASSERT(is_data_inode(BTRFS_I(mapping->host)));
+ if (folio->mapping && folio->mapping->host)
+ ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
return false;
}
#endif
To support larger data folios, we can no longer assume every filemap
folio is page sized, so the btrfs_is_subpage() check must be done
against a folio rather than an address_space.

Thankfully we have full control over metadata folios and ensure a
larger folio will never exceed nodesize, so btrfs_meta_is_subpage()
does not need this change.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent_io.c | 14 +++++++-------
 fs/btrfs/inode.c     |  2 +-
 fs/btrfs/subpage.c   | 24 ++++++++++++------------
 fs/btrfs/subpage.h   | 12 ++++++------
 4 files changed, 26 insertions(+), 26 deletions(-)
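
For reviewers who want the conversion in one glance, below is a minimal
sketch (not part of the patch) of the call-site pattern after this change.
btrfs_example_end_read() is a hypothetical function modeled on the
end_folio_read() hunk above; everything else is taken from the diff:

static void btrfs_example_end_read(struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	/*
	 * Old convention: btrfs_is_subpage(fs_info, folio->mapping).
	 * New convention: pass the folio itself, since with large data
	 * folios the mapping alone no longer implies the folio size.
	 */
	if (!btrfs_is_subpage(fs_info, folio))
		folio_unlock(folio);	/* the whole folio was locked */
	else
		btrfs_folio_end_lock(fs_info, folio, start, len);
}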