@@ -491,7 +491,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
*memstall = 1;
}
- ret = set_folio_extent_mapped(folio);
+ ret = btrfs_set_folio_subpage(folio);
if (ret < 0) {
folio_unlock(folio);
folio_put(folio);
@@ -883,7 +883,7 @@ static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t
return ERR_PTR(-ETXTBSY);
}
- ret = set_folio_extent_mapped(folio);
+ ret = btrfs_set_folio_subpage(folio);
if (ret < 0) {
folio_unlock(folio);
folio_put(folio);
@@ -859,38 +859,18 @@ static int attach_extent_buffer_folio(struct extent_buffer *eb,
return ret;
}
-int set_folio_extent_mapped(struct folio *folio)
+int btrfs_set_folio_subpage(struct folio *folio)
{
- struct btrfs_fs_info *fs_info;
-
- ASSERT(folio->mapping);
-
- if (folio_test_private(folio))
- return 0;
-
- fs_info = folio_to_fs_info(folio);
-
- if (btrfs_is_subpage(fs_info, folio->mapping))
- return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
- folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
- return 0;
+ return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
}
-void clear_folio_extent_mapped(struct folio *folio)
+void btrfs_clear_folio_subpage(struct folio *folio)
{
- struct btrfs_fs_info *fs_info;
-
- ASSERT(folio->mapping);
-
- if (!folio_test_private(folio))
- return;
-
- fs_info = folio_to_fs_info(folio);
- if (btrfs_is_subpage(fs_info, folio->mapping))
- return btrfs_detach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
- folio_detach_private(folio);
+ btrfs_detach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
}
static struct extent_map *get_extent_map(struct btrfs_inode *inode,
@@ -942,7 +922,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
int ret = 0;
const size_t blocksize = fs_info->sectorsize;
- ret = set_folio_extent_mapped(folio);
+ ret = btrfs_set_folio_subpage(folio);
if (ret < 0) {
folio_unlock(folio);
return ret;
@@ -1731,30 +1711,7 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
*/
bio_ctrl->submit_bitmap = (unsigned long)-1;
- /*
- * If the page is dirty but without private set, it's marked dirty
- * without informing the fs.
- * Nowadays that is a bug, since the introduction of
- * pin_user_pages*().
- *
- * So here we check if the page has private set to rule out such
- * case.
- * But we also have a long history of relying on the COW fixup,
- * so here we only enable this check for experimental builds until
- * we're sure it's safe.
- */
- if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL) &&
- unlikely(!folio_test_private(folio))) {
- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
- btrfs_err_rl(fs_info,
- "root %lld ino %llu folio %llu is marked dirty without notifying the fs",
- inode->root->root_key.objectid,
- btrfs_ino(inode), folio_pos(folio));
- ret = -EUCLEAN;
- goto done;
- }
-
- ret = set_folio_extent_mapped(folio);
+ ret = btrfs_set_folio_subpage(folio);
if (ret < 0)
goto done;
@@ -65,12 +65,6 @@ enum {
ENUM_BIT(PAGE_SET_ORDERED),
};
-/*
- * Folio private values. Every page that is controlled by the extent map has
- * folio private set to this value.
- */
-#define EXTENT_FOLIO_PRIVATE 1
-
/*
* The extent buffer bitmap operations are done with byte granularity instead of
* word granularity for two reasons:
@@ -247,8 +241,8 @@ int btrfs_writepages(struct address_space *mapping, struct writeback_control *wb
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc);
void btrfs_readahead(struct readahead_control *rac);
-int set_folio_extent_mapped(struct folio *folio);
-void clear_folio_extent_mapped(struct folio *folio);
+int btrfs_set_folio_subpage(struct folio *folio);
+void btrfs_clear_folio_subpage(struct folio *folio);
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u64 owner_root, int level);
@@ -877,7 +877,7 @@ static noinline int prepare_one_folio(struct inode *inode, struct folio **folio_
}
/* Only support page sized folio yet. */
ASSERT(folio_order(folio) == 0);
- ret = set_folio_extent_mapped(folio);
+ ret = btrfs_set_folio_subpage(folio);
if (ret < 0) {
folio_unlock(folio);
folio_put(folio);
@@ -1835,7 +1835,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
folio_wait_writeback(folio);
lock_extent(io_tree, page_start, page_end, &cached_state);
- ret2 = set_folio_extent_mapped(folio);
+ ret2 = btrfs_set_folio_subpage(folio);
if (ret2 < 0) {
ret = vmf_error(ret2);
unlock_extent(io_tree, page_start, page_end, &cached_state);
@@ -450,7 +450,6 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
int i;
for (i = 0; i < io_ctl->num_pages; i++) {
- int ret;
folio = __filemap_get_folio(inode->i_mapping, i,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
@@ -460,14 +459,6 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
return -ENOMEM;
}
- ret = set_folio_extent_mapped(folio);
- if (ret < 0) {
- folio_unlock(folio);
- folio_put(folio);
- io_ctl_drop_pages(io_ctl);
- return ret;
- }
-
io_ctl->pages[i] = &folio->page;
if (uptodate && !folio_test_uptodate(folio)) {
btrfs_read_folio(NULL, folio);
@@ -4873,11 +4873,11 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
/*
* We unlock the page after the io is completed and then re-lock it
- * above. release_folio() could have come in between that and cleared
- * folio private, but left the page in the mapping. Set the page mapped
+ * above. release_folio() could have been called in between,
+ * leaving the page in the mapping. Set the page mapped
* here to make sure it's properly set for the subpage stuff.
*/
- ret = set_folio_extent_mapped(folio);
+ ret = btrfs_set_folio_subpage(folio);
if (ret < 0)
goto out_unlock;
@@ -7317,7 +7317,7 @@ static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
if (try_release_extent_mapping(folio, gfp_flags)) {
wait_subpage_spinlock(folio);
- clear_folio_extent_mapped(folio);
+ btrfs_clear_folio_subpage(folio);
return true;
}
return false;
@@ -7515,7 +7515,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
if (!inode_evicting)
__btrfs_release_folio(folio, GFP_NOFS);
- clear_folio_extent_mapped(folio);
+ btrfs_clear_folio_subpage(folio);
}
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
@@ -91,7 +91,7 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
goto out_unlock;
}
- ret = set_folio_extent_mapped(folio);
+ ret = btrfs_set_folio_subpage(folio);
if (ret < 0)
goto out_unlock;
@@ -2870,10 +2870,10 @@ static int relocate_one_folio(struct reloc_control *rc,
/*
* We could have lost folio private when we dropped the lock to read the
- * folio above, make sure we set_folio_extent_mapped() here so we have any
+ * folio above, make sure we call btrfs_set_folio_subpage() here so we have any
* of the subpage blocksize stuff we need in place.
*/
- ret = set_folio_extent_mapped(folio);
+ ret = btrfs_set_folio_subpage(folio);
if (ret < 0)
goto release_folio;