@@ -1110,14 +1110,38 @@ static void btree_invalidatepage(struct page *page, unsigned int offset,
static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
+ struct btrfs_fs_info *fs_info = page_to_fs_info(page);
struct extent_buffer *eb;
- BUG_ON(!PagePrivate(page));
- eb = (struct extent_buffer *)page->private;
- BUG_ON(!eb);
- BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
- BUG_ON(!atomic_read(&eb->refs));
- btrfs_assert_tree_locked(eb);
+ if (fs_info->sectorsize == PAGE_SIZE) {
+ BUG_ON(!PagePrivate(page));
+ eb = (struct extent_buffer *)page->private;
+ BUG_ON(!eb);
+ BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+ BUG_ON(!atomic_read(&eb->refs));
+ btrfs_assert_tree_locked(eb);
+ } else {
+ u64 page_start = page_offset(page);
+ u64 page_end = page_start + PAGE_SIZE - 1;
+ u64 cur = page_start;
+ bool found_dirty_eb = false;
+ int ret;
+
+ ASSERT(btrfs_is_subpage(fs_info));
+ while (cur <= page_end) {
+ ret = btrfs_find_first_subpage_eb(fs_info, &eb, cur,
+ page_end, 0);
+ if (ret > 0)
+ break;
+ cur = eb->start + eb->len;
+ if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+ found_dirty_eb = true;
+ ASSERT(atomic_read(&eb->refs));
+ btrfs_assert_tree_locked(eb);
+ }
+ }
+ BUG_ON(!found_dirty_eb);
+ }
#endif
return __set_page_dirty_nobuffers(page);
}
@@ -2754,9 +2754,9 @@ blk_status_t btrfs_submit_read_repair(struct inode *inode,
* Return 0 if we found one extent buffer and record it in @eb_ret.
* Return 1 if there is no extent buffer in the range.
*/
-static int find_first_subpage_eb(struct btrfs_fs_info *fs_info,
- struct extent_buffer **eb_ret, u64 start,
- u64 end, u32 extra_bits)
+int btrfs_find_first_subpage_eb(struct btrfs_fs_info *fs_info,
+ struct extent_buffer **eb_ret, u64 start,
+ u64 end, u32 extra_bits)
{
struct extent_io_tree *io_tree = info_to_btree_io_tree(fs_info);
u64 found_start;
@@ -6427,7 +6427,7 @@ static int try_release_subpage_eb(struct page *page)
while (cur <= end) {
struct extent_buffer *eb;
- ret = find_first_subpage_eb(fs_info, &eb, cur, end, 0);
+ ret = btrfs_find_first_subpage_eb(fs_info, &eb, cur, end, 0);
if (ret > 0)
break;
@@ -298,6 +298,10 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);
struct btrfs_fs_info;
struct btrfs_inode;
+int btrfs_find_first_subpage_eb(struct btrfs_fs_info *fs_info,
+ struct extent_buffer **eb_ret, u64 start,
+ u64 end, unsigned int extra_bits);
+
int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
u64 length, u64 logical, struct page *page,
unsigned int pg_offset, int mirror_num);
For btree_set_page_dirty(), we should also check the extent buffer sanity for subpage support. Unlike the regular sector size case, one page can contain multiple extent buffers, and page::private no longer contains the pointer to the extent buffer. So this patch will iterate through the extent_io_tree to find any EXTENT_HAS_TREE_BLOCK bit, and check whether any extent buffer in the page range has EXTENT_BUFFER_DIRTY set and proper refs. Also, since we need to find subpage extent buffers outside of extent_io.c, export find_first_subpage_eb() as btrfs_find_first_subpage_eb(). Signed-off-by: Qu Wenruo <wqu@suse.com> --- fs/btrfs/disk-io.c | 36 ++++++++++++++++++++++++++++++------ fs/btrfs/extent_io.c | 8 ++++---- fs/btrfs/extent_io.h | 4 ++++ 3 files changed, 38 insertions(+), 10 deletions(-)