@@ -973,8 +973,8 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
* the other < 0 number - Something wrong happens
*/
static noinline int
-lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
- size_t num_pages, loff_t pos,
+lock_and_cleanup_extent_if_need(struct btrfs_inode *inode,
+ loff_t pos,
size_t write_bytes,
u64 *lockstart, u64 *lockend, bool nowait,
struct extent_state **cached_state)
@@ -982,7 +982,6 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 start_pos;
u64 last_pos;
- int i;
int ret = 0;
start_pos = round_down(pos, fs_info->sectorsize);
@@ -993,15 +992,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
if (nowait) {
if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
- cached_state)) {
- for (i = 0; i < num_pages; i++) {
- unlock_page(pages[i]);
- put_page(pages[i]);
- pages[i] = NULL;
- }
-
+ cached_state))
return -EAGAIN;
- }
} else {
lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
}
@@ -1013,10 +1005,6 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
ordered->file_offset <= last_pos) {
unlock_extent(&inode->io_tree, start_pos, last_pos,
cached_state);
- for (i = 0; i < num_pages; i++) {
- unlock_page(pages[i]);
- put_page(pages[i]);
- }
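+		/*
+		 * No pages to unlock or release here: the caller now locks
+		 * the extent range before preparing any pages for the write.
+		 */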
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
return -EAGAIN;
@@ -1029,13 +1017,6 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
ret = 1;
}
- /*
- * We should be called after prepare_pages() which should have locked
- * all pages in the range.
- */
- for (i = 0; i < num_pages; i++)
- WARN_ON(!PageLocked(pages[i]));
-
return ret;
}
@@ -1299,13 +1280,22 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
}
release_bytes = reserve_bytes;
-again:
ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
if (ret) {
btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
break;
}
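+		/*
+		 * Lock the extent range before any pages in it are prepared.
+		 * If the range is contended or covered by a running ordered
+		 * extent we can back off here without having to unlock and
+		 * release any pages first.
+		 */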
+again:
+		extents_locked = lock_and_cleanup_extent_if_need(BTRFS_I(inode),
+						pos, write_bytes, &lockstart, &lockend,
+						nowait, &cached_state);
+		if (extents_locked < 0) {
+			if (!nowait && extents_locked == -EAGAIN)
+				goto again;
+
+			ret = extents_locked;
+			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
+			break;
+		}
+
/*
* This is going to setup the pages array with the number of
* pages we want, so we don't really need to worry about the
@@ -1313,25 +1303,9 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
*/
ret = prepare_pages(inode, pages, num_pages,
pos, write_bytes, force_page_uptodate, false);
- if (ret) {
- btrfs_delalloc_release_extents(BTRFS_I(inode),
- reserve_bytes);
- break;
- }
-
- extents_locked = lock_and_cleanup_extent_if_need(
- BTRFS_I(inode), pages,
- num_pages, pos, write_bytes, &lockstart,
- &lockend, nowait, &cached_state);
- if (extents_locked < 0) {
- if (!nowait && extents_locked == -EAGAIN)
- goto again;
-
+		if (ret) {
 			btrfs_delalloc_release_extents(BTRFS_I(inode),
 						       reserve_bytes);
-			ret = extents_locked;
-			break;
-		}
+			if (extents_locked)
+				unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
+					      lockend, &cached_state);
+			break;
+		}
copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
@@ -1380,33 +1354,19 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
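+		/*
+		 * Don't pass our cached extent state to btrfs_dirty_pages():
+		 * if the range was not locked (extents_locked == 0) we would
+		 * otherwise get back a cached state with nobody left to free
+		 * it. The state cached by the extent lock above is released by
+		 * the unlock_extent() call below.
+		 */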
ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
dirty_pages, pos, copied,
- &cached_state, only_release_metadata);
-
- /*
- * If we have not locked the extent range, because the range's
- * start offset is >= i_size, we might still have a non-NULL
- * cached extent state, acquired while marking the extent range
- * as delalloc through btrfs_dirty_pages(). Therefore free any
- * possible cached extent state to avoid a memory leak.
- */
- if (extents_locked)
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, &cached_state);
- else
- free_extent_state(cached_state);
+ NULL, only_release_metadata);
btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
- if (ret) {
- btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
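+		/*
+		 * Release the pages while the extent range is still locked and
+		 * only then unlock the range, so page locks always nest inside
+		 * the extent lock.
+		 */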
+ btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
+		if (extents_locked)
+			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
+				      lockend, &cached_state);
+ if (ret)
break;
- }
release_bytes = 0;
if (only_release_metadata)
btrfs_check_nocow_unlock(BTRFS_I(inode));
- btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
-
cond_resched();
pos += copied;