@@ -722,7 +722,13 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
faili = nr_pages - 1;
cb->nr_pages = nr_pages;
- add_ra_bio_pages(inode, em_start + em_len, cb);
+ /*
+ * In the parent-locked case we only locked the range we are interested
+ * in. In all other cases, we can opportunistically cache decompressed
+ * data that goes beyond the requested range.
+ */
+ if (!(bio_flags & EXTENT_BIO_PARENT_LOCKED))
+ add_ra_bio_pages(inode, em_start + em_len, cb);
/* include any pages we added in add_ra-bio_pages */
cb->len = bio->bi_iter.bi_size;
@@ -3104,7 +3104,8 @@ static int __do_readpage(struct page *page,
size_t iosize;
size_t disk_io_size;
size_t blocksize = inode->i_sb->s_blocksize;
- unsigned long this_bio_flag = 0;
+ const bool parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
+ unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
set_page_extent_mapped(page);
@@ -3144,15 +3145,19 @@ static int __do_readpage(struct page *page,
kunmap_atomic(userpage);
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
- unlock_extent_cached(tree, cur,
- cur + iosize - 1, &cached);
+ if (parent_locked)
+ free_extent_state(cached);
+ else
+ unlock_extent_cached(tree, cur,
+ cur + iosize - 1, &cached);
break;
}
em = __get_extent_map(inode, page, pg_offset, cur,
end - cur + 1, get_extent, em_cached);
if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
- unlock_extent(tree, cur, end);
+ if (!parent_locked)
+ unlock_extent(tree, cur, end);
break;
}
extent_offset = cur - em->start;
@@ -3234,8 +3239,11 @@ static int __do_readpage(struct page *page,
flush_dcache_page(page);
kunmap_atomic(userpage);
- set_extent_uptodate(tree, cur, cur + iosize - 1,
- &cached, GFP_NOFS);
+ if (parent_locked)
+ free_extent_state(cached);
+ else
+ set_extent_uptodate(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
unlock_extent_cached(tree, cur,
cur + iosize - 1, &cached);
cur = cur + iosize;
@@ -3246,7 +3254,8 @@ static int __do_readpage(struct page *page,
if (test_range_bit(tree, cur, cur_end,
EXTENT_UPTODATE, 1, NULL)) {
check_page_uptodate(tree, page);
- unlock_extent(tree, cur, cur + iosize - 1);
+ if (!parent_locked)
+ unlock_extent(tree, cur, cur + iosize - 1);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -3256,7 +3265,8 @@ static int __do_readpage(struct page *page,
*/
if (block_start == EXTENT_MAP_INLINE) {
SetPageError(page);
- unlock_extent(tree, cur, cur + iosize - 1);
+ if (!parent_locked)
+ unlock_extent(tree, cur, cur + iosize - 1);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -3274,7 +3284,8 @@ static int __do_readpage(struct page *page,
*bio_flags = this_bio_flag;
} else {
SetPageError(page);
- unlock_extent(tree, cur, cur + iosize - 1);
+ if (!parent_locked)
+ unlock_extent(tree, cur, cur + iosize - 1);
goto out;
}
cur = cur + iosize;
@@ -3340,6 +3351,24 @@ int extent_read_full_page(struct page *page, get_extent_t *get_extent,
return ret;
}
+/*
+ * Similar to extent_read_full_page() but the responsibility to lock the range
+ * in the respective inode belongs to the caller.
+ */
+int extent_read_full_page_nolock(struct page *page, get_extent_t *get_extent,
+ int mirror_num)
+{
+ struct bio *bio = NULL;
+ unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
+ int ret;
+
+ ret = __do_readpage(page, get_extent, NULL, &bio, mirror_num,
+ &bio_flags, READ, NULL);
+ if (bio)
+ ret = submit_one_bio(bio, mirror_num, bio_flags);
+ return ret;
+}
+
static void update_nr_written(struct writeback_control *wbc,
unsigned long nr_written)
{
@@ -12,6 +12,7 @@
* type for this bio
*/
#define EXTENT_BIO_COMPRESSED 1
+#define EXTENT_BIO_PARENT_LOCKED 4
#define EXTENT_BIO_FLAG_SHIFT 16
enum {
@@ -191,6 +192,8 @@ int try_release_extent_buffer(struct page *page);
int extent_read_full_page(struct page *page, get_extent_t *get_extent,
int mirror_num);
+int extent_read_full_page_nolock(struct page *page, get_extent_t *get_extent,
+ int mirror_num);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int mode);