
[v6,21/22] erofs: implement fscache-based data readahead

Message ID 20220325122223.102958-22-jefflexu@linux.alibaba.com (mailing list archive)
State New, archived
Series fscache,erofs: fscache-based on-demand read semantics

Commit Message

Jingbo Xu March 25, 2022, 12:22 p.m. UTC
Implements fscache-based data readahead. Also registers an individual
bdi for each erofs instance to enable readahead.

Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
---
 fs/erofs/fscache.c | 114 +++++++++++++++++++++++++++++++++++++++++++++
 fs/erofs/super.c   |   4 ++
 2 files changed, 118 insertions(+)

Comments

Gao Xiang March 28, 2022, 10:55 a.m. UTC | #1
On Fri, Mar 25, 2022 at 08:22:22PM +0800, Jeffle Xu wrote:
> Implements fscache-based data readahead. Also registers an individual
> bdi for each erofs instance to enable readahead.
> 
> Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
> ---
>  fs/erofs/fscache.c | 114 +++++++++++++++++++++++++++++++++++++++++++++
>  fs/erofs/super.c   |   4 ++
>  2 files changed, 118 insertions(+)
> 
> diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
> index cbb39657615e..589d1e7c2b1b 100644
> --- a/fs/erofs/fscache.c
> +++ b/fs/erofs/fscache.c
> @@ -191,12 +191,126 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
>  	return ret;
>  }
>  
> +static inline size_t erofs_fscache_calc_len(struct erofs_map_blocks *map,
> +					    size_t len, size_t done)
> +{
> +	/*
> +	 * 1) For CHUNK_BASED layout, the output m_la is rounded down to the
> +	 * nearest chunk boundary, and the output m_llen actually starts from
> +	 * the start of the containing chunk.
> +	 * 2) For other cases, the output m_la is equal to o_la.
> +	 */
> +	size_t length = map->m_llen - (map->o_la - map->m_la);
> +
> +	return min_t(size_t, length, len - done);

This helper can be folded too.
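
For example (untested, just a rough sketch reusing the existing locals),
the calculation could simply be open-coded at the call sites:

		size_t count = min_t(size_t,
				     map.m_llen - (map.o_la - map.m_la),
				     len - done);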

> +}
> +
> +static inline void erofs_fscache_unlock_folios(struct readahead_control *rac,
> +					       size_t len)
> +{
> +	while (len) {
> +		struct folio *folio = readahead_folio(rac);
> +
> +		len -= folio_size(folio);
> +		folio_mark_uptodate(folio);
> +		folio_unlock(folio);
> +	}
> +}
> +
> +static void erofs_fscache_readahead(struct readahead_control *rac)
> +{
> +	struct inode *inode = rac->mapping->host;
> +	struct erofs_inode *vi = EROFS_I(inode);
> +	struct super_block *sb = inode->i_sb;
> +	size_t len, done = 0;
> +	loff_t start;
> +	int ret;
> +
> +	if (erofs_inode_is_data_compressed(vi->datalayout)) {
> +		erofs_info(sb, "compressed layout not supported yet");
> +		return;
> +	}

Redundant check.

> +
> +	if (!readahead_count(rac))
> +		return;
> +
> +	start = readahead_pos(rac);
> +	len = readahead_length(rac);
> +
> +	do {
> +		struct erofs_map_blocks map;
> +
> +		map.m_la = map.o_la = start + done;
> +
> +		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
> +		if (ret)
> +			return;
> +
> +		/* Read-ahead Hole
> +		 * Two cases will hit this:
> +		 * 1) EOF. Imposibble in readahead routine;
> +		 * 2) hole. Only CHUNK_BASED layout supports hole.
> +		 */

The preferred multi-line comment style is:

/*
 *
 */

and there is a typo: `Imposibble'. Also, I think this comment may not be
very useful anyway.
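
If the comment is kept at all, maybe something like this reads better
(just a suggestion):

		/*
		 * Two cases can reach here:
		 * 1) EOF, which is impossible in the readahead path;
		 * 2) a hole; only the CHUNK_BASED layout supports holes.
		 */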

> +		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
> +			struct iov_iter iter;
> +			loff_t offset = start + done;
> +			size_t count = erofs_fscache_calc_len(&map, len, done);
> +
> +			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages, offset, count);
> +			iov_iter_zero(count, &iter);
> +
> +			erofs_fscache_unlock_folios(rac, count);
> +			ret = count;
> +			continue;
> +		}
> +
> +		ret = erofs_fscache_get_map(&map, sb);
> +		if (ret)
> +			return;
> +
> +		/* Read-ahead Inline */
> +		if (map.m_flags & EROFS_MAP_META) {
> +			struct folio *folio = readahead_folio(rac);
> +
> +			ret = erofs_fscache_readpage_inline(folio, &map);
> +			if (!ret) {
> +				folio_mark_uptodate(folio);
> +				ret = folio_size(folio);
> +			}
> +
> +			folio_unlock(folio);
> +			continue;
> +		}
> +
> +		/* Read-ahead No-inline */
> +		if (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
> +		    vi->datalayout == EROFS_INODE_FLAT_INLINE ||
> +		    vi->datalayout == EROFS_INODE_CHUNK_BASED) {
> +			struct fscache_cookie *cookie = map.m_fscache->cookie;
> +			loff_t offset = start + done;
> +			size_t count = erofs_fscache_calc_len(&map, len, done);

You could promote `offset' and `count' to the outer do-while block, so
the duplicated `offset' and `count' in the !EROFS_MAP_MAPPED branch can
be dropped.
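
i.e. roughly (untested sketch, just to illustrate the hoisting):

	do {
		struct erofs_map_blocks map;
		loff_t offset = start + done;
		size_t count;

		map.m_la = map.o_la = offset;

		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (ret)
			return;

		count = erofs_fscache_calc_len(&map, len, done);
		/*
		 * ... then both the hole branch and the non-inline branch
		 * can use `offset' and `count' directly.
		 */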

Thanks,
Gao Xiang

Patch

diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index cbb39657615e..589d1e7c2b1b 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -191,12 +191,126 @@  static int erofs_fscache_readpage(struct file *file, struct page *page)
 	return ret;
 }
 
+static inline size_t erofs_fscache_calc_len(struct erofs_map_blocks *map,
+					    size_t len, size_t done)
+{
+	/*
+	 * 1) For CHUNK_BASED layout, the output m_la is rounded down to the
+	 * nearest chunk boundary, and the output m_llen actually starts from
+	 * the start of the containing chunk.
+	 * 2) For other cases, the output m_la is equal to o_la.
+	 */
+	size_t length = map->m_llen - (map->o_la - map->m_la);
+
+	return min_t(size_t, length, len - done);
+}
+
+static inline void erofs_fscache_unlock_folios(struct readahead_control *rac,
+					       size_t len)
+{
+	while (len) {
+		struct folio *folio = readahead_folio(rac);
+
+		len -= folio_size(folio);
+		folio_mark_uptodate(folio);
+		folio_unlock(folio);
+	}
+}
+
+static void erofs_fscache_readahead(struct readahead_control *rac)
+{
+	struct inode *inode = rac->mapping->host;
+	struct erofs_inode *vi = EROFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	size_t len, done = 0;
+	loff_t start;
+	int ret;
+
+	if (erofs_inode_is_data_compressed(vi->datalayout)) {
+		erofs_info(sb, "compressed layout not supported yet");
+		return;
+	}
+
+	if (!readahead_count(rac))
+		return;
+
+	start = readahead_pos(rac);
+	len = readahead_length(rac);
+
+	do {
+		struct erofs_map_blocks map;
+
+		map.m_la = map.o_la = start + done;
+
+		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+		if (ret)
+			return;
+
+		/* Read-ahead Hole
+		 * Two cases will hit this:
+		 * 1) EOF. Imposibble in readahead routine;
+		 * 2) hole. Only CHUNK_BASED layout supports hole.
+		 */
+		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+			struct iov_iter iter;
+			loff_t offset = start + done;
+			size_t count = erofs_fscache_calc_len(&map, len, done);
+
+			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages, offset, count);
+			iov_iter_zero(count, &iter);
+
+			erofs_fscache_unlock_folios(rac, count);
+			ret = count;
+			continue;
+		}
+
+		ret = erofs_fscache_get_map(&map, sb);
+		if (ret)
+			return;
+
+		/* Read-ahead Inline */
+		if (map.m_flags & EROFS_MAP_META) {
+			struct folio *folio = readahead_folio(rac);
+
+			ret = erofs_fscache_readpage_inline(folio, &map);
+			if (!ret) {
+				folio_mark_uptodate(folio);
+				ret = folio_size(folio);
+			}
+
+			folio_unlock(folio);
+			continue;
+		}
+
+		/* Read-ahead No-inline */
+		if (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
+		    vi->datalayout == EROFS_INODE_FLAT_INLINE ||
+		    vi->datalayout == EROFS_INODE_CHUNK_BASED) {
+			struct fscache_cookie *cookie = map.m_fscache->cookie;
+			loff_t offset = start + done;
+			size_t count = erofs_fscache_calc_len(&map, len, done);
+			loff_t pstart = map.m_pa + (map.o_la - map.m_la);
+
+			ret = erofs_fscache_read_folios(cookie, rac->mapping,
+					offset, count, pstart);
+			if (!ret) {
+				erofs_fscache_unlock_folios(rac, count);
+				ret = count;
+			}
+		} else {
+			DBG_BUGON(1);
+			return;
+		}
+	} while (ret > 0 && ((done += ret) < len));
+}
+
 static const struct address_space_operations erofs_fscache_blob_aops = {
 	.readpage = erofs_fscache_readpage_blob,
 };
 
 const struct address_space_operations erofs_fscache_access_aops = {
 	.readpage = erofs_fscache_readpage,
+	.readahead = erofs_fscache_readahead,
 };
 
 /*
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 9a6f35e0c22b..8ac400581784 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -619,6 +619,10 @@  static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
 			return PTR_ERR(bootstrap);
 
 		sbi->bootstrap = bootstrap;
+
+		err = super_setup_bdi(sb);
+		if (err)
+			return err;
 	}
 
 	err = erofs_read_superblock(sb);