[v4,1/2] fscache,cachefiles: add prepare_ondemand_read() callback

Message ID 20221117053017.21074-2-jefflexu@linux.alibaba.com (mailing list archive)
State New, archived
Series fscache,cachefiles: add prepare_ondemand_read() interface

Commit Message

Jingbo Xu Nov. 17, 2022, 5:30 a.m. UTC
Add a prepare_ondemand_read() callback dedicated to the on-demand read
scenario, so that callers in this scenario can be decoupled from
netfs_io_subrequest.

The original cachefiles_prepare_read() is refactored into a generic
routine that accepts a parameter list instead of a netfs_io_subrequest.
There is no logic change, except that some debug info retrieved from
netfs_io_subrequest is removed from trace_cachefiles_prep_read().

Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
---
 fs/cachefiles/io.c                | 77 ++++++++++++++++++++-----------
 include/linux/netfs.h             |  8 ++++
 include/trace/events/cachefiles.h | 27 ++++++-----
 3 files changed, 71 insertions(+), 41 deletions(-)
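
For context, a minimal sketch of how an on-demand caller might drive the new
callback (illustrative only and not part of this series; it assumes a
netfs_cache_resources that has already been set up for reading, and the
helper name is hypothetical):

static enum netfs_io_source
example_ondemand_prepare(struct netfs_cache_resources *cres,
			 loff_t start, size_t *len, loff_t i_size)
{
	unsigned long flags = 0;

	/* Request on-demand semantics; the callee may clear the bit on retry. */
	__set_bit(NETFS_SREQ_ONDEMAND, &flags);

	/*
	 * *len may be shortened to a cached/uncached boundary; the return
	 * value says where the data should come from (cache, server, zeroes,
	 * or NETFS_INVALID_READ on failure).
	 */
	return cres->ops->prepare_ondemand_read(cres, start, len, i_size, &flags);
}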

Comments

Jeff Layton Nov. 17, 2022, 11:05 a.m. UTC | #1
On Thu, 2022-11-17 at 13:30 +0800, Jingbo Xu wrote:
> [full patch quoted; see the Patch section below]

Reviewed-by: Jeff Layton <jlayton@kernel.org>
Jingbo Xu Nov. 23, 2022, 3:29 a.m. UTC | #2
Hi David,

What do you think of this patch?  It's already -rc6 and we'd like to
enable erofs large folios for Linux 6.2.

If possible, could you review or ack this patch?  Many thanks.


Thanks,
Jingbo Xu


On 11/17/22 1:30 PM, Jingbo Xu wrote:
> [full patch quoted; see the Patch section below]

David Howells Nov. 23, 2022, 4:44 p.m. UTC | #3
Jingbo Xu <jefflexu@linux.alibaba.com> wrote:

> -/*
> - * Prepare a read operation, shortening it to a cached/uncached
> - * boundary as appropriate.
> - */
> -static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
> -						      loff_t i_size)
> +static inline enum netfs_io_source
> +cachefiles_do_prepare_read(struct netfs_cache_resources *cres,
> +			   loff_t start, size_t *_len, loff_t i_size,
> +			   unsigned long *_flags)

That's not exactly what I meant, but I guess it would work as the compiler
would probably inline it into both callers.

> -		      __entry->netfs_inode, __entry->cache_inode)
> +		      __entry->cache_inode)

Can you not lose the netfs_inode number from the tracepoint, please?  Feel
free to display 0 there for your purposes.

David
Jingbo Xu Nov. 24, 2022, 3:19 a.m. UTC | #4
Hi David,

Thanks a lot for the comment.

On 11/24/22 12:44 AM, David Howells wrote:
> Jingbo Xu <jefflexu@linux.alibaba.com> wrote:
> 
>> -/*
>> - * Prepare a read operation, shortening it to a cached/uncached
>> - * boundary as appropriate.
>> - */
>> -static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
>> -						      loff_t i_size)
>> +static inline enum netfs_io_source
>> +cachefiles_do_prepare_read(struct netfs_cache_resources *cres,
>> +			   loff_t start, size_t *_len, loff_t i_size,
>> +			   unsigned long *_flags)
> 
> That's not exactly what I meant, but I guess it would work as the compiler
> would probably inline it into both callers.

Yeah, I just have no better way if we don't want this patch to introduce
another function call.  If we keep cachefiles_prepare_read() untouched,
the on-demand users would need to construct a temporary subrequest on the
stack, and Jeff pointed out that this approach is somewhat fragile and not
robust enough.
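
A rough sketch of that temporary-subrequest alternative, for contrast
(illustrative only, not code from any posted version of the series):

static enum netfs_io_source
fake_subreq_prepare(struct netfs_io_request *rreq, loff_t start,
		    size_t *len, loff_t i_size)
{
	/* rreq->cache_resources must already have been set up for reading. */
	struct netfs_io_subrequest subreq = {
		.rreq  = rreq,
		.start = start,
		.len   = *len,
	};
	enum netfs_io_source source;

	__set_bit(NETFS_SREQ_ONDEMAND, &subreq.flags);
	source = rreq->cache_resources.ops->prepare_read(&subreq, i_size);
	*len = subreq.len;	/* possibly shortened by the cache */

	/*
	 * Fragile: the unmodified callee and its tracepoint also dereference
	 * rreq->inode->i_ino and rreq->debug_id, so every field they happen
	 * to touch has to be faked correctly by the caller.
	 */
	return source;
}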

Anyway, I will keep moving forward in the direction of the current patch
and add the netfs_inode number back to the tracepoint.  I will send v5
soon.

Thanks again for the reply :-)

> 
>> -		      __entry->netfs_inode, __entry->cache_inode)
>> +		      __entry->cache_inode)
> 
> Can you not lose the netfs_inode number from the tracepoint, please?  Feel
> free to display 0 there for your purposes.
> 

Sure.
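
One way to keep the field in v5 (a sketch of the idea only, not the actual
v5 diff): cachefiles_do_prepare_read() could take an extra ino_t that it
forwards to trace_cachefiles_prep_read(), with the on-demand path passing 0
as suggested:

static enum netfs_io_source
cachefiles_prepare_read(struct netfs_io_subrequest *subreq, loff_t i_size)
{
	return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
					  subreq->start, &subreq->len, i_size,
					  &subreq->flags,
					  subreq->rreq->inode->i_ino);
}

static enum netfs_io_source
cachefiles_prepare_ondemand_read(struct netfs_cache_resources *cres,
				 loff_t start, size_t *_len, loff_t i_size,
				 unsigned long *_flags)
{
	/* No netfs inode in the on-demand path, so the tracepoint prints 0. */
	return cachefiles_do_prepare_read(cres, start, _len, i_size, _flags, 0);
}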

Patch

diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 000a28f46e59..13648348d9f9 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -385,38 +385,35 @@  static int cachefiles_write(struct netfs_cache_resources *cres,
 				  term_func, term_func_priv);
 }
 
-/*
- * Prepare a read operation, shortening it to a cached/uncached
- * boundary as appropriate.
- */
-static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
-						      loff_t i_size)
+static inline enum netfs_io_source
+cachefiles_do_prepare_read(struct netfs_cache_resources *cres,
+			   loff_t start, size_t *_len, loff_t i_size,
+			   unsigned long *_flags)
 {
 	enum cachefiles_prepare_read_trace why;
-	struct netfs_io_request *rreq = subreq->rreq;
-	struct netfs_cache_resources *cres = &rreq->cache_resources;
-	struct cachefiles_object *object;
+	struct cachefiles_object *object = NULL;
 	struct cachefiles_cache *cache;
 	struct fscache_cookie *cookie = fscache_cres_cookie(cres);
 	const struct cred *saved_cred;
 	struct file *file = cachefiles_cres_file(cres);
 	enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
+	size_t len = *_len;
 	loff_t off, to;
 	ino_t ino = file ? file_inode(file)->i_ino : 0;
 	int rc;
 
-	_enter("%zx @%llx/%llx", subreq->len, subreq->start, i_size);
+	_enter("%zx @%llx/%llx", len, start, i_size);
 
-	if (subreq->start >= i_size) {
+	if (start >= i_size) {
 		ret = NETFS_FILL_WITH_ZEROES;
 		why = cachefiles_trace_read_after_eof;
 		goto out_no_object;
 	}
 
 	if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
-		__set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
+		__set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
 		why = cachefiles_trace_read_no_data;
-		if (!test_bit(NETFS_SREQ_ONDEMAND, &subreq->flags))
+		if (!test_bit(NETFS_SREQ_ONDEMAND, _flags))
 			goto out_no_object;
 	}
 
@@ -437,7 +434,7 @@  static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
 retry:
 	off = cachefiles_inject_read_error();
 	if (off == 0)
-		off = vfs_llseek(file, subreq->start, SEEK_DATA);
+		off = vfs_llseek(file, start, SEEK_DATA);
 	if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
 		if (off == (loff_t)-ENXIO) {
 			why = cachefiles_trace_read_seek_nxio;
@@ -449,21 +446,22 @@  static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
 		goto out;
 	}
 
-	if (off >= subreq->start + subreq->len) {
+	if (off >= start + len) {
 		why = cachefiles_trace_read_found_hole;
 		goto download_and_store;
 	}
 
-	if (off > subreq->start) {
+	if (off > start) {
 		off = round_up(off, cache->bsize);
-		subreq->len = off - subreq->start;
+		len = off - start;
+		*_len = len;
 		why = cachefiles_trace_read_found_part;
 		goto download_and_store;
 	}
 
 	to = cachefiles_inject_read_error();
 	if (to == 0)
-		to = vfs_llseek(file, subreq->start, SEEK_HOLE);
+		to = vfs_llseek(file, start, SEEK_HOLE);
 	if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
 		trace_cachefiles_io_error(object, file_inode(file), to,
 					  cachefiles_trace_seek_error);
@@ -471,12 +469,13 @@  static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
 		goto out;
 	}
 
-	if (to < subreq->start + subreq->len) {
-		if (subreq->start + subreq->len >= i_size)
+	if (to < start + len) {
+		if (start + len >= i_size)
 			to = round_up(to, cache->bsize);
 		else
 			to = round_down(to, cache->bsize);
-		subreq->len = to - subreq->start;
+		len = to - start;
+		*_len = len;
 	}
 
 	why = cachefiles_trace_read_have_data;
@@ -484,12 +483,11 @@  static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
 	goto out;
 
 download_and_store:
-	__set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
-	if (test_bit(NETFS_SREQ_ONDEMAND, &subreq->flags)) {
-		rc = cachefiles_ondemand_read(object, subreq->start,
-					      subreq->len);
+	__set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
+	if (test_bit(NETFS_SREQ_ONDEMAND, _flags)) {
+		rc = cachefiles_ondemand_read(object, start, len);
 		if (!rc) {
-			__clear_bit(NETFS_SREQ_ONDEMAND, &subreq->flags);
+			__clear_bit(NETFS_SREQ_ONDEMAND, _flags);
 			goto retry;
 		}
 		ret = NETFS_INVALID_READ;
@@ -497,10 +495,34 @@  static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
 out:
 	cachefiles_end_secure(cache, saved_cred);
 out_no_object:
-	trace_cachefiles_prep_read(subreq, ret, why, ino);
+	trace_cachefiles_prep_read(object, start, len, *_flags, ret, why, ino);
 	return ret;
 }
 
+/*
+ * Prepare a read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
+						    loff_t i_size)
+{
+	return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
+					  subreq->start, &subreq->len, i_size,
+					  &subreq->flags);
+}
+
+/*
+ * Prepare an on-demand read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+static enum netfs_io_source
+cachefiles_prepare_ondemand_read(struct netfs_cache_resources *cres,
+				 loff_t start, size_t *_len, loff_t i_size,
+				 unsigned long *_flags)
+{
+	return cachefiles_do_prepare_read(cres, start, _len, i_size, _flags);
+}
+
 /*
  * Prepare for a write to occur.
  */
@@ -621,6 +643,7 @@  static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
 	.write			= cachefiles_write,
 	.prepare_read		= cachefiles_prepare_read,
 	.prepare_write		= cachefiles_prepare_write,
+	.prepare_ondemand_read	= cachefiles_prepare_ondemand_read,
 	.query_occupancy	= cachefiles_query_occupancy,
 };
 
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index f2402ddeafbf..95cc0397f0ee 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -267,6 +267,14 @@  struct netfs_cache_ops {
 			     loff_t *_start, size_t *_len, loff_t i_size,
 			     bool no_space_allocated_yet);
 
+	/* Prepare an on-demand read operation, shortening it to a cached/uncached
+	 * boundary as appropriate.
+	 */
+	enum netfs_io_source (*prepare_ondemand_read)(struct netfs_cache_resources *cres,
+						      loff_t start, size_t *_len,
+						      loff_t i_size,
+						      unsigned long *_flags);
+
 	/* Query the occupancy of the cache in a region, returning where the
 	 * next chunk of data starts and how long it is.
 	 */
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index d8d4d73fe7b6..171c0d7f0bb7 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -428,44 +428,43 @@  TRACE_EVENT(cachefiles_vol_coherency,
 	    );
 
 TRACE_EVENT(cachefiles_prep_read,
-	    TP_PROTO(struct netfs_io_subrequest *sreq,
+	    TP_PROTO(struct cachefiles_object *obj,
+		     loff_t start,
+		     size_t len,
+		     unsigned short flags,
 		     enum netfs_io_source source,
 		     enum cachefiles_prepare_read_trace why,
 		     ino_t cache_inode),
 
-	    TP_ARGS(sreq, source, why, cache_inode),
+	    TP_ARGS(obj, start, len, flags, source, why, cache_inode),
 
 	    TP_STRUCT__entry(
-		    __field(unsigned int,		rreq		)
-		    __field(unsigned short,		index		)
+		    __field(unsigned int,		obj		)
 		    __field(unsigned short,		flags		)
 		    __field(enum netfs_io_source,	source		)
 		    __field(enum cachefiles_prepare_read_trace,	why	)
 		    __field(size_t,			len		)
 		    __field(loff_t,			start		)
-		    __field(unsigned int,		netfs_inode	)
 		    __field(unsigned int,		cache_inode	)
 			     ),
 
 	    TP_fast_assign(
-		    __entry->rreq	= sreq->rreq->debug_id;
-		    __entry->index	= sreq->debug_index;
-		    __entry->flags	= sreq->flags;
+		    __entry->obj	= obj ? obj->debug_id : 0;
+		    __entry->flags	= flags;
 		    __entry->source	= source;
 		    __entry->why	= why;
-		    __entry->len	= sreq->len;
-		    __entry->start	= sreq->start;
-		    __entry->netfs_inode = sreq->rreq->inode->i_ino;
+		    __entry->len	= len;
+		    __entry->start	= start;
 		    __entry->cache_inode = cache_inode;
 			   ),
 
-	    TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx ni=%x B=%x",
-		      __entry->rreq, __entry->index,
+	    TP_printk("o=%08x %s %s f=%02x s=%llx %zx B=%x",
+		      __entry->obj,
 		      __print_symbolic(__entry->source, netfs_sreq_sources),
 		      __print_symbolic(__entry->why, cachefiles_prepare_read_traces),
 		      __entry->flags,
 		      __entry->start, __entry->len,
-		      __entry->netfs_inode, __entry->cache_inode)
+		      __entry->cache_inode)
 	    );
 
 TRACE_EVENT(cachefiles_read,