
[RFC,v2,05/14] readahead: align index to mapping_min_order in ondemand_ra and force_ra

Message ID 20240213093713.1753368-6-kernel@pankajraghav.com (mailing list archive)
State New
Series enable bs > ps in XFS

Commit Message

Pankaj Raghav (Samsung) Feb. 13, 2024, 9:37 a.m. UTC
From: Luis Chamberlain <mcgrof@kernel.org>

Align the ra->start and ra->size to mapping_min_order in
ondemand_readahead(), and align the index to mapping_min_order in
force_page_cache_ra(). This will ensure that the folios allocated for
readahead that are added to the page cache are aligned to
mapping_min_order.

Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
---
 mm/readahead.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 40 insertions(+), 8 deletions(-)
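
For illustration only (not part of the patch): a minimal sketch of what
aligning the readahead start index to mapping_min_order amounts to, using the
mapping_min_folio_nrpages() helper this series introduces; the numeric values
are hypothetical.

	/* e.g. 16KiB min folio size on 4KiB pages -> min_nrpages == 4 */
	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
	pgoff_t index = readahead_index(ractl);		/* say, 10 */
	pgoff_t aligned = round_down(index, min_nrpages);	/* -> 8 */

	/* widen the request so the pages originally asked for are still read */
	nr_to_read += index - aligned;
	index = aligned;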

Comments

Hannes Reinecke Feb. 13, 2024, 3 p.m. UTC | #1
On 2/13/24 10:37, Pankaj Raghav (Samsung) wrote:
> From: Luis Chamberlain <mcgrof@kernel.org>
> 
> Align the ra->start and ra->size to mapping_min_order in
> ondemand_readahead(), and align the index to mapping_min_order in
> force_page_cache_ra(). This will ensure that the folios allocated for
> readahead that are added to the page cache are aligned to
> mapping_min_order.
> 
> Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
> Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
> ---
>   mm/readahead.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
>   1 file changed, 40 insertions(+), 8 deletions(-)
> 
> diff --git a/mm/readahead.c b/mm/readahead.c
> index 4fa7d0e65706..5e1ec7705c78 100644
> --- a/mm/readahead.c
> +++ b/mm/readahead.c
> @@ -315,6 +315,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
>   	struct file_ra_state *ra = ractl->ra;
>   	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
>   	unsigned long max_pages, index;
> +	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
>   
>   	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
>   		return;
> @@ -324,6 +325,13 @@ void force_page_cache_ra(struct readahead_control *ractl,
>   	 * be up to the optimal hardware IO size
>   	 */
>   	index = readahead_index(ractl);
> +	if (!IS_ALIGNED(index, min_nrpages)) {
> +		unsigned long old_index = index;
> +
> +		index = round_down(index, min_nrpages);
> +		nr_to_read += (old_index - index);
> +	}
> +
>   	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
>   	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
>   	while (nr_to_read) {
> @@ -332,6 +340,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
>   		if (this_chunk > nr_to_read)
>   			this_chunk = nr_to_read;
>   		ractl->_index = index;
> +		VM_BUG_ON(!IS_ALIGNED(index, min_nrpages));
>   		do_page_cache_ra(ractl, this_chunk, 0);
>   
>   		index += this_chunk;
> @@ -344,11 +353,20 @@ void force_page_cache_ra(struct readahead_control *ractl,
>    * for small size, x 4 for medium, and x 2 for large
>    * for 128k (32 page) max ra
>    * 1-2 page = 16k, 3-4 page 32k, 5-8 page = 64k, > 8 page = 128k initial
> + *
> + * For higher order address space requirements we ensure no initial reads
> + * are ever less than the min number of pages required.
> + *
> + * We *always* cap the max io size allowed by the device.
>    */
> -static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
> +static unsigned long get_init_ra_size(unsigned long size,
> +				      unsigned int min_nrpages,
> +				      unsigned long max)
>   {
>   	unsigned long newsize = roundup_pow_of_two(size);
>   
> +	newsize = max_t(unsigned long, newsize, min_nrpages);
> +
>   	if (newsize <= max / 32)
>   		newsize = newsize * 4;
>   	else if (newsize <= max / 4)
> @@ -356,6 +374,8 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
>   	else
>   		newsize = max;
>   
> +	VM_BUG_ON(newsize & (min_nrpages - 1));
> +
>   	return newsize;
>   }
>   
> @@ -364,14 +384,16 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
>    *  return it as the new window size.
>    */
>   static unsigned long get_next_ra_size(struct file_ra_state *ra,
> +				      unsigned int min_nrpages,
>   				      unsigned long max)
>   {
> -	unsigned long cur = ra->size;
> +	unsigned long cur = max(ra->size, min_nrpages);
>   
>   	if (cur < max / 16)
>   		return 4 * cur;
>   	if (cur <= max / 2)
>   		return 2 * cur;
> +
>   	return max;
>   }
>   
> @@ -561,7 +583,11 @@ static void ondemand_readahead(struct readahead_control *ractl,
>   	unsigned long add_pages;
>   	pgoff_t index = readahead_index(ractl);
>   	pgoff_t expected, prev_index;
> -	unsigned int order = folio ? folio_order(folio) : 0;
> +	unsigned int min_order = mapping_min_folio_order(ractl->mapping);
> +	unsigned int min_nrpages = mapping_min_folio_nrpages(ractl->mapping);
> +	unsigned int order = folio ? folio_order(folio) : min_order;
> +
> +	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
>   
>   	/*
>   	 * If the request exceeds the readahead window, allow the read to
> @@ -583,8 +609,8 @@ static void ondemand_readahead(struct readahead_control *ractl,
>   	expected = round_down(ra->start + ra->size - ra->async_size,
>   			1UL << order);
>   	if (index == expected || index == (ra->start + ra->size)) {
> -		ra->start += ra->size;
> -		ra->size = get_next_ra_size(ra, max_pages);
> +		ra->start += round_down(ra->size, min_nrpages);
> +		ra->size = get_next_ra_size(ra, min_nrpages, max_pages);
>   		ra->async_size = ra->size;
>   		goto readit;
>   	}
> @@ -603,13 +629,18 @@ static void ondemand_readahead(struct readahead_control *ractl,
>   				max_pages);
>   		rcu_read_unlock();
>   
> +		start = round_down(start, min_nrpages);
> +
> +		VM_BUG_ON(folio->index & (folio_nr_pages(folio) - 1));
> +
>   		if (!start || start - index > max_pages)
>   			return;
>   
>   		ra->start = start;
>   		ra->size = start - index;	/* old async_size */
> +

Stale whitespace.

>   		ra->size += req_size;
> -		ra->size = get_next_ra_size(ra, max_pages);
> +		ra->size = get_next_ra_size(ra, min_nrpages, max_pages);
>   		ra->async_size = ra->size;
>   		goto readit;
>   	}
> @@ -646,7 +677,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
>   
>   initial_readahead:
>   	ra->start = index;
> -	ra->size = get_init_ra_size(req_size, max_pages);
> +	ra->size = get_init_ra_size(req_size, min_nrpages, max_pages);
>   	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
>   
>   readit:
> @@ -657,7 +688,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
>   	 * Take care of maximum IO pages as above.
>   	 */
>   	if (index == ra->start && ra->size == ra->async_size) {
> -		add_pages = get_next_ra_size(ra, max_pages);
> +		add_pages = get_next_ra_size(ra, min_nrpages, max_pages);
>   		if (ra->size + add_pages <= max_pages) {
>   			ra->async_size = add_pages;
>   			ra->size += add_pages;
> @@ -668,6 +699,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
>   	}
>   
>   	ractl->_index = ra->start;
> +	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
>   	page_cache_ra_order(ractl, ra, order);
>   }
>   
Otherwise looks good.

Cheers,

Hannes
Darrick J. Wong Feb. 13, 2024, 4:46 p.m. UTC | #2
On Tue, Feb 13, 2024 at 10:37:04AM +0100, Pankaj Raghav (Samsung) wrote:
> From: Luis Chamberlain <mcgrof@kernel.org>
> 
> Align the ra->start and ra->size to mapping_min_order in
> ondemand_readahead(), and align the index to mapping_min_order in
> force_page_cache_ra(). This will ensure that the folios allocated for
> readahead that are added to the page cache are aligned to
> mapping_min_order.
> 
> Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
> Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>

Acked-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  mm/readahead.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 40 insertions(+), 8 deletions(-)
> 
> diff --git a/mm/readahead.c b/mm/readahead.c
> index 4fa7d0e65706..5e1ec7705c78 100644
> --- a/mm/readahead.c
> +++ b/mm/readahead.c
> @@ -315,6 +315,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
>  	struct file_ra_state *ra = ractl->ra;
>  	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
>  	unsigned long max_pages, index;
> +	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
>  
>  	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
>  		return;
> @@ -324,6 +325,13 @@ void force_page_cache_ra(struct readahead_control *ractl,
>  	 * be up to the optimal hardware IO size
>  	 */
>  	index = readahead_index(ractl);
> +	if (!IS_ALIGNED(index, min_nrpages)) {
> +		unsigned long old_index = index;
> +
> +		index = round_down(index, min_nrpages);
> +		nr_to_read += (old_index - index);
> +	}
> +
>  	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
>  	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
>  	while (nr_to_read) {
> @@ -332,6 +340,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
>  		if (this_chunk > nr_to_read)
>  			this_chunk = nr_to_read;
>  		ractl->_index = index;
> +		VM_BUG_ON(!IS_ALIGNED(index, min_nrpages));
>  		do_page_cache_ra(ractl, this_chunk, 0);
>  
>  		index += this_chunk;
> @@ -344,11 +353,20 @@ void force_page_cache_ra(struct readahead_control *ractl,
>   * for small size, x 4 for medium, and x 2 for large
>   * for 128k (32 page) max ra
>   * 1-2 page = 16k, 3-4 page 32k, 5-8 page = 64k, > 8 page = 128k initial
> + *
> + * For higher order address space requirements we ensure no initial reads
> + * are ever less than the min number of pages required.
> + *
> + * We *always* cap the max io size allowed by the device.
>   */
> -static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
> +static unsigned long get_init_ra_size(unsigned long size,
> +				      unsigned int min_nrpages,
> +				      unsigned long max)
>  {
>  	unsigned long newsize = roundup_pow_of_two(size);
>  
> +	newsize = max_t(unsigned long, newsize, min_nrpages);
> +
>  	if (newsize <= max / 32)
>  		newsize = newsize * 4;
>  	else if (newsize <= max / 4)
> @@ -356,6 +374,8 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
>  	else
>  		newsize = max;
>  
> +	VM_BUG_ON(newsize & (min_nrpages - 1));
> +
>  	return newsize;
>  }
>  
> @@ -364,14 +384,16 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
>   *  return it as the new window size.
>   */
>  static unsigned long get_next_ra_size(struct file_ra_state *ra,
> +				      unsigned int min_nrpages,
>  				      unsigned long max)
>  {
> -	unsigned long cur = ra->size;
> +	unsigned long cur = max(ra->size, min_nrpages);
>  
>  	if (cur < max / 16)
>  		return 4 * cur;
>  	if (cur <= max / 2)
>  		return 2 * cur;
> +
>  	return max;
>  }
>  
> @@ -561,7 +583,11 @@ static void ondemand_readahead(struct readahead_control *ractl,
>  	unsigned long add_pages;
>  	pgoff_t index = readahead_index(ractl);
>  	pgoff_t expected, prev_index;
> -	unsigned int order = folio ? folio_order(folio) : 0;
> +	unsigned int min_order = mapping_min_folio_order(ractl->mapping);
> +	unsigned int min_nrpages = mapping_min_folio_nrpages(ractl->mapping);
> +	unsigned int order = folio ? folio_order(folio) : min_order;
> +
> +	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
>  
>  	/*
>  	 * If the request exceeds the readahead window, allow the read to
> @@ -583,8 +609,8 @@ static void ondemand_readahead(struct readahead_control *ractl,
>  	expected = round_down(ra->start + ra->size - ra->async_size,
>  			1UL << order);
>  	if (index == expected || index == (ra->start + ra->size)) {
> -		ra->start += ra->size;
> -		ra->size = get_next_ra_size(ra, max_pages);
> +		ra->start += round_down(ra->size, min_nrpages);
> +		ra->size = get_next_ra_size(ra, min_nrpages, max_pages);
>  		ra->async_size = ra->size;
>  		goto readit;
>  	}
> @@ -603,13 +629,18 @@ static void ondemand_readahead(struct readahead_control *ractl,
>  				max_pages);
>  		rcu_read_unlock();
>  
> +		start = round_down(start, min_nrpages);
> +
> +		VM_BUG_ON(folio->index & (folio_nr_pages(folio) - 1));
> +
>  		if (!start || start - index > max_pages)
>  			return;
>  
>  		ra->start = start;
>  		ra->size = start - index;	/* old async_size */
> +
>  		ra->size += req_size;
> -		ra->size = get_next_ra_size(ra, max_pages);
> +		ra->size = get_next_ra_size(ra, min_nrpages, max_pages);
>  		ra->async_size = ra->size;
>  		goto readit;
>  	}
> @@ -646,7 +677,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
>  
>  initial_readahead:
>  	ra->start = index;
> -	ra->size = get_init_ra_size(req_size, max_pages);
> +	ra->size = get_init_ra_size(req_size, min_nrpages, max_pages);
>  	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
>  
>  readit:
> @@ -657,7 +688,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
>  	 * Take care of maximum IO pages as above.
>  	 */
>  	if (index == ra->start && ra->size == ra->async_size) {
> -		add_pages = get_next_ra_size(ra, max_pages);
> +		add_pages = get_next_ra_size(ra, min_nrpages, max_pages);
>  		if (ra->size + add_pages <= max_pages) {
>  			ra->async_size = add_pages;
>  			ra->size += add_pages;
> @@ -668,6 +699,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
>  	}
>  
>  	ractl->_index = ra->start;
> +	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
>  	page_cache_ra_order(ractl, ra, order);
>  }
>  
> -- 
> 2.43.0
> 
>
Dave Chinner Feb. 13, 2024, 10:29 p.m. UTC | #3
On Tue, Feb 13, 2024 at 10:37:04AM +0100, Pankaj Raghav (Samsung) wrote:
> From: Luis Chamberlain <mcgrof@kernel.org>
> 
> Align the ra->start and ra->size to mapping_min_order in
> ondemand_readahead(), and align the index to mapping_min_order in
> force_page_cache_ra(). This will ensure that the folios allocated for
> readahead that are added to the page cache are aligned to
> mapping_min_order.
> 
> Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
> Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
> ---
>  mm/readahead.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 40 insertions(+), 8 deletions(-)
> 
> diff --git a/mm/readahead.c b/mm/readahead.c
> index 4fa7d0e65706..5e1ec7705c78 100644
> --- a/mm/readahead.c
> +++ b/mm/readahead.c
> @@ -315,6 +315,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
>  	struct file_ra_state *ra = ractl->ra;
>  	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
>  	unsigned long max_pages, index;
> +	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
>  
>  	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
>  		return;
> @@ -324,6 +325,13 @@ void force_page_cache_ra(struct readahead_control *ractl,
>  	 * be up to the optimal hardware IO size
>  	 */
>  	index = readahead_index(ractl);
> +	if (!IS_ALIGNED(index, min_nrpages)) {
> +		unsigned long old_index = index;
> +
> +		index = round_down(index, min_nrpages);
> +		nr_to_read += (old_index - index);
> +	}

	new_index = mapping_align_start_index(mapping, index);
	if (new_index != index) {
		nr_to_read += index - new_index;
		index = new_index;
	}

> +
>  	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
>  	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);

This needs to have a size of at least the minimum folio order size
so readahead can fill entire folios, not get neutered to the maximum
IO size the underlying storage supports.
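
(For illustration, one possible shape of that clamp, assuming the min_nrpages
variable the patch already adds to force_page_cache_ra():)

	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	/* never shrink the request below one minimum-order folio's worth */
	nr_to_read = max_t(unsigned long, nr_to_read, min_nrpages);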

>  	while (nr_to_read) {
> @@ -332,6 +340,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
>  		if (this_chunk > nr_to_read)
>  			this_chunk = nr_to_read;
>  		ractl->_index = index;
> +		VM_BUG_ON(!IS_ALIGNED(index, min_nrpages));
>  		do_page_cache_ra(ractl, this_chunk, 0);
>  
>  		index += this_chunk;
> @@ -344,11 +353,20 @@ void force_page_cache_ra(struct readahead_control *ractl,
>   * for small size, x 4 for medium, and x 2 for large
>   * for 128k (32 page) max ra
>   * 1-2 page = 16k, 3-4 page 32k, 5-8 page = 64k, > 8 page = 128k initial
> + *
> + * For higher order address space requirements we ensure no initial reads
> + * are ever less than the min number of pages required.
> + *
> + * We *always* cap the max io size allowed by the device.
>   */
> -static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
> +static unsigned long get_init_ra_size(unsigned long size,
> +				      unsigned int min_nrpages,
> +				      unsigned long max)
>  {
>  	unsigned long newsize = roundup_pow_of_two(size);
>  
> +	newsize = max_t(unsigned long, newsize, min_nrpages);

This really doesn't need to care about min_nrpages. That rounding
can be done in the caller when the new size is returned.

>  	if (newsize <= max / 32)
>  		newsize = newsize * 4;
>  	else if (newsize <= max / 4)
> @@ -356,6 +374,8 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
>  	else
>  		newsize = max;
>  
> +	VM_BUG_ON(newsize & (min_nrpages - 1));
> +
>  	return newsize;
>  }
>  
> @@ -364,14 +384,16 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
>   *  return it as the new window size.
>   */
>  static unsigned long get_next_ra_size(struct file_ra_state *ra,
> +				      unsigned int min_nrpages,
>  				      unsigned long max)
>  {
> -	unsigned long cur = ra->size;
> +	unsigned long cur = max(ra->size, min_nrpages);
>  
>  	if (cur < max / 16)
>  		return 4 * cur;
>  	if (cur <= max / 2)
>  		return 2 * cur;
> +
>  	return max;

Ditto.

>  }
>  
> @@ -561,7 +583,11 @@ static void ondemand_readahead(struct readahead_control *ractl,
>  	unsigned long add_pages;
>  	pgoff_t index = readahead_index(ractl);
>  	pgoff_t expected, prev_index;
> -	unsigned int order = folio ? folio_order(folio) : 0;
> +	unsigned int min_order = mapping_min_folio_order(ractl->mapping);
> +	unsigned int min_nrpages = mapping_min_folio_nrpages(ractl->mapping);
> +	unsigned int order = folio ? folio_order(folio) : min_order;

Huh? If we have a folio, then the order is whatever that folio is,
otherwise we use min_order. What if the folio is larger than
min_order? Doesn't that mean that this:

> @@ -583,8 +609,8 @@ static void ondemand_readahead(struct readahead_control *ractl,
>  	expected = round_down(ra->start + ra->size - ra->async_size,
>  			1UL << order);
>  	if (index == expected || index == (ra->start + ra->size)) {
> -		ra->start += ra->size;
> -		ra->size = get_next_ra_size(ra, max_pages);
> +		ra->start += round_down(ra->size, min_nrpages);
> +		ra->size = get_next_ra_size(ra, min_nrpages, max_pages);

may set up an incorrect readahead range because the folio order is
larger than min_order?

>  		ra->async_size = ra->size;
>  		goto readit;
>  	}
> @@ -603,13 +629,18 @@ static void ondemand_readahead(struct readahead_control *ractl,
>  				max_pages);
>  		rcu_read_unlock();
>  
> +		start = round_down(start, min_nrpages);

		start = mapping_align_start_index(mapping, start);
> +
> +		VM_BUG_ON(folio->index & (folio_nr_pages(folio) - 1));
> +
>  		if (!start || start - index > max_pages)
>  			return;
>  
>  		ra->start = start;
>  		ra->size = start - index;	/* old async_size */
> +
>  		ra->size += req_size;
> -		ra->size = get_next_ra_size(ra, max_pages);
> +		ra->size = get_next_ra_size(ra, min_nrpages, max_pages);

		ra->size = max(min_nrpages, get_next_ra_size(ra, max_pages));

>  		ra->async_size = ra->size;
>  		goto readit;
>  	}
> @@ -646,7 +677,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
>  
>  initial_readahead:
>  	ra->start = index;
> -	ra->size = get_init_ra_size(req_size, max_pages);
> +	ra->size = get_init_ra_size(req_size, min_nrpages, max_pages);

	ra->size = max(min_nrpages, get_init_ra_size(req_size, max_pages));

-Dave.
Pankaj Raghav (Samsung) Feb. 14, 2024, 3:10 p.m. UTC | #4
> > @@ -324,6 +325,13 @@ void force_page_cache_ra(struct readahead_control *ractl,
> >  	 * be up to the optimal hardware IO size
> >  	 */
> >  	index = readahead_index(ractl);
> > +	if (!IS_ALIGNED(index, min_nrpages)) {
> > +		unsigned long old_index = index;
> > +
> > +		index = round_down(index, min_nrpages);
> > +		nr_to_read += (old_index - index);
> > +	}
> 
> 	new_index = mapping_align_start_index(mapping, index);
> 	if (new_index != index) {
> 		nr_to_read += index - new_index;
> 		index = new_index;
> 	}

Looks good.

> > +
> >  	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
> >  	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
> 
> This needs to have a size of at least the minimum folio order size
> so readahead can fill entire folios, not get neutered to the maximum
> IO size the underlying storage supports.

So something like:

> >  	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
> >  	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
nr_to_read = max(nr_to_read, min_nrpages);

> 
> > + * For higher order address space requirements we ensure no initial reads
> > + * are ever less than the min number of pages required.
> > + *
> > + * We *always* cap the max io size allowed by the device.
> >   */
> > -static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
> > +static unsigned long get_init_ra_size(unsigned long size,
> > +				      unsigned int min_nrpages,
> > +				      unsigned long max)
> >  {
> >  	unsigned long newsize = roundup_pow_of_two(size);
> >  
> > +	newsize = max_t(unsigned long, newsize, min_nrpages);
> 
> This really doesn't need to care about min_nrpages. That rounding
> can be done in the caller when the new size is returned.

Sounds good.

> 
> >  	if (newsize <= max / 32)
> >  		newsize = newsize * 4;
> 
> >  
> >  
> > @@ -561,7 +583,11 @@ static void ondemand_readahead(struct readahead_control *ractl,
> >  	unsigned long add_pages;
> >  	pgoff_t index = readahead_index(ractl);
> >  	pgoff_t expected, prev_index;
> > -	unsigned int order = folio ? folio_order(folio) : 0;
> > +	unsigned int min_order = mapping_min_folio_order(ractl->mapping);
> > +	unsigned int min_nrpages = mapping_min_folio_nrpages(ractl->mapping);
> > +	unsigned int order = folio ? folio_order(folio) : min_order;
> 
> Huh? If we have a folio, then the order is whatever that folio is,
> otherwise we use min_order. What if the folio is larger than
> min_order? Doesn't that mean that this:
> 
> > @@ -583,8 +609,8 @@ static void ondemand_readahead(struct readahead_control *ractl,
> >  	expected = round_down(ra->start + ra->size - ra->async_size,
> >  			1UL << order);
> >  	if (index == expected || index == (ra->start + ra->size)) {
> > -		ra->start += ra->size;
> > -		ra->size = get_next_ra_size(ra, max_pages);
> > +		ra->start += round_down(ra->size, min_nrpages);
> > +		ra->size = get_next_ra_size(ra, min_nrpages, max_pages);
> 
> may set up the incorrect readahead range because the folio order is
> larger than min_nrpages?

Hmm... So I think we should just increment ra->start by ra->size, and
make sure to round the new size we get from get_next_ra_size() to
min_nrpages. Then we will not disturb the readahead range and always
increase the range in multiples of min_nrpages:

ra->start += ra->size;
ra->size = round_up(get_next_ra_size(ra, max_pages), min_nrpages);

> 
> >  		ra->async_size = ra->size;
> >  		goto readit;
> >  	}
> > @@ -603,13 +629,18 @@ static void ondemand_readahead(struct readahead_control *ractl,
> >  				max_pages);
> >  		rcu_read_unlock();
> >  
> > +		start = round_down(start, min_nrpages);
> 
> 		start = mapping_align_start_index(mapping, start);
> > +
> > +		VM_BUG_ON(folio->index & (folio_nr_pages(folio) - 1));
> > +
> >  		if (!start || start - index > max_pages)
> >  			return;
> >  
> >  		ra->start = start;
> >  		ra->size = start - index;	/* old async_size */
> > +
> >  		ra->size += req_size;
> > -		ra->size = get_next_ra_size(ra, max_pages);
> > +		ra->size = get_next_ra_size(ra, min_nrpages, max_pages);
> 
> 		ra->size = max(min_nrpages, get_next_ra_size(ra, max_pages));

If this is a round_up of the size instead of a max operation, the readahead
window always stays aligned to min_nrpages: ra->start is already aligned, and
ra->size becomes a multiple of min_nrpages. See my reasoning in the previous
comment.
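
As a rough sketch of the difference (illustration only; min_nrpages = 4 and a
10-page window returned by get_next_ra_size() are hypothetical numbers):

	/* max(): the window may stay unaligned, e.g. max(4, 10) == 10 */
	ra->size = max(min_nrpages, get_next_ra_size(ra, max_pages));

	/*
	 * round_up(): round_up(10, 4) == 12, so an aligned ra->start plus
	 * ra->size lands on the next min_nrpages boundary.
	 */
	ra->size = round_up(get_next_ra_size(ra, max_pages), min_nrpages);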

--
Pankaj

Patch

diff --git a/mm/readahead.c b/mm/readahead.c
index 4fa7d0e65706..5e1ec7705c78 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -315,6 +315,7 @@  void force_page_cache_ra(struct readahead_control *ractl,
 	struct file_ra_state *ra = ractl->ra;
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages, index;
+	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
 
 	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
 		return;
@@ -324,6 +325,13 @@  void force_page_cache_ra(struct readahead_control *ractl,
 	 * be up to the optimal hardware IO size
 	 */
 	index = readahead_index(ractl);
+	if (!IS_ALIGNED(index, min_nrpages)) {
+		unsigned long old_index = index;
+
+		index = round_down(index, min_nrpages);
+		nr_to_read += (old_index - index);
+	}
+
 	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
 	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
 	while (nr_to_read) {
@@ -332,6 +340,7 @@  void force_page_cache_ra(struct readahead_control *ractl,
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
 		ractl->_index = index;
+		VM_BUG_ON(!IS_ALIGNED(index, min_nrpages));
 		do_page_cache_ra(ractl, this_chunk, 0);
 
 		index += this_chunk;
@@ -344,11 +353,20 @@  void force_page_cache_ra(struct readahead_control *ractl,
  * for small size, x 4 for medium, and x 2 for large
  * for 128k (32 page) max ra
  * 1-2 page = 16k, 3-4 page 32k, 5-8 page = 64k, > 8 page = 128k initial
+ *
+ * For higher order address space requirements we ensure no initial reads
+ * are ever less than the min number of pages required.
+ *
+ * We *always* cap the max io size allowed by the device.
  */
-static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
+static unsigned long get_init_ra_size(unsigned long size,
+				      unsigned int min_nrpages,
+				      unsigned long max)
 {
 	unsigned long newsize = roundup_pow_of_two(size);
 
+	newsize = max_t(unsigned long, newsize, min_nrpages);
+
 	if (newsize <= max / 32)
 		newsize = newsize * 4;
 	else if (newsize <= max / 4)
@@ -356,6 +374,8 @@  static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
 	else
 		newsize = max;
 
+	VM_BUG_ON(newsize & (min_nrpages - 1));
+
 	return newsize;
 }
 
@@ -364,14 +384,16 @@  static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
  *  return it as the new window size.
  */
 static unsigned long get_next_ra_size(struct file_ra_state *ra,
+				      unsigned int min_nrpages,
 				      unsigned long max)
 {
-	unsigned long cur = ra->size;
+	unsigned long cur = max(ra->size, min_nrpages);
 
 	if (cur < max / 16)
 		return 4 * cur;
 	if (cur <= max / 2)
 		return 2 * cur;
+
 	return max;
 }
 
@@ -561,7 +583,11 @@  static void ondemand_readahead(struct readahead_control *ractl,
 	unsigned long add_pages;
 	pgoff_t index = readahead_index(ractl);
 	pgoff_t expected, prev_index;
-	unsigned int order = folio ? folio_order(folio) : 0;
+	unsigned int min_order = mapping_min_folio_order(ractl->mapping);
+	unsigned int min_nrpages = mapping_min_folio_nrpages(ractl->mapping);
+	unsigned int order = folio ? folio_order(folio) : min_order;
+
+	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
 
 	/*
 	 * If the request exceeds the readahead window, allow the read to
@@ -583,8 +609,8 @@  static void ondemand_readahead(struct readahead_control *ractl,
 	expected = round_down(ra->start + ra->size - ra->async_size,
 			1UL << order);
 	if (index == expected || index == (ra->start + ra->size)) {
-		ra->start += ra->size;
-		ra->size = get_next_ra_size(ra, max_pages);
+		ra->start += round_down(ra->size, min_nrpages);
+		ra->size = get_next_ra_size(ra, min_nrpages, max_pages);
 		ra->async_size = ra->size;
 		goto readit;
 	}
@@ -603,13 +629,18 @@  static void ondemand_readahead(struct readahead_control *ractl,
 				max_pages);
 		rcu_read_unlock();
 
+		start = round_down(start, min_nrpages);
+
+		VM_BUG_ON(folio->index & (folio_nr_pages(folio) - 1));
+
 		if (!start || start - index > max_pages)
 			return;
 
 		ra->start = start;
 		ra->size = start - index;	/* old async_size */
+
 		ra->size += req_size;
-		ra->size = get_next_ra_size(ra, max_pages);
+		ra->size = get_next_ra_size(ra, min_nrpages, max_pages);
 		ra->async_size = ra->size;
 		goto readit;
 	}
@@ -646,7 +677,7 @@  static void ondemand_readahead(struct readahead_control *ractl,
 
 initial_readahead:
 	ra->start = index;
-	ra->size = get_init_ra_size(req_size, max_pages);
+	ra->size = get_init_ra_size(req_size, min_nrpages, max_pages);
 	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
 
 readit:
@@ -657,7 +688,7 @@  static void ondemand_readahead(struct readahead_control *ractl,
 	 * Take care of maximum IO pages as above.
 	 */
 	if (index == ra->start && ra->size == ra->async_size) {
-		add_pages = get_next_ra_size(ra, max_pages);
+		add_pages = get_next_ra_size(ra, min_nrpages, max_pages);
 		if (ra->size + add_pages <= max_pages) {
 			ra->async_size = add_pages;
 			ra->size += add_pages;
@@ -668,6 +699,7 @@  static void ondemand_readahead(struct readahead_control *ractl,
 	}
 
 	ractl->_index = ra->start;
+	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
 	page_cache_ra_order(ractl, ra, order);
 }