
[RFC,2/7] filemap: Change mapping_set_folio_min_order() -> mapping_set_folio_orders()

Message ID 20240422143923.3927601-3-john.g.garry@oracle.com (mailing list archive)
State New, archived
Series: buffered block atomic writes

Commit Message

John Garry April 22, 2024, 2:39 p.m. UTC
Borrowed from:

https://lore.kernel.org/linux-fsdevel/20240213093713.1753368-2-kernel@pankajraghav.com/
(credit given in due course)

We will need to use only a single folio order for buffered atomic
writes, so allow both the mapping folio order min and max to be set.

We still have the restriction of not supporting order-1 folios; this
limit will need to be lifted at some stage.

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 fs/xfs/xfs_icache.c     | 10 ++++++----
 include/linux/pagemap.h | 20 +++++++++++++-------
 mm/filemap.c            |  8 +++++++-
 3 files changed, 26 insertions(+), 12 deletions(-)
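
For illustration only: a minimal sketch of how a filesystem might use the
helper added here to pin a mapping to a single folio order for buffered
atomic writes. The wrapper name and the way the order is derived are
assumptions for the example; only mapping_set_folio_orders() comes from
this patch.

#include <linux/pagemap.h>

/*
 * Sketch: force every folio in this mapping to one order by passing the
 * same value as both min and max.  How 'order' is derived from the
 * atomic write unit is not part of this patch.
 */
static void example_set_single_folio_order(struct address_space *mapping,
					   unsigned int order)
{
	/* min == max means the page cache may only use this folio size */
	mapping_set_folio_orders(mapping, order, order);
}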

Comments

Pankaj Raghav (Samsung) April 25, 2024, 2:47 p.m. UTC | #1
On Mon, Apr 22, 2024 at 02:39:18PM +0000, John Garry wrote:
> Borrowed from:
> 
> https://lore.kernel.org/linux-fsdevel/20240213093713.1753368-2-kernel@pankajraghav.com/
> (credit given in due course)
> 
> We will need to use only a single folio order for buffered atomic
> writes, so allow both the mapping folio order min and max to be set.

> 
> We still have the restriction of not supporting order-1 folios; this
> limit will need to be lifted at some stage.

This is already supported upstream for file-backed folios:
commit: 8897277acfef7f70fdecc054073bea2542fc7a1b

> index fc8eb9c94e9c..c22455fa28a1 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -363,9 +363,10 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
>  #endif
>  
>  /*
> - * mapping_set_folio_min_order() - Set the minimum folio order
> + * mapping_set_folio_orders() - Set the minimum and max folio order

In the new series (sorry forgot to CC you), I added a new helper called
mapping_set_folio_order_range() which does something similar to avoid
confusion based on willy's suggestion:
https://lore.kernel.org/linux-xfs/20240425113746.335530-3-kernel@pankajraghav.com/
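
Roughly, the range helper looks like the below (a sketch only; clamping
details may differ from what finally lands, see the link above for the
authoritative version):

static inline void mapping_set_folio_order_range(struct address_space *mapping,
						 unsigned int min,
						 unsigned int max)
{
	/* Clamp both bounds to what the page cache supports */
	if (min > MAX_PAGECACHE_ORDER)
		min = MAX_PAGECACHE_ORDER;
	if (max > MAX_PAGECACHE_ORDER)
		max = MAX_PAGECACHE_ORDER;
	if (max < min)
		max = min;

	mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
			 (min << AS_FOLIO_ORDER_MIN) |
			 (max << AS_FOLIO_ORDER_MAX);
}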

mapping_set_folio_min_order() also sets the max folio order to
MAX_PAGECACHE_ORDER anyway, so there is no need to explicitly call it
here?

>  /**
> @@ -400,7 +406,7 @@ static inline void mapping_set_folio_min_order(struct address_space *mapping,
>   */
>  static inline void mapping_set_large_folios(struct address_space *mapping)
>  {
> -	mapping_set_folio_min_order(mapping, 0);
> +	mapping_set_folio_orders(mapping, 0, MAX_PAGECACHE_ORDER);
>  }
>  
>  static inline unsigned int mapping_max_folio_order(struct address_space *mapping)
> diff --git a/mm/filemap.c b/mm/filemap.c
> index d81530b0aac0..d5effe50ddcb 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -1898,9 +1898,15 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
>  no_page:
>  	if (!folio && (fgp_flags & FGP_CREAT)) {
>  		unsigned int min_order = mapping_min_folio_order(mapping);
> -		unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));
> +		unsigned int max_order = mapping_max_folio_order(mapping);
> +		unsigned int order = FGF_GET_ORDER(fgp_flags);
>  		int err;
>  
> +		if (order > max_order)
> +			order = max_order;
> +		else if (order < min_order)
> +			order = max_order;

order = min_order; ?
--
Pankaj
John Garry April 26, 2024, 8:02 a.m. UTC | #2
On 25/04/2024 15:47, Pankaj Raghav (Samsung) wrote:
> On Mon, Apr 22, 2024 at 02:39:18PM +0000, John Garry wrote:
>> Borrowed from:
>>
>> https://lore.kernel.org/linux-fsdevel/20240213093713.1753368-2-kernel@pankajraghav.com/
>> (credit given in due course)
>>
>> We will need to use only a single folio order for buffered atomic
>> writes, so allow both the mapping folio order min and max to be set.
> 
>>
>> We still have the restriction of not supporting order-1 folios; this
>> limit will need to be lifted at some stage.
> 
> This is already supported upstream for file-backed folios:
> commit: 8897277acfef7f70fdecc054073bea2542fc7a1b

ok

> 
>> index fc8eb9c94e9c..c22455fa28a1 100644
>> --- a/include/linux/pagemap.h
>> +++ b/include/linux/pagemap.h
>> @@ -363,9 +363,10 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
>>   #endif
>>   
>>   /*
>> - * mapping_set_folio_min_order() - Set the minimum folio order
>> + * mapping_set_folio_orders() - Set the minimum and max folio order
> 
> In the new series (sorry forgot to CC you),

no worries, I saw it

> I added a new helper called
> mapping_set_folio_order_range() which does something similar to avoid
> confusion based on willy's suggestion:
> https://lore.kernel.org/linux-xfs/20240425113746.335530-3-kernel@pankajraghav.com/
> 

Fine, I can include that

> mapping_set_folio_min_order() also sets the max folio order to
> MAX_PAGECACHE_ORDER anyway, so there is no need to explicitly call it
> here?
> 

Here mapping_set_folio_min_order() is being replaced with
mapping_set_folio_orders(), so not sure why you mention that.
Regardless, I'll use your mapping_set_folio_order_range().

>>   /**
>> @@ -400,7 +406,7 @@ static inline void mapping_set_folio_min_order(struct address_space *mapping,
>>    */
>>   static inline void mapping_set_large_folios(struct address_space *mapping)
>>   {
>> -	mapping_set_folio_min_order(mapping, 0);
>> +	mapping_set_folio_orders(mapping, 0, MAX_PAGECACHE_ORDER);
>>   }
>>   
>>   static inline unsigned int mapping_max_folio_order(struct address_space *mapping)
>> diff --git a/mm/filemap.c b/mm/filemap.c
>> index d81530b0aac0..d5effe50ddcb 100644
>> --- a/mm/filemap.c
>> +++ b/mm/filemap.c
>> @@ -1898,9 +1898,15 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
>>   no_page:
>>   	if (!folio && (fgp_flags & FGP_CREAT)) {
>>   		unsigned int min_order = mapping_min_folio_order(mapping);
>> -		unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));
>> +		unsigned int max_order = mapping_max_folio_order(mapping);
>> +		unsigned int order = FGF_GET_ORDER(fgp_flags);
>>   		int err;
>>   
>> +		if (order > max_order)
>> +			order = max_order;
>> +		else if (order < min_order)
>> +			order = max_order;
> 
> order = min_order; ?

right
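
With that change applied, the clamp for the next version would read
(just a sketch of the intended logic, not the posted code):

		if (order > max_order)
			order = max_order;
		else if (order < min_order)
			order = min_order;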

Thanks,
John

Patch

diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 8fb5cf0f5a09..6186887bd6ff 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -89,8 +89,9 @@  xfs_inode_alloc(
 	/* VFS doesn't initialise i_mode or i_state! */
 	VFS_I(ip)->i_mode = 0;
 	VFS_I(ip)->i_state = 0;
-	mapping_set_folio_min_order(VFS_I(ip)->i_mapping,
-				    M_IGEO(mp)->min_folio_order);
+	mapping_set_folio_orders(VFS_I(ip)->i_mapping,
+				    M_IGEO(mp)->min_folio_order,
+				    MAX_PAGECACHE_ORDER);
 
 	XFS_STATS_INC(mp, vn_active);
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -325,8 +326,9 @@  xfs_reinit_inode(
 	inode->i_rdev = dev;
 	inode->i_uid = uid;
 	inode->i_gid = gid;
-	mapping_set_folio_min_order(inode->i_mapping,
-				    M_IGEO(mp)->min_folio_order);
+	mapping_set_folio_orders(inode->i_mapping,
+				    M_IGEO(mp)->min_folio_order,
+				    MAX_PAGECACHE_ORDER);
 	return error;
 }
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index fc8eb9c94e9c..c22455fa28a1 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -363,9 +363,10 @@  static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 #endif
 
 /*
- * mapping_set_folio_min_order() - Set the minimum folio order
+ * mapping_set_folio_orders() - Set the minimum and max folio order
  * @mapping: The address_space.
  * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
+ * @max: Maximum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
  *
  * The filesystem should call this function in its inode constructor to
  * indicate which base size of folio the VFS can use to cache the contents
@@ -376,15 +377,20 @@  static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
  * Context: This should not be called while the inode is active as it
  * is non-atomic.
  */
-static inline void mapping_set_folio_min_order(struct address_space *mapping,
-					       unsigned int min)
+
+static inline void mapping_set_folio_orders(struct address_space *mapping,
+					    unsigned int min, unsigned int max)
 {
-	if (min > MAX_PAGECACHE_ORDER)
-		min = MAX_PAGECACHE_ORDER;
+	if (min == 1)
+		min = 2;
+	if (max < min)
+		max = min;
+	if (max > MAX_PAGECACHE_ORDER)
+		max = MAX_PAGECACHE_ORDER;
 
 	mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
 			 (min << AS_FOLIO_ORDER_MIN) |
-			 (MAX_PAGECACHE_ORDER << AS_FOLIO_ORDER_MAX);
+			 (max << AS_FOLIO_ORDER_MAX);
 }
 
 /**
@@ -400,7 +406,7 @@  static inline void mapping_set_folio_min_order(struct address_space *mapping,
  */
 static inline void mapping_set_large_folios(struct address_space *mapping)
 {
-	mapping_set_folio_min_order(mapping, 0);
+	mapping_set_folio_orders(mapping, 0, MAX_PAGECACHE_ORDER);
 }
 
 static inline unsigned int mapping_max_folio_order(struct address_space *mapping)
diff --git a/mm/filemap.c b/mm/filemap.c
index d81530b0aac0..d5effe50ddcb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1898,9 +1898,15 @@  struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 no_page:
 	if (!folio && (fgp_flags & FGP_CREAT)) {
 		unsigned int min_order = mapping_min_folio_order(mapping);
-		unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));
+		unsigned int max_order = mapping_max_folio_order(mapping);
+		unsigned int order = FGF_GET_ORDER(fgp_flags);
 		int err;
 
+		if (order > max_order)
+			order = max_order;
+		else if (order < min_order)
+			order = max_order;
+
 		index = mapping_align_start_index(mapping, index);
 
 		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))