
[v5,8/9] mm: shmem: split large entry if the swapin folio is not large

Message ID: 4a0f12f27c54a62eb4d9ca1265fed3a62531a63e.1723434324.git.baolin.wang@linux.alibaba.com
State New
Series support large folio swap-out and swap-in for shmem

Commit Message

Baolin Wang Aug. 12, 2024, 7:42 a.m. UTC
Now the swap device can only swap in order-0 folios, even though a large
folio may have been swapped out. This requires us to split the large entry
previously saved in the shmem page cache to support swapping in small folios.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 mm/shmem.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 100 insertions(+)

Comments

Hugh Dickins Aug. 25, 2024, 10:31 p.m. UTC | #1
On Mon, 12 Aug 2024, Baolin Wang wrote:

> Now the swap device can only swap-in order 0 folio, even though a large
> folio is swapped out. This requires us to split the large entry previously
> saved in the shmem pagecache to support the swap in of small folios.
> 
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
>  mm/shmem.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 100 insertions(+)
> 
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 345e25425e37..996062dc196b 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1990,6 +1990,81 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
>  	swap_free_nr(swap, nr_pages);
>  }
>  
> +static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
> +				   swp_entry_t swap, int new_order, gfp_t gfp)
> +{
> +	struct address_space *mapping = inode->i_mapping;
> +	XA_STATE_ORDER(xas, &mapping->i_pages, index, new_order);
> +	void *alloced_shadow = NULL;
> +	int alloced_order = 0, i;

gfp needs to be adjusted: see fix patch below.

> +
> +	for (;;) {
> +		int order = -1, split_order = 0;
> +		void *old = NULL;
> +
> +		xas_lock_irq(&xas);
> +		old = xas_load(&xas);
> +		if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
> +			xas_set_err(&xas, -EEXIST);
> +			goto unlock;
> +		}
> +
> +		order = xas_get_order(&xas);
> +
> +		/* Swap entry may have changed before we re-acquire the lock */
> +		if (alloced_order &&
> +		    (old != alloced_shadow || order != alloced_order)) {
> +			xas_destroy(&xas);
> +			alloced_order = 0;
> +		}
> +
> +		/* Try to split large swap entry in pagecache */
> +		if (order > 0 && order > new_order) {

I have not even attempted to understand all the manipulations of order and
new_order and alloced_order and split_order.  And further down it turns out
that this is only ever called with new_order 0.

You may be wanting to cater for more generality in future, but for now
please cut this down to the new_order 0 case, and omit that parameter.
It will be easier for us to think about the xa_get_order() races if
the possibilities are more limited.

> +			if (!alloced_order) {
> +				split_order = order;
> +				goto unlock;
> +			}
> +			xas_split(&xas, old, order);
> +
> +			/*
> +			 * Re-set the swap entry after splitting, and the swap
> +			 * offset of the original large entry must be continuous.
> +			 */
> +			for (i = 0; i < 1 << order; i += (1 << new_order)) {
> +				pgoff_t aligned_index = round_down(index, 1 << order);
> +				swp_entry_t tmp;
> +
> +				tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
> +				__xa_store(&mapping->i_pages, aligned_index + i,
> +					   swp_to_radix_entry(tmp), 0);
> +			}

So that is done under xas lock: good. But is the intermediate state
visible to RCU readers, and could that be a problem?

> +		}
> +
> +unlock:
> +		xas_unlock_irq(&xas);
> +
> +		/* split needed, alloc here and retry. */
> +		if (split_order) {
> +			xas_split_alloc(&xas, old, split_order, gfp);
> +			if (xas_error(&xas))
> +				goto error;
> +			alloced_shadow = old;
> +			alloced_order = split_order;
> +			xas_reset(&xas);
> +			continue;
> +		}
> +
> +		if (!xas_nomem(&xas, gfp))
> +			break;
> +	}
> +
> +error:
> +	if (xas_error(&xas))
> +		return xas_error(&xas);
> +
> +	return alloced_order;
> +}
> +
>  /*
>   * Swap in the folio pointed to by *foliop.
>   * Caller has to make sure that *foliop contains a valid swapped folio.
> @@ -2026,12 +2101,37 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
>  	/* Look it up and read it in.. */
>  	folio = swap_cache_get_folio(swap, NULL, 0);
>  	if (!folio) {
> +		int split_order;
> +
>  		/* Or update major stats only when swapin succeeds?? */
>  		if (fault_type) {
>  			*fault_type |= VM_FAULT_MAJOR;
>  			count_vm_event(PGMAJFAULT);
>  			count_memcg_event_mm(fault_mm, PGMAJFAULT);
>  		}
> +
> +		/*
> +		 * Now swap device can only swap in order 0 folio, then we
> +		 * should split the large swap entry stored in the pagecache
> +		 * if necessary.
> +		 */
> +		split_order = shmem_split_large_entry(inode, index, swap, 0, gfp);
> +		if (split_order < 0) {
> +			error = split_order;
> +			goto failed;
> +		}
> +
> +		/*
> +		 * If the large swap entry has already been split, it is
> +		 * necessary to recalculate the new swap entry based on
> +		 * the old order alignment.
> +		 */
> +		if (split_order > 0) {
> +			pgoff_t offset = index - round_down(index, 1 << split_order);
> +
> +			swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
> +		}
> +
>  		/* Here we actually start the io */
>  		folio = shmem_swapin_cluster(swap, gfp, info, index);
>  		if (!folio) {
> -- 

[PATCH] mm: shmem: split large entry if the swapin folio is not large fix

Fix all the
Unexpected gfp: 0x2 (__GFP_HIGHMEM). Fixing up to gfp: 0x1120d0
(__GFP_IO|__GFP_FS|__GFP_NOWARN|__GFP_NORETRY|__GFP_HARDWALL|__GFP_RECLAIMABLE).
Fix your code!
warnings from kmalloc_fix_flags() from xas_split_alloc() from
shmem_split_large_entry().

Fixes: a960844d5ac9 ("mm: shmem: split large entry if the swapin folio is not large")
Signed-off-by: Hugh Dickins <hughd@google.com>
---
 mm/shmem.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/mm/shmem.c b/mm/shmem.c
index ae2245dce8ae..85e3bd3e709e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1999,6 +1999,9 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
 	void *alloced_shadow = NULL;
 	int alloced_order = 0, i;
 
+	/* Convert user data gfp flags to xarray node gfp flags */
+	gfp &= GFP_RECLAIM_MASK;
+
 	for (;;) {
 		int order = -1, split_order = 0;
 		void *old = NULL;
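
As a minimal sketch of what the mask achieves (the helper below is hypothetical
and assumes the usual mm/shmem.c includes; the actual fix simply masks gfp in
place): the gfp passed down from shmem_swapin_folio() describes user data
folios and can carry flags such as __GFP_HIGHMEM, which are invalid for the
slab allocations backing xarray nodes, so only the reclaim-related bits kept by
GFP_RECLAIM_MASK (mm/internal.h) should be forwarded to
xas_split_alloc()/xas_nomem().

static inline gfp_t shmem_xa_node_gfp(gfp_t mapping_gfp)
{
	/*
	 * Hypothetical helper: drop user-data-only flags such as
	 * __GFP_HIGHMEM that would trigger kmalloc_fix_flags() warnings
	 * when xarray nodes are allocated, keeping only the reclaim
	 * behaviour bits.
	 */
	return mapping_gfp & GFP_RECLAIM_MASK;
}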
Baolin Wang Aug. 27, 2024, 6:46 a.m. UTC | #2
On 2024/8/26 06:31, Hugh Dickins wrote:
> On Mon, 12 Aug 2024, Baolin Wang wrote:
> 
>> Now the swap device can only swap-in order 0 folio, even though a large
>> folio is swapped out. This requires us to split the large entry previously
>> saved in the shmem pagecache to support the swap in of small folios.
>>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>>   mm/shmem.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>>   1 file changed, 100 insertions(+)
>>
>> diff --git a/mm/shmem.c b/mm/shmem.c
>> index 345e25425e37..996062dc196b 100644
>> --- a/mm/shmem.c
>> +++ b/mm/shmem.c
>> @@ -1990,6 +1990,81 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
>>   	swap_free_nr(swap, nr_pages);
>>   }
>>   
>> +static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
>> +				   swp_entry_t swap, int new_order, gfp_t gfp)
>> +{
>> +	struct address_space *mapping = inode->i_mapping;
>> +	XA_STATE_ORDER(xas, &mapping->i_pages, index, new_order);
>> +	void *alloced_shadow = NULL;
>> +	int alloced_order = 0, i;
> 
> gfp needs to be adjusted: see fix patch below.

Ah, good catch. Thank you Hugh.

>> +
>> +	for (;;) {
>> +		int order = -1, split_order = 0;
>> +		void *old = NULL;
>> +
>> +		xas_lock_irq(&xas);
>> +		old = xas_load(&xas);
>> +		if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
>> +			xas_set_err(&xas, -EEXIST);
>> +			goto unlock;
>> +		}
>> +
>> +		order = xas_get_order(&xas);
>> +
>> +		/* Swap entry may have changed before we re-acquire the lock */
>> +		if (alloced_order &&
>> +		    (old != alloced_shadow || order != alloced_order)) {
>> +			xas_destroy(&xas);
>> +			alloced_order = 0;
>> +		}
>> +
>> +		/* Try to split large swap entry in pagecache */
>> +		if (order > 0 && order > new_order) {
> 
> I have not even attempted to understand all the manipulations of order and
> new_order and alloced_order and split_order.  And further down it turns out
> that this is only ever called with new_order 0.
> 
> You may be wanting to cater for more generality in future, but for now
> please cut this down to the new_order 0 case, and omit that parameter.
> It will be easier for us to think about the xa_get_order() races if
> the possibilities are more limited.

Sure. I will drop the 'new_order' parameter with the following fix.

> 
>> +			if (!alloced_order) {
>> +				split_order = order;
>> +				goto unlock;
>> +			}
>> +			xas_split(&xas, old, order);
>> +
>> +			/*
>> +			 * Re-set the swap entry after splitting, and the swap
>> +			 * offset of the original large entry must be continuous.
>> +			 */
>> +			for (i = 0; i < 1 << order; i += (1 << new_order)) {
>> +				pgoff_t aligned_index = round_down(index, 1 << order);
>> +				swp_entry_t tmp;
>> +
>> +				tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
>> +				__xa_store(&mapping->i_pages, aligned_index + i,
>> +					   swp_to_radix_entry(tmp), 0);
>> +			}
> 
> So that is done under xas lock: good. But is the intermediate state
> visible to RCU readers, and could that be a problem?

In xas_split(), the multi-index entry is split into smaller entries, and
each of these smaller entries is initially set to the old swap value. The
subsequent __xa_store() loop then re-sets these entries to the new swap
values. Although RCU readers might observe the old swap value, I have not
seen any problems so far (maybe I missed something).

For concurrent shmem swap-in cases, there are some checks in
shmem_swapin_folio() (including the folio->swap.val and shmem_confirm_swap()
validation) to ensure the correctness of the swap values.

For shmem_partial_swap_usage(), we may get racy swap usage counts, but that
is not a problem per its comments:
" * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem."

For shmem truncation, when removing a racy swap entry from the shmem page
cache, xa_cmpxchg_irq() is used to synchronize on the correct swap state.
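
For reference, a minimal sketch of the revalidation relied on above, modelled
on the existing shmem_confirm_swap() helper in mm/shmem.c (illustrative rather
than the exact code, and assuming the usual includes): once the swapped-in
folio is locked, the page cache slot is re-checked to still hold the expected
swap entry before it is replaced, so a reader racing with the split either
rejects the stale value or retries.

static bool shmem_swap_entry_unchanged(struct address_space *mapping,
				       pgoff_t index, swp_entry_t swap)
{
	/* xa_load() is RCU-safe; compare the slot with the expected entry. */
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}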


[PATCH] mm: shmem: split large entry if the swapin folio is not large fix 2

Now we only split the large entry to order 0, so drop the 'new_order'
parameter.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 mm/shmem.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index d8038a66b110..f00b7b99ad09 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1998,10 +1998,10 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
 }
 
 static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
-				   swp_entry_t swap, int new_order, gfp_t gfp)
+				   swp_entry_t swap, gfp_t gfp)
 {
 	struct address_space *mapping = inode->i_mapping;
-	XA_STATE_ORDER(xas, &mapping->i_pages, index, new_order);
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
 	void *alloced_shadow = NULL;
 	int alloced_order = 0, i;
 
@@ -2026,7 +2026,7 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
 		}
 
 		/* Try to split large swap entry in pagecache */
-		if (order > 0 && order > new_order) {
+		if (order > 0) {
 			if (!alloced_order) {
 				split_order = order;
 				goto unlock;
@@ -2037,7 +2037,7 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
 			 * Re-set the swap entry after splitting, and the swap
 			 * offset of the original large entry must be continuous.
 			 */
-			for (i = 0; i < 1 << order; i += (1 << new_order)) {
+			for (i = 0; i < 1 << order; i++) {
 				pgoff_t aligned_index = round_down(index, 1 << order);
 				swp_entry_t tmp;
 
@@ -2123,7 +2123,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 		 * should split the large swap entry stored in the pagecache
 		 * if necessary.
 		 */
-		split_order = shmem_split_large_entry(inode, index, swap, 0, gfp);
+		split_order = shmem_split_large_entry(inode, index, swap, gfp);
 		if (split_order < 0) {
 			error = split_order;
 			goto failed;
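
As a worked example of the offset recalculation in the shmem_swapin_folio()
hunk (the helper below is hypothetical and not part of either patch): suppose
an order-4 entry covering indices 16..31 was stored with swap offset base B.
After the split, the slot at index 21 holds offset B + 5, and the fault path
recomputes the same value from the returned split order:

static swp_entry_t shmem_swap_entry_at(swp_entry_t base, pgoff_t index,
				       int split_order)
{
	/* e.g. index 21, split_order 4: 21 - round_down(21, 16) = 5 */
	pgoff_t offset = index - round_down(index, 1UL << split_order);

	return swp_entry(swp_type(base), swp_offset(base) + offset);
}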

Patch

diff --git a/mm/shmem.c b/mm/shmem.c
index 345e25425e37..996062dc196b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1990,6 +1990,81 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
 	swap_free_nr(swap, nr_pages);
 }
 
+static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
+				   swp_entry_t swap, int new_order, gfp_t gfp)
+{
+	struct address_space *mapping = inode->i_mapping;
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, new_order);
+	void *alloced_shadow = NULL;
+	int alloced_order = 0, i;
+
+	for (;;) {
+		int order = -1, split_order = 0;
+		void *old = NULL;
+
+		xas_lock_irq(&xas);
+		old = xas_load(&xas);
+		if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
+			xas_set_err(&xas, -EEXIST);
+			goto unlock;
+		}
+
+		order = xas_get_order(&xas);
+
+		/* Swap entry may have changed before we re-acquire the lock */
+		if (alloced_order &&
+		    (old != alloced_shadow || order != alloced_order)) {
+			xas_destroy(&xas);
+			alloced_order = 0;
+		}
+
+		/* Try to split large swap entry in pagecache */
+		if (order > 0 && order > new_order) {
+			if (!alloced_order) {
+				split_order = order;
+				goto unlock;
+			}
+			xas_split(&xas, old, order);
+
+			/*
+			 * Re-set the swap entry after splitting, and the swap
+			 * offset of the original large entry must be continuous.
+			 */
+			for (i = 0; i < 1 << order; i += (1 << new_order)) {
+				pgoff_t aligned_index = round_down(index, 1 << order);
+				swp_entry_t tmp;
+
+				tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
+				__xa_store(&mapping->i_pages, aligned_index + i,
+					   swp_to_radix_entry(tmp), 0);
+			}
+		}
+
+unlock:
+		xas_unlock_irq(&xas);
+
+		/* split needed, alloc here and retry. */
+		if (split_order) {
+			xas_split_alloc(&xas, old, split_order, gfp);
+			if (xas_error(&xas))
+				goto error;
+			alloced_shadow = old;
+			alloced_order = split_order;
+			xas_reset(&xas);
+			continue;
+		}
+
+		if (!xas_nomem(&xas, gfp))
+			break;
+	}
+
+error:
+	if (xas_error(&xas))
+		return xas_error(&xas);
+
+	return alloced_order;
+}
+
 /*
  * Swap in the folio pointed to by *foliop.
  * Caller has to make sure that *foliop contains a valid swapped folio.
@@ -2026,12 +2101,37 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	/* Look it up and read it in.. */
 	folio = swap_cache_get_folio(swap, NULL, 0);
 	if (!folio) {
+		int split_order;
+
 		/* Or update major stats only when swapin succeeds?? */
 		if (fault_type) {
 			*fault_type |= VM_FAULT_MAJOR;
 			count_vm_event(PGMAJFAULT);
 			count_memcg_event_mm(fault_mm, PGMAJFAULT);
 		}
+
+		/*
+		 * Now swap device can only swap in order 0 folio, then we
+		 * should split the large swap entry stored in the pagecache
+		 * if necessary.
+		 */
+		split_order = shmem_split_large_entry(inode, index, swap, 0, gfp);
+		if (split_order < 0) {
+			error = split_order;
+			goto failed;
+		}
+
+		/*
+		 * If the large swap entry has already been split, it is
+		 * necessary to recalculate the new swap entry based on
+		 * the old order alignment.
+		 */
+		if (split_order > 0) {
+			pgoff_t offset = index - round_down(index, 1 << split_order);
+
+			swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
+		}
+
 		/* Here we actually start the io */
 		folio = shmem_swapin_cluster(swap, gfp, info, index);
 		if (!folio) {