[06/11] mm/truncate: add folio_unmap_invalidate() helper

Message ID: 20241213155557.105419-7-axboe@kernel.dk
State: New
Series: Uncached buffered IO

Commit Message

Jens Axboe Dec. 13, 2024, 3:55 p.m. UTC
Add a folio_unmap_invalidate() helper, which unmaps and invalidates a
given folio. The caller must already have locked the folio. Embed the
old invalidate_complete_folio2() helper in there as well, as nobody else
calls it.

Use this new helper in invalidate_inode_pages2_range(), rather than
duplicate the code there.

In preparation for using this elsewhere as well, have it take a gfp_t
mask rather than assume GFP_KERNEL is the right choice. This bubbles
back to invalidate_complete_folio2() as well.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 mm/internal.h |  2 ++
 mm/truncate.c | 54 ++++++++++++++++++++++++++-------------------------
 2 files changed, 30 insertions(+), 26 deletions(-)
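
For illustration, a minimal caller sketch (an editorial addition, not part of this series): with the gfp mask now a parameter, a path that must not block could pass something other than GFP_KERNEL. The function name and the GFP_NOWAIT choice below are hypothetical:

	/* Hypothetical caller, for illustration only; not proposed anywhere. */
	static int try_evict_folio_nonblock(struct address_space *mapping,
					    struct folio *folio)
	{
		int ret;

		if (!folio_trylock(folio))
			return -EAGAIN;
		/* folio_unmap_invalidate() asserts that the folio is locked. */
		ret = folio_unmap_invalidate(mapping, folio, GFP_NOWAIT);
		folio_unlock(folio);
		return ret;
	}

The patch itself only converts the existing call site in invalidate_inode_pages2_range(), which keeps passing GFP_KERNEL, as shown in the diff below.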

Comments

Kirill A. Shutemov Dec. 20, 2024, 11:13 a.m. UTC | #1
On Fri, Dec 13, 2024 at 08:55:20AM -0700, Jens Axboe wrote:
> @@ -629,18 +641,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
>  				folio_unlock(folio);
>  				continue;
>  			}
> -			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
>  			folio_wait_writeback(folio);

Any particular reason you drop this VM_BUG_ON_FOLIO()?

> -
> -			if (folio_mapped(folio))
> -				unmap_mapping_folio(folio);
> -			BUG_ON(folio_mapped(folio));
> -
> -			ret2 = folio_launder(mapping, folio);
> -			if (ret2 == 0) {
> -				if (!invalidate_complete_folio2(mapping, folio))
> -					ret2 = -EBUSY;
> -			}
> +			ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
>  			if (ret2 < 0)
>  				ret = ret2;
>  			folio_unlock(folio);

Jens Axboe Dec. 20, 2024, 2:16 p.m. UTC | #2
On 12/20/24 4:13 AM, Kirill A. Shutemov wrote:
> On Fri, Dec 13, 2024 at 08:55:20AM -0700, Jens Axboe wrote:
>> @@ -629,18 +641,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
>>  				folio_unlock(folio);
>>  				continue;
>>  			}
>> -			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
>>  			folio_wait_writeback(folio);
> 
> Any particular reason you drop this VM_BUG_ON_FOLIO()?

No reason at all, I think it just slipped under the radar. I've put it back
now, thanks!
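
For reference, with the assertion restored the loop body in invalidate_inode_pages2_range() would read as follows (a sketch combining the quoted hunk with the fix described above; the posted diff below does not include it):

	VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
	folio_wait_writeback(folio);
	ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
	if (ret2 < 0)
		ret = ret2;
	folio_unlock(folio);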

Patch

diff --git a/mm/internal.h b/mm/internal.h
index cb8d8e8e3ffa..ed3c3690eb03 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -392,6 +392,8 @@ void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+			   gfp_t gfp);
 
 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
 		unsigned int order);
diff --git a/mm/truncate.c b/mm/truncate.c
index 7c304d2f0052..7b6f4399319f 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -525,6 +525,15 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 }
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
+static int folio_launder(struct address_space *mapping, struct folio *folio)
+{
+	if (!folio_test_dirty(folio))
+		return 0;
+	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
+		return 0;
+	return mapping->a_ops->launder_folio(folio);
+}
+
 /*
  * This is like mapping_evict_folio(), except it ignores the folio's
  * refcount.  We do this because invalidate_inode_pages2() needs stronger
@@ -532,14 +541,26 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
  * shrink_folio_list() has a temp ref on them, or because they're transiently
  * sitting in the folio_add_lru() caches.
  */
-static int invalidate_complete_folio2(struct address_space *mapping,
-					struct folio *folio)
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+			   gfp_t gfp)
 {
-	if (folio->mapping != mapping)
-		return 0;
+	int ret;
+
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
-	if (!filemap_release_folio(folio, GFP_KERNEL))
+	if (folio_test_dirty(folio))
 		return 0;
+	if (folio_mapped(folio))
+		unmap_mapping_folio(folio);
+	BUG_ON(folio_mapped(folio));
+
+	ret = folio_launder(mapping, folio);
+	if (ret)
+		return ret;
+	if (folio->mapping != mapping)
+		return -EBUSY;
+	if (!filemap_release_folio(folio, gfp))
+		return -EBUSY;
 
 	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
@@ -558,16 +579,7 @@ static int invalidate_complete_folio2(struct address_space *mapping,
 failed:
 	xa_unlock_irq(&mapping->i_pages);
 	spin_unlock(&mapping->host->i_lock);
-	return 0;
-}
-
-static int folio_launder(struct address_space *mapping, struct folio *folio)
-{
-	if (!folio_test_dirty(folio))
-		return 0;
-	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
-		return 0;
-	return mapping->a_ops->launder_folio(folio);
+	return -EBUSY;
 }
 
 /**
@@ -629,18 +641,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				folio_unlock(folio);
 				continue;
 			}
-			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
 			folio_wait_writeback(folio);
-
-			if (folio_mapped(folio))
-				unmap_mapping_folio(folio);
-			BUG_ON(folio_mapped(folio));
-
-			ret2 = folio_launder(mapping, folio);
-			if (ret2 == 0) {
-				if (!invalidate_complete_folio2(mapping, folio))
-					ret2 = -EBUSY;
-			}
+			ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
 			if (ret2 < 0)
 				ret = ret2;
 			folio_unlock(folio);
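
To summarize the helper's resulting contract (an editorial annotation derived from the diff above, not text from the patch):

	/*
	 * folio_unmap_invalidate(), as read from the diff:
	 *
	 *  - The caller must hold the folio lock (asserted via
	 *    VM_BUG_ON_FOLIO at entry).
	 *  - A folio that is dirty on entry is skipped: the helper
	 *    returns 0 without unmapping or invalidating it.
	 *  - -EBUSY: the folio changed mapping after laundering, its
	 *    private data could not be released, or it was found dirty
	 *    again under the xarray lock (the "failed" path above).
	 *  - Any other negative value is propagated from
	 *    ->launder_folio(), which can still run here because
	 *    unmap_mapping_folio() may transfer a dirty PTE bit to a
	 *    folio that was clean on entry.
	 *  - A non-negative return means the folio was invalidated.
	 *  - @gfp is forwarded to filemap_release_folio() instead of
	 *    the previously hard-coded GFP_KERNEL.
	 */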