
[06/12] mm/truncate: add folio_unmap_invalidate() helper

Message ID 20241220154831.1086649-7-axboe@kernel.dk
State New
Series Uncached buffered IO

Commit Message

Jens Axboe Dec. 20, 2024, 3:47 p.m. UTC
Add a folio_unmap_invalidate() helper, which unmaps and invalidates a
given folio. The caller must already have locked the folio. Embed the
old invalidate_complete_folio2() helper in there as well, as nobody else
calls it.

Use this new helper in invalidate_inode_pages2_range(), rather than
duplicate the code there.

In preparation for using this elsewhere, have it take a gfp_t mask
rather than assume GFP_KERNEL is the right choice. This bubbles back to
invalidate_complete_folio2() as well.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 mm/internal.h |  2 ++
 mm/truncate.c | 53 +++++++++++++++++++++++++++------------------------
 2 files changed, 30 insertions(+), 25 deletions(-)
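
As a caller-side illustration (not part of the patch): a minimal sketch of how
the new helper is meant to be used, mirroring the pattern that
invalidate_inode_pages2_range() follows in the diff below. The function name
drop_locked_folio() is made up for the example; the folio must already be
locked when the helper is called.

	/*
	 * Hypothetical example: try to drop a locked folio from the page
	 * cache. Per the helper in this patch, it returns 0 if the folio was
	 * invalidated (or skipped because it is dirty), -EBUSY if it could
	 * not be released, or a negative error from laundering.
	 */
	static int drop_locked_folio(struct address_space *mapping,
				     struct folio *folio)
	{
		int err;

		folio_wait_writeback(folio);
		err = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
		folio_unlock(folio);
		return err;
	}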

Comments

Matthew Wilcox (Oracle) Dec. 20, 2024, 4:21 p.m. UTC | #1
On Fri, Dec 20, 2024 at 08:47:44AM -0700, Jens Axboe wrote:
> +int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
> +			   gfp_t gfp)
>  {
> -	if (folio->mapping != mapping)
> -		return 0;
> +	int ret;
> +
> +	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
>  
> -	if (!filemap_release_folio(folio, GFP_KERNEL))
> +	if (folio_test_dirty(folio))
>  		return 0;
> +	if (folio_mapped(folio))
> +		unmap_mapping_folio(folio);
> +	BUG_ON(folio_mapped(folio));
> +
> +	ret = folio_launder(mapping, folio);
> +	if (ret)
> +		return ret;
> +	if (folio->mapping != mapping)
> +		return -EBUSY;

The position of this test confuses me.  Usually we want to test
folio->mapping early on, since if the folio is no longer part of this
file, we want to stop doing things to it, rather than go to the trouble
of unmapping it.  Also, why do we want to return -EBUSY in this case?
If the folio is no longer part of this file, it has been successfully
removed from this file, right?
Jens Axboe Dec. 20, 2024, 4:28 p.m. UTC | #2
On 12/20/24 9:21 AM, Matthew Wilcox wrote:
> On Fri, Dec 20, 2024 at 08:47:44AM -0700, Jens Axboe wrote:
>> +int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
>> +			   gfp_t gfp)
>>  {
>> -	if (folio->mapping != mapping)
>> -		return 0;
>> +	int ret;
>> +
>> +	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
>>  
>> -	if (!filemap_release_folio(folio, GFP_KERNEL))
>> +	if (folio_test_dirty(folio))
>>  		return 0;
>> +	if (folio_mapped(folio))
>> +		unmap_mapping_folio(folio);
>> +	BUG_ON(folio_mapped(folio));
>> +
>> +	ret = folio_launder(mapping, folio);
>> +	if (ret)
>> +		return ret;
>> +	if (folio->mapping != mapping)
>> +		return -EBUSY;
> 
> The position of this test confuses me.  Usually we want to test
> folio->mapping early on, since if the folio is no longer part of this
> file, we want to stop doing things to it, rather than go to the trouble
> of unmapping it.  Also, why do we want to return -EBUSY in this case?
> If the folio is no longer part of this file, it has been successfully
> removed from this file, right?

It's simply doing what the code did before. I do agree the mapping check
is a bit odd at that point, but that's how
invalidate_inode_pages2_range() and folio_launder() were set up. We can
certainly clean that up after the merge of these helpers, but I didn't
want to introduce any potential behavior changes with this merge.

-EBUSY was what a 0 return from those two helpers mapped to before.
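
For reference, the caller-side translation being discussed, as it exists
before this patch (taken from the removed hunk in the diff below):

	ret2 = folio_launder(mapping, folio);
	if (ret2 == 0) {
		if (!invalidate_complete_folio2(mapping, folio))
			ret2 = -EBUSY;
	}

i.e. a 0 ("failed") return from invalidate_complete_folio2() became -EBUSY in
the caller; folio_unmap_invalidate() now returns -EBUSY directly on those
failure paths.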

Patch

diff --git a/mm/internal.h b/mm/internal.h
index cb8d8e8e3ffa..ed3c3690eb03 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -392,6 +392,8 @@  void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+			   gfp_t gfp);
 
 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
 		unsigned int order);
diff --git a/mm/truncate.c b/mm/truncate.c
index 7c304d2f0052..e2e115adfbc5 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -525,6 +525,15 @@  unsigned long invalidate_mapping_pages(struct address_space *mapping,
 }
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
+static int folio_launder(struct address_space *mapping, struct folio *folio)
+{
+	if (!folio_test_dirty(folio))
+		return 0;
+	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
+		return 0;
+	return mapping->a_ops->launder_folio(folio);
+}
+
 /*
  * This is like mapping_evict_folio(), except it ignores the folio's
  * refcount.  We do this because invalidate_inode_pages2() needs stronger
@@ -532,14 +541,26 @@  EXPORT_SYMBOL(invalidate_mapping_pages);
  * shrink_folio_list() has a temp ref on them, or because they're transiently
  * sitting in the folio_add_lru() caches.
  */
-static int invalidate_complete_folio2(struct address_space *mapping,
-					struct folio *folio)
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+			   gfp_t gfp)
 {
-	if (folio->mapping != mapping)
-		return 0;
+	int ret;
+
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
-	if (!filemap_release_folio(folio, GFP_KERNEL))
+	if (folio_test_dirty(folio))
 		return 0;
+	if (folio_mapped(folio))
+		unmap_mapping_folio(folio);
+	BUG_ON(folio_mapped(folio));
+
+	ret = folio_launder(mapping, folio);
+	if (ret)
+		return ret;
+	if (folio->mapping != mapping)
+		return -EBUSY;
+	if (!filemap_release_folio(folio, gfp))
+		return -EBUSY;
 
 	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
@@ -558,16 +579,7 @@  static int invalidate_complete_folio2(struct address_space *mapping,
 failed:
 	xa_unlock_irq(&mapping->i_pages);
 	spin_unlock(&mapping->host->i_lock);
-	return 0;
-}
-
-static int folio_launder(struct address_space *mapping, struct folio *folio)
-{
-	if (!folio_test_dirty(folio))
-		return 0;
-	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
-		return 0;
-	return mapping->a_ops->launder_folio(folio);
+	return -EBUSY;
 }
 
 /**
@@ -631,16 +643,7 @@  int invalidate_inode_pages2_range(struct address_space *mapping,
 			}
 			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
 			folio_wait_writeback(folio);
-
-			if (folio_mapped(folio))
-				unmap_mapping_folio(folio);
-			BUG_ON(folio_mapped(folio));
-
-			ret2 = folio_launder(mapping, folio);
-			if (ret2 == 0) {
-				if (!invalidate_complete_folio2(mapping, folio))
-					ret2 = -EBUSY;
-			}
+			ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
 			if (ret2 < 0)
 				ret = ret2;
 			folio_unlock(folio);