[3/4] truncate: Remove indices argument from truncate_folio_batch_exceptionals()

Message ID 20221011215634.478330-4-vishal.moola@gmail.com (mailing list archive)
State New
Series Rework find_get_entries() and find_lock_entries()

Commit Message

Vishal Moola Oct. 11, 2022, 9:56 p.m. UTC
The indices array is unnecessary. Folios keep track of their own xarray index
in the folio->index field, which can simply be accessed as needed.

This change is in preparation for the removal of the indices arguments of
find_get_entries() and find_lock_entries().
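
For illustration only (not part of this patch): a minimal sketch of the pattern
the change relies on, walking a folio_batch and reading each real folio's own
->index instead of consulting a parallel indices[] array. The helper name
sketch_report_indices() is hypothetical; folio_batch_count(), xa_is_value() and
folio->index are the in-tree interfaces already used by mm/truncate.c.

#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/printk.h>
#include <linux/xarray.h>

/* Hypothetical helper, for illustration only. */
static void sketch_report_indices(struct folio_batch *fbatch)
{
	unsigned int i;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		/* Skip value entries (shadow/DAX); only real folios here. */
		if (xa_is_value(folio))
			continue;

		/* folio->index is the folio's offset within its mapping. */
		pr_debug("folio at index %lu\n", folio->index);
	}
}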

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 mm/truncate.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

Patch

diff --git a/mm/truncate.c b/mm/truncate.c
index 846ddbdb27a4..4e63d885498a 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -58,7 +58,7 @@  static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
  * exceptional entries similar to what folio_batch_remove_exceptionals() does.
  */
 static void truncate_folio_batch_exceptionals(struct address_space *mapping,
-				struct folio_batch *fbatch, pgoff_t *indices)
+				struct folio_batch *fbatch)
 {
 	int i, j;
 	bool dax;
@@ -82,7 +82,6 @@  static void truncate_folio_batch_exceptionals(struct address_space *mapping,
 
 	for (i = j; i < folio_batch_count(fbatch); i++) {
 		struct folio *folio = fbatch->folios[i];
-		pgoff_t index = indices[i];
 
 		if (!xa_is_value(folio)) {
 			fbatch->folios[j++] = folio;
@@ -90,11 +89,11 @@  static void truncate_folio_batch_exceptionals(struct address_space *mapping,
 		}
 
 		if (unlikely(dax)) {
-			dax_delete_mapping_entry(mapping, index);
+			dax_delete_mapping_entry(mapping, folio->index);
 			continue;
 		}
 
-		__clear_shadow_entry(mapping, index, folio);
+		__clear_shadow_entry(mapping, folio->index, folio);
 	}
 
 	if (!dax) {
@@ -363,7 +362,7 @@  void truncate_inode_pages_range(struct address_space *mapping,
 	index = start;
 	while (index < end && find_lock_entries(mapping, &index, end - 1,
 			&fbatch, indices)) {
-		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
+		truncate_folio_batch_exceptionals(mapping, &fbatch);
 		for (i = 0; i < folio_batch_count(&fbatch); i++)
 			truncate_cleanup_folio(fbatch.folios[i]);
 		delete_from_page_cache_batch(mapping, &fbatch);
@@ -424,7 +423,7 @@  void truncate_inode_pages_range(struct address_space *mapping,
 			truncate_inode_folio(mapping, folio);
 			folio_unlock(folio);
 		}
-		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
+		truncate_folio_batch_exceptionals(mapping, &fbatch);
 		folio_batch_release(&fbatch);
 	}
 }