
[1/2] mm/filemap: remove hugetlb special casing in filemap.c

Message ID 20230609194947.37196-2-sidhartha.kumar@oracle.com (mailing list archive)
State New
Series change ->index to PAGE_SIZE for hugetlb pages

Commit Message

Sidhartha Kumar June 9, 2023, 7:49 p.m. UTC
This patch removes the special-cased hugetlb handling code within the
page cache by changing the granularity of each index to the base page size
rather than the huge page size.
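
For illustration only (this sketch is not part of the patch): with 2 MiB
huge pages on a 4 KiB base-page system, a hugetlb folio at byte offset
4 MiB into its file used to sit at ->index 2 (huge-page units) and now
sits at ->index 1024 (PAGE_SIZE units). A standalone model of that
arithmetic, with the page and huge-page shifts hard-coded as assumptions
rather than taken from the kernel's helpers:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long offset = 4ULL << 20;	/* byte offset in file */
		unsigned int page_shift  = 12;		/* assumed 4 KiB base page */
		unsigned int hpage_shift = 21;		/* assumed 2 MiB huge page */

		printf("old hugetlb ->index: %llu\n", offset >> hpage_shift); /* 2 */
		printf("new hugetlb ->index: %llu\n", offset >> page_shift);  /* 1024 */
		return 0;
	}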

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
 include/linux/pagemap.h |  6 ------
 mm/filemap.c            | 36 +++++++++++-------------------------
 2 files changed, 11 insertions(+), 31 deletions(-)

Comments

Matthew Wilcox June 9, 2023, 8:05 p.m. UTC | #1
On Fri, Jun 09, 2023 at 12:49:46PM -0700, Sidhartha Kumar wrote:
> @@ -850,12 +847,9 @@ static inline loff_t folio_file_pos(struct folio *folio)
>  
>  /*
>   * Get the offset in PAGE_SIZE (even for hugetlb folios).
> - * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
>   */
>  static inline pgoff_t folio_pgoff(struct folio *folio)
>  {
> -	if (unlikely(folio_test_hugetlb(folio)))
> -		return hugetlb_basepage_index(&folio->page);
>  	return folio->index;
>  }
>  

Unfortunately, you can't split the patches like this.  If somebody's
running a git bisect for an entirely different problem and lands on the
boundary between these two patches, they'll have a non-functional kernel
(at least if they're using hugetlbfs).  So these two patches have to be
combined.  Maybe there's another way to split up the patches, but I
don't immediately see it.  Maybe after I read more of them.
Sidhartha Kumar June 9, 2023, 8:18 p.m. UTC | #2
On 6/9/23 1:05 PM, Matthew Wilcox wrote:
> On Fri, Jun 09, 2023 at 12:49:46PM -0700, Sidhartha Kumar wrote:
>> @@ -850,12 +847,9 @@ static inline loff_t folio_file_pos(struct folio *folio)
>>   
>>   /*
>>    * Get the offset in PAGE_SIZE (even for hugetlb folios).
>> - * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
>>    */
>>   static inline pgoff_t folio_pgoff(struct folio *folio)
>>   {
>> -	if (unlikely(folio_test_hugetlb(folio)))
>> -		return hugetlb_basepage_index(&folio->page);
>>   	return folio->index;
>>   }
>>   
> 
> Unfortunately, you can't split the patches like this.  If somebody's
> running a git bisect for an entirely different problem and lands on the
> boundary between these two patches, they'll have a non-functional kernel
> (at least if they're using hugetlbfs).  So these two patches have to be
> combined.  Maybe there's another way to split up the patches, but I
> don't immediately see it.  Maybe after I read more of them.

While this is under review, should I keep it split so that the filemap and
hugetlb parts are separate and easier to review, or should I combine these
two patches immediately for v2?

Thanks
Mike Kravetz June 15, 2023, 10:13 p.m. UTC | #3
On 06/09/23 12:49, Sidhartha Kumar wrote:
> This patch aims to remove special cased hugetlb handling code within the
> page cache by changing the granularity of each index to the base page size
> rather than the huge page size.
> 
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
> ---
>  include/linux/pagemap.h |  6 ------
>  mm/filemap.c            | 36 +++++++++++-------------------------
>  2 files changed, 11 insertions(+), 31 deletions(-)

I agree with Matthew that this patch cannot be sent independently of, or
prior to, the patch with the hugetlb changes.

Code changes to remove hugetlb special casing below look fine.

It does not matter for your code changes, but I think some of the routines
where you are removing hugetlb checks cannot be passed hugetlb folios/vmas
today. Specifically: folio_more_pages, filemap_get_folios_contig, and
filemap_get_folios_tag.

Patch

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 716953ee1ebdb..17c414fc2136e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -723,9 +723,6 @@  static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
  */
 static inline bool folio_contains(struct folio *folio, pgoff_t index)
 {
-	/* HugeTLBfs indexes the page cache in units of hpage_size */
-	if (folio_test_hugetlb(folio))
-		return folio->index == index;
 	return index - folio_index(folio) < folio_nr_pages(folio);
 }
 
@@ -850,12 +847,9 @@  static inline loff_t folio_file_pos(struct folio *folio)
 
 /*
  * Get the offset in PAGE_SIZE (even for hugetlb folios).
- * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
  */
 static inline pgoff_t folio_pgoff(struct folio *folio)
 {
-	if (unlikely(folio_test_hugetlb(folio)))
-		return hugetlb_basepage_index(&folio->page);
 	return folio->index;
 }
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 60f6f63cfacba..7462d33f70e2f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -134,11 +134,8 @@  static void page_cache_delete(struct address_space *mapping,
 
 	mapping_set_update(&xas, mapping);
 
-	/* hugetlb pages are represented by a single entry in the xarray */
-	if (!folio_test_hugetlb(folio)) {
-		xas_set_order(&xas, folio->index, folio_order(folio));
-		nr = folio_nr_pages(folio);
-	}
+	xas_set_order(&xas, folio->index, folio_order(folio));
+	nr = folio_nr_pages(folio);
 
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
@@ -237,7 +234,7 @@  void filemap_free_folio(struct address_space *mapping, struct folio *folio)
 	if (free_folio)
 		free_folio(folio);
 
-	if (folio_test_large(folio) && !folio_test_hugetlb(folio))
+	if (folio_test_large(folio))
 		refs = folio_nr_pages(folio);
 	folio_put_refs(folio, refs);
 }
@@ -858,14 +855,15 @@  noinline int __filemap_add_folio(struct address_space *mapping,
 
 	if (!huge) {
 		int error = mem_cgroup_charge(folio, NULL, gfp);
-		VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
 		if (error)
 			return error;
 		charged = true;
-		xas_set_order(&xas, index, folio_order(folio));
-		nr = folio_nr_pages(folio);
 	}
 
+	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
+	xas_set_order(&xas, index, folio_order(folio));
+	nr = folio_nr_pages(folio);
+
 	gfp &= GFP_RECLAIM_MASK;
 	folio_ref_add(folio, nr);
 	folio->mapping = mapping;
@@ -2048,7 +2046,7 @@  unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
 		int idx = folio_batch_count(fbatch) - 1;
 
 		folio = fbatch->folios[idx];
-		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
+		if (!xa_is_value(folio))
 			nr = folio_nr_pages(folio);
 		*start = indices[idx] + nr;
 	}
@@ -2112,7 +2110,7 @@  unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
 		int idx = folio_batch_count(fbatch) - 1;
 
 		folio = fbatch->folios[idx];
-		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
+		if (!xa_is_value(folio))
 			nr = folio_nr_pages(folio);
 		*start = indices[idx] + nr;
 	}
@@ -2153,9 +2151,6 @@  unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
 			continue;
 		if (!folio_batch_add(fbatch, folio)) {
 			unsigned long nr = folio_nr_pages(folio);
-
-			if (folio_test_hugetlb(folio))
-				nr = 1;
 			*start = folio->index + nr;
 			goto out;
 		}
@@ -2181,7 +2176,7 @@  EXPORT_SYMBOL(filemap_get_folios);
 static inline
 bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
 {
-	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
+	if (!folio_test_large(folio))
 		return false;
 	if (index >= max)
 		return false;
@@ -2231,9 +2226,6 @@  unsigned filemap_get_folios_contig(struct address_space *mapping,
 
 		if (!folio_batch_add(fbatch, folio)) {
 			nr = folio_nr_pages(folio);
-
-			if (folio_test_hugetlb(folio))
-				nr = 1;
 			*start = folio->index + nr;
 			goto out;
 		}
@@ -2250,10 +2242,7 @@  unsigned filemap_get_folios_contig(struct address_space *mapping,
 
 	if (nr) {
 		folio = fbatch->folios[nr - 1];
-		if (folio_test_hugetlb(folio))
-			*start = folio->index + 1;
-		else
-			*start = folio->index + folio_nr_pages(folio);
+		*start = folio->index + folio_nr_pages(folio);
 	}
 out:
 	rcu_read_unlock();
@@ -2291,9 +2280,6 @@  unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
 			continue;
 		if (!folio_batch_add(fbatch, folio)) {
 			unsigned long nr = folio_nr_pages(folio);
-
-			if (folio_test_hugetlb(folio))
-				nr = 1;
 			*start = folio->index + nr;
 			goto out;
 		}
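
As a closing illustration (sketch only, not part of the patch): once
hugetlb folios carry ->index in PAGE_SIZE units, the generic containment
check (index - folio_index(folio) < folio_nr_pages(folio)) and the batch
resume arithmetic (*start = folio->index + folio_nr_pages(folio)) used
above both work for them unchanged. A standalone model for a 2 MiB folio
(512 base pages) at index 1024, with the folio fields passed in as plain
numbers:

	#include <assert.h>

	static int contains(unsigned long folio_index, unsigned long nr,
			    unsigned long index)
	{
		/* mirrors the unified folio_contains() check */
		return index - folio_index < nr;
	}

	int main(void)
	{
		unsigned long index = 1024, nr = 512;	/* 2 MiB hugetlb folio */

		assert(contains(index, nr, 1024));	/* first subpage */
		assert(contains(index, nr, 1535));	/* last subpage */
		assert(!contains(index, nr, 1536));	/* start of next folio */
		assert(index + nr == 1536);		/* where *start resumes */
		return 0;
	}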