[RFC,07/11] shmem: remove huge arg from shmem_alloc_and_add_folio()

Message ID: 20231028211518.3424020-8-da.gomez@samsung.com (mailing list archive)
State: New, archived
Series: [RFC,01/11] XArray: add cmpxchg order test

Commit Message

Daniel Gomez Oct. 28, 2023, 9:15 p.m. UTC
The huge flag is already part of the memory allocation flags (gfp_t).
Make use of the VM_HUGEPAGE bit set by vma_thp_gfp_mask() to know
whether the allocation must be a huge page.

Drop the CONFIG_TRANSPARENT_HUGEPAGE check in shmem_alloc_and_add_folio(),
as VM_HUGEPAGE won't be set unless the THP config is enabled.

Signed-off-by: Daniel Gomez <da.gomez@samsung.com>
---
 mm/shmem.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)
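
For orientation, the call flow this patch sets up looks roughly like
this (reconstructed from the hunks below; a sketch, not a complete
listing). The caller derives a THP-oriented gfp mask, and the callee
now infers "huge" from that mask instead of taking a separate bool:

	/* in shmem_get_folio_gfp(): */
	huge_gfp = vma_thp_gfp_mask(vma);	/* returns a gfp_t */
	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
	folio = shmem_alloc_and_add_folio(huge_gfp, inode, index, fault_mm);

	/* in shmem_alloc_and_add_folio(): */
	if (gfp & VM_HUGEPAGE) {	/* VM_HUGEPAGE is a vma flag, not a
					 * gfp bit; see the review comment
					 * below */
		pages = HPAGE_PMD_NR;
		index = round_down(index, HPAGE_PMD_NR);
	}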

Comments

Matthew Wilcox Oct. 29, 2023, 11:17 p.m. UTC | #1
On Sat, Oct 28, 2023 at 09:15:45PM +0000, Daniel Gomez wrote:
> The huge flag is already part of the memory allocation flags (gfp_t).
> Make use of the VM_HUGEPAGE bit set by vma_thp_gfp_mask() to know
> whether the allocation must be a huge page.

... what?

> +	if (gfp & VM_HUGEPAGE) {

Does sparse not complain about this?  VM_HUGEPAGE is never part of
the GFP flags, and there are supposed to be annotations that make the
various checkers warn.
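
For reference, gfp_t is declared as a sparse __bitwise type in
include/linux/types.h, so a checker run should flag the mixed
expression. A minimal standalone sketch of that mechanism (not kernel
code; the flag values here are illustrative):

	/* bitwise-demo.c: build with gcc, check with `sparse bitwise-demo.c` */
	#ifdef __CHECKER__
	#define __bitwise	__attribute__((bitwise))
	#define __force		__attribute__((force))
	#else
	#define __bitwise
	#define __force
	#endif

	typedef unsigned int __bitwise gfp_t;

	/* Real GFP flags carry the annotation via a __force cast ... */
	#define __GFP_EXAMPLE	((__force gfp_t)0x400u)
	/* ... but VM_* flags are plain integers from a different namespace. */
	#define VM_HUGEPAGE	0x20000000UL

	int is_huge(gfp_t gfp)
	{
		/* sparse: warning: restricted gfp_t degrades to integer */
		return !!(gfp & VM_HUGEPAGE);
	}

With plain gcc the annotations compile away, which is why a mix-up
like this builds cleanly and only the static checkers catch it.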

Patch

diff --git a/mm/shmem.c b/mm/shmem.c
index e2893cf2287f..9d68211373c4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1644,7 +1644,7 @@ static struct folio *shmem_alloc_folio(gfp_t gfp,
 
 static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
 		struct inode *inode, pgoff_t index,
-		struct mm_struct *fault_mm, bool huge)
+		struct mm_struct *fault_mm)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
@@ -1652,10 +1652,7 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
 	long pages;
 	int error;
 
-	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
-		huge = false;
-
-	if (huge) {
+	if (gfp & VM_HUGEPAGE) {
 		pages = HPAGE_PMD_NR;
 		index = round_down(index, HPAGE_PMD_NR);
 
@@ -1690,7 +1687,7 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
 		if (xa_find(&mapping->i_pages, &index,
 				index + pages - 1, XA_PRESENT)) {
 			error = -EEXIST;
-		} else if (huge) {
+		} else if (gfp & VM_HUGEPAGE) {
 			count_vm_event(THP_FILE_FALLBACK);
 			count_vm_event(THP_FILE_FALLBACK_CHARGE);
 		}
@@ -2054,7 +2051,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 		huge_gfp = vma_thp_gfp_mask(vma);
 		huge_gfp = limit_gfp_mask(huge_gfp, gfp);
 		folio = shmem_alloc_and_add_folio(huge_gfp,
-				inode, index, fault_mm, true);
+				inode, index, fault_mm);
 		if (!IS_ERR(folio)) {
 			count_vm_event(THP_FILE_ALLOC);
 			goto alloced;
@@ -2063,7 +2060,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 			goto repeat;
 	}
 
-	folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, false);
+	folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm);
 	if (IS_ERR(folio)) {
 		error = PTR_ERR(folio);
 		if (error == -EEXIST)