@@ -86,7 +86,12 @@ extern void shmem_truncate_range(struct
extern int shmem_unuse(unsigned int type, bool frontswap,
		       unsigned long *fs_pages_to_unuse);
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
+extern bool shmem_is_huge(struct vm_area_struct *vma,
+			  struct inode *inode, pgoff_t index);
+static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
+{
+	return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff);
+}
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end);
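
Keeping shmem_huge_enabled() as an inline wrapper means existing callers (khugepaged, for one) compile unchanged, while the richer shmem_is_huge() may also be called with a NULL vma by code that has only an inode and an offset, as the shmem.c hunk below allows. A minimal sketch of that new convention, with a hypothetical helper name:

	/* Hypothetical caller, not part of the patch: may a write at
	 * offset pos be backed by a huge page?  Passing a NULL vma
	 * skips the per-vma and per-mm flag tests, leaving only mount
	 * policy and i_size rounding.
	 */
	static bool write_may_use_huge_page(struct file *file, loff_t pos)
	{
		return shmem_is_huge(NULL, file_inode(file),
				     pos >> PAGE_SHIFT);
	}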
@@ -96,8 +101,6 @@ enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_NOALLOC,	/* similar, but fail on hole or use fallocated page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
-	SGP_NOHUGE,	/* like SGP_CACHE, but no huge pages */
-	SGP_HUGE,	/* like SGP_CACHE, huge pages preferred */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};
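
With SGP_NOHUGE and SGP_HUGE gone, sgp_type describes only i_size and allocation behaviour; huge-or-not is shmem_is_huge()'s job at allocation time. Callers that used to select a huge variant now simply pass SGP_CACHE, for example (hypothetical wrapper, assuming the existing shmem_getpage() helper):

	static int shmem_read_cache_page(struct inode *inode, pgoff_t index,
					 struct page **pagep)
	{
		/* no huge-page hint rides in sgp anymore */
		return shmem_getpage(inode, index, pagep, SGP_CACHE);
	}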
@@ -471,39 +471,35 @@ static bool shmem_confirm_swap(struct ad
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

-static int shmem_huge __read_mostly;
+static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

-bool shmem_huge_enabled(struct vm_area_struct *vma)
+bool shmem_is_huge(struct vm_area_struct *vma,
+		   struct inode *inode, pgoff_t index)
{
-	struct inode *inode = file_inode(vma->vm_file);
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	loff_t i_size;
-	pgoff_t off;

-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-		return false;
-	if (shmem_huge == SHMEM_HUGE_FORCE)
-		return true;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
-	switch (sbinfo->huge) {
-	case SHMEM_HUGE_NEVER:
+	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
+	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
		return false;
+	if (shmem_huge == SHMEM_HUGE_FORCE)
+		return true;
+
+	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
-		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
+		index = round_up(index, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >= HPAGE_PMD_SIZE &&
-		    i_size >> PAGE_SHIFT >= off)
+		if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
-		/* TODO: implement fadvise() hints */
-		return (vma->vm_flags & VM_HUGEPAGE);
+		if (vma && (vma->vm_flags & VM_HUGEPAGE))
+			return true;
+		fallthrough;
	default:
-		VM_BUG_ON(1);
		return false;
	}
}
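
Interleaved +/- lines are hard to read, so here is the predicate as it stands once this hunk applies, reconstructed from the added and context lines above (whitespace mine). Two points worth noting: vma may now be NULL, so inode-only callers get pure mount-option and size policy; and SHMEM_HUGE_NEVER no longer needs its own case or the old VM_BUG_ON(1), both now folded into the default return of false:

	bool shmem_is_huge(struct vm_area_struct *vma,
			   struct inode *inode, pgoff_t index)
	{
		loff_t i_size;

		if (shmem_huge == SHMEM_HUGE_DENY)
			return false;
		if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
		    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
			return false;
		if (shmem_huge == SHMEM_HUGE_FORCE)
			return true;

		switch (SHMEM_SB(inode->i_sb)->huge) {
		case SHMEM_HUGE_ALWAYS:
			return true;
		case SHMEM_HUGE_WITHIN_SIZE:
			index = round_up(index, HPAGE_PMD_NR);
			i_size = round_up(i_size_read(inode), PAGE_SIZE);
			if (i_size >= HPAGE_PMD_SIZE &&
			    (i_size >> PAGE_SHIFT) >= index)
				return true;
			fallthrough;
		case SHMEM_HUGE_ADVISE:
			if (vma && (vma->vm_flags & VM_HUGEPAGE))
				return true;
			fallthrough;
		default:
			return false;
		}
	}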
@@ -677,6 +673,12 @@ static long shmem_unused_huge_count(stru

#define shmem_huge SHMEM_HUGE_DENY

+bool shmem_is_huge(struct vm_area_struct *vma,
+		   struct inode *inode, pgoff_t index)
+{
+	return false;
+}
+
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
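
The !CONFIG_TRANSPARENT_HUGEPAGE build now needs a real definition because shmem_getpage_gfp() (in a later hunk) calls shmem_is_huge() unconditionally; with THP off, the stub makes that call constant-false so the compiler can discard the huge allocation path. Sketching the effect at that later call site, under the assumption nothing else intervenes:

	/* In a THP-less build the stub reduces the later call site to: */
	if (!shmem_is_huge(vma, inode, index))	/* always true here */
		goto alloc_nohuge;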
@@ -1812,7 +1814,6 @@ static int shmem_getpage_gfp(struct inod
	struct shmem_sb_info *sbinfo;
	struct mm_struct *charge_mm;
	struct page *page;
-	enum sgp_type sgp_huge = sgp;
	pgoff_t hindex = index;
	gfp_t huge_gfp;
	int error;
@@ -1821,8 +1822,6 @@ static int shmem_getpage_gfp(struct inod

	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
		return -EFBIG;
-	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
-		sgp = SGP_CACHE;
repeat:
	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
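
These two small hunks strip the plumbing for the deleted hints. The sgp_huge shadow copy existed only because of enum ordering: SGP_NOHUGE and SGP_HUGE sorted after SGP_CACHE, so they had to be folded back into SGP_CACHE before the `sgp <= SGP_CACHE` bound check at repeat: could see them. For reference, the deleted normalization, annotated:

	/* Deleted above; in the old enum,
	 *	SGP_CACHE < SGP_NOHUGE < SGP_HUGE < SGP_WRITE
	 * so without this fold the huge variants would have dodged the
	 * "don't exceed i_size" test keyed off sgp <= SGP_CACHE.
	 */
	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
		sgp = SGP_CACHE;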
@@ -1886,36 +1885,12 @@ repeat:
		return 0;
	}

-	/* shmem_symlink() */
-	if (!shmem_mapping(mapping))
-		goto alloc_nohuge;
-	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
+	/* Never use a huge page for shmem_symlink() */
+	if (S_ISLNK(inode->i_mode))
		goto alloc_nohuge;
-	if (shmem_huge == SHMEM_HUGE_FORCE)
-		goto alloc_huge;
-	switch (sbinfo->huge) {
-	case SHMEM_HUGE_NEVER:
+	if (!shmem_is_huge(vma, inode, index))
		goto alloc_nohuge;
-	case SHMEM_HUGE_WITHIN_SIZE: {
-		loff_t i_size;
-		pgoff_t off;
-
-		off = round_up(index, HPAGE_PMD_NR);
-		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >= HPAGE_PMD_SIZE &&
-		    i_size >> PAGE_SHIFT >= off)
-			goto alloc_huge;
-		fallthrough;
-	}
-	case SHMEM_HUGE_ADVISE:
-		if (sgp_huge == SGP_HUGE)
-			goto alloc_huge;
-		/* TODO: implement fadvise() hints */
-		goto alloc_nohuge;
-	}
-
-alloc_huge:
+
	huge_gfp = vma_thp_gfp_mask(vma);
	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
	page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
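
What is left of the allocation decision after this hunk, again reconstructed from the + and context lines (the alloc_huge: label disappears because control now falls straight into the huge path). The S_ISLNK() test takes over from the old !shmem_mapping(mapping) check, whose comment already marked it as the shmem_symlink() guard:

	/* Never use a huge page for shmem_symlink() */
	if (S_ISLNK(inode->i_mode))
		goto alloc_nohuge;
	if (!shmem_is_huge(vma, inode, index))
		goto alloc_nohuge;

	huge_gfp = vma_thp_gfp_mask(vma);
	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
	page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);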
@@ -2071,7 +2046,6 @@ static vm_fault_t shmem_fault(struct vm_
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
-	enum sgp_type sgp;
	int err;
	vm_fault_t ret = VM_FAULT_LOCKED;
@@ -2134,15 +2108,7 @@ static vm_fault_t shmem_fault(struct vm_
		spin_unlock(&inode->i_lock);
	}

-	sgp = SGP_CACHE;
-
-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-		sgp = SGP_NOHUGE;
-	else if (vma->vm_flags & VM_HUGEPAGE)
-		sgp = SGP_HUGE;
-
-	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
+	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
				gfp, vma, vmf, &ret);
	if (err)
		return vmf_error(err);
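
shmem_fault() now always asks for SGP_CACHE; each old per-fault hint has a home inside shmem_is_huge(), reached through the vma argument. The new call, annotated with where each hint went (comments mine):

	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
				gfp, vma, vmf, &ret);
	/* within shmem_is_huge(vma, inode, index):
	 *	vma->vm_flags & VM_NOHUGEPAGE	-> false (was SGP_NOHUGE)
	 *	MMF_DISABLE_THP on vma->vm_mm	-> false (was SGP_NOHUGE)
	 *	vma->vm_flags & VM_HUGEPAGE	-> true under huge=advise
	 *					   (was SGP_HUGE)
	 */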
@@ -3950,7 +3916,7 @@ int __init shmem_init(void)
	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	else
-		shmem_huge = 0; /* just in case it was patched */
+		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
#endif
	return 0;
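
A closing note on the two SHMEM_HUGE_NEVER substitutions (here and in the shmem_huge initializer above): SHMEM_HUGE_NEVER is 0, so neither changes behaviour; both just replace a bare 0 with its name. For reference, the values as defined near the top of mm/shmem.c:

	#define SHMEM_HUGE_NEVER	0	/* huge=never */
	#define SHMEM_HUGE_ALWAYS	1	/* huge=always */
	#define SHMEM_HUGE_WITHIN_SIZE	2	/* huge=within_size */
	#define SHMEM_HUGE_ADVISE	3	/* huge=advise */

	/* special values, settable only via the shmem_enabled sysfs knob: */
	#define SHMEM_HUGE_DENY		(-1)	/* force huge pages off */
	#define SHMEM_HUGE_FORCE	(-2)	/* force huge pages on */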