@@ -114,6 +114,7 @@ int shmem_unuse(unsigned int type);
unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
loff_t write_end, bool shmem_huge_force);
+bool shmem_hpage_pmd_enabled(void);
#else
static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
@@ -121,6 +122,11 @@ static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
{
return 0;
}
+
+static inline bool shmem_hpage_pmd_enabled(void)
+{
+ return false;
+}
#endif
#ifdef CONFIG_SHMEM
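
The !CONFIG_TRANSPARENT_HUGEPAGE stub above keeps call sites free of preprocessor guards: the static inline returns a constant false that the compiler folds away. A minimal sketch of the pattern, where want_shmem_collapse() is a made-up caller and not part of the patch:

#include <linux/shmem_fs.h>

/* Hypothetical caller; compiles identically with THP on or off. */
static bool want_shmem_collapse(void)
{
	/* Resolves to the constant-false stub when THP is compiled out. */
	return shmem_hpage_pmd_enabled();
}
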
@@ -416,9 +416,11 @@ static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
static bool hugepage_pmd_enabled(void)
{
/*
- * We cover both the anon and the file-backed case here; file-backed
+ * We cover the anon, shmem, and file-backed cases here; file-backed
* hugepages, when configured in, are determined by the global control.
* Anon pmd-sized hugepages are determined by the pmd-size control.
+ * Shmem pmd-sized hugepages are likewise determined by their pmd-size control,
+ * except when the global shmem_huge is set to SHMEM_HUGE_DENY.
*/
if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
hugepage_global_enabled())
@@ -430,6 +432,8 @@ static bool hugepage_pmd_enabled(void)
if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
hugepage_global_enabled())
return true;
+ if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
+ return true;
return false;
}
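
The new IS_ENABLED(CONFIG_SHMEM) guard does more than a runtime check: in !CONFIG_SHMEM builds it is constant false, so the compiler discards the call entirely and the bare declaration in shmem_fs.h suffices; no undefined reference to the shmem.c definition survives. A self-contained userspace sketch of the idiom (CONFIG_DEMO and demo_helper() are made up):

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_DEMO 0		/* as if the option were disabled */

bool demo_helper(void);		/* declared, deliberately never defined */

static bool demo_enabled(void)
{
	/*
	 * The constant-false left operand short-circuits at compile time,
	 * so no call to the undefined demo_helper() is ever emitted.
	 */
	if (CONFIG_DEMO && demo_helper())
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", demo_enabled());
	return 0;
}
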
@@ -1653,6 +1653,23 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+bool shmem_hpage_pmd_enabled(void)
+{
+ if (shmem_huge == SHMEM_HUGE_DENY)
+ return false;
+ if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always))
+ return true;
+ if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise))
+ return true;
+ if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size))
+ return true;
+ if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) &&
+ shmem_huge != SHMEM_HUGE_NEVER)
+ return true;
+
+ return false;
+}
+
unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
loff_t write_end, bool shmem_huge_force)
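
Read as a predicate: a global SHMEM_HUGE_DENY vetoes everything; otherwise PMD-sized shmem THP counts as enabled if any explicit per-size policy (always, madvise, within_size) has the PMD-order bit set, or if the per-size policy is inherit and the global policy is not never. A userspace mirror of that logic, with illustrative stand-ins for the kernel's SHMEM_HUGE_* values:

#include <stdbool.h>

/* Illustrative values only; the kernel's constants differ. */
enum shmem_policy { POLICY_NEVER, POLICY_ALWAYS, POLICY_DENY };

struct shmem_orders {
	bool always, madvise, within_size, inherit;	/* PMD-order bits */
};

static bool pmd_enabled(enum shmem_policy global, struct shmem_orders o)
{
	if (global == POLICY_DENY)
		return false;				/* global deny wins */
	if (o.always || o.madvise || o.within_size)
		return true;				/* explicit enable */
	return o.inherit && global != POLICY_NEVER;	/* follow global */
}
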
@@ -5036,7 +5053,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
char tmp[16];
- int huge;
+ int huge, err;
if (count + 1 > sizeof(tmp))
return -EINVAL;
@@ -5060,7 +5077,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
shmem_huge = huge;
if (shmem_huge > SHMEM_HUGE_DENY)
SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
- return count;
+
+ err = start_stop_khugepaged();
+ return err ? err : count;
}
struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
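
Both store paths now re-evaluate whether the khugepaged thread should run after a policy write; any error from start_stop_khugepaged() is returned in place of the byte count, per the usual sysfs convention (the per-size handler in the next hunk does the same under its ret > 0 success check). A condensed sketch of that return convention, where demo_apply() is a hypothetical helper:

#include <linux/kobject.h>

static ssize_t demo_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	int err = demo_apply(buf);	/* 0 on success, -errno on failure */

	/* Report the whole write as consumed on success, else the error. */
	return err ? err : count;
}
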
@@ -5137,6 +5156,12 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
ret = -EINVAL;
}
+ if (ret > 0) {
+ int err = start_stop_khugepaged();
+
+ if (err)
+ ret = err;
+ }
return ret;
}