
[06/16] huge tmpfs: shmem_is_huge(vma, inode, index)

Message ID dae523ab-c75b-f532-af9d-8b6a1d4e29b@google.com (mailing list archive)
State New, archived
Series tmpfs: HUGEPAGE and MEM_LOCK fcntls and memfds

Commit Message

Hugh Dickins July 30, 2021, 7:42 a.m. UTC
Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
that a consistent set of checks can be applied, even when the inode is
accessed through read/write syscalls (with NULL vma) instead of mmaps
(the index argument is seldom of interest, but required by mount option
"huge=within_size").  Clean up and rearrange the checks a little.

This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes: while it's
still true that khugepaged's collapse_file() at that point wants a small
page, the race that might allocate it a huge page is too unlikely to be
worth optimizing against (we are there *because* there was at least one
small page in the way), and handled by a later PageTransCompound check.

Replace a couple of 0s by explicit SHMEM_HUGE_NEVERs; and replace the
obscure !shmem_mapping() symlink check by explicit S_ISLNK() - nothing
else needs that symlink check, so leave it there in shmem_getpage_gfp().
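
For illustration only (not part of the patch): assuming 4KiB pages, so
HPAGE_PMD_NR is 512 and HPAGE_PMD_SIZE is 2MiB, the "huge=within_size"
arithmetic in the new shmem_is_huge() works out as below - which is why the
index must still be passed in when there is no vma.

	/* access at index 100 of a 3MiB file mounted huge=within_size */
	index  = round_up(100, HPAGE_PMD_NR);		/* 512 */
	i_size = round_up(3 << 20, PAGE_SIZE);		/* 3MiB, unchanged */
	/* 3MiB >= HPAGE_PMD_SIZE and (3MiB >> PAGE_SHIFT) == 768 >= 512,
	   so a huge page may be used */

	/* access at index 100 of a 1MiB file mounted huge=within_size */
	/* 1MiB < HPAGE_PMD_SIZE, so fall through to the SHMEM_HUGE_ADVISE check */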

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 include/linux/shmem_fs.h |  9 +++--
 mm/khugepaged.c          |  2 +-
 mm/shmem.c               | 84 ++++++++++++----------------------------
 3 files changed, 32 insertions(+), 63 deletions(-)

Comments

Yang Shi July 30, 2021, 11:34 p.m. UTC | #1
On Fri, Jul 30, 2021 at 12:42 AM Hugh Dickins <hughd@google.com> wrote:
>
> Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
> that a consistent set of checks can be applied, even when the inode is
> accessed through read/write syscalls (with NULL vma) instead of mmaps
> (the index argument is seldom of interest, but required by mount option
> "huge=within_size").  Clean up and rearrange the checks a little.
>
> This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
> were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes: while it's
> still true that khugepaged's collapse_file() at that point wants a small
> page, the race that might allocate it a huge page is too unlikely to be
> worth optimizing against (we are there *because* there was at least one
> small page in the way), and handled by a later PageTransCompound check.

Yes, it seems too unlikely. But if it happens, the PageTransCompound
check may not be good enough, since the page allocated by
shmem_getpage() may be charged to the wrong memcg (the root memcg). And
it won't be replaced by a newly allocated huge page, so the wrong charge
can't be undone.

And another question: it seems the newly allocated huge page will
just be uncharged, instead of being freed, until
"khugepaged_pages_to_scan" pages are scanned.
khugepaged_prealloc_page() is called to free the allocated huge page
before each call to khugepaged_scan_mm_slot(). But
khugepaged_scan_file() -> collapse_file() -> khugepaged_alloc_page()
may be called multiple times in the loop in khugepaged_scan_mm_slot(),
so khugepaged_alloc_page() may see that page and trigger the VM_BUG_ON, IIUC.

The code is quite convoluted; I'm not sure whether I'm missing
something or not. And this problem seems very hard to trigger with a
real-life workload.

>
> Replace a couple of 0s by explicit SHMEM_HUGE_NEVERs; and replace the
> obscure !shmem_mapping() symlink check by explicit S_ISLNK() - nothing
> else needs that symlink check, so leave it there in shmem_getpage_gfp().
>
> Signed-off-by: Hugh Dickins <hughd@google.com>
> ---
>  include/linux/shmem_fs.h |  9 +++--
>  mm/khugepaged.c          |  2 +-
>  mm/shmem.c               | 84 ++++++++++++----------------------------
>  3 files changed, 32 insertions(+), 63 deletions(-)
>
> diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
> index 9b7f7ac52351..3b05a28e34c4 100644
> --- a/include/linux/shmem_fs.h
> +++ b/include/linux/shmem_fs.h
> @@ -86,7 +86,12 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
>  extern int shmem_unuse(unsigned int type, bool frontswap,
>                        unsigned long *fs_pages_to_unuse);
>
> -extern bool shmem_huge_enabled(struct vm_area_struct *vma);
> +extern bool shmem_is_huge(struct vm_area_struct *vma,
> +                         struct inode *inode, pgoff_t index);
> +static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
> +{
> +       return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff);
> +}
>  extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
>  extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
>                                                 pgoff_t start, pgoff_t end);
> @@ -95,8 +100,6 @@ extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
>  enum sgp_type {
>         SGP_READ,       /* don't exceed i_size, don't allocate page */
>         SGP_CACHE,      /* don't exceed i_size, may allocate page */
> -       SGP_NOHUGE,     /* like SGP_CACHE, but no huge pages */
> -       SGP_HUGE,       /* like SGP_CACHE, huge pages preferred */
>         SGP_WRITE,      /* may exceed i_size, may allocate !Uptodate page */
>         SGP_FALLOC,     /* like SGP_WRITE, but make existing page Uptodate */
>  };
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index b0412be08fa2..cecb19c3e965 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1721,7 +1721,7 @@ static void collapse_file(struct mm_struct *mm,
>                                 xas_unlock_irq(&xas);
>                                 /* swap in or instantiate fallocated page */
>                                 if (shmem_getpage(mapping->host, index, &page,
> -                                                 SGP_NOHUGE)) {
> +                                                 SGP_CACHE)) {
>                                         result = SCAN_FAIL;
>                                         goto xa_unlocked;
>                                 }
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 740d48ef1eb5..6def7391084c 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -474,39 +474,35 @@ static bool shmem_confirm_swap(struct address_space *mapping,
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  /* ifdef here to avoid bloating shmem.o when not necessary */
>
> -static int shmem_huge __read_mostly;
> +static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
>
> -bool shmem_huge_enabled(struct vm_area_struct *vma)
> +bool shmem_is_huge(struct vm_area_struct *vma,
> +                  struct inode *inode, pgoff_t index)
>  {
> -       struct inode *inode = file_inode(vma->vm_file);
> -       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
>         loff_t i_size;
> -       pgoff_t off;
>
> -       if ((vma->vm_flags & VM_NOHUGEPAGE) ||
> -           test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
> -               return false;
> -       if (shmem_huge == SHMEM_HUGE_FORCE)
> -               return true;
>         if (shmem_huge == SHMEM_HUGE_DENY)
>                 return false;
> -       switch (sbinfo->huge) {
> -       case SHMEM_HUGE_NEVER:
> +       if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
> +           test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
>                 return false;
> +       if (shmem_huge == SHMEM_HUGE_FORCE)
> +               return true;
> +
> +       switch (SHMEM_SB(inode->i_sb)->huge) {
>         case SHMEM_HUGE_ALWAYS:
>                 return true;
>         case SHMEM_HUGE_WITHIN_SIZE:
> -               off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
> +               index = round_up(index, HPAGE_PMD_NR);
>                 i_size = round_up(i_size_read(inode), PAGE_SIZE);
> -               if (i_size >= HPAGE_PMD_SIZE &&
> -                               i_size >> PAGE_SHIFT >= off)
> +               if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
>                         return true;
>                 fallthrough;
>         case SHMEM_HUGE_ADVISE:
> -               /* TODO: implement fadvise() hints */
> -               return (vma->vm_flags & VM_HUGEPAGE);
> +               if (vma && (vma->vm_flags & VM_HUGEPAGE))
> +                       return true;
> +               fallthrough;
>         default:
> -               VM_BUG_ON(1);
>                 return false;
>         }
>  }
> @@ -680,6 +676,12 @@ static long shmem_unused_huge_count(struct super_block *sb,
>
>  #define shmem_huge SHMEM_HUGE_DENY
>
> +bool shmem_is_huge(struct vm_area_struct *vma,
> +                  struct inode *inode, pgoff_t index)
> +{
> +       return false;
> +}
> +
>  static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
>                 struct shrink_control *sc, unsigned long nr_to_split)
>  {
> @@ -1829,7 +1831,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
>         struct shmem_sb_info *sbinfo;
>         struct mm_struct *charge_mm;
>         struct page *page;
> -       enum sgp_type sgp_huge = sgp;
>         pgoff_t hindex = index;
>         gfp_t huge_gfp;
>         int error;
> @@ -1838,8 +1839,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
>
>         if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
>                 return -EFBIG;
> -       if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
> -               sgp = SGP_CACHE;
>  repeat:
>         if (sgp <= SGP_CACHE &&
>             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
> @@ -1898,36 +1897,12 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
>                 return 0;
>         }
>
> -       /* shmem_symlink() */
> -       if (!shmem_mapping(mapping))
> -               goto alloc_nohuge;
> -       if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
> +       /* Never use a huge page for shmem_symlink() */
> +       if (S_ISLNK(inode->i_mode))
>                 goto alloc_nohuge;
> -       if (shmem_huge == SHMEM_HUGE_FORCE)
> -               goto alloc_huge;
> -       switch (sbinfo->huge) {
> -       case SHMEM_HUGE_NEVER:
> +       if (!shmem_is_huge(vma, inode, index))
>                 goto alloc_nohuge;
> -       case SHMEM_HUGE_WITHIN_SIZE: {
> -               loff_t i_size;
> -               pgoff_t off;
> -
> -               off = round_up(index, HPAGE_PMD_NR);
> -               i_size = round_up(i_size_read(inode), PAGE_SIZE);
> -               if (i_size >= HPAGE_PMD_SIZE &&
> -                   i_size >> PAGE_SHIFT >= off)
> -                       goto alloc_huge;
>
> -               fallthrough;
> -       }
> -       case SHMEM_HUGE_ADVISE:
> -               if (sgp_huge == SGP_HUGE)
> -                       goto alloc_huge;
> -               /* TODO: implement fadvise() hints */
> -               goto alloc_nohuge;
> -       }
> -
> -alloc_huge:
>         huge_gfp = vma_thp_gfp_mask(vma);
>         huge_gfp = limit_gfp_mask(huge_gfp, gfp);
>         page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
> @@ -2083,7 +2058,6 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
>         struct vm_area_struct *vma = vmf->vma;
>         struct inode *inode = file_inode(vma->vm_file);
>         gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
> -       enum sgp_type sgp;
>         int err;
>         vm_fault_t ret = VM_FAULT_LOCKED;
>
> @@ -2146,15 +2120,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
>                 spin_unlock(&inode->i_lock);
>         }
>
> -       sgp = SGP_CACHE;
> -
> -       if ((vma->vm_flags & VM_NOHUGEPAGE) ||
> -           test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
> -               sgp = SGP_NOHUGE;
> -       else if (vma->vm_flags & VM_HUGEPAGE)
> -               sgp = SGP_HUGE;
> -
> -       err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
> +       err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
>                                   gfp, vma, vmf, &ret);
>         if (err)
>                 return vmf_error(err);
> @@ -3961,7 +3927,7 @@ int __init shmem_init(void)
>         if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
>                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
>         else
> -               shmem_huge = 0; /* just in case it was patched */
> +               shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
>  #endif
>         return 0;
>
> --
> 2.26.2
>
Hugh Dickins Aug. 1, 2021, 5:22 a.m. UTC | #2
On Fri, 30 Jul 2021, Yang Shi wrote:
> On Fri, Jul 30, 2021 at 12:42 AM Hugh Dickins <hughd@google.com> wrote:
> >
> > Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
> > that a consistent set of checks can be applied, even when the inode is
> > accessed through read/write syscalls (with NULL vma) instead of mmaps
> > (the index argument is seldom of interest, but required by mount option
> > "huge=within_size").  Clean up and rearrange the checks a little.
> >
> > This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
> > were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes: while it's
> > still true that khugepaged's collapse_file() at that point wants a small
> > page, the race that might allocate it a huge page is too unlikely to be
> > worth optimizing against (we are there *because* there was at least one
> > small page in the way), and handled by a later PageTransCompound check.
> 
> Yes, it seems too unlikely. But if it happens the PageTransCompound
> check may be not good enough since the page allocated by
> shmem_getpage() may be charged to wrong memcg (root memcg). And it
> won't be replaced by a newly allocated huge page so the wrong charge
> can't be undone.

Good point on the memcg charge: I hadn't thought of that.  Of course
it's not specific to SGP_CACHE versus SGP_NOHUGE (this patch), but I
admit that a huge mischarge is hugely worse than a small mischarge.

We could fix it by making shmem_getpage_gfp() non-static, and pointing
to the vma (hence its mm, hence its memcg) here, couldn't we?  Easily
done, but I don't really want to make shmem_getpage_gfp() public just
for this, for two reasons.

One is that the huge race is just so unlikely; and a mischarge to root
is not the end of the world, so long as it's not reproducible.  It can
only happen on the very first page of the huge extent, and the prior
"Stop if extent has been truncated" check makes sure there was one
entry in the extent at that point: so the race with hole-punch can only
occur after we xas_unlock_irq(&xas) immediately before shmem_getpage()
looks up the page in the tree (and I say hole-punch not truncate,
because shmem_getpage()'s i_size check will reject when truncated).
I don't doubt that it could happen, but stand by not optimizing against.
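
Spelling that window out (just a sketch of my reading above, not code
from the patch):

	1. collapse_file() passes the "Stop if extent has been truncated"
	   check: at least one entry is still present in the extent.
	2. xas_unlock_irq(&xas).
	3. A racing hole-punch removes that entry from the tree (a racing
	   truncate would instead be rejected by shmem_getpage()'s i_size
	   check).
	4. shmem_getpage(..., SGP_CACHE) finds a hole and allocates a fresh
	   first page of the extent: with no vma to supply the mm, it can
	   only be charged to the wrong (root) memcg.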

The other reason is that doing shmem_getpage() (or shmem_getpage_gfp())
there is unhealthy for unrelated reasons, that I cannot afford to get
into sending patches for at this time: but some of our users found the
worst-case latencies in collapse_file() intolerable - shmem_getpage()
may be reading in from swap, while the locked head of the huge page
being built is in the page cache keeping other users waiting.  So,
I'd say there's something worse than memcg in that shmem_getpage(),
but fixing that cannot be a part of this series.

> 
> And, another question is it seems the newly allocated huge page will
> just be uncharged instead of being freed until
> "khugepaged_pages_to_scan" pages are scanned. The
> khugepaged_prealloc_page() is called to free the allocated huge page
> before each call to khugepaged_scan_mm_slot(). But
> khugepaged_scan_file() -> collapse_fille() -> khugepaged_alloc_page()
> may be called multiple times in the loop in khugepaged_scan_mm_slot(),
> so khugepaged_alloc_page() may see that page to trigger VM_BUG IIUC.
> 
> The code is quite convoluted, I'm not sure whether I miss something or
> not. And this problem seems very hard to trigger in real life
> workload.

Just to clarify, those two paragraphs are not about this patch, but about
what happens to mm/khugepaged.c's newly allocated huge page, when collapse
fails for any reason.

Yes, the code is convoluted: that's because it takes very different paths
when CONFIG_NUMA=y (when it cannot predict which node to allocate from)
and when not NUMA (when it can allocate the huge page at a good unlocked
moment, and carry it forward from one attempt to the next).

I don't like it at all, the two paths are confusing: sometimes I wonder
whether we should just remove the !CONFIG_NUMA path entirely; and other
times I wonder in the other direction, whether the CONFIG_NUMA=y path
ought to go the other way when it finds nr_node_ids is 1.  Undecided.

I'm confident that if you work through the two cases (thinking about
only one of them at once!), you'll find that the failure paths (not
to mention the successful paths) do actually work correctly without
leaking (well, maybe the !NUMA path can hold on to one huge page
indefinitely, I forget, but I wouldn't count that as leaking).

Collapse failure is not uncommon and leaking huge pages gets noticed.

Hugh
Hugh Dickins Aug. 1, 2021, 5:37 a.m. UTC | #3
On Sat, 31 Jul 2021, Hugh Dickins wrote:
> On Fri, 30 Jul 2021, Yang Shi wrote:
> > On Fri, Jul 30, 2021 at 12:42 AM Hugh Dickins <hughd@google.com> wrote:
> > >
> > > Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
> > > that a consistent set of checks can be applied, even when the inode is
> > > accessed through read/write syscalls (with NULL vma) instead of mmaps
> > > (the index argument is seldom of interest, but required by mount option
> > > "huge=within_size").  Clean up and rearrange the checks a little.
> > >
> > > This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
> > > were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes: while it's
> > > still true that khugepaged's collapse_file() at that point wants a small
> > > page, the race that might allocate it a huge page is too unlikely to be
> > > worth optimizing against (we are there *because* there was at least one
> > > small page in the way), and handled by a later PageTransCompound check.
> > 
> > Yes, it seems too unlikely. But if it happens the PageTransCompound
> > check may be not good enough since the page allocated by
> > shmem_getpage() may be charged to wrong memcg (root memcg). And it
> > won't be replaced by a newly allocated huge page so the wrong charge
> > can't be undone.
> 
> Good point on the memcg charge: I hadn't thought of that.  Of course
> it's not specific to SGP_CACHE versus SGP_NOHUGE (this patch), but I
> admit that a huge mischarge is hugely worse than a small mischarge.

Stupid me (and maybe I haven't given this enough consideration yet):
but, much better than SGP_NOHUGE, much better than SGP_CACHE, would be
SGP_READ there, wouldn't it?  Needs to beware of the NULL too, of course.

Hugh
Yang Shi Aug. 2, 2021, 9:14 p.m. UTC | #4
On Sat, Jul 31, 2021 at 10:22 PM Hugh Dickins <hughd@google.com> wrote:
>
> On Fri, 30 Jul 2021, Yang Shi wrote:
> > On Fri, Jul 30, 2021 at 12:42 AM Hugh Dickins <hughd@google.com> wrote:
> > >
> > > Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
> > > that a consistent set of checks can be applied, even when the inode is
> > > accessed through read/write syscalls (with NULL vma) instead of mmaps
> > > (the index argument is seldom of interest, but required by mount option
> > > "huge=within_size").  Clean up and rearrange the checks a little.
> > >
> > > This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
> > > were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes: while it's
> > > still true that khugepaged's collapse_file() at that point wants a small
> > > page, the race that might allocate it a huge page is too unlikely to be
> > > worth optimizing against (we are there *because* there was at least one
> > > small page in the way), and handled by a later PageTransCompound check.
> >
> > Yes, it seems too unlikely. But if it happens the PageTransCompound
> > check may be not good enough since the page allocated by
> > shmem_getpage() may be charged to wrong memcg (root memcg). And it
> > won't be replaced by a newly allocated huge page so the wrong charge
> > can't be undone.
>
> Good point on the memcg charge: I hadn't thought of that.  Of course
> it's not specific to SGP_CACHE versus SGP_NOHUGE (this patch), but I
> admit that a huge mischarge is hugely worse than a small mischarge.

The small page could be collapsed to a huge page sooner or later, so
the mischarge may be transient. But a huge page can't be replaced.

>
> We could fix it by making shmem_getpage_gfp() non-static, and pointing
> to the vma (hence its mm, hence its memcg) here, couldn't we?  Easily
> done, but I don't really want to make shmem_getpage_gfp() public just
> for this, for two reasons.
>
> One is that the huge race it just so unlikely; and a mischarge to root
> is not the end of the world, so long as it's not reproducible.  It can
> only happen on the very first page of the huge extent, and the prior

OK, if so, the mischarge is not as bad as I thought in the first place.

> "Stop if extent has been truncated" check makes sure there was one
> entry in the extent at that point: so the race with hole-punch can only
> occur after we xas_unlock_irq(&xas) immediately before shmem_getpage()
> looks up the page in the tree (and I say hole-punch not truncate,
> because shmem_getpage()'s i_size check will reject when truncated).
> I don't doubt that it could happen, but stand by not optimizing against.

I agree the race is very unlikely and may not be worth optimizing
against right now, but a note or a comment may be worthwhile.

>
> Other reason is that doing shmem_getpage() (or shmem_getpage_gfp())
> there is unhealthy for unrelated reasons, that I cannot afford to get
> into sending patches for at this time: but some of our users found the
> worst-case latencies in collapse_file() intolerable - shmem_getpage()
> may be reading in from swap, while the locked head of the huge page
> being built is in the page cache keeping other users waiting.  So,
> I'd say there's something worse than memcg in that shmem_getpage(),
> but fixing that cannot be a part of this series.

Yeah, that is a different problem.

>
> >
> > And, another question is it seems the newly allocated huge page will
> > just be uncharged instead of being freed until
> > "khugepaged_pages_to_scan" pages are scanned. The
> > khugepaged_prealloc_page() is called to free the allocated huge page
> > before each call to khugepaged_scan_mm_slot(). But
> > khugepaged_scan_file() -> collapse_fille() -> khugepaged_alloc_page()
> > may be called multiple times in the loop in khugepaged_scan_mm_slot(),
> > so khugepaged_alloc_page() may see that page to trigger VM_BUG IIUC.
> >
> > The code is quite convoluted, I'm not sure whether I miss something or
> > not. And this problem seems very hard to trigger in real life
> > workload.
>
> Just to clarify, those two paragraphs are not about this patch, but about
> what happens to mm/khugepaged.c's newly allocated huge page, when collapse
> fails for any reason.
>
> Yes, the code is convoluted: that's because it takes very different paths
> when CONFIG_NUMA=y (when it cannot predict which node to allocate from)
> and when not NUMA (when it can allocate the huge page at a good unlocked
> moment, and carry it forward from one attempt to the next).
>
> I don't like it at all, the two paths are confusing: sometimes I wonder
> whether we should just remove the !CONFIG_NUMA path entirely; and other
> times I wonder in the other direction, whether the CONFIG_NUMA=y path
> ought to go the other way when it finds nr_node_ids is 1.  Undecided.

I suppose it is just a performance consideration to keep the
allocated huge page, but I'm not sure how much difference it would make
if we removed it (removed the !CONFIG_NUMA path), because the pcp could
cache THPs now, since Mel's patch 44042b449872 ("mm/page_alloc: allow
high-order pages to be stored on the per-cpu lists").

It seems to provide a similar optimization, but in the buddy
allocator layer, so that khugepaged doesn't have to maintain its own
implementation.

>
> I'm confident that if you work through the two cases (thinking about
> only one of them at once!), you'll find that the failure paths (not
> to mention the successful paths) do actually work correctly without
> leaking (well, maybe the !NUMA path can hold on to one huge page
> indefinitely, I forget, but I wouldn't count that as leaking).

IIUC the !NUMA path could hold on to one huge page indefinitely. But
I've never seen the BUG personally, so maybe you are right.

>
> Collapse failure is not uncommon and leaking huge pages gets noticed.
>
> Hugh
Hugh Dickins Aug. 4, 2021, 8:28 a.m. UTC | #5
On Mon, 2 Aug 2021, Yang Shi wrote:
> On Sat, Jul 31, 2021 at 10:22 PM Hugh Dickins <hughd@google.com> wrote:
> > On Fri, 30 Jul 2021, Yang Shi wrote:
> > > On Fri, Jul 30, 2021 at 12:42 AM Hugh Dickins <hughd@google.com> wrote:
> > > >
> > > > Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
> > > > that a consistent set of checks can be applied, even when the inode is
> > > > accessed through read/write syscalls (with NULL vma) instead of mmaps
> > > > (the index argument is seldom of interest, but required by mount option
> > > > "huge=within_size").  Clean up and rearrange the checks a little.
> > > >
> > > > This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
> > > > were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes: while it's
> > > > still true that khugepaged's collapse_file() at that point wants a small
> > > > page, the race that might allocate it a huge page is too unlikely to be
> > > > worth optimizing against (we are there *because* there was at least one
> > > > small page in the way), and handled by a later PageTransCompound check.
> > >
> > > Yes, it seems too unlikely. But if it happens the PageTransCompound
> > > check may be not good enough since the page allocated by
> > > shmem_getpage() may be charged to wrong memcg (root memcg). And it
> > > won't be replaced by a newly allocated huge page so the wrong charge
> > > can't be undone.
> >
> > Good point on the memcg charge: I hadn't thought of that.  Of course
> > it's not specific to SGP_CACHE versus SGP_NOHUGE (this patch), but I
> > admit that a huge mischarge is hugely worse than a small mischarge.
> 
> The small page could be collapsed to a huge page sooner or later, so
> the mischarge may be transient. But huge page can't be replaced.

You're right, if all goes well, the mischarged small page could be
collapsed to a correctly charged huge page sooner or later (but all
may not go well), whereas the mischarged huge page is stuck there.

> 
> >
> > We could fix it by making shmem_getpage_gfp() non-static, and pointing
> > to the vma (hence its mm, hence its memcg) here, couldn't we?  Easily
> > done, but I don't really want to make shmem_getpage_gfp() public just
> > for this, for two reasons.
> >
> > One is that the huge race it just so unlikely; and a mischarge to root
> > is not the end of the world, so long as it's not reproducible.  It can
> > only happen on the very first page of the huge extent, and the prior
> 
> OK, if so the mischarge is not as bad as what I thought in the first place.
> 
> > "Stop if extent has been truncated" check makes sure there was one
> > entry in the extent at that point: so the race with hole-punch can only
> > occur after we xas_unlock_irq(&xas) immediately before shmem_getpage()
> > looks up the page in the tree (and I say hole-punch not truncate,
> > because shmem_getpage()'s i_size check will reject when truncated).
> > I don't doubt that it could happen, but stand by not optimizing against.
> 
> I agree the race is so unlikely and it may be not worth optimizing
> against it right now, but a note or a comment may be worth.

Thanks, but despite us agreeing that the race is too unlikely to be worth
optimizing against, it has nagged at me ever since you questioned it:
silly, but I can't quite be convinced by my own dismissals.

I do still want to get rid of SGP_HUGE and SGP_NOHUGE, clearing up those
huge allocation decisions remains the intention; but now think to add
SGP_NOALLOC for collapse_file() in place of SGP_NOHUGE or SGP_CACHE -
to rule out that possibility of mischarge after racing hole-punch,
no matter whether it's huge or small.  If any such race occurs,
collapse_file() should just give up.

This being the "Stupid me" SGP_READ idea, except that of course it would
not work: because half the point of that block in collapse_file() is
to initialize the !Uptodate pages, whereas SGP_READ avoids doing so.

There is, of course, the danger that in fixing this unlikely mischarge,
I've got the code wrong and am introducing a bug: here's what a 17/16
would look like, though it will be better inserted early.  I got sick
of all the "if (page "s, and was glad of the opportunity to fix that
outdated "bring it back from swap" comment - swap got done above.

What do you think? Should I add this in or leave it out?

Thanks,
Hugh

--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -108,6 +108,7 @@ extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 /* Flag allocation requirements to shmem_getpage */
 enum sgp_type {
 	SGP_READ,	/* don't exceed i_size, don't allocate page */
+	SGP_NOALLOC,	/* like SGP_READ, but do use fallocated page */
 	SGP_CACHE,	/* don't exceed i_size, may allocate page */
 	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
 	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1721,7 +1721,7 @@ static void collapse_file(struct mm_struct *mm,
 				xas_unlock_irq(&xas);
 				/* swap in or instantiate fallocated page */
 				if (shmem_getpage(mapping->host, index, &page,
-						  SGP_CACHE)) {
+						  SGP_NOALLOC)) {
 					result = SCAN_FAIL;
 					goto xa_unlocked;
 				}
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1903,26 +1903,27 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		return error;
 	}
 
-	if (page)
+	if (page) {
 		hindex = page->index;
-	if (page && sgp == SGP_WRITE)
-		mark_page_accessed(page);
-
-	/* fallocated page? */
-	if (page && !PageUptodate(page)) {
+		if (sgp == SGP_WRITE)
+			mark_page_accessed(page);
+		if (PageUptodate(page))
+			goto out;
+		/* fallocated page */
 		if (sgp != SGP_READ)
 			goto clear;
 		unlock_page(page);
 		put_page(page);
-		page = NULL;
-		hindex = index;
 	}
-	if (page || sgp == SGP_READ)
-		goto out;
+
+	*pagep = NULL;
+	if (sgp == SGP_READ)
+		return 0;
+	if (sgp == SGP_NOALLOC)
+		return -ENOENT;
 
 	/*
-	 * Fast cache lookup did not find it:
-	 * bring it back from swap or allocate.
+	 * Fast cache lookup and swap lookup did not find it: allocate.
 	 */
 
 	if (vma && userfaultfd_missing(vma)) {
Yang Shi Aug. 4, 2021, 7:01 p.m. UTC | #6
On Wed, Aug 4, 2021 at 1:28 AM Hugh Dickins <hughd@google.com> wrote:
>
> On Mon, 2 Aug 2021, Yang Shi wrote:
> > On Sat, Jul 31, 2021 at 10:22 PM Hugh Dickins <hughd@google.com> wrote:
> > > On Fri, 30 Jul 2021, Yang Shi wrote:
> > > > On Fri, Jul 30, 2021 at 12:42 AM Hugh Dickins <hughd@google.com> wrote:
> > > > >
> > > > > Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
> > > > > that a consistent set of checks can be applied, even when the inode is
> > > > > accessed through read/write syscalls (with NULL vma) instead of mmaps
> > > > > (the index argument is seldom of interest, but required by mount option
> > > > > "huge=within_size").  Clean up and rearrange the checks a little.
> > > > >
> > > > > This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
> > > > > were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes: while it's
> > > > > still true that khugepaged's collapse_file() at that point wants a small
> > > > > page, the race that might allocate it a huge page is too unlikely to be
> > > > > worth optimizing against (we are there *because* there was at least one
> > > > > small page in the way), and handled by a later PageTransCompound check.
> > > >
> > > > Yes, it seems too unlikely. But if it happens the PageTransCompound
> > > > check may be not good enough since the page allocated by
> > > > shmem_getpage() may be charged to wrong memcg (root memcg). And it
> > > > won't be replaced by a newly allocated huge page so the wrong charge
> > > > can't be undone.
> > >
> > > Good point on the memcg charge: I hadn't thought of that.  Of course
> > > it's not specific to SGP_CACHE versus SGP_NOHUGE (this patch), but I
> > > admit that a huge mischarge is hugely worse than a small mischarge.
> >
> > The small page could be collapsed to a huge page sooner or later, so
> > the mischarge may be transient. But huge page can't be replaced.
>
> You're right, if all goes well, the mischarged small page could be
> collapsed to a correctly charged huge page sooner or later (but all
> may not go well), whereas the mischarged huge page is stuck there.
>
> >
> > >
> > > We could fix it by making shmem_getpage_gfp() non-static, and pointing
> > > to the vma (hence its mm, hence its memcg) here, couldn't we?  Easily
> > > done, but I don't really want to make shmem_getpage_gfp() public just
> > > for this, for two reasons.
> > >
> > > One is that the huge race it just so unlikely; and a mischarge to root
> > > is not the end of the world, so long as it's not reproducible.  It can
> > > only happen on the very first page of the huge extent, and the prior
> >
> > OK, if so the mischarge is not as bad as what I thought in the first place.
> >
> > > "Stop if extent has been truncated" check makes sure there was one
> > > entry in the extent at that point: so the race with hole-punch can only
> > > occur after we xas_unlock_irq(&xas) immediately before shmem_getpage()
> > > looks up the page in the tree (and I say hole-punch not truncate,
> > > because shmem_getpage()'s i_size check will reject when truncated).
> > > I don't doubt that it could happen, but stand by not optimizing against.
> >
> > I agree the race is so unlikely and it may be not worth optimizing
> > against it right now, but a note or a comment may be worth.
>
> Thanks, but despite us agreeing that the race is too unlikely to be worth
> optimizing against, it does still nag at me ever since you questioned it:
> silly, but I can't quite be convinced by my own dismissals.
>
> I do still want to get rid of SGP_HUGE and SGP_NOHUGE, clearing up those
> huge allocation decisions remains the intention; but now think to add
> SGP_NOALLOC for collapse_file() in place of SGP_NOHUGE or SGP_CACHE -
> to rule out that possibility of mischarge after racing hole-punch,
> no matter whether it's huge or small.  If any such race occurs,
> collapse_file() should just give up.
>
> This being the "Stupid me" SGP_READ idea, except that of course would
> not work: because half the point of that block in collapse_file() is
> to initialize the !Uptodate pages, whereas SGP_READ avoids doing so.
>
> There is, of course, the danger that in fixing this unlikely mischarge,
> I've got the code wrong and am introducing a bug: here's what a 17/16
> would look like, though it will be better inserted early.  I got sick
> of all the "if (page "s, and was glad of the opportunity to fix that
> outdated "bring it back from swap" comment - swap got done above.
>
> What do you think? Should I add this in or leave it out?

Thanks for continuing to investigate this. The patch looks good to me;
I think we could go this way. Just a nit below.

>
> Thanks,
> Hugh
>
> --- a/include/linux/shmem_fs.h
> +++ b/include/linux/shmem_fs.h
> @@ -108,6 +108,7 @@ extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
>  /* Flag allocation requirements to shmem_getpage */
>  enum sgp_type {
>         SGP_READ,       /* don't exceed i_size, don't allocate page */
> +       SGP_NOALLOC,    /* like SGP_READ, but do use fallocated page */

The comment looks misleading: it seems SGP_NOALLOC does clear the
Uptodate flag but SGP_READ doesn't. Or is it fine not to distinguish
this difference?

>         SGP_CACHE,      /* don't exceed i_size, may allocate page */
>         SGP_WRITE,      /* may exceed i_size, may allocate !Uptodate page */
>         SGP_FALLOC,     /* like SGP_WRITE, but make existing page Uptodate */
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1721,7 +1721,7 @@ static void collapse_file(struct mm_struct *mm,
>                                 xas_unlock_irq(&xas);
>                                 /* swap in or instantiate fallocated page */
>                                 if (shmem_getpage(mapping->host, index, &page,
> -                                                 SGP_CACHE)) {
> +                                                 SGP_NOALLOC)) {
>                                         result = SCAN_FAIL;
>                                         goto xa_unlocked;
>                                 }
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1903,26 +1903,27 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
>                 return error;
>         }
>
> -       if (page)
> +       if (page) {
>                 hindex = page->index;
> -       if (page && sgp == SGP_WRITE)
> -               mark_page_accessed(page);
> -
> -       /* fallocated page? */
> -       if (page && !PageUptodate(page)) {
> +               if (sgp == SGP_WRITE)
> +                       mark_page_accessed(page);
> +               if (PageUptodate(page))
> +                       goto out;
> +               /* fallocated page */
>                 if (sgp != SGP_READ)
>                         goto clear;
>                 unlock_page(page);
>                 put_page(page);
> -               page = NULL;
> -               hindex = index;
>         }
> -       if (page || sgp == SGP_READ)
> -               goto out;
> +
> +       *pagep = NULL;
> +       if (sgp == SGP_READ)
> +               return 0;
> +       if (sgp == SGP_NOALLOC)
> +               return -ENOENT;
>
>         /*
> -        * Fast cache lookup did not find it:
> -        * bring it back from swap or allocate.
> +        * Fast cache lookup and swap lookup did not find it: allocate.
>          */
>
>         if (vma && userfaultfd_missing(vma)) {
Yang Shi Aug. 5, 2021, 11:04 p.m. UTC | #7
On Mon, Aug 2, 2021 at 2:14 PM Yang Shi <shy828301@gmail.com> wrote:
>
> On Sat, Jul 31, 2021 at 10:22 PM Hugh Dickins <hughd@google.com> wrote:
> >
> > On Fri, 30 Jul 2021, Yang Shi wrote:
> > > On Fri, Jul 30, 2021 at 12:42 AM Hugh Dickins <hughd@google.com> wrote:
> > > >
> > > > Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
> > > > that a consistent set of checks can be applied, even when the inode is
> > > > accessed through read/write syscalls (with NULL vma) instead of mmaps
> > > > (the index argument is seldom of interest, but required by mount option
> > > > "huge=within_size").  Clean up and rearrange the checks a little.
> > > >
> > > > This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
> > > > were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes: while it's
> > > > still true that khugepaged's collapse_file() at that point wants a small
> > > > page, the race that might allocate it a huge page is too unlikely to be
> > > > worth optimizing against (we are there *because* there was at least one
> > > > small page in the way), and handled by a later PageTransCompound check.
> > >
> > > Yes, it seems too unlikely. But if it happens the PageTransCompound
> > > check may be not good enough since the page allocated by
> > > shmem_getpage() may be charged to wrong memcg (root memcg). And it
> > > won't be replaced by a newly allocated huge page so the wrong charge
> > > can't be undone.
> >
> > Good point on the memcg charge: I hadn't thought of that.  Of course
> > it's not specific to SGP_CACHE versus SGP_NOHUGE (this patch), but I
> > admit that a huge mischarge is hugely worse than a small mischarge.
>
> The small page could be collapsed to a huge page sooner or later, so
> the mischarge may be transient. But huge page can't be replaced.
>
> >
> > We could fix it by making shmem_getpage_gfp() non-static, and pointing
> > to the vma (hence its mm, hence its memcg) here, couldn't we?  Easily
> > done, but I don't really want to make shmem_getpage_gfp() public just
> > for this, for two reasons.
> >
> > One is that the huge race it just so unlikely; and a mischarge to root
> > is not the end of the world, so long as it's not reproducible.  It can
> > only happen on the very first page of the huge extent, and the prior
>
> OK, if so the mischarge is not as bad as what I thought in the first place.
>
> > "Stop if extent has been truncated" check makes sure there was one
> > entry in the extent at that point: so the race with hole-punch can only
> > occur after we xas_unlock_irq(&xas) immediately before shmem_getpage()
> > looks up the page in the tree (and I say hole-punch not truncate,
> > because shmem_getpage()'s i_size check will reject when truncated).
> > I don't doubt that it could happen, but stand by not optimizing against.
>
> I agree the race is so unlikely and it may be not worth optimizing
> against it right now, but a note or a comment may be worth.
>
> >
> > Other reason is that doing shmem_getpage() (or shmem_getpage_gfp())
> > there is unhealthy for unrelated reasons, that I cannot afford to get
> > into sending patches for at this time: but some of our users found the
> > worst-case latencies in collapse_file() intolerable - shmem_getpage()
> > may be reading in from swap, while the locked head of the huge page
> > being built is in the page cache keeping other users waiting.  So,
> > I'd say there's something worse than memcg in that shmem_getpage(),
> > but fixing that cannot be a part of this series.
>
> Yeah, that is a different problem.
>
> >
> > >
> > > And, another question is it seems the newly allocated huge page will
> > > just be uncharged instead of being freed until
> > > "khugepaged_pages_to_scan" pages are scanned. The
> > > khugepaged_prealloc_page() is called to free the allocated huge page
> > > before each call to khugepaged_scan_mm_slot(). But
> > > khugepaged_scan_file() -> collapse_fille() -> khugepaged_alloc_page()
> > > may be called multiple times in the loop in khugepaged_scan_mm_slot(),
> > > so khugepaged_alloc_page() may see that page to trigger VM_BUG IIUC.
> > >
> > > The code is quite convoluted, I'm not sure whether I miss something or
> > > not. And this problem seems very hard to trigger in real life
> > > workload.
> >
> > Just to clarify, those two paragraphs are not about this patch, but about
> > what happens to mm/khugepaged.c's newly allocated huge page, when collapse
> > fails for any reason.
> >
> > Yes, the code is convoluted: that's because it takes very different paths
> > when CONFIG_NUMA=y (when it cannot predict which node to allocate from)
> > and when not NUMA (when it can allocate the huge page at a good unlocked
> > moment, and carry it forward from one attempt to the next).
> >
> > I don't like it at all, the two paths are confusing: sometimes I wonder
> > whether we should just remove the !CONFIG_NUMA path entirely; and other
> > times I wonder in the other direction, whether the CONFIG_NUMA=y path
> > ought to go the other way when it finds nr_node_ids is 1.  Undecided.
>
> I'm supposed it is just performance consideration to keep the
> allocated huge page, but I'm not sure how much the difference would be
> if we remove it (remove the !CONFIG_NUMA path) because the pcp could
> cache THP now since Mel's patch 44042b449872 ("mm/page_alloc: allow
> high-order pages to be stored on the per-cpu lists").
>
> It seems to provide the similar optimization but in the buddy
> allocator layer so that khugepaged doesn't have to maintain its own
> implementation.
>
> >
> > I'm confident that if you work through the two cases (thinking about
> > only one of them at once!), you'll find that the failure paths (not
> > to mention the successful paths) do actually work correctly without
> > leaking (well, maybe the !NUMA path can hold on to one huge page
> > indefinitely, I forget, but I wouldn't count that as leaking).
>
> IIUC the NUMA page could hold on to one huge page indefinitely. But
> I've never seen the BUG personally, so maybe you are right.

By rereading the code, I think you are correct. Both cases do work
correctly without leaking. And the !CONFIG_NUMA case may carry the
huge page indefinitely.

I think it is because khugepaged may collapse memory for another NUMA
node in the next loop, so it doesn't make too much sense to carry the
huge page, but it may be an optimization for the !CONFIG_NUMA case.

However, as I mentioned in an earlier email, the new pcp implementation
can cache THPs now, so we might not need to keep this convoluted logic
anymore: just free the page if collapse fails, then re-allocate a THP
(sketched below). The carried THP might improve the success rate a
little, but I doubt it would be noticeable enough to be worth the extra
complexity.
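
What I have in mind, roughly (just a sketch, not against any particular
tree; the real khugepaged_alloc_page() takes more arguments, and whether
the pcp list keeps this cheap enough is exactly the open question):

	/* same shape for NUMA=y and NUMA=n, one attempt at a time */
	new_page = khugepaged_alloc_page(...);	/* may come from the pcp list */
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}
	/* ... try to collapse into new_page ... */
	if (result != SCAN_SUCCEED)
		put_page(new_page);	/* high-order page can go back to pcp */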

>
> >
> > Collapse failure is not uncommon and leaking huge pages gets noticed.
> >
> > Hugh
Hugh Dickins Aug. 6, 2021, 5:21 a.m. UTC | #8
On Wed, 4 Aug 2021, Yang Shi wrote:
> On Wed, Aug 4, 2021 at 1:28 AM Hugh Dickins <hughd@google.com> wrote:
> >
> > Thanks, but despite us agreeing that the race is too unlikely to be worth
> > optimizing against, it does still nag at me ever since you questioned it:
> > silly, but I can't quite be convinced by my own dismissals.
> >
> > I do still want to get rid of SGP_HUGE and SGP_NOHUGE, clearing up those
> > huge allocation decisions remains the intention; but now think to add
> > SGP_NOALLOC for collapse_file() in place of SGP_NOHUGE or SGP_CACHE -
> > to rule out that possibility of mischarge after racing hole-punch,
> > no matter whether it's huge or small.  If any such race occurs,
> > collapse_file() should just give up.
> >
> > This being the "Stupid me" SGP_READ idea, except that of course would
> > not work: because half the point of that block in collapse_file() is
> > to initialize the !Uptodate pages, whereas SGP_READ avoids doing so.
> >
> > There is, of course, the danger that in fixing this unlikely mischarge,
> > I've got the code wrong and am introducing a bug: here's what a 17/16
> > would look like, though it will be better inserted early.  I got sick
> > of all the "if (page "s, and was glad of the opportunity to fix that
> > outdated "bring it back from swap" comment - swap got done above.
> >
> > What do you think? Should I add this in or leave it out?
> 
> Thanks for keeping investigating this. The patch looks good to me. I
> think we could go this way. Just a nit below.

Thanks, I'll add it into the series, a patch before SGP_NOHUGE goes away;
but I'm not intending to respin the series until there's more feedback
from others - fcntl versus fadvise is the main issue so far.

> > --- a/include/linux/shmem_fs.h
> > +++ b/include/linux/shmem_fs.h
> > @@ -108,6 +108,7 @@ extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
> >  /* Flag allocation requirements to shmem_getpage */
> >  enum sgp_type {
> >         SGP_READ,       /* don't exceed i_size, don't allocate page */
> > +       SGP_NOALLOC,    /* like SGP_READ, but do use fallocated page */
> 
> The comment looks misleading, it seems SGP_NOALLOC does clear the
> Uptodate flag but SGP_READ doesn't. Or it is fine not to distinguish
> this difference?

I think you meant to say, SGP_NOALLOC does *set* the Uptodate flag but
SGP_READ doesn't.  And a more significant difference, as coded to suit
collapse_file(), is that SGP_NOALLOC returns failure on hole, whereas
SGP_READ returns success: I should have mentioned that.

When I wrote "like SGP_READ" there, I just meant "like what's said in
the line above": would "ditto" be okay with you? Then I'd say
	SGP_NOALLOC,	/* ditto, but fail on hole, or use fallocated page */

I don't really want to get into the "Uptodate" business there.
And I'm afraid someone is going to ask me to write multi-line comments
on each of those SGP_flags, and I'm going to plead "read the source"!

Oh, now I see why you said SGP_NOALLOC does clear the Uptodate flag:
"goto clear", haha: when we clear the page we set the Uptodate flag.

And I may have another patch to slot in: I was half expecting you to
question why SGP_READ behaves as it does, so in preparing its defence
I checked, and found it was not doing quite what I remembered: changes
were made a long time ago, which have left it slightly suboptimal.
But that really has nothing to do with the rest of this series,
and I don't need to run it past you before reposting.

I hope that some of the features in this series can be useful to you.

Thanks,
Hugh
Hugh Dickins Aug. 6, 2021, 5:43 a.m. UTC | #9
On Thu, 5 Aug 2021, Yang Shi wrote:
> 
> By rereading the code, I think you are correct. Both cases do work
> correctly without leaking. And the !CONFIG_NUMA case may carry the
> huge page indefinitely.
> 
> I think it is because khugepaged may collapse memory for another NUMA
> node in the next loop, so it doesn't make too much sense to carry the
> huge page, but it may be an optimization for !CONFIG_NUMA case.

Yes, that is its intention.

> 
> However, as I mentioned in earlier email the new pcp implementation
> could cache THP now, so we might not need keep this convoluted logic
> anymore. Just free the page if collapse is failed then re-allocate
> THP. The carried THP might improve the success rate a little bit but I
> doubt how noticeable it would be, may be not worth for the extra
> complexity at all.

It would be great if the new pcp implementation is good enough to
get rid of khugepaged's confusing NUMA=y/NUMA=n differences; and all
the *hpage stuff too, I hope.  That would be a welcome cleanup.

> > > Collapse failure is not uncommon and leaking huge pages gets noticed.

After writing that, I realized how I'm almost always testing a NUMA=y
kernel (though on non-NUMA machines), and seldom try the NUMA=n build.
So I did, to check for a leak: no leak, indeed; but I was surprised,
when comparing vmstats, that the NUMA=n run had done 5 times as much
thp_collapse_alloc as the NUMA=y run.  I've merely made a note to look
into that one day:
maybe it was just a one-off oddity, or maybe the incrementing of stats
is wrong down one path or the other.

Hugh
Yang Shi Aug. 6, 2021, 5:41 p.m. UTC | #10
On Thu, Aug 5, 2021 at 10:21 PM Hugh Dickins <hughd@google.com> wrote:
>
> On Wed, 4 Aug 2021, Yang Shi wrote:
> > On Wed, Aug 4, 2021 at 1:28 AM Hugh Dickins <hughd@google.com> wrote:
> > >
> > > Thanks, but despite us agreeing that the race is too unlikely to be worth
> > > optimizing against, it does still nag at me ever since you questioned it:
> > > silly, but I can't quite be convinced by my own dismissals.
> > >
> > > I do still want to get rid of SGP_HUGE and SGP_NOHUGE, clearing up those
> > > huge allocation decisions remains the intention; but now think to add
> > > SGP_NOALLOC for collapse_file() in place of SGP_NOHUGE or SGP_CACHE -
> > > to rule out that possibility of mischarge after racing hole-punch,
> > > no matter whether it's huge or small.  If any such race occurs,
> > > collapse_file() should just give up.
> > >
> > > This being the "Stupid me" SGP_READ idea, except that of course would
> > > not work: because half the point of that block in collapse_file() is
> > > to initialize the !Uptodate pages, whereas SGP_READ avoids doing so.
> > >
> > > There is, of course, the danger that in fixing this unlikely mischarge,
> > > I've got the code wrong and am introducing a bug: here's what a 17/16
> > > would look like, though it will be better inserted early.  I got sick
> > > of all the "if (page "s, and was glad of the opportunity to fix that
> > > outdated "bring it back from swap" comment - swap got done above.
> > >
> > > What do you think? Should I add this in or leave it out?
> >
> > Thanks for keeping investigating this. The patch looks good to me. I
> > think we could go this way. Just a nit below.
>
> Thanks, I'll add it into the series, a patch before SGP_NOHUGE goes away;
> but I'm not intending to respin the series until there's more feedback
> from others - fcntl versus fadvise is the main issue so far.

Thanks, yeah, no hurry to repost.

>
> > > --- a/include/linux/shmem_fs.h
> > > +++ b/include/linux/shmem_fs.h
> > > @@ -108,6 +108,7 @@ extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
> > >  /* Flag allocation requirements to shmem_getpage */
> > >  enum sgp_type {
> > >         SGP_READ,       /* don't exceed i_size, don't allocate page */
> > > +       SGP_NOALLOC,    /* like SGP_READ, but do use fallocated page */
> >
> > The comment looks misleading, it seems SGP_NOALLOC does clear the
> > Uptodate flag but SGP_READ doesn't. Or it is fine not to distinguish
> > this difference?
>
> I think you meant to say, SGP_NOALLOC does *set* the Uptodate flag but
> SGP_READ doesn't.  And a more significant difference, as coded to suit
> collapse_file(), is that SGP_NOALLOC returns failure on hole, whereas
> SGP_READ returns success: I should have mentioned that.

Yes, I meant "set". Sorry for the confusion.

>
> When I wrote "like SGP_READ" there, I just meant "like what's said in
> the line above": would "ditto" be okay with you, and I say
>         SGP_NOALLOC,    /* ditto, but fail on hole, or use fallocated page */
>
> I don't really want to get into the "Uptodate" business there.
> And I'm afraid someone is going to ask me to write multi-line comments
> on each of those SGP_flags, and I'm going to plead "read the source"!

OK, I'm fine as is.

>
> Oh, now I see why you said SGP_NOALLOC does clear the Uptodate flag:
> "goto clear", haha: when we clear the page we set the Uptodate flag.
>
> And I may have another patch to slot in: I was half expecting you to
> question why SGP_READ behaves as it does, so in preparing its defence
> I checked, and found it was not doing quite what I remembered: changes
> were made a long time ago, which have left it slightly suboptimal.
> But that really has nothing to do with the rest of this series,
> and I don't need to run it past you before reposting.
>
> I hope that some of the features in this series can be useful to you.

Thanks, I will see.

>
> Thanks,
> Hugh
Yang Shi Aug. 6, 2021, 5:57 p.m. UTC | #11
On Thu, Aug 5, 2021 at 10:43 PM Hugh Dickins <hughd@google.com> wrote:
>
> On Thu, 5 Aug 2021, Yang Shi wrote:
> >
> > By rereading the code, I think you are correct. Both cases do work
> > correctly without leaking. And the !CONFIG_NUMA case may carry the
> > huge page indefinitely.
> >
> > I think it is because khugepaged may collapse memory for another NUMA
> > node in the next loop, so it doesn't make too much sense to carry the
> > huge page, but it may be an optimization for !CONFIG_NUMA case.
>
> Yes, that is its intention.
>
> >
> > However, as I mentioned in earlier email the new pcp implementation
> > could cache THP now, so we might not need keep this convoluted logic
> > anymore. Just free the page if collapse is failed then re-allocate
> > THP. The carried THP might improve the success rate a little bit but I
> > doubt how noticeable it would be, may be not worth for the extra
> > complexity at all.
>
> It would be great if the new pcp implementation is good enough to
> get rid of khugepaged's confusing NUMA=y/NUMA=n differences; and all
> the *hpage stuff too, I hope.  That would be a welcome cleanup.

The other question is whether that optimization is worth it nowadays.
I bet not many users build a NUMA=n kernel nowadays, even when the
kernel is actually running on a non-NUMA machine. Some small devices
may run a NUMA=n kernel, but I don't think they actually use THP. So
such code complexity could be removed from that point of view too.

>
> > > > Collapse failure is not uncommon and leaking huge pages gets noticed.
>
> After writing that, I realized how I'm almost always testing a NUMA=y
> kernel (though on non-NUMA machines), and seldom try the NUMA=n build.
> So did so to check no leak, indeed; but was surprised, when comparing
> vmstats, that the NUMA=n run had done 5 times as much thp_collapse_alloc
> as the NUMA=y run.  I've merely made a note to look into that one day:
> maybe it was just a one-off oddity, or maybe the incrementing of stats
> is wrong down one path or the other.

Yeah, probably.

>
> Hugh
Yang Shi Aug. 12, 2021, 6:19 p.m. UTC | #12
On Fri, Aug 6, 2021 at 10:57 AM Yang Shi <shy828301@gmail.com> wrote:
>
> On Thu, Aug 5, 2021 at 10:43 PM Hugh Dickins <hughd@google.com> wrote:
> >
> > On Thu, 5 Aug 2021, Yang Shi wrote:
> > >
> > > By rereading the code, I think you are correct. Both cases do work
> > > correctly without leaking. And the !CONFIG_NUMA case may carry the
> > > huge page indefinitely.
> > >
> > > I think it is because khugepaged may collapse memory for another NUMA
> > > node in the next loop, so it doesn't make too much sense to carry the
> > > huge page, but it may be an optimization for the !CONFIG_NUMA case.
> >
> > Yes, that is its intention.
> >
> > >
> > > However, as I mentioned in an earlier email, the new pcp implementation
> > > can cache THPs now, so we might not need to keep this convoluted logic
> > > anymore. Just free the page if the collapse fails, then re-allocate the
> > > THP. The carried THP might improve the success rate a little, but I
> > > doubt it would be noticeable, and it may not be worth the extra
> > > complexity.
> >
> > It would be great if the new pcp implementation is good enough to
> > get rid of khugepaged's confusing NUMA=y/NUMA=n differences; and all
> > the *hpage stuff too, I hope.  That would be a welcome cleanup.
>
> The other question is whether that optimization is worth it nowadays.
> I bet not many users build a NUMA=n kernel nowadays, even when the
> kernel is actually running on a non-NUMA machine. Some small devices
> may run a NUMA=n kernel, but I don't think they actually use THP. So
> such code complexity could be removed from this point of view too.
>
> >
> > > > > Collapse failure is not uncommon and leaking huge pages gets noticed.
> >
> > After writing that, I realized how I'm almost always testing a NUMA=y
> > kernel (though on non-NUMA machines), and seldom try the NUMA=n build.
> > So did so to check no leak, indeed; but was surprised, when comparing
> > vmstats, that the NUMA=n run had done 5 times as much thp_collapse_alloc
> > as the NUMA=y run.  I've merely made a note to look into that one day:
> > maybe it was just a one-off oddity, or maybe the incrementing of stats
> > is wrong down one path or the other.

I came up with a patch to remove the !CONFIG_NUMA case, and my test
found the same problem: the NUMA=n run had done 5 times as much
thp_collapse_alloc as the NUMA=y run with the vanilla kernel, exactly
as you saw.

A quick look shows the huge page allocation timing is different for
the two cases. For NUMA=n, the huge page is allocated by
khugepaged_prealloc_page() before scanning the address space, which
means a huge page may be allocated even though there is no suitable
range for collapsing. The page is then simply freed once khugepaged
has made enough progress, and another one is allocated on the next
pass. The problem should be more noticeable with a shorter scan
interval (scan_sleep_millisecs); I set it to 100ms for my test.

We could carry the huge page across scan passes for NUMA=n, but that
would make the code more complicated. I don't think it is really worth
it, so just removing the special case for NUMA=n sounds more
reasonable to me.
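
For comparison, the NUMA=y side already allocates only once a collapse
candidate has been found, roughly like this (sketch from memory of
khugepaged_alloc_page(), not taken from my cleanup patch):

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	/* allocate only when collapse_huge_page()/collapse_file() runs */
	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}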

>
> Yeah, probably.
>
> >
> > Hugh
diff mbox series

Patch

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 9b7f7ac52351..3b05a28e34c4 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -86,7 +86,12 @@  extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 extern int shmem_unuse(unsigned int type, bool frontswap,
 		       unsigned long *fs_pages_to_unuse);
 
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
+extern bool shmem_is_huge(struct vm_area_struct *vma,
+			  struct inode *inode, pgoff_t index);
+static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
+{
+	return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff);
+}
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 						pgoff_t start, pgoff_t end);
@@ -95,8 +100,6 @@  extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 enum sgp_type {
 	SGP_READ,	/* don't exceed i_size, don't allocate page */
 	SGP_CACHE,	/* don't exceed i_size, may allocate page */
-	SGP_NOHUGE,	/* like SGP_CACHE, but no huge pages */
-	SGP_HUGE,	/* like SGP_CACHE, huge pages preferred */
 	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
 	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
 };
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b0412be08fa2..cecb19c3e965 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1721,7 +1721,7 @@  static void collapse_file(struct mm_struct *mm,
 				xas_unlock_irq(&xas);
 				/* swap in or instantiate fallocated page */
 				if (shmem_getpage(mapping->host, index, &page,
-						  SGP_NOHUGE)) {
+						  SGP_CACHE)) {
 					result = SCAN_FAIL;
 					goto xa_unlocked;
 				}
diff --git a/mm/shmem.c b/mm/shmem.c
index 740d48ef1eb5..6def7391084c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -474,39 +474,35 @@  static bool shmem_confirm_swap(struct address_space *mapping,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* ifdef here to avoid bloating shmem.o when not necessary */
 
-static int shmem_huge __read_mostly;
+static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-bool shmem_huge_enabled(struct vm_area_struct *vma)
+bool shmem_is_huge(struct vm_area_struct *vma,
+		   struct inode *inode, pgoff_t index)
 {
-	struct inode *inode = file_inode(vma->vm_file);
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	loff_t i_size;
-	pgoff_t off;
 
-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-		return false;
-	if (shmem_huge == SHMEM_HUGE_FORCE)
-		return true;
 	if (shmem_huge == SHMEM_HUGE_DENY)
 		return false;
-	switch (sbinfo->huge) {
-	case SHMEM_HUGE_NEVER:
+	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
+	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
 		return false;
+	if (shmem_huge == SHMEM_HUGE_FORCE)
+		return true;
+
+	switch (SHMEM_SB(inode->i_sb)->huge) {
 	case SHMEM_HUGE_ALWAYS:
 		return true;
 	case SHMEM_HUGE_WITHIN_SIZE:
-		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
+		index = round_up(index, HPAGE_PMD_NR);
 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >= HPAGE_PMD_SIZE &&
-				i_size >> PAGE_SHIFT >= off)
+		if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
 			return true;
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
-		/* TODO: implement fadvise() hints */
-		return (vma->vm_flags & VM_HUGEPAGE);
+		if (vma && (vma->vm_flags & VM_HUGEPAGE))
+			return true;
+		fallthrough;
 	default:
-		VM_BUG_ON(1);
 		return false;
 	}
 }
@@ -680,6 +676,12 @@  static long shmem_unused_huge_count(struct super_block *sb,
 
 #define shmem_huge SHMEM_HUGE_DENY
 
+bool shmem_is_huge(struct vm_area_struct *vma,
+		   struct inode *inode, pgoff_t index)
+{
+	return false;
+}
+
 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 		struct shrink_control *sc, unsigned long nr_to_split)
 {
@@ -1829,7 +1831,6 @@  static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	struct shmem_sb_info *sbinfo;
 	struct mm_struct *charge_mm;
 	struct page *page;
-	enum sgp_type sgp_huge = sgp;
 	pgoff_t hindex = index;
 	gfp_t huge_gfp;
 	int error;
@@ -1838,8 +1839,6 @@  static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 
 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
 		return -EFBIG;
-	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
-		sgp = SGP_CACHE;
 repeat:
 	if (sgp <= SGP_CACHE &&
 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
@@ -1898,36 +1897,12 @@  static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		return 0;
 	}
 
-	/* shmem_symlink() */
-	if (!shmem_mapping(mapping))
-		goto alloc_nohuge;
-	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
+	/* Never use a huge page for shmem_symlink() */
+	if (S_ISLNK(inode->i_mode))
 		goto alloc_nohuge;
-	if (shmem_huge == SHMEM_HUGE_FORCE)
-		goto alloc_huge;
-	switch (sbinfo->huge) {
-	case SHMEM_HUGE_NEVER:
+	if (!shmem_is_huge(vma, inode, index))
 		goto alloc_nohuge;
-	case SHMEM_HUGE_WITHIN_SIZE: {
-		loff_t i_size;
-		pgoff_t off;
-
-		off = round_up(index, HPAGE_PMD_NR);
-		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >= HPAGE_PMD_SIZE &&
-		    i_size >> PAGE_SHIFT >= off)
-			goto alloc_huge;
 
-		fallthrough;
-	}
-	case SHMEM_HUGE_ADVISE:
-		if (sgp_huge == SGP_HUGE)
-			goto alloc_huge;
-		/* TODO: implement fadvise() hints */
-		goto alloc_nohuge;
-	}
-
-alloc_huge:
 	huge_gfp = vma_thp_gfp_mask(vma);
 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
 	page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
@@ -2083,7 +2058,6 @@  static vm_fault_t shmem_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct inode *inode = file_inode(vma->vm_file);
 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
-	enum sgp_type sgp;
 	int err;
 	vm_fault_t ret = VM_FAULT_LOCKED;
 
@@ -2146,15 +2120,7 @@  static vm_fault_t shmem_fault(struct vm_fault *vmf)
 		spin_unlock(&inode->i_lock);
 	}
 
-	sgp = SGP_CACHE;
-
-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-		sgp = SGP_NOHUGE;
-	else if (vma->vm_flags & VM_HUGEPAGE)
-		sgp = SGP_HUGE;
-
-	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
+	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
 				  gfp, vma, vmf, &ret);
 	if (err)
 		return vmf_error(err);
@@ -3961,7 +3927,7 @@  int __init shmem_init(void)
 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
 	else
-		shmem_huge = 0; /* just in case it was patched */
+		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
 #endif
 	return 0;