Message ID | 20190307132015.26970-5-alex@ghiti.fr (mailing list archive) |
---|---|
State | New, archived |
Series | Fix free/allocation of runtime gigantic pages |
On 3/7/19 5:20 AM, Alexandre Ghiti wrote:
> On systems without CONTIG_ALLOC activated but that support gigantic pages,
> boottime reserved gigantic pages can not be freed at all. This patch
> simply enables the possibility to hand back those pages to memory
> allocator.
>
> Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
> Acked-by: David S. Miller <davem@davemloft.net> [sparc]

Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
On 3/8/19 2:05 PM, Mike Kravetz wrote:
> On 3/7/19 5:20 AM, Alexandre Ghiti wrote:
>> On systems without CONTIG_ALLOC activated but that support gigantic pages,
>> boottime reserved gigantic pages can not be freed at all. This patch
>> simply enables the possibility to hand back those pages to memory
>> allocator.
>>
>> Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
>> Acked-by: David S. Miller <davem@davemloft.net> [sparc]
>
> Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>

Thanks Mike,

Alex
[ Cc += Aneesh ]

Alexandre Ghiti <alex@ghiti.fr> writes:
> diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
> index 5b0177733994..d04a0bcc2f1c 100644
> --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
> +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
> @@ -32,13 +32,6 @@ static inline int hstate_get_psize(struct hstate *hstate)
>  	}
>  }
>  
> -#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
> -static inline bool gigantic_page_supported(void)
> -{
> -	return true;
> -}
> -#endif

This is going to clash with:

  https://patchwork.ozlabs.org/patch/1047003/

Which does:

  @@ -35,6 +35,13 @@ static inline int hstate_get_psize(struct hstate *hstate)
   #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
   static inline bool gigantic_page_supported(void)
   {
  +	/*
  +	 * We used gigantic page reservation with hypervisor assist in some case.
  +	 * We cannot use runtime allocation of gigantic pages in those platforms
  +	 * This is hash translation mode LPARs.
  +	 */
  +	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
  +		return false;
   	return true;
   }
   #endif

Not sure how to resolve it.

cheers
Alexandre Ghiti <alex@ghiti.fr> writes:

> On systems without CONTIG_ALLOC activated but that support gigantic pages,
> boottime reserved gigantic pages can not be freed at all. This patch
> simply enables the possibility to hand back those pages to memory
> allocator.
>
> Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
> Acked-by: David S. Miller <davem@davemloft.net> [sparc]
>
> [...]
>
> diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
> index 5b0177733994..d04a0bcc2f1c 100644
> --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
> +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
> @@ -32,13 +32,6 @@ static inline int hstate_get_psize(struct hstate *hstate)
>  	}
>  }
>  
> -#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
> -static inline bool gigantic_page_supported(void)
> -{
> -	return true;
> -}
> -#endif
> -
>  /* hugepd entry valid bit */
>  #define HUGEPD_VAL_BITS	(0x8000000000000000UL)
>

As explained in https://patchwork.ozlabs.org/patch/1047003/
architectures like ppc64 have a hypervisor assisted mechanism to indicate
where to find gigantic huge pages (16G pages). At this point, we don't use
these reserved pages for anything other than hugetlb backing and hence
there is no runtime free of these pages needed (also we don't do runtime
allocation of them).

I guess you can still achieve what you want to do in this patch by
keeping gigantic_page_supported()?

NOTE: We should rename gigantic_page_supported to be more specific to
support for runtime alloc/free of gigantic pages

-aneesh
On 03/14/2019 06:52 AM, Aneesh Kumar K.V wrote:
> Alexandre Ghiti <alex@ghiti.fr> writes:
>
>> On systems without CONTIG_ALLOC activated but that support gigantic pages,
>> boottime reserved gigantic pages can not be freed at all. This patch
>> simply enables the possibility to hand back those pages to memory
>> allocator.
>>
>> Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
>> Acked-by: David S. Miller <davem@davemloft.net> [sparc]
>>
>> [...]
>
> As explained in https://patchwork.ozlabs.org/patch/1047003/
> architectures like ppc64 have a hypervisor assisted mechanism to indicate
> where to find gigantic huge pages (16G pages). At this point, we don't use
> these reserved pages for anything other than hugetlb backing and hence
> there is no runtime free of these pages needed (also we don't do runtime
> allocation of them).
>
> I guess you can still achieve what you want to do in this patch by
> keeping gigantic_page_supported()?
>
> NOTE: We should rename gigantic_page_supported to be more specific to
> support for runtime alloc/free of gigantic pages
>
> -aneesh

Thanks for noticing Aneesh.

I can't find a better solution than bringing back the
gigantic_page_supported() check, since it must be done at runtime in your
case. I'm not sure of one thing though: you say that freeing boottime
gigantic pages is not needed, but is it forbidden ? Just to know where the
check and what its new name should be.

Is something like that (on top of this series) ok for you (and everyone
else) before I send a v7:

diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index d04a0bc..d121559 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -35,4 +35,20 @@ static inline int hstate_get_psize(struct hstate *hstate)
 /* hugepd entry valid bit */
 #define HUGEPD_VAL_BITS	(0x8000000000000000UL)
 
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+#define __HAVE_ARCH_GIGANTIC_PAGE_SUPPORTED
+static inline bool gigantic_page_supported(void)
+{
+	/*
+	 * We used gigantic page reservation with hypervisor assist in some case.
+	 * We cannot use runtime allocation of gigantic pages in those platforms
+	 * This is hash translation mode LPARs.
+	 */
+	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
+		return false;
+
+	return true;
+}
+#endif
+
 #endif
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 71d7b77..7d12e73 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -126,4 +126,18 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
 }
 #endif
 
+#ifndef __HAVE_ARCH_GIGANTIC_PAGE_SUPPORTED
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+static inline bool gigantic_page_supported(void)
+{
+	return true;
+}
+#else
+static inline bool gigantic_page_supported(void)
+{
+	return false;
+}
+#endif /* CONFIG_ARCH_HAS_GIGANTIC_PAGE */
+#endif /* __HAVE_ARCH_GIGANTIC_PAGE_SUPPORTED */
+
 #endif /* _ASM_GENERIC_HUGETLB_H */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9fc96ef..cfbbafe 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2425,6 +2425,11 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 	int err;
 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 
+	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	if (nid == NUMA_NO_NODE) {
 		/*
 		 * global hstate attribute
@@ -2446,6 +2451,7 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 
 	err = set_max_huge_pages(h, count, nodes_allowed);
 
+out:
 	if (nodes_allowed != &node_states[N_MEMORY])
 		NODEMASK_FREE(nodes_allowed);
On 3/14/19 5:13 PM, Alexandre Ghiti wrote:
> On 03/14/2019 06:52 AM, Aneesh Kumar K.V wrote:
>> [...]
>>
>> I guess you can still achieve what you want to do in this patch by
>> keeping gigantic_page_supported()?
>>
>> NOTE: We should rename gigantic_page_supported to be more specific to
>> support for runtime alloc/free of gigantic pages
>
> Thanks for noticing Aneesh.
>
> I can't find a better solution than bringing back the
> gigantic_page_supported() check, since it must be done at runtime in your
> case. I'm not sure of one thing though: you say that freeing boottime
> gigantic pages is not needed, but is it forbidden ? Just to know where
> the check and what its new name should be.
>
> Is something like that (on top of this series) ok for you (and everyone
> else) before I send a v7:
>
> [...]
>
> diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
> index 71d7b77..7d12e73 100644
> --- a/include/asm-generic/hugetlb.h
> +++ b/include/asm-generic/hugetlb.h
> @@ -126,4 +126,18 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
>  }
>  #endif
>  
> +#ifndef __HAVE_ARCH_GIGANTIC_PAGE_SUPPORTED
> +#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE

The pattern I like is

#ifndef gigantic_page_supported
#define gigantic_page_supported gigantic_page_supported
static inline bool gigantic_page_supported(void)
{
	return true;
}
#endif

instead of __HAVE_ARCH_GIGANTIC_PAGE_SUPPORTED.

> +static inline bool gigantic_page_supported(void)
> +{
> +	return true;
> +}
> +#else
> +static inline bool gigantic_page_supported(void)
> +{
> +	return false;
> +}
> +#endif /* CONFIG_ARCH_HAS_GIGANTIC_PAGE */
> +#endif /* __HAVE_ARCH_GIGANTIC_PAGE_SUPPORTED */
> +
>  #endif /* _ASM_GENERIC_HUGETLB_H */
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 9fc96ef..cfbbafe 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -2425,6 +2425,11 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
>  	int err;
>  	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
>  
> +	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
> +		err = -EINVAL;
> +		goto out;
> +	}

You should restore the other users of gigantic_page_supported(), not just
this one. That would make your earlier patch just remove
gigantic_page_supported() from every architecture other than ppc64 and add
a generic version as above.

> [...]

-aneesh.
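To make the override idiom Aneesh suggests above concrete, here is a small
stand-alone sketch (plain userspace C, not the real kernel headers; the
helper bodies and the "arch"/"generic" split are illustrative assumptions)
showing how defining a macro with the same name as the function lets an
arch header suppress the generic fallback:

/*
 * Stand-alone sketch of the "#define gigantic_page_supported
 * gigantic_page_supported" idiom. In the kernel, the arch header
 * (asm/hugetlb.h) is included before asm-generic/hugetlb.h, so defining the
 * macro next to the arch function hides the generic default below it.
 */
#include <stdbool.h>
#include <stdio.h>

/* --- "arch" header: provides its own version and flags the override --- */
#define gigantic_page_supported gigantic_page_supported
static inline bool gigantic_page_supported(void)
{
	/* e.g. ppc64 would return false here for hash-MMU LPARs */
	return false;
}

/* --- "generic" header: fallback compiled only without an arch override --- */
#ifndef gigantic_page_supported
#define gigantic_page_supported gigantic_page_supported
static inline bool gigantic_page_supported(void)
{
	return true;
}
#endif

int main(void)
{
	/* Prints 0: the arch override wins over the generic default. */
	printf("%d\n", gigantic_page_supported());
	return 0;
}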
On 03/14/2019 02:17 PM, Aneesh Kumar K.V wrote:
> On 3/14/19 5:13 PM, Alexandre Ghiti wrote:
>> On 03/14/2019 06:52 AM, Aneesh Kumar K.V wrote:
>>> [...]
>>
>> Thanks for noticing Aneesh.
>>
>> I can't find a better solution than bringing back the
>> gigantic_page_supported() check, since it must be done at runtime in
>> your case. I'm not sure of one thing though: you say that freeing
>> boottime gigantic pages is not needed, but is it forbidden ? Just to
>> know where the check and what its new name should be.

You did not answer this question: is freeing boottime gigantic pages
"forbidden" or just not needed ?

>> Is something like that (on top of this series) ok for you (and everyone
>> else) before I send a v7:
>>
>> [...]
>>
>> +#ifndef __HAVE_ARCH_GIGANTIC_PAGE_SUPPORTED
>> +#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
>
> The pattern I like is
>
> #ifndef gigantic_page_supported
> #define gigantic_page_supported gigantic_page_supported
> static inline bool gigantic_page_supported(void)
> {
> 	return true;
> }
> #endif
>
> instead of __HAVE_ARCH_GIGANTIC_PAGE_SUPPORTED.

I see, that avoids a new define. However, it is not consistent with the
rest of the function definitions in generic hugetlb.h. What do you think ?
Should I follow the same format ? Or use yours ?

>> [...]
>>
>> +	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
>> +		err = -EINVAL;
>> +		goto out;
>> +	}
>
> You should restore the other users of gigantic_page_supported(), not
> just this one. That would make your earlier patch just remove
> gigantic_page_supported() from every architecture other than ppc64 and
> add a generic version as above.

I'll restore the check in update_and_free_page too depending on your
answer to the above question, since adding this check back would not
allow to free boottime gigantic pages.
On 3/14/19 7:22 PM, Alexandre Ghiti wrote:
> On 03/14/2019 02:17 PM, Aneesh Kumar K.V wrote:
>> On 3/14/19 5:13 PM, Alexandre Ghiti wrote:
>>> On 03/14/2019 06:52 AM, Aneesh Kumar K.V wrote:
>>>> Alexandre Ghiti <alex@ghiti.fr> writes:
>>>> [...]
>>
>>> Thanks for noticing Aneesh.
>>>
>>> I can't find a better solution than bringing back the
>>> gigantic_page_supported() check, since it must be done at runtime in
>>> your case. I'm not sure of one thing though: you say that freeing
>>> boottime gigantic pages is not needed, but is it forbidden ? Just to
>>> know where the check and what its new name should be.
>
> You did not answer this question: is freeing boottime gigantic pages
> "forbidden" or just not needed ?

IMHO if we don't allow runtime allocation of gigantic hugepages, we should
not allow runtime free of gigantic hugepages.

Now w.r.t. ppc64, the hypervisor passes hints about the gigantic hugepages
via device tree nodes. Early in boot we mark these pages as reserved and
during hugetlb init we use these reserved pages for backing hugetlb fs.

Now "forbidden" is not the exact reason. We don't have code to put them
back in the reserved list. Hence I would say "not supported".

-aneesh
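To make the resulting policy easy to see at a glance, here is a small
stand-alone model (plain C; the function and parameter names are made up
for illustration, not kernel APIs) of the decision matrix the thread
converges on, before the patch under review itself, which follows below:
without CONTIG_ALLOC the gigantic-page pool may only shrink, and on
platforms whose reserved gigantic pages cannot be handed back (ppc64
hash-MMU LPARs) it may not change at all.

#include <stdbool.h>
#include <stdio.h>

/*
 * "arch_runtime_ok" stands for whatever the renamed
 * gigantic_page_supported() helper ends up expressing (false on ppc64
 * hash-MMU LPARs); "contig_alloc" stands for CONFIG_CONTIG_ALLOC.
 */
static bool gigantic_pool_resize_allowed(bool contig_alloc,
					 bool arch_runtime_ok,
					 unsigned long cur_pages,
					 unsigned long req_pages)
{
	if (!arch_runtime_ok)
		return false;	/* neither runtime alloc nor runtime free */
	if (!contig_alloc && req_pages > cur_pages)
		return false;	/* may free boot-time pages, may not allocate */
	return true;
}

int main(void)
{
	/* No CONTIG_ALLOC: freeing 2 boot-time gigantic pages is allowed... */
	printf("%d\n", gigantic_pool_resize_allowed(false, true, 2, 0)); /* 1 */
	/* ...but growing the pool is rejected (-EINVAL in the patch). */
	printf("%d\n", gigantic_pool_resize_allowed(false, true, 2, 4)); /* 0 */
	/* ppc64 hash LPAR: any runtime change of the pool is rejected. */
	printf("%d\n", gigantic_pool_resize_allowed(true, false, 2, 0)); /* 0 */
	return 0;
}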
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 091a513b93e9..af687eff884a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -18,7 +18,7 @@ config ARM64
 	select ARCH_HAS_FAST_MULTIPLIER
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
-	select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_PTE_SPECIAL
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index fb6609875455..59893e766824 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -65,8 +65,4 @@ extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #include <asm-generic/hugetlb.h>
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
-
 #endif /* __ASM_HUGETLB_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 5b0177733994..d04a0bcc2f1c 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -32,13 +32,6 @@ static inline int hstate_get_psize(struct hstate *hstate)
 	}
 }
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void)
-{
-	return true;
-}
-#endif
-
 /* hugepd entry valid bit */
 #define HUGEPD_VAL_BITS	(0x8000000000000000UL)
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index f677c8974212..dc0328de20cd 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -319,7 +319,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 config PPC_RADIX_MMU
 	bool "Radix MMU Support"
 	depends on PPC_BOOK3S_64
-	select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+	select ARCH_HAS_GIGANTIC_PAGE
 	default y
 	help
 	  Enable support for the Power ISA 3.0 Radix style MMU. Currently this
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1c57b83c76f5..d84e536796b1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -69,7 +69,7 @@ config S390
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
-	select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SET_MEMORY
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 2d1afa58a4b6..bd191560efcf 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -116,7 +116,4 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
 	return pte_modify(pte, newprot);
 }
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
 #endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index c7266302691c..404b12a0d871 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -53,7 +53,7 @@ config SUPERH
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_NMI
 	select NEED_SG_DMA_LENGTH
-	select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+	select ARCH_HAS_GIGANTIC_PAGE
 
 	help
 	  The SuperH is a RISC processor targeted for use in embedded systems
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index ca33c80870e2..234a6bd46e89 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -90,7 +90,7 @@ config SPARC64
 	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_HAS_PTE_SPECIAL
 	select PCI_DOMAINS if PCI
-	select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+	select ARCH_HAS_GIGANTIC_PAGE
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8ba90f3e0038..ff24eaeef211 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -23,7 +23,7 @@ config X86_64
 	def_bool y
 	depends on 64BIT
 	# Options that are inherently 64-bit kernel only:
-	select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_SUPPORTS_INT128
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_SOFT_DIRTY
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 7469d321f072..f65cfb48cfdd 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -17,8 +17,4 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 {
 }
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
-
 #endif /* _ASM_X86_HUGETLB_H */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 1f1ad9aeebb9..58ea44bf75de 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -589,8 +589,8 @@ static inline bool pm_suspended_storage(void)
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
 			      unsigned migratetype, gfp_t gfp_mask);
-extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
 #endif
+extern void free_contig_range(unsigned long pfn, unsigned int nr_pages);
 
 #ifdef CONFIG_CMA
 /* CMA stuff */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index afef61656c1e..9fc96ef5aa78 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1058,6 +1058,7 @@ static void free_gigantic_page(struct page *page, unsigned int order)
 	free_contig_range(page_to_pfn(page), 1 << order);
 }
 
+#ifdef CONFIG_CONTIG_ALLOC
 static int __alloc_gigantic_page(unsigned long start_pfn,
 				 unsigned long nr_pages, gfp_t gfp_mask)
 {
@@ -1142,11 +1143,20 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
 
+#else /* !CONFIG_CONTIG_ALLOC */
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+					int nid, nodemask_t *nodemask)
+{
+	return NULL;
+}
+#endif /* CONFIG_CONTIG_ALLOC */
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
-static inline bool gigantic_page_supported(void) { return false; }
 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
-		int nid, nodemask_t *nodemask) { return NULL; }
+					int nid, nodemask_t *nodemask)
+{
+	return NULL;
+}
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
 						unsigned int order) { }
@@ -1156,9 +1166,6 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 {
 	int i;
 
-	if (hstate_is_gigantic(h) && !gigantic_page_supported())
-		return;
-
 	h->nr_huge_pages--;
 	h->nr_huge_pages_node[page_to_nid(page)]--;
 	for (i = 0; i < pages_per_huge_page(h); i++) {
@@ -2276,13 +2283,27 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 }
 
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
-						nodemask_t *nodes_allowed)
+static int set_max_huge_pages(struct hstate *h, unsigned long count,
+			      nodemask_t *nodes_allowed)
 {
 	unsigned long min_count, ret;
 
-	if (hstate_is_gigantic(h) && !gigantic_page_supported())
-		return h->max_huge_pages;
+	spin_lock(&hugetlb_lock);
+
+	/*
+	 * Gigantic pages runtime allocation depend on the capability for large
+	 * page range allocation.
+	 * If the system does not provide this feature, return an error when
+	 * the user tries to allocate gigantic pages but let the user free the
+	 * boottime allocated gigantic pages.
+	 */
+	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
+		if (count > persistent_huge_pages(h)) {
+			spin_unlock(&hugetlb_lock);
+			return -EINVAL;
+		}
+		/* Fall through to decrease pool */
+	}
 
 	/*
 	 * Increase the pool size
@@ -2295,7 +2316,6 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
 	 * pool might be one hugepage larger than it needs to be, but
 	 * within all the constraints specified by the sysctls.
 	 */
-	spin_lock(&hugetlb_lock);
 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
 			break;
@@ -2350,9 +2370,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
 			break;
 	}
 out:
-	ret = persistent_huge_pages(h);
+	h->max_huge_pages = persistent_huge_pages(h);
 	spin_unlock(&hugetlb_lock);
-	return ret;
+
+	return 0;
 }
 
 #define HSTATE_ATTR_RO(_name) \
@@ -2404,11 +2425,6 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 	int err;
 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 
-	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
-		err = -EINVAL;
-		goto out;
-	}
-
 	if (nid == NUMA_NO_NODE) {
 		/*
 		 * global hstate attribute
@@ -2428,15 +2444,12 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 	} else
 		nodes_allowed = &node_states[N_MEMORY];
 
-	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
+	err = set_max_huge_pages(h, count, nodes_allowed);
 
 	if (nodes_allowed != &node_states[N_MEMORY])
 		NODEMASK_FREE(nodes_allowed);
 
-	return len;
-out:
-	NODEMASK_FREE(nodes_allowed);
-	return err;
+	return err ? err : len;
 }
 
 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ac9c45ffb344..a4547d90fa7a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8234,8 +8234,9 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 				pfn_max_align_up(end), migratetype);
 	return ret;
 }
+#endif /* CONFIG_CONTIG_ALLOC */
 
-void free_contig_range(unsigned long pfn, unsigned nr_pages)
+void free_contig_range(unsigned long pfn, unsigned int nr_pages)
 {
 	unsigned int count = 0;
 
@@ -8247,7 +8248,6 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
 	}
 	WARN(count != 0, "%d pages are still in use!\n", count);
 }
-#endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*