| Message ID | 1522636236-12625-5-git-send-email-hejianet@gmail.com (mailing list archive) |
|---|---|
| State | New, archived |
On 2 April 2018 at 04:30, Jia He <hejianet@gmail.com> wrote:
> This is the preparation for further optimizing in early_pfn_valid
> on arm and arm64.
>

Same as before - please share the code between ARM and arm64. If
necessary, you can invent a new HAVE_ARCH_xxx symbol that is only
defined by ARM and arm64.

Also, please explain what the patch does and, more importantly, why.

> Signed-off-by: Jia He <jia.he@hxt-semitech.com>
> ---
>  arch/arm/include/asm/page.h   |  3 ++-
>  arch/arm/mm/init.c            | 24 ++++++++++++++++++++++++
>  arch/arm64/include/asm/page.h |  3 ++-
>  arch/arm64/mm/init.c          | 24 ++++++++++++++++++++++++
>  4 files changed, 52 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
> index f38909c..3bd810e 100644
> --- a/arch/arm/include/asm/page.h
> +++ b/arch/arm/include/asm/page.h
> @@ -158,9 +158,10 @@ typedef struct page *pgtable_t;
>
>  #ifdef CONFIG_HAVE_ARCH_PFN_VALID
>  extern int early_region_idx;
> -extern int pfn_valid(unsigned long);
> +extern int pfn_valid(unsigned long pfn);
>  extern unsigned long memblock_next_valid_pfn(unsigned long pfn);
>  #define skip_to_last_invalid_pfn(pfn) (memblock_next_valid_pfn(pfn) - 1)
> +extern int pfn_valid_region(unsigned long pfn);
>  #endif
>
>  #include <asm/memory.h>
> diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
> index 06ed190..bdcbf58 100644
> --- a/arch/arm/mm/init.c
> +++ b/arch/arm/mm/init.c
> @@ -201,6 +201,30 @@ int pfn_valid(unsigned long pfn)
>  }
>  EXPORT_SYMBOL(pfn_valid);
>
> +int pfn_valid_region(unsigned long pfn)
> +{
> +        unsigned long start_pfn, end_pfn;
> +        struct memblock_type *type = &memblock.memory;
> +        struct memblock_region *regions = type->regions;
> +
> +        if (early_region_idx != -1) {
> +                start_pfn = PFN_DOWN(regions[early_region_idx].base);
> +                end_pfn = PFN_DOWN(regions[early_region_idx].base +
> +                                regions[early_region_idx].size);
> +
> +                if (pfn >= start_pfn && pfn < end_pfn)
> +                        return !memblock_is_nomap(
> +                                        &regions[early_region_idx]);
> +        }
> +
> +        early_region_idx = memblock_search_pfn_regions(pfn);
> +        if (early_region_idx == -1)
> +                return false;
> +
> +        return !memblock_is_nomap(&regions[early_region_idx]);
> +}
> +EXPORT_SYMBOL(pfn_valid_region);
> +
>  /* HAVE_MEMBLOCK is always enabled on arm */
>  unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn)
>  {
> diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
> index f0d8c8e5..7087b63 100644
> --- a/arch/arm64/include/asm/page.h
> +++ b/arch/arm64/include/asm/page.h
> @@ -39,9 +39,10 @@ typedef struct page *pgtable_t;
>
>  #ifdef CONFIG_HAVE_ARCH_PFN_VALID
>  extern int early_region_idx;
> -extern int pfn_valid(unsigned long);
> +extern int pfn_valid(unsigned long pfn);
>  extern unsigned long memblock_next_valid_pfn(unsigned long pfn);
>  #define skip_to_last_invalid_pfn(pfn) (memblock_next_valid_pfn(pfn) - 1)
> +extern int pfn_valid_region(unsigned long pfn);
>  #endif
>
>  #include <asm/memory.h>
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 342e4e2..a1646b6 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -293,6 +293,30 @@ int pfn_valid(unsigned long pfn)
>  }
>  EXPORT_SYMBOL(pfn_valid);
>
> +int pfn_valid_region(unsigned long pfn)
> +{
> +        unsigned long start_pfn, end_pfn;
> +        struct memblock_type *type = &memblock.memory;
> +        struct memblock_region *regions = type->regions;
> +
> +        if (early_region_idx != -1) {
> +                start_pfn = PFN_DOWN(regions[early_region_idx].base);
> +                end_pfn = PFN_DOWN(regions[early_region_idx].base +
> +                                regions[early_region_idx].size);
> +
> +                if (pfn >= start_pfn && pfn < end_pfn)
> +                        return !memblock_is_nomap(
> +                                        &regions[early_region_idx]);
> +        }
> +
> +        early_region_idx = memblock_search_pfn_regions(pfn);
> +        if (early_region_idx == -1)
> +                return false;
> +
> +        return !memblock_is_nomap(&regions[early_region_idx]);
> +}
> +EXPORT_SYMBOL(pfn_valid_region);
> +
>  /* HAVE_MEMBLOCK is always enabled on arm64 */
>  unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn)
>  {
> --
> 2.7.4
>
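To illustrate the suggestion above, a minimal sketch of what sharing the helper could look like, assuming a single copy lives in common memblock code behind a new opt-in Kconfig symbol; the symbol name HAVE_MEMBLOCK_PFN_VALID_REGION and the file placement are hypothetical, not part of this series:

/*
 * Hypothetical sketch, e.g. in mm/memblock.c: one shared copy of the
 * helper, built only for architectures that do
 * "select HAVE_MEMBLOCK_PFN_VALID_REGION" (a made-up symbol), instead
 * of duplicating the body in arch/arm and arch/arm64.
 */
#ifdef CONFIG_HAVE_MEMBLOCK_PFN_VALID_REGION
int pfn_valid_region(unsigned long pfn)
{
        unsigned long start_pfn, end_pfn;
        struct memblock_region *regions = memblock.memory.regions;

        /* Fast path: the pfn falls inside the region found last time. */
        if (early_region_idx != -1) {
                start_pfn = PFN_DOWN(regions[early_region_idx].base);
                end_pfn = PFN_DOWN(regions[early_region_idx].base +
                                   regions[early_region_idx].size);
                if (pfn >= start_pfn && pfn < end_pfn)
                        return !memblock_is_nomap(&regions[early_region_idx]);
        }

        /* Slow path: search the memblock regions and cache the result. */
        early_region_idx = memblock_search_pfn_regions(pfn);
        if (early_region_idx == -1)
                return false;

        return !memblock_is_nomap(&regions[early_region_idx]);
}
#endif /* CONFIG_HAVE_MEMBLOCK_PFN_VALID_REGION */

With that shape, the arm and arm64 page.h headers would only need the extern prototype, and the implementation would exist in a single place.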
Hi Jia,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on linus/master]
[also build test WARNING on v4.16 next-20180329]
[cannot apply to arm64/for-next/core]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/Jia-He/optimize-memblock_next_valid_pfn-and-early_pfn_valid-on-arm-and-arm64/20180402-131223
config: arm64-allmodconfig (attached as .config)
compiler: aarch64-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
make.cross ARCH=arm64
All warnings (new ones prefixed by >>):
>> WARNING: vmlinux.o(.text+0x39c4c): Section mismatch in reference from the function pfn_valid_region() to the variable .meminit.data:$d
The function pfn_valid_region() references
the variable __meminitdata $d.
This is often because pfn_valid_region lacks a __meminitdata
annotation or the annotation of $d is wrong.
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
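The section mismatch above comes from pfn_valid_region() referencing memblock data that lives in an init section. One direction the robot's hint points at (a sketch only, assuming the helper is needed solely during early boot; not necessarily the fix the author chose) is to annotate the function the same way memblock_next_valid_pfn() already is in these files, and to drop the EXPORT_SYMBOL() if no module ever calls it:

-int pfn_valid_region(unsigned long pfn)
+int __init_memblock pfn_valid_region(unsigned long pfn)
 {
         ...
 }
-EXPORT_SYMBOL(pfn_valid_region);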
This is the preparation for further optimizing in early_pfn_valid
on arm and arm64.

Signed-off-by: Jia He <jia.he@hxt-semitech.com>
---
 arch/arm/include/asm/page.h   |  3 ++-
 arch/arm/mm/init.c            | 24 ++++++++++++++++++++++++
 arch/arm64/include/asm/page.h |  3 ++-
 arch/arm64/mm/init.c          | 24 ++++++++++++++++++++++++
 4 files changed, 52 insertions(+), 2 deletions(-)

diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index f38909c..3bd810e 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -158,9 +158,10 @@ typedef struct page *pgtable_t;
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 extern int early_region_idx;
-extern int pfn_valid(unsigned long);
+extern int pfn_valid(unsigned long pfn);
 extern unsigned long memblock_next_valid_pfn(unsigned long pfn);
 #define skip_to_last_invalid_pfn(pfn) (memblock_next_valid_pfn(pfn) - 1)
+extern int pfn_valid_region(unsigned long pfn);
 #endif
 
 #include <asm/memory.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 06ed190..bdcbf58 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -201,6 +201,30 @@ int pfn_valid(unsigned long pfn)
 }
 EXPORT_SYMBOL(pfn_valid);
 
+int pfn_valid_region(unsigned long pfn)
+{
+        unsigned long start_pfn, end_pfn;
+        struct memblock_type *type = &memblock.memory;
+        struct memblock_region *regions = type->regions;
+
+        if (early_region_idx != -1) {
+                start_pfn = PFN_DOWN(regions[early_region_idx].base);
+                end_pfn = PFN_DOWN(regions[early_region_idx].base +
+                                regions[early_region_idx].size);
+
+                if (pfn >= start_pfn && pfn < end_pfn)
+                        return !memblock_is_nomap(
+                                        &regions[early_region_idx]);
+        }
+
+        early_region_idx = memblock_search_pfn_regions(pfn);
+        if (early_region_idx == -1)
+                return false;
+
+        return !memblock_is_nomap(&regions[early_region_idx]);
+}
+EXPORT_SYMBOL(pfn_valid_region);
+
 /* HAVE_MEMBLOCK is always enabled on arm */
 unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn)
 {
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index f0d8c8e5..7087b63 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -39,9 +39,10 @@ typedef struct page *pgtable_t;
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 extern int early_region_idx;
-extern int pfn_valid(unsigned long);
+extern int pfn_valid(unsigned long pfn);
 extern unsigned long memblock_next_valid_pfn(unsigned long pfn);
 #define skip_to_last_invalid_pfn(pfn) (memblock_next_valid_pfn(pfn) - 1)
+extern int pfn_valid_region(unsigned long pfn);
 #endif
 
 #include <asm/memory.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 342e4e2..a1646b6 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -293,6 +293,30 @@ int pfn_valid(unsigned long pfn)
 }
 EXPORT_SYMBOL(pfn_valid);
 
+int pfn_valid_region(unsigned long pfn)
+{
+        unsigned long start_pfn, end_pfn;
+        struct memblock_type *type = &memblock.memory;
+        struct memblock_region *regions = type->regions;
+
+        if (early_region_idx != -1) {
+                start_pfn = PFN_DOWN(regions[early_region_idx].base);
+                end_pfn = PFN_DOWN(regions[early_region_idx].base +
+                                regions[early_region_idx].size);
+
+                if (pfn >= start_pfn && pfn < end_pfn)
+                        return !memblock_is_nomap(
+                                        &regions[early_region_idx]);
+        }
+
+        early_region_idx = memblock_search_pfn_regions(pfn);
+        if (early_region_idx == -1)
+                return false;
+
+        return !memblock_is_nomap(&regions[early_region_idx]);
+}
+EXPORT_SYMBOL(pfn_valid_region);
+
 /* HAVE_MEMBLOCK is always enabled on arm64 */
 unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn)
 {
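The "further optimizing in early_pfn_valid" the commit message refers to would presumably hook the new helper into the early memmap walk. As a rough sketch of that follow-up (the exact location and guard are assumptions, not part of this patch), early_pfn_valid() could be redirected to the region-caching helper:

/*
 * Hypothetical follow-up, e.g. in include/linux/mmzone.h: during early
 * boot, validate pfns via the cached memblock region instead of the
 * generic pfn_valid(), which searches the regions from scratch on each
 * call.
 */
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
#define early_pfn_valid(pfn)        pfn_valid_region(pfn)
#else
#define early_pfn_valid(pfn)        pfn_valid(pfn)
#endif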