diff mbox series

[V2,4/6] mm: rename the global section array to mem_sections

Message ID 20210531091908.1738465-5-aisheng.dong@nxp.com (mailing list archive)
State New, archived
Headers show
Series mm/sparse: a few minor fixes and improvements | expand

Commit Message

Dong Aisheng May 31, 2021, 9:19 a.m. UTC
In order to distinguish the struct mem_section for a better code
readability and align with kernel doc [1] name below, change the
global mem section name to 'mem_sections' from 'mem_section'.

[1] Documentation/vm/memory-model.rst
"The `mem_section` objects are arranged in a two-dimensional array
called `mem_sections`."

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Young <dyoung@redhat.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: kexec@lists.infradead.org
Signed-off-by: Dong Aisheng <aisheng.dong@nxp.com>
---
v1->v2:
 * no changes
---
 include/linux/mmzone.h | 10 +++++-----
 kernel/crash_core.c    |  4 ++--
 mm/sparse.c            | 16 ++++++++--------
 3 files changed, 15 insertions(+), 15 deletions(-)

Comments

David Hildenbrand June 1, 2021, 8:22 a.m. UTC | #1
On 31.05.21 11:19, Dong Aisheng wrote:
> In order to distinguish the struct mem_section for a better code
> readability and align with kernel doc [1] name below, change the
> global mem section name to 'mem_sections' from 'mem_section'.
> 
> [1] Documentation/vm/memory-model.rst
> "The `mem_section` objects are arranged in a two-dimensional array
> called `mem_sections`."
> 
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Dave Young <dyoung@redhat.com>
> Cc: Baoquan He <bhe@redhat.com>
> Cc: Vivek Goyal <vgoyal@redhat.com>
> Cc: kexec@lists.infradead.org
> Signed-off-by: Dong Aisheng <aisheng.dong@nxp.com>
> ---
> v1->v2:
>   * no changes
> ---
>   include/linux/mmzone.h | 10 +++++-----
>   kernel/crash_core.c    |  4 ++--
>   mm/sparse.c            | 16 ++++++++--------
>   3 files changed, 15 insertions(+), 15 deletions(-)
> 
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index a6bfde85ddb0..0ed61f32d898 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -1302,9 +1302,9 @@ struct mem_section {
>   #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
>   
>   #ifdef CONFIG_SPARSEMEM_EXTREME
> -extern struct mem_section **mem_section;
> +extern struct mem_section **mem_sections;
>   #else
> -extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
> +extern struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
>   #endif
>   
>   static inline unsigned long *section_to_usemap(struct mem_section *ms)
> @@ -1315,12 +1315,12 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
>   static inline struct mem_section *__nr_to_section(unsigned long nr)
>   {
>   #ifdef CONFIG_SPARSEMEM_EXTREME
> -	if (!mem_section)
> +	if (!mem_sections)
>   		return NULL;
>   #endif
> -	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
> +	if (!mem_sections[SECTION_NR_TO_ROOT(nr)])
>   		return NULL;
> -	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
> +	return &mem_sections[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
>   }
>   extern unsigned long __section_nr(struct mem_section *ms);
>   extern size_t mem_section_usage_size(void);
> diff --git a/kernel/crash_core.c b/kernel/crash_core.c
> index 29cc15398ee4..fb1180d81b5a 100644
> --- a/kernel/crash_core.c
> +++ b/kernel/crash_core.c
> @@ -414,8 +414,8 @@ static int __init crash_save_vmcoreinfo_init(void)
>   	VMCOREINFO_SYMBOL(contig_page_data);
>   #endif
>   #ifdef CONFIG_SPARSEMEM
> -	VMCOREINFO_SYMBOL_ARRAY(mem_section);
> -	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
> +	VMCOREINFO_SYMBOL_ARRAY(mem_sections);
> +	VMCOREINFO_LENGTH(mem_sections, NR_SECTION_ROOTS);
>   	VMCOREINFO_STRUCT_SIZE(mem_section);
>   	VMCOREINFO_OFFSET(mem_section, section_mem_map);
>   	VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
> diff --git a/mm/sparse.c b/mm/sparse.c
> index d02ee6bb7cbc..6412010478f7 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -24,12 +24,12 @@
>    * 1) mem_section	- memory sections, mem_map's for valid memory
>    */
>   #ifdef CONFIG_SPARSEMEM_EXTREME
> -struct mem_section **mem_section;
> +struct mem_section **mem_sections;
>   #else
> -struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
> +struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
>   	____cacheline_internodealigned_in_smp;
>   #endif
> -EXPORT_SYMBOL(mem_section);
> +EXPORT_SYMBOL(mem_sections);
>   
>   #ifdef NODE_NOT_IN_PAGE_FLAGS
>   /*
> @@ -66,8 +66,8 @@ static void __init sparse_alloc_section_roots(void)
>   
>   	size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
>   	align = 1 << (INTERNODE_CACHE_SHIFT);
> -	mem_section = memblock_alloc(size, align);
> -	if (!mem_section)
> +	mem_sections = memblock_alloc(size, align);
> +	if (!mem_sections)
>   		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
>   		      __func__, size, align);
>   }
> @@ -103,14 +103,14 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
>   	 *
>   	 * The mem_hotplug_lock resolves the apparent race below.
>   	 */
> -	if (mem_section[root])
> +	if (mem_sections[root])
>   		return 0;
>   
>   	section = sparse_index_alloc(nid);
>   	if (!section)
>   		return -ENOMEM;
>   
> -	mem_section[root] = section;
> +	mem_sections[root] = section;
>   
>   	return 0;
>   }
> @@ -145,7 +145,7 @@ unsigned long __section_nr(struct mem_section *ms)
>   #else
>   unsigned long __section_nr(struct mem_section *ms)
>   {
> -	return (unsigned long)(ms - mem_section[0]);
> +	return (unsigned long)(ms - mem_sections[0]);
>   }
>   #endif
>   
> 

I repeat: unnecessary code churn IMHO.
Dong Aisheng June 1, 2021, 8:37 a.m. UTC | #2
On Tue, Jun 1, 2021 at 4:22 PM David Hildenbrand <david@redhat.com> wrote:
>
> On 31.05.21 11:19, Dong Aisheng wrote:
> > In order to distinguish the struct mem_section for a better code
> > readability and align with kernel doc [1] name below, change the
> > global mem section name to 'mem_sections' from 'mem_section'.
> >
> > [1] Documentation/vm/memory-model.rst
> > "The `mem_section` objects are arranged in a two-dimensional array
> > called `mem_sections`."
> >
> > Cc: Andrew Morton <akpm@linux-foundation.org>
> > Cc: Dave Young <dyoung@redhat.com>
> > Cc: Baoquan He <bhe@redhat.com>
> > Cc: Vivek Goyal <vgoyal@redhat.com>
> > Cc: kexec@lists.infradead.org
> > Signed-off-by: Dong Aisheng <aisheng.dong@nxp.com>
> > ---
> > v1->v2:
> >   * no changes
> > ---
> >   include/linux/mmzone.h | 10 +++++-----
> >   kernel/crash_core.c    |  4 ++--
> >   mm/sparse.c            | 16 ++++++++--------
> >   3 files changed, 15 insertions(+), 15 deletions(-)
> >
> > diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> > index a6bfde85ddb0..0ed61f32d898 100644
> > --- a/include/linux/mmzone.h
> > +++ b/include/linux/mmzone.h
> > @@ -1302,9 +1302,9 @@ struct mem_section {
> >   #define SECTION_ROOT_MASK   (SECTIONS_PER_ROOT - 1)
> >
> >   #ifdef CONFIG_SPARSEMEM_EXTREME
> > -extern struct mem_section **mem_section;
> > +extern struct mem_section **mem_sections;
> >   #else
> > -extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
> > +extern struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
> >   #endif
> >
> >   static inline unsigned long *section_to_usemap(struct mem_section *ms)
> > @@ -1315,12 +1315,12 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
> >   static inline struct mem_section *__nr_to_section(unsigned long nr)
> >   {
> >   #ifdef CONFIG_SPARSEMEM_EXTREME
> > -     if (!mem_section)
> > +     if (!mem_sections)
> >               return NULL;
> >   #endif
> > -     if (!mem_section[SECTION_NR_TO_ROOT(nr)])
> > +     if (!mem_sections[SECTION_NR_TO_ROOT(nr)])
> >               return NULL;
> > -     return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
> > +     return &mem_sections[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
> >   }
> >   extern unsigned long __section_nr(struct mem_section *ms);
> >   extern size_t mem_section_usage_size(void);
> > diff --git a/kernel/crash_core.c b/kernel/crash_core.c
> > index 29cc15398ee4..fb1180d81b5a 100644
> > --- a/kernel/crash_core.c
> > +++ b/kernel/crash_core.c
> > @@ -414,8 +414,8 @@ static int __init crash_save_vmcoreinfo_init(void)
> >       VMCOREINFO_SYMBOL(contig_page_data);
> >   #endif
> >   #ifdef CONFIG_SPARSEMEM
> > -     VMCOREINFO_SYMBOL_ARRAY(mem_section);
> > -     VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
> > +     VMCOREINFO_SYMBOL_ARRAY(mem_sections);
> > +     VMCOREINFO_LENGTH(mem_sections, NR_SECTION_ROOTS);
> >       VMCOREINFO_STRUCT_SIZE(mem_section);
> >       VMCOREINFO_OFFSET(mem_section, section_mem_map);
> >       VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
> > diff --git a/mm/sparse.c b/mm/sparse.c
> > index d02ee6bb7cbc..6412010478f7 100644
> > --- a/mm/sparse.c
> > +++ b/mm/sparse.c
> > @@ -24,12 +24,12 @@
> >    * 1) mem_section   - memory sections, mem_map's for valid memory
> >    */
> >   #ifdef CONFIG_SPARSEMEM_EXTREME
> > -struct mem_section **mem_section;
> > +struct mem_section **mem_sections;
> >   #else
> > -struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
> > +struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
> >       ____cacheline_internodealigned_in_smp;
> >   #endif
> > -EXPORT_SYMBOL(mem_section);
> > +EXPORT_SYMBOL(mem_sections);
> >
> >   #ifdef NODE_NOT_IN_PAGE_FLAGS
> >   /*
> > @@ -66,8 +66,8 @@ static void __init sparse_alloc_section_roots(void)
> >
> >       size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
> >       align = 1 << (INTERNODE_CACHE_SHIFT);
> > -     mem_section = memblock_alloc(size, align);
> > -     if (!mem_section)
> > +     mem_sections = memblock_alloc(size, align);
> > +     if (!mem_sections)
> >               panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
> >                     __func__, size, align);
> >   }
> > @@ -103,14 +103,14 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
> >        *
> >        * The mem_hotplug_lock resolves the apparent race below.
> >        */
> > -     if (mem_section[root])
> > +     if (mem_sections[root])
> >               return 0;
> >
> >       section = sparse_index_alloc(nid);
> >       if (!section)
> >               return -ENOMEM;
> >
> > -     mem_section[root] = section;
> > +     mem_sections[root] = section;
> >
> >       return 0;
> >   }
> > @@ -145,7 +145,7 @@ unsigned long __section_nr(struct mem_section *ms)
> >   #else
> >   unsigned long __section_nr(struct mem_section *ms)
> >   {
> > -     return (unsigned long)(ms - mem_section[0]);
> > +     return (unsigned long)(ms - mem_sections[0]);
> >   }
> >   #endif
> >
> >
>
> I repeat: unnecessary code churn IMHO.

Hi David,

Thanks, I explained the reason during my last reply.
Andrew has already picked this patch to -mm tree.

Regards
Aisheng

>
> --
> Thanks,
>
> David / dhildenb
>
David Hildenbrand June 1, 2021, 8:40 a.m. UTC | #3
On 01.06.21 10:37, Dong Aisheng wrote:
> On Tue, Jun 1, 2021 at 4:22 PM David Hildenbrand <david@redhat.com> wrote:
>>
>> On 31.05.21 11:19, Dong Aisheng wrote:
>>> In order to distinguish the struct mem_section for a better code
>>> readability and align with kernel doc [1] name below, change the
>>> global mem section name to 'mem_sections' from 'mem_section'.
>>>
>>> [1] Documentation/vm/memory-model.rst
>>> "The `mem_section` objects are arranged in a two-dimensional array
>>> called `mem_sections`."
>>>
>>> Cc: Andrew Morton <akpm@linux-foundation.org>
>>> Cc: Dave Young <dyoung@redhat.com>
>>> Cc: Baoquan He <bhe@redhat.com>
>>> Cc: Vivek Goyal <vgoyal@redhat.com>
>>> Cc: kexec@lists.infradead.org
>>> Signed-off-by: Dong Aisheng <aisheng.dong@nxp.com>
>>> ---
>>> v1->v2:
>>>    * no changes
>>> ---
>>>    include/linux/mmzone.h | 10 +++++-----
>>>    kernel/crash_core.c    |  4 ++--
>>>    mm/sparse.c            | 16 ++++++++--------
>>>    3 files changed, 15 insertions(+), 15 deletions(-)
>>>
>>> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
>>> index a6bfde85ddb0..0ed61f32d898 100644
>>> --- a/include/linux/mmzone.h
>>> +++ b/include/linux/mmzone.h
>>> @@ -1302,9 +1302,9 @@ struct mem_section {
>>>    #define SECTION_ROOT_MASK   (SECTIONS_PER_ROOT - 1)
>>>
>>>    #ifdef CONFIG_SPARSEMEM_EXTREME
>>> -extern struct mem_section **mem_section;
>>> +extern struct mem_section **mem_sections;
>>>    #else
>>> -extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
>>> +extern struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
>>>    #endif
>>>
>>>    static inline unsigned long *section_to_usemap(struct mem_section *ms)
>>> @@ -1315,12 +1315,12 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
>>>    static inline struct mem_section *__nr_to_section(unsigned long nr)
>>>    {
>>>    #ifdef CONFIG_SPARSEMEM_EXTREME
>>> -     if (!mem_section)
>>> +     if (!mem_sections)
>>>                return NULL;
>>>    #endif
>>> -     if (!mem_section[SECTION_NR_TO_ROOT(nr)])
>>> +     if (!mem_sections[SECTION_NR_TO_ROOT(nr)])
>>>                return NULL;
>>> -     return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
>>> +     return &mem_sections[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
>>>    }
>>>    extern unsigned long __section_nr(struct mem_section *ms);
>>>    extern size_t mem_section_usage_size(void);
>>> diff --git a/kernel/crash_core.c b/kernel/crash_core.c
>>> index 29cc15398ee4..fb1180d81b5a 100644
>>> --- a/kernel/crash_core.c
>>> +++ b/kernel/crash_core.c
>>> @@ -414,8 +414,8 @@ static int __init crash_save_vmcoreinfo_init(void)
>>>        VMCOREINFO_SYMBOL(contig_page_data);
>>>    #endif
>>>    #ifdef CONFIG_SPARSEMEM
>>> -     VMCOREINFO_SYMBOL_ARRAY(mem_section);
>>> -     VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
>>> +     VMCOREINFO_SYMBOL_ARRAY(mem_sections);
>>> +     VMCOREINFO_LENGTH(mem_sections, NR_SECTION_ROOTS);
>>>        VMCOREINFO_STRUCT_SIZE(mem_section);
>>>        VMCOREINFO_OFFSET(mem_section, section_mem_map);
>>>        VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
>>> diff --git a/mm/sparse.c b/mm/sparse.c
>>> index d02ee6bb7cbc..6412010478f7 100644
>>> --- a/mm/sparse.c
>>> +++ b/mm/sparse.c
>>> @@ -24,12 +24,12 @@
>>>     * 1) mem_section   - memory sections, mem_map's for valid memory
>>>     */
>>>    #ifdef CONFIG_SPARSEMEM_EXTREME
>>> -struct mem_section **mem_section;
>>> +struct mem_section **mem_sections;
>>>    #else
>>> -struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
>>> +struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
>>>        ____cacheline_internodealigned_in_smp;
>>>    #endif
>>> -EXPORT_SYMBOL(mem_section);
>>> +EXPORT_SYMBOL(mem_sections);
>>>
>>>    #ifdef NODE_NOT_IN_PAGE_FLAGS
>>>    /*
>>> @@ -66,8 +66,8 @@ static void __init sparse_alloc_section_roots(void)
>>>
>>>        size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
>>>        align = 1 << (INTERNODE_CACHE_SHIFT);
>>> -     mem_section = memblock_alloc(size, align);
>>> -     if (!mem_section)
>>> +     mem_sections = memblock_alloc(size, align);
>>> +     if (!mem_sections)
>>>                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
>>>                      __func__, size, align);
>>>    }
>>> @@ -103,14 +103,14 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
>>>         *
>>>         * The mem_hotplug_lock resolves the apparent race below.
>>>         */
>>> -     if (mem_section[root])
>>> +     if (mem_sections[root])
>>>                return 0;
>>>
>>>        section = sparse_index_alloc(nid);
>>>        if (!section)
>>>                return -ENOMEM;
>>>
>>> -     mem_section[root] = section;
>>> +     mem_sections[root] = section;
>>>
>>>        return 0;
>>>    }
>>> @@ -145,7 +145,7 @@ unsigned long __section_nr(struct mem_section *ms)
>>>    #else
>>>    unsigned long __section_nr(struct mem_section *ms)
>>>    {
>>> -     return (unsigned long)(ms - mem_section[0]);
>>> +     return (unsigned long)(ms - mem_sections[0]);
>>>    }
>>>    #endif
>>>
>>>
>>
>> I repeat: unnecessary code churn IMHO.
> 
> Hi David,
> 
> Thanks, i explained the reason during my last reply.
> Andrew has already picked this patch to -mm tree.

Just because it's in Andrews tree doesn't mean it will end up upstream. ;)

Anyhow, no really strong opinion, it's simply unnecessary code churn 
that makes bisecting harder without real value IMHO.
Dong Aisheng June 1, 2021, 8:44 a.m. UTC | #4
On Tue, Jun 1, 2021 at 4:40 PM David Hildenbrand <david@redhat.com> wrote:
>
> On 01.06.21 10:37, Dong Aisheng wrote:
> > On Tue, Jun 1, 2021 at 4:22 PM David Hildenbrand <david@redhat.com> wrote:
> >>
> >> On 31.05.21 11:19, Dong Aisheng wrote:
> >>> In order to distinguish the struct mem_section for a better code
> >>> readability and align with kernel doc [1] name below, change the
> >>> global mem section name to 'mem_sections' from 'mem_section'.
> >>>
> >>> [1] Documentation/vm/memory-model.rst
> >>> "The `mem_section` objects are arranged in a two-dimensional array
> >>> called `mem_sections`."
> >>>
> >>> Cc: Andrew Morton <akpm@linux-foundation.org>
> >>> Cc: Dave Young <dyoung@redhat.com>
> >>> Cc: Baoquan He <bhe@redhat.com>
> >>> Cc: Vivek Goyal <vgoyal@redhat.com>
> >>> Cc: kexec@lists.infradead.org
> >>> Signed-off-by: Dong Aisheng <aisheng.dong@nxp.com>
> >>> ---
> >>> v1->v2:
> >>>    * no changes
> >>> ---
> >>>    include/linux/mmzone.h | 10 +++++-----
> >>>    kernel/crash_core.c    |  4 ++--
> >>>    mm/sparse.c            | 16 ++++++++--------
> >>>    3 files changed, 15 insertions(+), 15 deletions(-)
> >>>
> >>> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> >>> index a6bfde85ddb0..0ed61f32d898 100644
> >>> --- a/include/linux/mmzone.h
> >>> +++ b/include/linux/mmzone.h
> >>> @@ -1302,9 +1302,9 @@ struct mem_section {
> >>>    #define SECTION_ROOT_MASK   (SECTIONS_PER_ROOT - 1)
> >>>
> >>>    #ifdef CONFIG_SPARSEMEM_EXTREME
> >>> -extern struct mem_section **mem_section;
> >>> +extern struct mem_section **mem_sections;
> >>>    #else
> >>> -extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
> >>> +extern struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
> >>>    #endif
> >>>
> >>>    static inline unsigned long *section_to_usemap(struct mem_section *ms)
> >>> @@ -1315,12 +1315,12 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
> >>>    static inline struct mem_section *__nr_to_section(unsigned long nr)
> >>>    {
> >>>    #ifdef CONFIG_SPARSEMEM_EXTREME
> >>> -     if (!mem_section)
> >>> +     if (!mem_sections)
> >>>                return NULL;
> >>>    #endif
> >>> -     if (!mem_section[SECTION_NR_TO_ROOT(nr)])
> >>> +     if (!mem_sections[SECTION_NR_TO_ROOT(nr)])
> >>>                return NULL;
> >>> -     return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
> >>> +     return &mem_sections[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
> >>>    }
> >>>    extern unsigned long __section_nr(struct mem_section *ms);
> >>>    extern size_t mem_section_usage_size(void);
> >>> diff --git a/kernel/crash_core.c b/kernel/crash_core.c
> >>> index 29cc15398ee4..fb1180d81b5a 100644
> >>> --- a/kernel/crash_core.c
> >>> +++ b/kernel/crash_core.c
> >>> @@ -414,8 +414,8 @@ static int __init crash_save_vmcoreinfo_init(void)
> >>>        VMCOREINFO_SYMBOL(contig_page_data);
> >>>    #endif
> >>>    #ifdef CONFIG_SPARSEMEM
> >>> -     VMCOREINFO_SYMBOL_ARRAY(mem_section);
> >>> -     VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
> >>> +     VMCOREINFO_SYMBOL_ARRAY(mem_sections);
> >>> +     VMCOREINFO_LENGTH(mem_sections, NR_SECTION_ROOTS);
> >>>        VMCOREINFO_STRUCT_SIZE(mem_section);
> >>>        VMCOREINFO_OFFSET(mem_section, section_mem_map);
> >>>        VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
> >>> diff --git a/mm/sparse.c b/mm/sparse.c
> >>> index d02ee6bb7cbc..6412010478f7 100644
> >>> --- a/mm/sparse.c
> >>> +++ b/mm/sparse.c
> >>> @@ -24,12 +24,12 @@
> >>>     * 1) mem_section   - memory sections, mem_map's for valid memory
> >>>     */
> >>>    #ifdef CONFIG_SPARSEMEM_EXTREME
> >>> -struct mem_section **mem_section;
> >>> +struct mem_section **mem_sections;
> >>>    #else
> >>> -struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
> >>> +struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
> >>>        ____cacheline_internodealigned_in_smp;
> >>>    #endif
> >>> -EXPORT_SYMBOL(mem_section);
> >>> +EXPORT_SYMBOL(mem_sections);
> >>>
> >>>    #ifdef NODE_NOT_IN_PAGE_FLAGS
> >>>    /*
> >>> @@ -66,8 +66,8 @@ static void __init sparse_alloc_section_roots(void)
> >>>
> >>>        size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
> >>>        align = 1 << (INTERNODE_CACHE_SHIFT);
> >>> -     mem_section = memblock_alloc(size, align);
> >>> -     if (!mem_section)
> >>> +     mem_sections = memblock_alloc(size, align);
> >>> +     if (!mem_sections)
> >>>                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
> >>>                      __func__, size, align);
> >>>    }
> >>> @@ -103,14 +103,14 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
> >>>         *
> >>>         * The mem_hotplug_lock resolves the apparent race below.
> >>>         */
> >>> -     if (mem_section[root])
> >>> +     if (mem_sections[root])
> >>>                return 0;
> >>>
> >>>        section = sparse_index_alloc(nid);
> >>>        if (!section)
> >>>                return -ENOMEM;
> >>>
> >>> -     mem_section[root] = section;
> >>> +     mem_sections[root] = section;
> >>>
> >>>        return 0;
> >>>    }
> >>> @@ -145,7 +145,7 @@ unsigned long __section_nr(struct mem_section *ms)
> >>>    #else
> >>>    unsigned long __section_nr(struct mem_section *ms)
> >>>    {
> >>> -     return (unsigned long)(ms - mem_section[0]);
> >>> +     return (unsigned long)(ms - mem_sections[0]);
> >>>    }
> >>>    #endif
> >>>
> >>>
> >>
> >> I repeat: unnecessary code churn IMHO.
> >
> > Hi David,
> >
> > Thanks, i explained the reason during my last reply.
> > Andrew has already picked this patch to -mm tree.
>
> Just because it's in Andrews tree doesn't mean it will end up upstream. ;)
>
> Anyhow, no really strong opinion, it's simply unnecessary code churn
> that makes bisecting harder without real value IMHO.

In my practice, it helps improve code-reading efficiency when using
scope tools and vim hotkeys.
Before the change, I really felt that the mixed definition caused trouble
when trying to read the code efficiently.
Anyway, that's my personal experience, while others may have different opinions.
Thanks for the feedback.

Regards
Aisheng

>
> --
> Thanks,
>
> David / dhildenb
>
Andrew Morton June 1, 2021, 11:52 p.m. UTC | #5
On Tue, 1 Jun 2021 10:40:09 +0200 David Hildenbrand <david@redhat.com> wrote:

> > Thanks, i explained the reason during my last reply.
> > Andrew has already picked this patch to -mm tree.
> 
> Just because it's in Andrews tree doesn't mean it will end up upstream. ;)
> 
> Anyhow, no really strong opinion, it's simply unnecessary code churn 
> that makes bisecting harder without real value IMHO.

I think it's a good change - mem_sections refers to multiple instances
of a mem_section.  Churn is a pain, but that's the price we pay for more
readable code.  And for having screwed it up originally ;)
HAGIO KAZUHITO(萩尾 一仁) June 2, 2021, 1:11 a.m. UTC | #6
-----Original Message-----
> On Tue, 1 Jun 2021 10:40:09 +0200 David Hildenbrand <david@redhat.com> wrote:
> 
> > > Thanks, i explained the reason during my last reply.
> > > Andrew has already picked this patch to -mm tree.
> >
> > Just because it's in Andrews tree doesn't mean it will end up upstream. ;)
> >
> > Anyhow, no really strong opinion, it's simply unnecessary code churn
> > that makes bisecting harder without real value IMHO.
> 
> I think it's a good change - mem_sections refers to multiple instances
> of a mem_section.  Churn is a pain, but that's the price we pay for more
> readable code.  And for having screwed it up originally ;)

From a makedumpfile/crash-utility viewpoint, I don't deny kernel improvement
and probably the change will not be hard for them to support, but I'd like
you to remember that the tool users will need to update them for the change.

The situation where we need to update the tools for new kernels is usual, but
there are not many cases where they cannot even start a session, and this change
will cause it.  Personally I wonder whether the change is worth forcing users to
update them.

Thanks,
Kazu
Andrew Morton June 2, 2021, 2:26 a.m. UTC | #7
On Wed, 2 Jun 2021 01:11:07 +0000 HAGIO KAZUHITO(萩尾 一仁) <k-hagio-ab@nec.com> wrote:

> -----Original Message-----
> > On Tue, 1 Jun 2021 10:40:09 +0200 David Hildenbrand <david@redhat.com> wrote:
> > 
> > > > Thanks, i explained the reason during my last reply.
> > > > Andrew has already picked this patch to -mm tree.
> > >
> > > Just because it's in Andrews tree doesn't mean it will end up upstream. ;)
> > >
> > > Anyhow, no really strong opinion, it's simply unnecessary code churn
> > > that makes bisecting harder without real value IMHO.
> > 
> > I think it's a good change - mem_sections refers to multiple instances
> > of a mem_section.  Churn is a pain, but that's the price we pay for more
> > readable code.  And for having screwed it up originally ;)
> 
> >From a makedumpfile/crash-utility viewpoint, I don't deny kernel improvement
> and probably the change will not be hard for them to support, but I'd like
> you to remember that the tool users will need to update them for the change.
> 
> The situation where we need to update the tools for new kernels is usual, but
> there are not many cases that they cannot even start session, and this change
> will cause it.  Personally I wonder the change is worth forcing users to update
> them.

Didn't know that.  I guess I'll drop it then.

We could do an assembly-level alias I assume..
Baoquan He June 2, 2021, 3:03 a.m. UTC | #8
On 06/02/21 at 01:11am, HAGIO KAZUHITO(萩尾 一仁) wrote:
> -----Original Message-----
> > On Tue, 1 Jun 2021 10:40:09 +0200 David Hildenbrand <david@redhat.com> wrote:
> > 
> > > > Thanks, i explained the reason during my last reply.
> > > > Andrew has already picked this patch to -mm tree.
> > >
> > > Just because it's in Andrews tree doesn't mean it will end up upstream. ;)
> > >
> > > Anyhow, no really strong opinion, it's simply unnecessary code churn
> > > that makes bisecting harder without real value IMHO.
> > 
> > I think it's a good change - mem_sections refers to multiple instances
> > of a mem_section.  Churn is a pain, but that's the price we pay for more
> > readable code.  And for having screwed it up originally ;)
> 
> From a makedumpfile/crash-utility viewpoint, I don't deny kernel improvement
> and probably the change will not be hard for them to support, but I'd like
> you to remember that the tool users will need to update them for the change.

As VIM user, I can understand Aisheng's feeling on the mem_section
variable which has the same symbol name as its type. Meanwhile it does
cause makedumpfile/crash having to be changed accordingly.

Maybe we can carry it when any essential change is needed in both kernel
and makedumpfile/crash around it.

> 
> The situation where we need to update the tools for new kernels is usual, but
> there are not many cases that they cannot even start session, and this change

By the way, Kazu, about a case starting session, could you be more specific
or rephrase? I may not get it clearly. Thanks.

> will cause it.  Personally I wonder the change is worth forcing users to update
> them.
> 
> Thanks,
> Kazu
>
HAGIO KAZUHITO(萩尾 一仁) June 2, 2021, 5:02 a.m. UTC | #9
-----Original Message-----
> On 06/02/21 at 01:11am, HAGIO KAZUHITO(萩尾 一仁) wrote:
> > -----Original Message-----
> > > On Tue, 1 Jun 2021 10:40:09 +0200 David Hildenbrand <david@redhat.com> wrote:
> > >
> > > > > Thanks, i explained the reason during my last reply.
> > > > > Andrew has already picked this patch to -mm tree.
> > > >
> > > > Just because it's in Andrews tree doesn't mean it will end up upstream. ;)
> > > >
> > > > Anyhow, no really strong opinion, it's simply unnecessary code churn
> > > > that makes bisecting harder without real value IMHO.
> > >
> > > I think it's a good change - mem_sections refers to multiple instances
> > > of a mem_section.  Churn is a pain, but that's the price we pay for more
> > > readable code.  And for having screwed it up originally ;)
> >
> > From a makedumpfile/crash-utility viewpoint, I don't deny kernel improvement
> > and probably the change will not be hard for them to support, but I'd like
> > you to remember that the tool users will need to update them for the change.
> 
> As VIM user, I can understand Aisheng's feeling on the mem_section
> variable which has the same symbol name as its type. Meanwhile it does
> cause makedumpfile/crash having to be changed accordingly.
> 
> Maybe we can carry it when any essential change is needed in both kernel
> and makedumpfile/crash around it.

Yes, that is a possible option.

> 
> >
> > The situation where we need to update the tools for new kernels is usual, but
> > there are not many cases that they cannot even start session, and this change
> 
> By the way, Kazu, about a case starting session, could you be more specific
> or rephrase? I may not get it clearly. Thanks.

As for the current crash, the "mem_section" symbol is used to determine
which memory model is used.

        if (kernel_symbol_exists("mem_section"))
                vt->flags |= SPARSEMEM;
        else if (kernel_symbol_exists("mem_map")) {
                get_symbol_data("mem_map", sizeof(char *), &vt->mem_map);
                vt->flags |= FLATMEM;
        } else
                vt->flags |= DISCONTIGMEM;

So without updating, crash will assume that the memory model is DISCONTIGMEM,
fail during vm_init() and cannot start a session.  This is an imitation of
the situation though:

-       if (kernel_symbol_exists("mem_section"))
+       if (kernel_symbol_exists("mem_sectionX"))

# crash
...
crash: invalid structure member offset: pglist_data_node_mem_map
           FILE: memory.c  LINE: 16420  FUNCTION: dump_memory_nodes()

[/root/bin/crash] error trace: 465304 => 4ac2bf => 4aae19 => 57f4d7

  57f4d7: OFFSET_verify+164
  4aae19: dump_memory_nodes+5321
  4ac2bf: vm_init+4031
  465304: main_loop+392

#

Every time a kernel is released, there are some changes that crash can
start up with but cannot run a specific crash's command, but a change
that crash cannot start up like this case does not occur often.

Also as for makedumpfile, the "SYMBOL(mem_section)" vmcore entry is used
to determine the memory model, so it will fail with the following error
without an update.

# ./makedumpfile --mem-usage /proc/kcore 
get_mem_map: Can't distinguish the memory type.

makedumpfile Failed.

Thanks,
Kazu
Baoquan He June 2, 2021, 5:22 a.m. UTC | #10
On 06/02/21 at 05:02am, HAGIO KAZUHITO(萩尾 一仁) wrote:
> -----Original Message-----
> > On 06/02/21 at 01:11am, HAGIO KAZUHITO(萩尾 一仁) wrote:
> > > -----Original Message-----
> > > > On Tue, 1 Jun 2021 10:40:09 +0200 David Hildenbrand <david@redhat.com> wrote:
> > > >
> > > > > > Thanks, i explained the reason during my last reply.
> > > > > > Andrew has already picked this patch to -mm tree.
> > > > >
> > > > > Just because it's in Andrews tree doesn't mean it will end up upstream. ;)
> > > > >
> > > > > Anyhow, no really strong opinion, it's simply unnecessary code churn
> > > > > that makes bisecting harder without real value IMHO.
> > > >
> > > > I think it's a good change - mem_sections refers to multiple instances
> > > > of a mem_section.  Churn is a pain, but that's the price we pay for more
> > > > readable code.  And for having screwed it up originally ;)
> > >
> > > From a makedumpfile/crash-utility viewpoint, I don't deny kernel improvement
> > > and probably the change will not be hard for them to support, but I'd like
> > > you to remember that the tool users will need to update them for the change.
> > 
> > As VIM user, I can understand Aisheng's feeling on the mem_section
> > variable which has the same symbol name as its type. Meanwhile it does
> > cause makedumpfile/crash having to be changed accordingly.
> > 
> > Maybe we can carry it when any essential change is needed in both kernel
> > and makedumpfile/crash around it.
> 
> Yes, that is a possible option.
> 
> > 
> > >
> > > The situation where we need to update the tools for new kernels is usual, but
> > > there are not many cases that they cannot even start session, and this change
> > 
> > By the way, Kazu, about a case starting session, could you be more specific
> > or rephrase? I may not get it clearly. Thanks.
> 
> As for the current crash, the "mem_section" symbol is used to determine
> which memory model is used.
> 
>         if (kernel_symbol_exists("mem_section"))
>                 vt->flags |= SPARSEMEM;
>         else if (kernel_symbol_exists("mem_map")) {
>                 get_symbol_data("mem_map", sizeof(char *), &vt->mem_map);
>                 vt->flags |= FLATMEM;
>         } else
>                 vt->flags |= DISCONTIGMEM;
> 
> So without updating, crash will assume that the memory model is DISCONTIGMEM,
> fail during vm_init() and cannot start a session.  This is an imitation of
> the situation though:
> 
> -       if (kernel_symbol_exists("mem_section"))
> +       if (kernel_symbol_exists("mem_sectionX"))
> 
> # crash
> ...
> crash: invalid structure member offset: pglist_data_node_mem_map
>            FILE: memory.c  LINE: 16420  FUNCTION: dump_memory_nodes()
> 
> [/root/bin/crash] error trace: 465304 => 4ac2bf => 4aae19 => 57f4d7
> 
>   57f4d7: OFFSET_verify+164
>   4aae19: dump_memory_nodes+5321
>   4ac2bf: vm_init+4031
>   465304: main_loop+392
> 
> #
> 
> Every time a kernel is released, there are some changes that crash can
> start up with but cannot run a specific crash's command, but a change
> that crash cannot start up like this case does not occur often.

Ah, I see. You mean this patch will cause a startup failure of crash/makedumpfile
during the application's early stage, and this is a more severe situation than
others. Then we may need to defer the patch's acceptance to a suitable future
time. Thanks for the explanation.

> 
> Also as for makedumpfile, the "SYMBOL(mem_section)" vmcore entry is used
> to determine the memory model, so it will fail with the following error
> without an update.
> 
> # ./makedumpfile --mem-usage /proc/kcore 
> get_mem_map: Can't distinguish the memory type.
> 
> makedumpfile Failed.
> 
> Thanks,
> Kazu
diff mbox series

Patch

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a6bfde85ddb0..0ed61f32d898 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1302,9 +1302,9 @@  struct mem_section {
 #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
-extern struct mem_section **mem_section;
+extern struct mem_section **mem_sections;
 #else
-extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
+extern struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
 #endif
 
 static inline unsigned long *section_to_usemap(struct mem_section *ms)
@@ -1315,12 +1315,12 @@  static inline unsigned long *section_to_usemap(struct mem_section *ms)
 static inline struct mem_section *__nr_to_section(unsigned long nr)
 {
 #ifdef CONFIG_SPARSEMEM_EXTREME
-	if (!mem_section)
+	if (!mem_sections)
 		return NULL;
 #endif
-	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+	if (!mem_sections[SECTION_NR_TO_ROOT(nr)])
 		return NULL;
-	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+	return &mem_sections[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 }
 extern unsigned long __section_nr(struct mem_section *ms);
 extern size_t mem_section_usage_size(void);
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 29cc15398ee4..fb1180d81b5a 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -414,8 +414,8 @@  static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_SYMBOL(contig_page_data);
 #endif
 #ifdef CONFIG_SPARSEMEM
-	VMCOREINFO_SYMBOL_ARRAY(mem_section);
-	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
+	VMCOREINFO_SYMBOL_ARRAY(mem_sections);
+	VMCOREINFO_LENGTH(mem_sections, NR_SECTION_ROOTS);
 	VMCOREINFO_STRUCT_SIZE(mem_section);
 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
 	VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
diff --git a/mm/sparse.c b/mm/sparse.c
index d02ee6bb7cbc..6412010478f7 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -24,12 +24,12 @@ 
  * 1) mem_section	- memory sections, mem_map's for valid memory
  */
 #ifdef CONFIG_SPARSEMEM_EXTREME
-struct mem_section **mem_section;
+struct mem_section **mem_sections;
 #else
-struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
+struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
 	____cacheline_internodealigned_in_smp;
 #endif
-EXPORT_SYMBOL(mem_section);
+EXPORT_SYMBOL(mem_sections);
 
 #ifdef NODE_NOT_IN_PAGE_FLAGS
 /*
@@ -66,8 +66,8 @@  static void __init sparse_alloc_section_roots(void)
 
 	size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
 	align = 1 << (INTERNODE_CACHE_SHIFT);
-	mem_section = memblock_alloc(size, align);
-	if (!mem_section)
+	mem_sections = memblock_alloc(size, align);
+	if (!mem_sections)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 		      __func__, size, align);
 }
@@ -103,14 +103,14 @@  static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 	 *
 	 * The mem_hotplug_lock resolves the apparent race below.
 	 */
-	if (mem_section[root])
+	if (mem_sections[root])
 		return 0;
 
 	section = sparse_index_alloc(nid);
 	if (!section)
 		return -ENOMEM;
 
-	mem_section[root] = section;
+	mem_sections[root] = section;
 
 	return 0;
 }
@@ -145,7 +145,7 @@  unsigned long __section_nr(struct mem_section *ms)
 #else
 unsigned long __section_nr(struct mem_section *ms)
 {
-	return (unsigned long)(ms - mem_section[0]);
+	return (unsigned long)(ms - mem_sections[0]);
 }
 #endif