Message ID | 161044408207.1482714.1125458890762969867.stgit@dwillia2-desk3.amr.corp.intel.com |
---|---|
State | New, archived |
Series | mm: Fix pfn_to_online_page() with respect to ZONE_DEVICE |
On 12.01.21 10:34, Dan Williams wrote:
> pfn_to_online_page() is already too large to be a macro or an inline
> function. In anticipation of further logic changes / growth, move it out
> of line.
>
> No functional change, just code movement.
>
> Cc: David Hildenbrand <david@redhat.com>
> Reported-by: Michal Hocko <mhocko@kernel.org>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> ---
>  include/linux/memory_hotplug.h |   17 +----------------
>  mm/memory_hotplug.c            |   16 ++++++++++++++++
>  2 files changed, 17 insertions(+), 16 deletions(-)
>
> diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
> index 15acce5ab106..3d99de0db2dd 100644
> --- a/include/linux/memory_hotplug.h
> +++ b/include/linux/memory_hotplug.h
> @@ -16,22 +16,7 @@ struct resource;
>  struct vmem_altmap;
>
>  #ifdef CONFIG_MEMORY_HOTPLUG
> -/*
> - * Return page for the valid pfn only if the page is online. All pfn
> - * walkers which rely on the fully initialized page->flags and others
> - * should use this rather than pfn_valid && pfn_to_page
> - */
> -#define pfn_to_online_page(pfn) \
> -({ \
> -        struct page *___page = NULL; \
> -        unsigned long ___pfn = pfn; \
> -        unsigned long ___nr = pfn_to_section_nr(___pfn); \
> - \
> -        if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
> -            pfn_valid_within(___pfn)) \
> -                ___page = pfn_to_page(___pfn); \
> -        ___page; \
> -})
> +struct page *pfn_to_online_page(unsigned long pfn);
>
>  /*
>   * Types for free bootmem stored in page->lru.next. These have to be in
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index f9d57b9be8c7..55a69d4396e7 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -300,6 +300,22 @@ static int check_hotplug_memory_addressable(unsigned long pfn,
>          return 0;
>  }
>
> +/*
> + * Return page for the valid pfn only if the page is online. All pfn
> + * walkers which rely on the fully initialized page->flags and others
> + * should use this rather than pfn_valid && pfn_to_page
> + */
> +struct page *pfn_to_online_page(unsigned long pfn)
> +{
> +        unsigned long nr = pfn_to_section_nr(pfn);
> +
> +        if (nr < NR_MEM_SECTIONS && online_section_nr(nr) &&
> +            pfn_valid_within(pfn))
> +                return pfn_to_page(pfn);
> +        return NULL;
> +}
> +EXPORT_SYMBOL_GPL(pfn_to_online_page);
> +
>  /*
>   * Reasonably generic function for adding memory. It is
>   * expected that archs that support memory hotplug will
>

Reviewed-by: David Hildenbrand <david@redhat.com>
On Tue, Jan 12, 2021 at 01:34:42AM -0800, Dan Williams wrote:
> pfn_to_online_page() is already too large to be a macro or an inline
> function. In anticipation of further logic changes / growth, move it out
> of line.
>
> No functional change, just code movement.
>
> Cc: David Hildenbrand <david@redhat.com>
> Reported-by: Michal Hocko <mhocko@kernel.org>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Reviewed-by: Oscar Salvador <osalvador@suse.de>
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 15acce5ab106..3d99de0db2dd 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -16,22 +16,7 @@ struct resource;
 struct vmem_altmap;

 #ifdef CONFIG_MEMORY_HOTPLUG
-/*
- * Return page for the valid pfn only if the page is online. All pfn
- * walkers which rely on the fully initialized page->flags and others
- * should use this rather than pfn_valid && pfn_to_page
- */
-#define pfn_to_online_page(pfn) \
-({ \
-        struct page *___page = NULL; \
-        unsigned long ___pfn = pfn; \
-        unsigned long ___nr = pfn_to_section_nr(___pfn); \
- \
-        if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
-            pfn_valid_within(___pfn)) \
-                ___page = pfn_to_page(___pfn); \
-        ___page; \
-})
+struct page *pfn_to_online_page(unsigned long pfn);

 /*
  * Types for free bootmem stored in page->lru.next. These have to be in
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f9d57b9be8c7..55a69d4396e7 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -300,6 +300,22 @@ static int check_hotplug_memory_addressable(unsigned long pfn,
         return 0;
 }

+/*
+ * Return page for the valid pfn only if the page is online. All pfn
+ * walkers which rely on the fully initialized page->flags and others
+ * should use this rather than pfn_valid && pfn_to_page
+ */
+struct page *pfn_to_online_page(unsigned long pfn)
+{
+        unsigned long nr = pfn_to_section_nr(pfn);
+
+        if (nr < NR_MEM_SECTIONS && online_section_nr(nr) &&
+            pfn_valid_within(pfn))
+                return pfn_to_page(pfn);
+        return NULL;
+}
+EXPORT_SYMBOL_GPL(pfn_to_online_page);
+
 /*
  * Reasonably generic function for adding memory. It is
  * expected that archs that support memory hotplug will
pfn_to_online_page() is already too large to be a macro or an inline
function. In anticipation of further logic changes / growth, move it out
of line.

No functional change, just code movement.

Cc: David Hildenbrand <david@redhat.com>
Reported-by: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 include/linux/memory_hotplug.h |   17 +----------------
 mm/memory_hotplug.c            |   16 ++++++++++++++++
 2 files changed, 17 insertions(+), 16 deletions(-)
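The comment carried over by the patch says that pfn walkers which depend on fully initialized page->flags should call pfn_to_online_page() rather than open-coding pfn_valid() plus pfn_to_page(). As a rough illustration of that calling convention (a hypothetical walker, not part of this patch or the kernel tree), such a caller might look like:

/*
 * Hypothetical pfn walker, sketched only to show how callers are
 * expected to use pfn_to_online_page(); it is not part of this patch.
 */
#include <linux/memory_hotplug.h>
#include <linux/mm.h>

static void walk_pfn_range(unsigned long start_pfn, unsigned long nr_pages)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                struct page *page = pfn_to_online_page(pfn);

                /* NULL means the pfn is invalid or its section is offline */
                if (!page)
                        continue;

                /* page->flags is fully initialized here and safe to inspect */
        }
}

Keeping the check out of line also means the test can grow (as the rest of this series does for ZONE_DEVICE) without inlining the extra logic into every such call site.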