
[04/21] mm: free_area_init: use maximal zone PFNs rather than zone sizes

Message ID 20200412194859.12663-5-rppt@kernel.org (mailing list archive)
State New, archived
Series mm: rework free_area_init*() functions

Commit Message

Mike Rapoport April 12, 2020, 7:48 p.m. UTC
From: Mike Rapoport <rppt@linux.ibm.com>

Currently, architectures that use free_area_init() to initialize the
memory map and the node and zone structures need to calculate zone and
hole sizes. We can use free_area_init_nodes() instead and let it detect
the zone boundaries, while the architectures will only have to supply
the possible limits for the zones.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
 arch/alpha/mm/init.c    | 16 ++++++----------
 arch/c6x/mm/init.c      |  8 +++-----
 arch/h8300/mm/init.c    |  6 +++---
 arch/hexagon/mm/init.c  |  6 +++---
 arch/m68k/mm/init.c     |  6 +++---
 arch/m68k/mm/mcfmmu.c   |  9 +++------
 arch/nds32/mm/init.c    | 11 ++++-------
 arch/nios2/mm/init.c    |  8 +++-----
 arch/openrisc/mm/init.c |  9 +++------
 arch/um/kernel/mem.c    | 12 ++++--------
 include/linux/mm.h      |  2 +-
 mm/page_alloc.c         |  5 ++---
 12 files changed, 38 insertions(+), 60 deletions(-)
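
To make the interface change concrete, here is a minimal user-space sketch of the two conventions (all PFN values are invented and the zone set is reduced to DMA and NORMAL; this mirrors the alpha hunk below but is not kernel code):

#include <stdio.h>

/* Invented layout: RAM covers PFNs [0, 0x40000) and the DMA-capable
 * region ends at PFN 0x1000. */
#define DMA_PFN 0x1000UL
#define MAX_PFN 0x40000UL

int main(void)
{
	/* Old convention: each slot holds a zone *size* in pages, so
	 * the architecture has to slice the span up itself. */
	unsigned long zones_size_dma = DMA_PFN;
	unsigned long zones_size_normal = MAX_PFN - DMA_PFN;

	/* New convention: each slot holds the highest PFN the zone
	 * may reach; free_area_init() intersects these limits with
	 * the memory registered in memblock to derive sizes and
	 * holes. */
	unsigned long max_zone_pfn_dma = DMA_PFN;
	unsigned long max_zone_pfn_normal = MAX_PFN;

	printf("zones_size:   DMA=%#lx NORMAL=%#lx (sizes)\n",
	       zones_size_dma, zones_size_normal);
	printf("max_zone_pfn: DMA=%#lx NORMAL=%#lx (boundaries)\n",
	       max_zone_pfn_dma, max_zone_pfn_normal);
	return 0;
}

The point of the rework is that actual extents and holes no longer have to be computed per architecture; only the upper bounds do.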

Comments

Baoquan He April 22, 2020, 11:41 p.m. UTC | #1
On 04/12/20 at 10:48pm, Mike Rapoport wrote:
> From: Mike Rapoport <rppt@linux.ibm.com>
> 
> Currently, architectures that use free_area_init() to initialize memory map
> and node and zone structures need to calculate zone and hole sizes. We can
> use free_area_init_nodes() instead and let it detect the zone boundaries
> while the architectures will only have to supply the possible limits for
> the zones.
> 
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> ---
>  arch/alpha/mm/init.c    | 16 ++++++----------
>  arch/c6x/mm/init.c      |  8 +++-----
>  arch/h8300/mm/init.c    |  6 +++---
>  arch/hexagon/mm/init.c  |  6 +++---
>  arch/m68k/mm/init.c     |  6 +++---
>  arch/m68k/mm/mcfmmu.c   |  9 +++------
>  arch/nds32/mm/init.c    | 11 ++++-------
>  arch/nios2/mm/init.c    |  8 +++-----
>  arch/openrisc/mm/init.c |  9 +++------
>  arch/um/kernel/mem.c    | 12 ++++--------
>  include/linux/mm.h      |  2 +-
>  mm/page_alloc.c         |  5 ++---
>  12 files changed, 38 insertions(+), 60 deletions(-)
> 
> diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
> index 12e218d3792a..667cd21393b5 100644
> --- a/arch/alpha/mm/init.c
> +++ b/arch/alpha/mm/init.c
> @@ -243,21 +243,17 @@ callback_init(void * kernel_end)
>   */
>  void __init paging_init(void)
>  {
> -	unsigned long zones_size[MAX_NR_ZONES] = {0, };
> -	unsigned long dma_pfn, high_pfn;
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
> +	unsigned long dma_pfn;
>  
>  	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
> -	high_pfn = max_pfn = max_low_pfn;
> +	max_pfn = max_low_pfn;
>  
> -	if (dma_pfn >= high_pfn)
> -		zones_size[ZONE_DMA] = high_pfn;
> -	else {
> -		zones_size[ZONE_DMA] = dma_pfn;
> -		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
> -	}
> +	max_zone_pfn[ZONE_DMA] = dma_pfn;
> +	max_zone_pfn[ZONE_NORMAL] = max_pfn;
>  
>  	/* Initialize mem_map[].  */
> -	free_area_init(zones_size);
> +	free_area_init(max_zone_pfn);
>  
>  	/* Initialize the kernel's ZERO_PGE. */
>  	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
> diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
> index 9b374393a8f4..a97e51a3e26d 100644
> --- a/arch/c6x/mm/init.c
> +++ b/arch/c6x/mm/init.c
> @@ -33,7 +33,7 @@ EXPORT_SYMBOL(empty_zero_page);
>  void __init paging_init(void)
>  {
>  	struct pglist_data *pgdat = NODE_DATA(0);
> -	unsigned long zones_size[MAX_NR_ZONES] = {0, };
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
>  
>  	empty_zero_page      = (unsigned long) memblock_alloc(PAGE_SIZE,
>  							      PAGE_SIZE);
> @@ -49,11 +49,9 @@ void __init paging_init(void)
>  	/*
>  	 * Define zones
>  	 */
> -	zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
> -	pgdat->node_zones[ZONE_NORMAL].zone_start_pfn =
> -		__pa(PAGE_OFFSET) >> PAGE_SHIFT;
> +	max_zone_pfn[ZONE_NORMAL] = memory_end >> PAGE_SHIFT;
>  
> -	free_area_init(zones_size);
> +	free_area_init(max_zone_pfn);
>  }
>  
>  void __init mem_init(void)
> diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
> index 1eab16b1a0bc..27a0020e3771 100644
> --- a/arch/h8300/mm/init.c
> +++ b/arch/h8300/mm/init.c
> @@ -83,10 +83,10 @@ void __init paging_init(void)
>  		 start_mem, end_mem);
>  
>  	{
> -		unsigned long zones_size[MAX_NR_ZONES] = {0, };
> +		unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
>  
> -		zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
> -		free_area_init(zones_size);
> +		max_zone_pfn[ZONE_NORMAL] = end_mem >> PAGE_SHIFT;
> +		free_area_init(max_zone_pfn);
>  	}
>  }
>  
> diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
> index c961773a6fff..f2e6c868e477 100644
> --- a/arch/hexagon/mm/init.c
> +++ b/arch/hexagon/mm/init.c
> @@ -91,7 +91,7 @@ void sync_icache_dcache(pte_t pte)
>   */
>  void __init paging_init(void)
>  {
> -	unsigned long zones_sizes[MAX_NR_ZONES] = {0, };
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
>  
>  	/*
>  	 *  This is not particularly well documented anywhere, but
> @@ -101,9 +101,9 @@ void __init paging_init(void)
>  	 *  adjust accordingly.
>  	 */
>  
> -	zones_sizes[ZONE_NORMAL] = max_low_pfn;
> +	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
>  
> -	free_area_init(zones_sizes);  /*  sets up the zonelists and mem_map  */
> +	free_area_init(max_zone_pfn);  /*  sets up the zonelists and mem_map  */
>  
>  	/*
>  	 * Start of high memory area.  Will probably need something more
> diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
> index b88d510d4fe3..6d3147662ff2 100644
> --- a/arch/m68k/mm/init.c
> +++ b/arch/m68k/mm/init.c
> @@ -84,7 +84,7 @@ void __init paging_init(void)
>  	 * page_alloc get different views of the world.
>  	 */
>  	unsigned long end_mem = memory_end & PAGE_MASK;
> -	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
>  
>  	high_memory = (void *) end_mem;
>  
> @@ -98,8 +98,8 @@ void __init paging_init(void)
>  	 */
>  	set_fs (USER_DS);
>  
> -	zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
> -	free_area_init(zones_size);
> +	max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
> +	free_area_init(max_zone_pfn);
>  }
>  
>  #endif /* CONFIG_MMU */
> diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
> index 0ea375607767..80064e6d064f 100644
> --- a/arch/m68k/mm/mcfmmu.c
> +++ b/arch/m68k/mm/mcfmmu.c
> @@ -39,7 +39,7 @@ void __init paging_init(void)
>  	pte_t *pg_table;
>  	unsigned long address, size;
>  	unsigned long next_pgtable, bootmem_end;
> -	unsigned long zones_size[MAX_NR_ZONES];
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
>  	enum zone_type zone;
>  	int i;
>  
> @@ -80,11 +80,8 @@ void __init paging_init(void)
>  	}
>  
>  	current->mm = NULL;
> -
> -	for (zone = 0; zone < MAX_NR_ZONES; zone++)
> -		zones_size[zone] = 0x0;
> -	zones_size[ZONE_DMA] = num_pages;
> -	free_area_init(zones_size);
> +	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
> +	free_area_init(max_zone_pfn);
>  }
>  
>  int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
> diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
> index 0be3833f6814..91147cca4b64 100644
> --- a/arch/nds32/mm/init.c
> +++ b/arch/nds32/mm/init.c
> @@ -31,16 +31,13 @@ EXPORT_SYMBOL(empty_zero_page);
>  
>  static void __init zone_sizes_init(void)
>  {
> -	unsigned long zones_size[MAX_NR_ZONES];
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
>  
> -	/* Clear the zone sizes */
> -	memset(zones_size, 0, sizeof(zones_size));
> -
> -	zones_size[ZONE_NORMAL] = max_low_pfn;
> +	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
>  #ifdef CONFIG_HIGHMEM
> -	zones_size[ZONE_HIGHMEM] = max_pfn;
> +	max_zone_pfn[ZONE_HIGHMEM] = max_pfn;
>  #endif
> -	free_area_init(zones_size);
> +	free_area_init(max_zone_pfn);
>  
>  }
>  
> diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
> index 2c609c2516b2..9afca77d10b1 100644
> --- a/arch/nios2/mm/init.c
> +++ b/arch/nios2/mm/init.c
> @@ -46,17 +46,15 @@ pgd_t *pgd_current;
>   */
>  void __init paging_init(void)
>  {
> -	unsigned long zones_size[MAX_NR_ZONES];
> -
> -	memset(zones_size, 0, sizeof(zones_size));
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
>  
>  	pagetable_init();
>  	pgd_current = swapper_pg_dir;
>  
> -	zones_size[ZONE_NORMAL] = max_mapnr;
> +	max_zone_pfn[ZONE_NORMAL] = max_mapnr;
>  
>  	/* pass the memory from the bootmem allocator to the main allocator */
> -	free_area_init(zones_size);
> +	free_area_init(max_zone_pfn);
>  
>  	flush_dcache_range((unsigned long)empty_zero_page,
>  			(unsigned long)empty_zero_page + PAGE_SIZE);
> diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
> index 1f87b524db78..f94fe6d3f499 100644
> --- a/arch/openrisc/mm/init.c
> +++ b/arch/openrisc/mm/init.c
> @@ -45,17 +45,14 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
>  
>  static void __init zone_sizes_init(void)
>  {
> -	unsigned long zones_size[MAX_NR_ZONES];
> -
> -	/* Clear the zone sizes */
> -	memset(zones_size, 0, sizeof(zones_size));
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
>  
>  	/*
>  	 * We use only ZONE_NORMAL
>  	 */
> -	zones_size[ZONE_NORMAL] = max_low_pfn;
> +	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
>  
> -	free_area_init(zones_size);
> +	free_area_init(max_zone_pfn);
>  }
>  
>  extern const char _s_kernel_ro[], _e_kernel_ro[];
> diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
> index 30885d0b94ac..401b22f14743 100644
> --- a/arch/um/kernel/mem.c
> +++ b/arch/um/kernel/mem.c
> @@ -158,8 +158,8 @@ static void __init fixaddr_user_init( void)
>  
>  void __init paging_init(void)
>  {
> -	unsigned long zones_size[MAX_NR_ZONES], vaddr;
> -	int i;
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
> +	unsigned long vaddr;
>  
>  	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
>  							       PAGE_SIZE);
> @@ -167,12 +167,8 @@ void __init paging_init(void)
>  		panic("%s: Failed to allocate %lu bytes align=%lx\n",
>  		      __func__, PAGE_SIZE, PAGE_SIZE);
>  
> -	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
> -		zones_size[i] = 0;
> -
> -	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
> -		(uml_physmem >> PAGE_SHIFT);
> -	free_area_init(zones_size);
> +	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
> +	free_area_init(max_zone_pfn);
>  
>  	/*
>  	 * Fixed mappings, only the page table structure has to be
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 5903bbbdb336..d9a256a97ac5 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2272,7 +2272,7 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
>  }
>  
>  extern void __init pagecache_init(void);
> -extern void free_area_init(unsigned long * zones_size);
> +extern void free_area_init(unsigned long * max_zone_pfn);
>  extern void __init free_area_init_node(int nid, unsigned long * zones_size,
>  		unsigned long zone_start_pfn, unsigned long *zholes_size);
>  extern void free_initmem(void);
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 4530e9cfd9f7..530701b38bc7 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -7700,11 +7700,10 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
>  	dma_reserve = new_dma_reserve;
>  }
>  
> -void __init free_area_init(unsigned long *zones_size)
> +void __init free_area_init(unsigned long *max_zone_pfn)
>  {
>  	init_unavailable_mem();
> -	free_area_init_node(0, zones_size,
> -			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
> +	free_area_init_nodes(max_zone_pfn);

Reviewed-by: Baoquan He <bhe@redhat.com>

>  }
>  
>  static int page_alloc_cpu_dead(unsigned int cpu)
> -- 
> 2.25.1
>
Greg Ungerer June 15, 2020, 3:53 a.m. UTC | #2
Hi Mike,

From: Mike Rapoport <rppt@linux.ibm.com>
> Currently, architectures that use free_area_init() to initialize memory map
> and node and zone structures need to calculate zone and hole sizes. We can
> use free_area_init_nodes() instead and let it detect the zone boundaries
> while the architectures will only have to supply the possible limits for
> the zones.
> 
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>

This is causing some new warnings for me on boot on at least one non-MMU m68k target:

...
NET: Registered protocol family 17
BUG: Bad page state in process swapper  pfn:20165
page:41fe0ca0 refcount:0 mapcount:1 mapping:00000000 index:0x0
flags: 0x0()
raw: 00000000 00000100 00000122 00000000 00000000 00000000 00000000 00000000
page dumped because: nonzero mapcount
CPU: 0 PID: 1 Comm: swapper Not tainted 5.8.0-rc1-00001-g3a38f8a60c65-dirty #1
Stack from 404c9ebc:
         404c9ebc 4029ab28 4029ab28 40088470 41fe0ca0 40299e21 40299df1 404ba2a4
         00020165 00000000 41fd2c10 402c7ba0 41fd2c04 40088504 41fe0ca0 40299e21
         00000000 40088a12 41fe0ca0 41fe0ca4 0000020a 00000000 00000001 402ca000
         00000000 41fe0ca0 41fd2c10 41fd2c10 00000000 00000000 402b2388 00000001
         400a0934 40091056 404c9f44 404c9f44 40088db4 402c7ba0 00000001 41fd2c04
         41fe0ca0 41fd2000 41fe0ca0 40089e02 4026ecf4 40089e4e 41fe0ca0 ffffffff
Call Trace:
         [<40088470>] 0x40088470
  [<40088504>] 0x40088504
  [<40088a12>] 0x40088a12
  [<402ca000>] 0x402ca000
  [<400a0934>] 0x400a0934

         [<40091056>] 0x40091056
  [<40088db4>] 0x40088db4
  [<40089e02>] 0x40089e02
  [<4026ecf4>] 0x4026ecf4
  [<40089e4e>] 0x40089e4e

         [<4008ca26>] 0x4008ca26
  [<4004adf8>] 0x4004adf8
  [<402701ec>] 0x402701ec
  [<4008f25e>] 0x4008f25e
  [<400516f4>] 0x400516f4

         [<4026eec0>] 0x4026eec0
  [<400224f0>] 0x400224f0
  [<402ca000>] 0x402ca000
  [<4026eeda>] 0x4026eeda
  [<40020b00>] 0x40020b00
...

Lots more of them.

...
BUG: Bad page state in process swapper  pfn:201a0
page:41fe1400 refcount:0 mapcount:1 mapping:00000000 index:0x0
flags: 0x0()
raw: 00000000 00000100 00000122 00000000 00000000 00000000 00000000 00000000
page dumped because: nonzero mapcount
CPU: 0 PID: 1 Comm: swapper Tainted: G    B             5.8.0-rc1-00001-g3a38f8a60c65-dirty #1
Stack from 404c9ebc:
         404c9ebc 4029ab28 4029ab28 40088470 41fe1400 40299e21 40299df1 404ba2a4
         000201a0 00000000 41fd2c10 402c7ba0 41fd2c04 40088504 41fe1400 40299e21
         00000000 40088a12 41fe1400 41fe1404 0000020a 0000003b 00000001 40340000
         00000000 41fe1400 41fd2c10 41fd2c10 00000000 00000000 41fe13e0 40022826
         00000044 404c9f44 404c9f44 404c9f44 40088db4 402c7ba0 00000001 41fd2c04
         41fe1400 41fd2000 41fe1400 40089e02 4026ecf4 40089e4e 41fe1400 ffffffff
Call Trace:
         [<40088470>] 0x40088470
  [<40088504>] 0x40088504
  [<40088a12>] 0x40088a12
  [<40022826>] 0x40022826
  [<40088db4>] 0x40088db4

         [<40089e02>] 0x40089e02
  [<4026ecf4>] 0x4026ecf4
  [<40089e4e>] 0x40089e4e
  [<4008ca26>] 0x4008ca26
  [<4004adf8>] 0x4004adf8

         [<402701ec>] 0x402701ec
  [<4008f25e>] 0x4008f25e
  [<400516f4>] 0x400516f4
  [<4026eec0>] 0x4026eec0
  [<400224f0>] 0x400224f0

         [<402ca000>] 0x402ca000
  [<4026eeda>] 0x4026eeda
  [<40020b00>] 0x40020b00
Freeing unused kernel memory: 648K
This architecture does not have kernel memory protection.
Run /init as init process
...

System boots pretty much as normal through user space after this.
Seems to be fully operational despite all those BUGONs.

Specifically this is a M5208EVB target (arch/m68k/configs/m5208evb).


[snip]
> diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
> index b88d510d4fe3..6d3147662ff2 100644
> --- a/arch/m68k/mm/init.c
> +++ b/arch/m68k/mm/init.c
> @@ -84,7 +84,7 @@ void __init paging_init(void)
>  	 * page_alloc get different views of the world.
>  	 */
>  	unsigned long end_mem = memory_end & PAGE_MASK;
> -	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
> +	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
>  
>  	high_memory = (void *) end_mem;
>  
> @@ -98,8 +98,8 @@ void __init paging_init(void)
>  	 */
>  	set_fs (USER_DS);
>  
> -	zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
> -	free_area_init(zones_size);
> +	max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
> +	free_area_init(max_zone_pfn);

This worries me a little. On this target PAGE_OFFSET will be non-0.

Thoughts?

Regards
Greg
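
To put numbers on the PAGE_OFFSET concern above, a sketch of the before/after arithmetic (all values invented, PAGE_SHIFT assumed to be 12; not taken from any real board):

#include <stdio.h>

#define PAGE_SHIFT  12
/* Invented layout: RAM mapped at PAGE_OFFSET, 32 MiB of it. */
#define PAGE_OFFSET 0x40000000UL
#define END_MEM     0x42000000UL

int main(void)
{
	/* Old interface: a zone *size* relative to PAGE_OFFSET; the
	 * start pfn was passed to free_area_init_node() separately. */
	unsigned long old_size = (END_MEM - PAGE_OFFSET) >> PAGE_SHIFT;

	/* New interface: an absolute boundary PFN. The zone start is
	 * no longer passed in at all -- it has to come from what the
	 * architecture registered with memblock, which is where a
	 * missing or misplaced registration shows up as bad pages. */
	unsigned long new_bound = END_MEM >> PAGE_SHIFT;

	printf("old zones_size[ZONE_DMA]   = %#lx pages\n", old_size);
	printf("new max_zone_pfn[ZONE_DMA] = %#lx\n", new_bound);
	return 0;
}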
Mike Rapoport June 15, 2020, 6:22 a.m. UTC | #3
Hi Greg,

On Mon, Jun 15, 2020 at 01:53:42PM +1000, Greg Ungerer wrote:
> Hi Mike,
> 
> From: Mike Rapoport <rppt@linux.ibm.com>
> > Currently, architectures that use free_area_init() to initialize memory map
> > and node and zone structures need to calculate zone and hole sizes. We can
> > use free_area_init_nodes() instead and let it detect the zone boundaries
> > while the architectures will only have to supply the possible limits for
> > the zones.
> > 
> > Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> 
> This is causing some new warnings for me on boot on at least one non-MMU m68k target:

There were a couple of changes that cause this. free_area_init()
now relies on memblock data and architectural limits for zone sizes
rather than on explicit pfns calculated by the arch code. I've updated
the motorola variant but missed coldfire. Angelo sent a fix for mcfmmu.c
[1] and I've updated it to include nommu as well.

[1] https://lore.kernel.org/linux-m68k/20200614225119.777702-1-angelo.dureghello@timesys.com
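
In outline, the arch-side contract after this series looks as follows (a sketch only -- the function name and the my_ram_* symbols are invented, not any real architecture's code):

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>

/* Invented stand-ins for whatever the platform actually detects. */
static phys_addr_t my_ram_start, my_ram_end;

void __init my_arch_paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	/* 1. Register where RAM actually is; free_area_init() reads
	 *    zone extents and holes from memblock, not from the arch. */
	memblock_add(my_ram_start, my_ram_end - my_ram_start);

	/* 2. Supply only the architectural upper limit of each zone. */
	max_zone_pfn[ZONE_NORMAL] = PFN_DOWN(my_ram_end);

	/* 3. The core clamps these limits against registered memory. */
	free_area_init(max_zone_pfn);
}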

From 55b8523df2a5c4565b132c0691990f0821040fec Mon Sep 17 00:00:00 2001
From: Angelo Dureghello <angelo.dureghello@timesys.com>
Date: Mon, 15 Jun 2020 00:51:19 +0200
Subject: [PATCH] m68k: fix registration of memory regions with memblock

Commit 3f08a302f533 ("mm: remove CONFIG_HAVE_MEMBLOCK_NODE_MAP option")
introduced the assumption that UMA systems have their memory at node 0 and
updated most of them, but it forgot the nommu and coldfire variants of m68k.

The later change in free area initialization in commit fa3354e4ea39 ("mm:
free_area_init: use maximal zone PFNs rather than zone sizes") exposed that
and caused a lot of "BUG: Bad page state in process swapper" reports.

Using memblock_add_node() with nid = 0 to register memory banks solves the
problem.

Fixes: 3f08a302f533 ("mm: remove CONFIG_HAVE_MEMBLOCK_NODE_MAP option")
Fixes: fa3354e4ea39 ("mm: free_area_init: use maximal zone PFNs rather than zone sizes")
Signed-off-by: Angelo Dureghello <angelo.dureghello@timesys.com>
Co-developed-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
 arch/m68k/kernel/setup_no.c | 2 +-
 arch/m68k/mm/mcfmmu.c       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index e779b19e0193..0c4589a39ba9 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -138,7 +138,7 @@ void __init setup_arch(char **cmdline_p)
 	pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
 		 __bss_stop, memory_start, memory_start, memory_end);
 
-	memblock_add(memory_start, memory_end - memory_start);
+	memblock_add_node(memory_start, memory_end - memory_start, 0);
 
 	/* Keep a copy of command line */
 	*cmdline_p = &command_line[0];
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 29f47923aa46..7d04210d34f0 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -174,7 +174,7 @@ void __init cf_bootmem_alloc(void)
 	m68k_memory[0].addr = _rambase;
 	m68k_memory[0].size = _ramend - _rambase;
 
-	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
 
 	/* compute total pages in system */
 	num_pages = PFN_DOWN(_ramend - _rambase);
Greg Ungerer June 15, 2020, 7:17 a.m. UTC | #4
Hi Mike,

On 15/6/20 4:22 pm, Mike Rapoport wrote:
> On Mon, Jun 15, 2020 at 01:53:42PM +1000, Greg Ungerer wrote:
>> From: Mike Rapoport <rppt@linux.ibm.com>
>>> Currently, architectures that use free_area_init() to initialize memory map
>>> and node and zone structures need to calculate zone and hole sizes. We can
>>> use free_area_init_nodes() instead and let it detect the zone boundaries
>>> while the architectures will only have to supply the possible limits for
>>> the zones.
>>>
>>> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
>>
>> This is causing some new warnings for me on boot on at least one non-MMU m68k target:
> 
> There were a couple of changes that cause this. free_area_init()
> now relies on memblock data and architectural limits for zone sizes
> rather than on explicit pfns calculated by the arch code. I've updated
> the motorola variant but missed coldfire. Angelo sent a fix for mcfmmu.c
> [1] and I've updated it to include nommu as well.
> 
> [1] https://lore.kernel.org/linux-m68k/20200614225119.777702-1-angelo.dureghello@timesys.com
> 
> From 55b8523df2a5c4565b132c0691990f0821040fec Mon Sep 17 00:00:00 2001
> From: Angelo Dureghello <angelo.dureghello@timesys.com>
> Date: Mon, 15 Jun 2020 00:51:19 +0200
> Subject: [PATCH] m68k: fix registration of memory regions with memblock
> 
> Commit 3f08a302f533 ("mm: remove CONFIG_HAVE_MEMBLOCK_NODE_MAP option")
> introduced the assumption that UMA systems have their memory at node 0 and
> updated most of them, but it forgot the nommu and coldfire variants of m68k.
> 
> The later change in free area initialization in commit fa3354e4ea39 ("mm:
> free_area_init: use maximal zone PFNs rather than zone sizes") exposed that
> and caused a lot of "BUG: Bad page state in process swapper" reports.

Even with this patch applied I am still seeing the same messages.

Regards
Greg



> Using memblock_add_node() with nid = 0 to register memory banks solves the
> problem.
> 
> Fixes: 3f08a302f533 ("mm: remove CONFIG_HAVE_MEMBLOCK_NODE_MAP option")
> Fixes: fa3354e4ea39 ("mm: free_area_init: use maximal zone PFNs rather than zone sizes")
> Signed-off-by: Angelo Dureghello <angelo.dureghello@timesys.com>
> Co-developed-by: Mike Rapoport <rppt@linux.ibm.com>
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> ---
>   arch/m68k/kernel/setup_no.c | 2 +-
>   arch/m68k/mm/mcfmmu.c       | 2 +-
>   2 files changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
> index e779b19e0193..0c4589a39ba9 100644
> --- a/arch/m68k/kernel/setup_no.c
> +++ b/arch/m68k/kernel/setup_no.c
> @@ -138,7 +138,7 @@ void __init setup_arch(char **cmdline_p)
>   	pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
>   		 __bss_stop, memory_start, memory_start, memory_end);
>   
> -	memblock_add(memory_start, memory_end - memory_start);
> +	memblock_add_node(memory_start, memory_end - memory_start, 0);
>   
>   	/* Keep a copy of command line */
>   	*cmdline_p = &command_line[0];
> diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
> index 29f47923aa46..7d04210d34f0 100644
> --- a/arch/m68k/mm/mcfmmu.c
> +++ b/arch/m68k/mm/mcfmmu.c
> @@ -174,7 +174,7 @@ void __init cf_bootmem_alloc(void)
>   	m68k_memory[0].addr = _rambase;
>   	m68k_memory[0].size = _ramend - _rambase;
>   
> -	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
> +	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
>   
>   	/* compute total pages in system */
>   	num_pages = PFN_DOWN(_ramend - _rambase);
>
Mike Rapoport June 15, 2020, 8:29 a.m. UTC | #5
(reduced the spam list)

On Mon, Jun 15, 2020 at 05:17:28PM +1000, Greg Ungerer wrote:
> Hi Mike,
> 
> On 15/6/20 4:22 pm, Mike Rapoport wrote:
> > On Mon, Jun 15, 2020 at 01:53:42PM +1000, Greg Ungerer wrote:
> > > From: Mike Rapoport <rppt@linux.ibm.com>
> > > > Currently, architectures that use free_area_init() to initialize memory map
> > > > and node and zone structures need to calculate zone and hole sizes. We can
> > > > use free_area_init_nodes() instead and let it detect the zone boundaries
> > > > while the architectures will only have to supply the possible limits for
> > > > the zones.
> > > > 
> > > > Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> > > 
> > > This is causing some new warnings for me on boot on at least one non-MMU m68k target:
> > 
> > There were a couple of changes that cause this. free_area_init()
> > now relies on memblock data and architectural limits for zone sizes
> > rather than on explicit pfns calculated by the arch code. I've updated
> > the motorola variant but missed coldfire. Angelo sent a fix for mcfmmu.c
> > [1] and I've updated it to include nommu as well.
> > 
> > [1] https://lore.kernel.org/linux-m68k/20200614225119.777702-1-angelo.dureghello@timesys.com
> > 
> > From 55b8523df2a5c4565b132c0691990f0821040fec Mon Sep 17 00:00:00 2001
> > From: Angelo Dureghello <angelo.dureghello@timesys.com>
> > Date: Mon, 15 Jun 2020 00:51:19 +0200
> > Subject: [PATCH] m68k: fix registration of memory regions with memblock
> > 
> > Commit 3f08a302f533 ("mm: remove CONFIG_HAVE_MEMBLOCK_NODE_MAP option")
> > introduced the assumption that UMA systems have their memory at node 0 and
> > updated most of them, but it forgot the nommu and coldfire variants of m68k.
> > 
> > The later change in free area initialization in commit fa3354e4ea39 ("mm:
> > free_area_init: use maximal zone PFNs rather than zone sizes") exposed that
> > and caused a lot of "BUG: Bad page state in process swapper" reports.
> 
> Even with this patch applied I am still seeing the same messages.

Argh, it was too early in the morning...
Can you please try the one below?

It seems that coldfire didn't register all its physical memory with
memblock and the pfn list was damaged because of that.


diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index e779b19e0193..f66f4b1d062e 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -138,7 +138,8 @@ void __init setup_arch(char **cmdline_p)
 	pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
 		 __bss_stop, memory_start, memory_start, memory_end);
 
-	memblock_add(memory_start, memory_end - memory_start);
+	memblock_add(_rambase, memory_end - _rambase);
+	memblock_reserve(_rambase, memory_start - _rambase);
 
 	/* Keep a copy of command line */
 	*cmdline_p = &command_line[0];
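
The shape of this second fix, stated generally (a sketch; the parameters stand for the _rambase/memory_start/memory_end symbols in the patch): register the whole bank so every page gets properly initialized struct page state, then reserve the head of it that the kernel image already occupies.

#include <linux/init.h>
#include <linux/memblock.h>

static void __init register_bank_sketch(phys_addr_t rambase,
					phys_addr_t memory_start,
					phys_addr_t memory_end)
{
	/* Make all of RAM known to memblock... */
	memblock_add(rambase, memory_end - rambase);

	/* ...but never hand the in-use head to the page allocator as
	 * free memory. */
	memblock_reserve(rambase, memory_start - rambase);
}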

> Regards
> Greg
> 
> 
> 
> > Using memblock_add_node() with nid = 0 to register memory banks solves the
> > problem.
> > 
> > Fixes: 3f08a302f533 ("mm: remove CONFIG_HAVE_MEMBLOCK_NODE_MAP option")
> > Fixes: fa3354e4ea39 ("mm: free_area_init: use maximal zone PFNs rather than zone sizes")
> > Signed-off-by: Angelo Dureghello <angelo.dureghello@timesys.com>
> > Co-developed-by: Mike Rapoport <rppt@linux.ibm.com>
> > Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> > ---
> >   arch/m68k/kernel/setup_no.c | 2 +-
> >   arch/m68k/mm/mcfmmu.c       | 2 +-
> >   2 files changed, 2 insertions(+), 2 deletions(-)
> > 
> > diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
> > index e779b19e0193..0c4589a39ba9 100644
> > --- a/arch/m68k/kernel/setup_no.c
> > +++ b/arch/m68k/kernel/setup_no.c
> > @@ -138,7 +138,7 @@ void __init setup_arch(char **cmdline_p)
> >   	pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
> >   		 __bss_stop, memory_start, memory_start, memory_end);
> > -	memblock_add(memory_start, memory_end - memory_start);
> > +	memblock_add_node(memory_start, memory_end - memory_start, 0);
> >   	/* Keep a copy of command line */
> >   	*cmdline_p = &command_line[0];
> > diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
> > index 29f47923aa46..7d04210d34f0 100644
> > --- a/arch/m68k/mm/mcfmmu.c
> > +++ b/arch/m68k/mm/mcfmmu.c
> > @@ -174,7 +174,7 @@ void __init cf_bootmem_alloc(void)
> >   	m68k_memory[0].addr = _rambase;
> >   	m68k_memory[0].size = _ramend - _rambase;
> > -	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
> > +	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
> >   	/* compute total pages in system */
> >   	num_pages = PFN_DOWN(_ramend - _rambase);
> >
Greg Ungerer June 15, 2020, 1:02 p.m. UTC | #6
Hi Mike,

On 15/6/20 6:29 pm, Mike Rapoport wrote:
> (reduced the spam list)
> 
> On Mon, Jun 15, 2020 at 05:17:28PM +1000, Greg Ungerer wrote:
>> On 15/6/20 4:22 pm, Mike Rapoport wrote:
>>> On Mon, Jun 15, 2020 at 01:53:42PM +1000, Greg Ungerer wrote:
>>>> From: Mike Rapoport <rppt@linux.ibm.com>
>>>>> Currently, architectures that use free_area_init() to initialize memory map
>>>>> and node and zone structures need to calculate zone and hole sizes. We can
>>>>> use free_area_init_nodes() instead and let it detect the zone boundaries
>>>>> while the architectures will only have to supply the possible limits for
>>>>> the zones.
>>>>>
>>>>> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
>>>>
>>>> This is causing some new warnings for me on boot on at least one non-MMU m68k target:
>>>
>>> There were a couple of changes that cause this. free_area_init()
>>> now relies on memblock data and architectural limits for zone sizes
>>> rather than on explicit pfns calculated by the arch code. I've updated
>>> the motorola variant but missed coldfire. Angelo sent a fix for mcfmmu.c
>>> [1] and I've updated it to include nommu as well.
>>>
>>> [1] https://lore.kernel.org/linux-m68k/20200614225119.777702-1-angelo.dureghello@timesys.com
>>>
>>> From 55b8523df2a5c4565b132c0691990f0821040fec Mon Sep 17 00:00:00 2001
>>> From: Angelo Dureghello <angelo.dureghello@timesys.com>
>>> Date: Mon, 15 Jun 2020 00:51:19 +0200
>>> Subject: [PATCH] m68k: fix registration of memory regions with memblock
>>>
>>> Commit 3f08a302f533 ("mm: remove CONFIG_HAVE_MEMBLOCK_NODE_MAP option")
>>> introduced the assumption that UMA systems have their memory at node 0 and
>>> updated most of them, but it forgot the nommu and coldfire variants of m68k.
>>>
>>> The later change in free area initialization in commit fa3354e4ea39 ("mm:
>>> free_area_init: use maximal zone PFNs rather than zone sizes") exposed that
>>> and caused a lot of "BUG: Bad page state in process swapper" reports.
>>
>> Even with this patch applied I am still seeing the same messages.
> 
> Argh, it was too early in the morning...
> Can you please try the one below?
> 
> It seems that coldfire didn't register all its physical memory with
> memblock and the pfn list was damaged because of that.
> 
> 
> diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
> index e779b19e0193..f66f4b1d062e 100644
> --- a/arch/m68k/kernel/setup_no.c
> +++ b/arch/m68k/kernel/setup_no.c
> @@ -138,7 +138,8 @@ void __init setup_arch(char **cmdline_p)
>   	pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
>   		 __bss_stop, memory_start, memory_start, memory_end);
>   
> -	memblock_add(memory_start, memory_end - memory_start);
> +	memblock_add(_rambase, memory_end - _rambase);
> +	memblock_reserve(_rambase, memory_start - _rambase);
>   
>   	/* Keep a copy of command line */
>   	*cmdline_p = &command_line[0];

Yep, that's got it. Boots clean again with this one.

Regards
Greg

Patch

diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 12e218d3792a..667cd21393b5 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -243,21 +243,17 @@  callback_init(void * kernel_end)
  */
 void __init paging_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = {0, };
-	unsigned long dma_pfn, high_pfn;
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
+	unsigned long dma_pfn;
 
 	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-	high_pfn = max_pfn = max_low_pfn;
+	max_pfn = max_low_pfn;
 
-	if (dma_pfn >= high_pfn)
-		zones_size[ZONE_DMA] = high_pfn;
-	else {
-		zones_size[ZONE_DMA] = dma_pfn;
-		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
-	}
+	max_zone_pfn[ZONE_DMA] = dma_pfn;
+	max_zone_pfn[ZONE_NORMAL] = max_pfn;
 
 	/* Initialize mem_map[].  */
-	free_area_init(zones_size);
+	free_area_init(max_zone_pfn);
 
 	/* Initialize the kernel's ZERO_PGE. */
 	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index 9b374393a8f4..a97e51a3e26d 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -33,7 +33,7 @@  EXPORT_SYMBOL(empty_zero_page);
 void __init paging_init(void)
 {
 	struct pglist_data *pgdat = NODE_DATA(0);
-	unsigned long zones_size[MAX_NR_ZONES] = {0, };
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
 
 	empty_zero_page      = (unsigned long) memblock_alloc(PAGE_SIZE,
 							      PAGE_SIZE);
@@ -49,11 +49,9 @@  void __init paging_init(void)
 	/*
 	 * Define zones
 	 */
-	zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
-	pgdat->node_zones[ZONE_NORMAL].zone_start_pfn =
-		__pa(PAGE_OFFSET) >> PAGE_SHIFT;
+	max_zone_pfn[ZONE_NORMAL] = memory_end >> PAGE_SHIFT;
 
-	free_area_init(zones_size);
+	free_area_init(max_zone_pfn);
 }
 
 void __init mem_init(void)
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 1eab16b1a0bc..27a0020e3771 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -83,10 +83,10 @@  void __init paging_init(void)
 		 start_mem, end_mem);
 
 	{
-		unsigned long zones_size[MAX_NR_ZONES] = {0, };
+		unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
 
-		zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
-		free_area_init(zones_size);
+		max_zone_pfn[ZONE_NORMAL] = end_mem >> PAGE_SHIFT;
+		free_area_init(max_zone_pfn);
 	}
 }
 
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index c961773a6fff..f2e6c868e477 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -91,7 +91,7 @@  void sync_icache_dcache(pte_t pte)
  */
 void __init paging_init(void)
 {
-	unsigned long zones_sizes[MAX_NR_ZONES] = {0, };
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
 
 	/*
 	 *  This is not particularly well documented anywhere, but
@@ -101,9 +101,9 @@  void __init paging_init(void)
 	 *  adjust accordingly.
 	 */
 
-	zones_sizes[ZONE_NORMAL] = max_low_pfn;
+	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
 
-	free_area_init(zones_sizes);  /*  sets up the zonelists and mem_map  */
+	free_area_init(max_zone_pfn);  /*  sets up the zonelists and mem_map  */
 
 	/*
 	 * Start of high memory area.  Will probably need something more
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index b88d510d4fe3..6d3147662ff2 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -84,7 +84,7 @@  void __init paging_init(void)
 	 * page_alloc get different views of the world.
 	 */
 	unsigned long end_mem = memory_end & PAGE_MASK;
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
 
 	high_memory = (void *) end_mem;
 
@@ -98,8 +98,8 @@  void __init paging_init(void)
 	 */
 	set_fs (USER_DS);
 
-	zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
-	free_area_init(zones_size);
+	max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
+	free_area_init(max_zone_pfn);
 }
 
 #endif /* CONFIG_MMU */
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 0ea375607767..80064e6d064f 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -39,7 +39,7 @@  void __init paging_init(void)
 	pte_t *pg_table;
 	unsigned long address, size;
 	unsigned long next_pgtable, bootmem_end;
-	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
 	enum zone_type zone;
 	int i;
 
@@ -80,11 +80,8 @@  void __init paging_init(void)
 	}
 
 	current->mm = NULL;
-
-	for (zone = 0; zone < MAX_NR_ZONES; zone++)
-		zones_size[zone] = 0x0;
-	zones_size[ZONE_DMA] = num_pages;
-	free_area_init(zones_size);
+	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
+	free_area_init(max_zone_pfn);
 }
 
 int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index 0be3833f6814..91147cca4b64 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -31,16 +31,13 @@  EXPORT_SYMBOL(empty_zero_page);
 
 static void __init zone_sizes_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
 
-	/* Clear the zone sizes */
-	memset(zones_size, 0, sizeof(zones_size));
-
-	zones_size[ZONE_NORMAL] = max_low_pfn;
+	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = max_pfn;
+	max_zone_pfn[ZONE_HIGHMEM] = max_pfn;
 #endif
-	free_area_init(zones_size);
+	free_area_init(max_zone_pfn);
 
 }
 
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 2c609c2516b2..9afca77d10b1 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -46,17 +46,15 @@  pgd_t *pgd_current;
  */
 void __init paging_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES];
-
-	memset(zones_size, 0, sizeof(zones_size));
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
 
 	pagetable_init();
 	pgd_current = swapper_pg_dir;
 
-	zones_size[ZONE_NORMAL] = max_mapnr;
+	max_zone_pfn[ZONE_NORMAL] = max_mapnr;
 
 	/* pass the memory from the bootmem allocator to the main allocator */
-	free_area_init(zones_size);
+	free_area_init(max_zone_pfn);
 
 	flush_dcache_range((unsigned long)empty_zero_page,
 			(unsigned long)empty_zero_page + PAGE_SIZE);
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 1f87b524db78..f94fe6d3f499 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -45,17 +45,14 @@  DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 static void __init zone_sizes_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES];
-
-	/* Clear the zone sizes */
-	memset(zones_size, 0, sizeof(zones_size));
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
 
 	/*
 	 * We use only ZONE_NORMAL
 	 */
-	zones_size[ZONE_NORMAL] = max_low_pfn;
+	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
 
-	free_area_init(zones_size);
+	free_area_init(max_zone_pfn);
 }
 
 extern const char _s_kernel_ro[], _e_kernel_ro[];
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 30885d0b94ac..401b22f14743 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -158,8 +158,8 @@  static void __init fixaddr_user_init( void)
 
 void __init paging_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES], vaddr;
-	int i;
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
+	unsigned long vaddr;
 
 	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
 							       PAGE_SIZE);
@@ -167,12 +167,8 @@  void __init paging_init(void)
 		panic("%s: Failed to allocate %lu bytes align=%lx\n",
 		      __func__, PAGE_SIZE, PAGE_SIZE);
 
-	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
-		zones_size[i] = 0;
-
-	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
-		(uml_physmem >> PAGE_SHIFT);
-	free_area_init(zones_size);
+	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
+	free_area_init(max_zone_pfn);
 
 	/*
 	 * Fixed mappings, only the page table structure has to be
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5903bbbdb336..d9a256a97ac5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2272,7 +2272,7 @@  static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
 }
 
 extern void __init pagecache_init(void);
-extern void free_area_init(unsigned long * zones_size);
+extern void free_area_init(unsigned long * max_zone_pfn);
 extern void __init free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
 extern void free_initmem(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4530e9cfd9f7..530701b38bc7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7700,11 +7700,10 @@  void __init set_dma_reserve(unsigned long new_dma_reserve)
 	dma_reserve = new_dma_reserve;
 }
 
-void __init free_area_init(unsigned long *zones_size)
+void __init free_area_init(unsigned long *max_zone_pfn)
 {
 	init_unavailable_mem();
-	free_area_init_node(0, zones_size,
-			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
+	free_area_init_nodes(max_zone_pfn);
 }
 
 static int page_alloc_cpu_dead(unsigned int cpu)