
[mm,v3,2/6] mm: Drop meminit_pfn_in_nid as it is redundant

Message ID 20181015202703.2171.40829.stgit@localhost.localdomain (mailing list archive)
State New, archived
Series Deferred page init improvements

Commit Message

Alexander Duyck Oct. 15, 2018, 8:27 p.m. UTC
As best I can tell, the meminit_pfn_in_nid call is completely redundant.
The deferred memory initialization already uses for_each_free_mem_range,
which in turn calls into __next_mem_range, which will only return a
memory range if it matches the node ID provided, assuming that ID is
not NUMA_NO_NODE.

I am operating on the assumption that there are no zones or pg_data_t
structures that have a NUMA node of NUMA_NO_NODE associated with them.
If that is the case, then __next_mem_range will never return a memory
range that doesn't match the zone's node ID, and as such the check is
redundant.

One piece I would like to verify is whether this works for ia64.
Technically it was using a different approach to get the node ID, but
it seems to have the node ID encoded into the memblock as well. I am
assuming this is okay, but would like to get confirmation on that.
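
For reference, the node filter in question lives in __next_mem_range; a
simplified sketch of the relevant part, based on mm/memblock.c of this
era (exact details may vary by version):

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;
		...
	}

Since deferred_init_memmap passes the node's ID to
for_each_free_mem_range, every range that reaches the deferred init
loops should already belong to that node.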

Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
---
 mm/page_alloc.c |   50 ++++++++++++++------------------------------------
 1 file changed, 14 insertions(+), 36 deletions(-)

Comments

Pavel Tatashin Oct. 16, 2018, 8:33 p.m. UTC | #1
On 10/15/18 4:27 PM, Alexander Duyck wrote:
> As best I can tell, the meminit_pfn_in_nid call is completely redundant.
> The deferred memory initialization already uses for_each_free_mem_range,
> which in turn calls into __next_mem_range, which will only return a
> memory range if it matches the node ID provided, assuming that ID is
> not NUMA_NO_NODE.
> 
> I am operating on the assumption that there are no zones or pg_data_t
> structures that have a NUMA node of NUMA_NO_NODE associated with them.
> If that is the case, then __next_mem_range will never return a memory
> range that doesn't match the zone's node ID, and as such the check is
> redundant.
> 
> One piece I would like to verify is whether this works for ia64.
> Technically it was using a different approach to get the node ID, but
> it seems to have the node ID encoded into the memblock as well. I am
> assuming this is okay, but would like to get confirmation on that.
> 
> Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>

If I am not mistaken, this code is for systems with memory interleaving.
A quick look shows that x86, powerpc, s390, and sparc have it set.

I am not sure about other arches, but at least on SPARC there are some
processors with a memory interleaving feature:

http://www.fujitsu.com/global/products/computing/servers/unix/sparc-enterprise/technology/performance/memory.html
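
As a hypothetical illustration of the interleaved layout the check was
meant to cover (a pfn can fall between a node's start and end without
belonging to that node):

	pfns:  | nid 0 | nid 1 | nid 0 | nid 1 |
	       ^--- node 0's start_pfn..end_pfn span covers all four
	            ranges, so pfns in the "nid 1" ranges lie inside
	            node 0's span but belong to node 1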

Pavel


Alexander Duyck Oct. 16, 2018, 8:49 p.m. UTC | #2
On 10/16/2018 1:33 PM, Pavel Tatashin wrote:
> 
> If I am not mistaken, this code is for systems with memory interleaving.
> A quick look shows that x86, powerpc, s390, and sparc have it set.
> 
> I am not sure about other arches, but at least on SPARC there are some
> processors with a memory interleaving feature:
> 
> http://www.fujitsu.com/global/products/computing/servers/unix/sparc-enterprise/technology/performance/memory.html
> 
> Pavel

I get what it is for. However, as best I can tell, the check is
actually redundant. In the case of the deferred page initialization we
are already pulling the memory regions via "for_each_free_mem_range",
and that function is already passed a NUMA node ID, so we are already
checking each memory range to determine whether it is in the node. As
such it doesn't really make sense to then walk each PFN and go back to
the memory range to see if the node matches.

You can take a look at __next_mem_range, which is called by
for_each_free_mem_range and passed &memblock.memory and
&memblock.reserved (the latter being the ranges to avoid):
https://elixir.bootlin.com/linux/latest/source/mm/memblock.c#L899
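
(For reference, that iterator is just a thin wrapper -- simplified from
include/linux/memblock.h of this era, exact form may differ:)

	#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
		for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
				   nid, flags, p_start, p_end, p_nid)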

Then you can work your way through:
meminit_pfn_in_nid(pfn, node, state)
  __early_pfn_to_nid(pfn, state)
   memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn)
    memblock_search(&memblock.memory, pfn)

From what I can tell, the deferred init is going back through the
memblock.memory list we pulled this range from and just validating it
against itself. This makes sense for the standard init, as that just
walks from start_pfn to end_pfn, but the deferred init pulls the memory
ranges ahead of time, so we shouldn't need to re-validate the memory
contained within those ranges.
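
To make the round trip concrete, the lookup being skipped boils down to
roughly this (a simplified sketch of __early_pfn_to_nid from
mm/page_alloc.c of this era; exact code may differ):

	int __meminit __early_pfn_to_nid(unsigned long pfn,
					 struct mminit_pfnnid_cache *state)
	{
		unsigned long start_pfn, end_pfn;
		int nid;

		/* fast path: pfn falls in the last range we looked up */
		if (state->last_start <= pfn && pfn < state->last_end)
			return state->last_nid;

		/* slow path: binary search of memblock.memory for the pfn */
		nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
		if (nid != NUMA_NO_NODE) {
			state->last_start = start_pfn;
			state->last_end = end_pfn;
			state->last_nid = nid;
		}
		return nid;
	}

i.e. a search of the same memblock.memory array the range came from.
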
Pavel Tatashin Oct. 16, 2018, 9:06 p.m. UTC | #3
On 10/16/18 4:49 PM, Alexander Duyck wrote:
> I get what it is for. However, as best I can tell, the check is
> actually redundant. In the case of the deferred page initialization we
> are already pulling the memory regions via "for_each_free_mem_range",
> and that function is already passed a NUMA node ID, so we are already
> checking each memory range to determine whether it is in the node. As
> such it doesn't really make sense to then walk each PFN and go back to
> the memory range to see if the node matches.
> 

Agreed, it looks redundant; nice clean-up, I like it.

Reviewed-by: Pavel Tatashin <pavel.tatashin@microsoft.com>

Thank you,
Pavel


Michal Hocko Oct. 17, 2018, 9:04 a.m. UTC | #4
On Mon 15-10-18 13:27:03, Alexander Duyck wrote:
> As best I can tell, the meminit_pfn_in_nid call is completely redundant.
> The deferred memory initialization already uses for_each_free_mem_range,
> which in turn calls into __next_mem_range, which will only return a
> memory range if it matches the node ID provided, assuming that ID is
> not NUMA_NO_NODE.
> 
> I am operating on the assumption that there are no zones or pg_data_t
> structures that have a NUMA node of NUMA_NO_NODE associated with them.
> If that is the case, then __next_mem_range will never return a memory
> range that doesn't match the zone's node ID, and as such the check is
> redundant.
> 
> One piece I would like to verify is whether this works for ia64.
> Technically it was using a different approach to get the node ID, but
> it seems to have the node ID encoded into the memblock as well. I am
> assuming this is okay, but would like to get confirmation on that.

Good catch. Both for_each_free_mem_range and __early_pfn_to_nid rely on
the memblock layer to properly map ranges to nids. I haven't checked too
closely whether this was really necessary in the original deferred
implementation by Mel, but it is much clearer that it is not needed now
with the clear iterator.
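
Both iterators key off the per-region node ID that memblock tracks; as
a rough sketch of the structure involved (assuming a kernel of this
era, where the nid field depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP):

	struct memblock_region {
		phys_addr_t base;
		phys_addr_t size;
		enum memblock_flags flags;
	#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		int nid;	/* set by the arch via memblock_set_node() */
	#endif
	};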

> Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>

Acked-by: Michal Hocko <mhocko@suse.com>


Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4bd858d1c3ba..a766a15fad81 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1301,36 +1301,22 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 #endif
 
 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
-static inline bool __meminit __maybe_unused
-meminit_pfn_in_nid(unsigned long pfn, int node,
-		   struct mminit_pfnnid_cache *state)
+/* Only safe to use early in boot when initialisation is single-threaded */
+static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 {
 	int nid;
 
-	nid = __early_pfn_to_nid(pfn, state);
+	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
 	if (nid >= 0 && nid != node)
 		return false;
 	return true;
 }
 
-/* Only safe to use early in boot when initialisation is single-threaded */
-static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
-{
-	return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
-}
-
 #else
-
 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 {
 	return true;
 }
-static inline bool __meminit  __maybe_unused
-meminit_pfn_in_nid(unsigned long pfn, int node,
-		   struct mminit_pfnnid_cache *state)
-{
-	return true;
-}
 #endif
 
 
@@ -1459,21 +1445,13 @@ static inline void __init pgdat_init_report_one_done(void)
  *
  * Then, we check if a current large page is valid by only checking the validity
  * of the head pfn.
- *
- * Finally, meminit_pfn_in_nid is checked on systems where pfns can interleave
- * within a node: a pfn is between start and end of a node, but does not belong
- * to this memory node.
  */
-static inline bool __init
-deferred_pfn_valid(int nid, unsigned long pfn,
-		   struct mminit_pfnnid_cache *nid_init_state)
+static inline bool __init deferred_pfn_valid(unsigned long pfn)
 {
 	if (!pfn_valid_within(pfn))
 		return false;
 	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
 		return false;
-	if (!meminit_pfn_in_nid(pfn, nid, nid_init_state))
-		return false;
 	return true;
 }
 
@@ -1481,15 +1459,14 @@ static inline void __init pgdat_init_report_one_done(void)
  * Free pages to buddy allocator. Try to free aligned pages in
  * pageblock_nr_pages sizes.
  */
-static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
+static void __init deferred_free_pages(unsigned long pfn,
 				       unsigned long end_pfn)
 {
-	struct mminit_pfnnid_cache nid_init_state = { };
 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
 	unsigned long nr_free = 0;
 
 	for (; pfn < end_pfn; pfn++) {
-		if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
+		if (!deferred_pfn_valid(pfn)) {
 			deferred_free_range(pfn - nr_free, nr_free);
 			nr_free = 0;
 		} else if (!(pfn & nr_pgmask)) {
@@ -1509,17 +1486,18 @@ static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
  * by performing it only once every pageblock_nr_pages.
  * Return number of pages initialized.
  */
-static unsigned long  __init deferred_init_pages(int nid, int zid,
+static unsigned long  __init deferred_init_pages(struct zone *zone,
 						 unsigned long pfn,
 						 unsigned long end_pfn)
 {
-	struct mminit_pfnnid_cache nid_init_state = { };
 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
+	int nid = zone_to_nid(zone);
 	unsigned long nr_pages = 0;
+	int zid = zone_idx(zone);
 	struct page *page = NULL;
 
 	for (; pfn < end_pfn; pfn++) {
-		if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
+		if (!deferred_pfn_valid(pfn)) {
 			page = NULL;
 			continue;
 		} else if (!page || !(pfn & nr_pgmask)) {
@@ -1582,12 +1560,12 @@ static int __init deferred_init_memmap(void *data)
 	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
 		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
 		epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
-		nr_pages += deferred_init_pages(nid, zid, spfn, epfn);
+		nr_pages += deferred_init_pages(zone, spfn, epfn);
 	}
 	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
 		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
 		epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
-		deferred_free_pages(nid, zid, spfn, epfn);
+		deferred_free_pages(spfn, epfn);
 	}
 	pgdat_resize_unlock(pgdat, &flags);
 
@@ -1676,7 +1654,7 @@ static int __init deferred_init_memmap(void *data)
 		while (spfn < epfn && nr_pages < nr_pages_needed) {
 			t = ALIGN(spfn + PAGES_PER_SECTION, PAGES_PER_SECTION);
 			first_deferred_pfn = min(t, epfn);
-			nr_pages += deferred_init_pages(nid, zid, spfn,
+			nr_pages += deferred_init_pages(zone, spfn,
 							first_deferred_pfn);
 			spfn = first_deferred_pfn;
 		}
@@ -1688,7 +1666,7 @@ static int __init deferred_init_memmap(void *data)
 	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
 		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
 		epfn = min_t(unsigned long, first_deferred_pfn, PFN_DOWN(epa));
-		deferred_free_pages(nid, zid, spfn, epfn);
+		deferred_free_pages(spfn, epfn);
 
 		if (first_deferred_pfn == epfn)
 			break;