[v2] mm, sparse: drop pgdat_resize_lock in sparse_add/remove_one_section()

Message ID 20181128091243.19249-1-richard.weiyang@gmail.com (mailing list archive)
State New, archived

Commit Message

Wei Yang Nov. 28, 2018, 9:12 a.m. UTC
In sparse_add/remove_one_section(), pgdat_resize_lock is used to
protect the initialization/release of one mem_section. This does not
look necessary for the current implementation.
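
For reference, pgdat_resize_lock() is only a thin wrapper around the
pgdat's node_size_lock; a sketch matching include/linux/memory_hotplug.h
of this era:

    static inline
    void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
    {
            /* node_size_lock guards node_start_pfn/node_spanned_pages */
            spin_lock_irqsave(&pgdat->node_size_lock, *flags);
    }

    static inline
    void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
    {
            spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
    }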

The following are the current call traces of sparse_add/remove_one_section():

    mem_hotplug_begin()
    arch_add_memory()
       add_pages()
           __add_pages()
               __add_section()
                   sparse_add_one_section()
    mem_hotplug_done()

    mem_hotplug_begin()
    arch_remove_memory()
        __remove_pages()
            __remove_section()
                sparse_remove_one_section()
    mem_hotplug_done()

which shows that these functions are serialized by the global
mem_hotplug_lock, so there is no contention when accessing the
mem_section.
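
A simplified sketch of the serialization points (approximate, from
mm/memory_hotplug.c of this era); mem_hotplug_lock is a global percpu
rwsem and every hotplug operation takes it for write:

    void mem_hotplug_begin(void)
    {
            cpus_read_lock();
            /* writer side: excludes any other hotplug operation */
            percpu_down_write(&mem_hotplug_lock);
    }

    void mem_hotplug_done(void)
    {
            percpu_up_write(&mem_hotplug_lock);
            cpus_read_unlock();
    }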

The only information sparse_add_one_section() needs from pgdat is the
node id, which is used to allocate memory on the proper node. This patch
therefore also changes the prototype of sparse_add_one_section() to pass
the node id directly, to avoid the misleading impression that
sparse_add_one_section() would touch pgdat.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>

---
v2:
   * adjust changelog to show this procedure is serialized by global
     mem_hotplug_lock
---
 include/linux/memory_hotplug.h |  2 +-
 mm/memory_hotplug.c            |  2 +-
 mm/sparse.c                    | 17 +++++------------
 3 files changed, 7 insertions(+), 14 deletions(-)

Comments

David Hildenbrand Nov. 28, 2018, 10:28 a.m. UTC | #1
On 28.11.18 10:12, Wei Yang wrote:
> In sparse_add/remove_one_section(), pgdat_resize_lock is used to
> protect the initialization/release of one mem_section. This does not
> look necessary for the current implementation.
> 
> The following are the current call traces of sparse_add/remove_one_section():
> 
>     mem_hotplug_begin()
>     arch_add_memory()
>        add_pages()
>            __add_pages()
>                __add_section()
>                    sparse_add_one_section()
>     mem_hotplug_done()
> 
>     mem_hotplug_begin()
>     arch_remove_memory()
>         __remove_pages()
>             __remove_section()
>                 sparse_remove_one_section()
>     mem_hotplug_done()
> 
> which shows that these functions are serialized by the global
> mem_hotplug_lock, so there is no contention when accessing the
> mem_section.
> 
> The only information sparse_add_one_section() needs from pgdat is the
> node id, which is used to allocate memory on the proper node. This patch
> therefore also changes the prototype of sparse_add_one_section() to pass
> the node id directly, to avoid the misleading impression that
> sparse_add_one_section() would touch pgdat.
> 
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>

Reviewed-by: David Hildenbrand <david@redhat.com>

Michal Hocko Nov. 29, 2018, 8:54 a.m. UTC | #2
On Wed 28-11-18 17:12:43, Wei Yang wrote:
> In sparse_add/remove_one_section(), pgdat_resize_lock is used to
> protect the initialization/release of one mem_section. This does not
> look necessary for the current implementation.
> 
> The following are the current call traces of sparse_add/remove_one_section():
> 
>     mem_hotplug_begin()
>     arch_add_memory()
>        add_pages()
>            __add_pages()
>                __add_section()
>                    sparse_add_one_section()
>     mem_hotplug_done()
> 
>     mem_hotplug_begin()
>     arch_remove_memory()
>         __remove_pages()
>             __remove_section()
>                 sparse_remove_one_section()
>     mem_hotplug_done()
> 
> which shows that these functions are serialized by the global
> mem_hotplug_lock, so there is no contention when accessing the
> mem_section.

Again there is no explanation _why_ we want this patch. The reason is
that the lock doesn't really protect the size of the pgdat here. The
comment above the lock also mentions
"Holding this will also guarantee that any pfn_valid() stays that way."
which is true with the current implementation and false after this patch,
but I fail to see how this is helpful. I do not see any pfn walkers
taking the lock, so this looks like a relic from the past.

The comment should go away in this patch.
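
For reference, the comment in question sits above node_size_lock in
include/linux/mmzone.h; approximate wording from this era's tree:

    /*
     * Must be held any time you expect node_start_pfn,
     * node_present_pages or node_spanned_pages to stay constant.
     * Holding this will also guarantee that any pfn_valid() stays
     * that way.
     *
     * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
     * manipulate node_size_lock without checking for
     * CONFIG_MEMORY_HOTPLUG.
     *
     * Nests above zone->lock and zone->span_seqlock
     */
    spinlock_t node_size_lock;

The pfn_valid() sentence is the part that becomes stale once this lock
is no longer taken on the hot-add/remove path.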

> 
> Since the information needed in sparse_add_one_section() is node id to
> allocate proper memory. This patch also changes the prototype of
> sparse_add_one_section() to pass node id directly. This is intended to
> reduce misleading that sparse_add_one_section() would touch pgdat.

I would do that in a separate patch because the review would be slightly
easier.

> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>

With the comment removed
Acked-by: Michal Hocko <mhocko@suse.com>

Wei Yang Nov. 29, 2018, 9:29 a.m. UTC | #3
On Thu, Nov 29, 2018 at 09:54:22AM +0100, Michal Hocko wrote:
>On Wed 28-11-18 17:12:43, Wei Yang wrote:
>> In sparse_add/remove_one_section(), pgdat_resize_lock is used to
>> protect the initialization/release of one mem_section. This does not
>> look necessary for the current implementation.
>> 
>> The following are the current call traces of sparse_add/remove_one_section():
>> 
>>     mem_hotplug_begin()
>>     arch_add_memory()
>>        add_pages()
>>            __add_pages()
>>                __add_section()
>>                    sparse_add_one_section()
>>     mem_hotplug_done()
>> 
>>     mem_hotplug_begin()
>>     arch_remove_memory()
>>         __remove_pages()
>>             __remove_section()
>>                 sparse_remove_one_section()
>>     mem_hotplug_done()
>> 
>> which shows that these functions are serialized by the global
>> mem_hotplug_lock, so there is no contention when accessing the
>> mem_section.
>
>Again there is no explanation _why_ we want this patch. The reason is
>that the lock doesn't really protect the size of the pgdat here. The
>comment above the lock also mentions
>"Holding this will also guarantee that any pfn_valid() stays that way."
>which is true with the current implementation and false after this patch,
>but I fail to see how this is helpful. I do not see any pfn walkers
>taking the lock, so this looks like a relic from the past.
>
>The comment should go away in this patch.
>

Ok, let me try to address this.
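
Something like the following hunk against include/linux/mmzone.h, I
suppose (illustrative only; comment wording and context are approximate):

    --- a/include/linux/mmzone.h
    +++ b/include/linux/mmzone.h
    @@ ... @@
     	/*
     	 * Must be held any time you expect node_start_pfn,
     	 * node_present_pages or node_spanned_pages to stay constant.
    -	 * Holding this will also guarantee that any pfn_valid() stays
    -	 * that way.
     	 *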

>> 
>> The only information sparse_add_one_section() needs from pgdat is the
>> node id, which is used to allocate memory on the proper node. This patch
>> therefore also changes the prototype of sparse_add_one_section() to pass
>> the node id directly, to avoid the misleading impression that
>> sparse_add_one_section() would touch pgdat.
>
>I would do that in a separate patch because the review would be slightly
>easier.

Oops, I thought the merged version was preferred.

Hmm... I would prepare v3 to separate them.

Patch

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 45a5affcab8a..3787d4e913e6 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -333,7 +333,7 @@ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
-extern int sparse_add_one_section(struct pglist_data *pgdat,
+extern int sparse_add_one_section(int nid,
 		unsigned long start_pfn, struct vmem_altmap *altmap);
 extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 		unsigned long map_offset, struct vmem_altmap *altmap);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f626e7e5f57b..5b3a3d7b4466 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -253,7 +253,7 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
 	if (pfn_valid(phys_start_pfn))
 		return -EEXIST;
 
-	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn, altmap);
+	ret = sparse_add_one_section(nid, phys_start_pfn, altmap);
 	if (ret < 0)
 		return ret;
 
diff --git a/mm/sparse.c b/mm/sparse.c
index 33307fc05c4d..a4fdbcb21514 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -662,25 +662,24 @@ static void free_map_bootmem(struct page *memmap)
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int __meminit sparse_add_one_section(struct pglist_data *pgdat,
-		unsigned long start_pfn, struct vmem_altmap *altmap)
+int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
+				     struct vmem_altmap *altmap)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct mem_section *ms;
 	struct page *memmap;
 	unsigned long *usemap;
-	unsigned long flags;
 	int ret;
 
 	/*
 	 * no locking for this, because it does its own
 	 * plus, it does a kmalloc
 	 */
-	ret = sparse_index_init(section_nr, pgdat->node_id);
+	ret = sparse_index_init(section_nr, nid);
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
 	ret = 0;
-	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
+	memmap = kmalloc_section_memmap(section_nr, nid, altmap);
 	if (!memmap)
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
@@ -689,8 +688,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
 		return -ENOMEM;
 	}
 
-	pgdat_resize_lock(pgdat, &flags);
-
 	ms = __pfn_to_section(start_pfn);
 	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
 		ret = -EEXIST;
@@ -707,7 +704,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
 	sparse_init_one_section(ms, section_nr, memmap, usemap);
 
 out:
-	pgdat_resize_unlock(pgdat, &flags);
 	if (ret < 0) {
 		kfree(usemap);
 		__kfree_section_memmap(memmap, altmap);
@@ -769,10 +765,8 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 		unsigned long map_offset, struct vmem_altmap *altmap)
 {
 	struct page *memmap = NULL;
-	unsigned long *usemap = NULL, flags;
-	struct pglist_data *pgdat = zone->zone_pgdat;
+	unsigned long *usemap = NULL;
 
-	pgdat_resize_lock(pgdat, &flags);
 	if (ms->section_mem_map) {
 		usemap = ms->pageblock_flags;
 		memmap = sparse_decode_mem_map(ms->section_mem_map,
@@ -780,7 +774,6 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 		ms->section_mem_map = 0;
 		ms->pageblock_flags = NULL;
 	}
-	pgdat_resize_unlock(pgdat, &flags);
 
 	clear_hwpoisoned_pages(memmap + map_offset,
 			PAGES_PER_SECTION - map_offset);