
[RFC,2/9] sparse-vmemmap: Consolidate arguments in vmemmap section populate

Message ID 20201208172901.17384-3-joao.m.martins@oracle.com (mailing list archive)
State New
Series mm, sparse-vmemmap: Introduce compound pagemaps

Commit Message

Joao Martins Dec. 8, 2020, 5:28 p.m. UTC
Replace vmem_altmap with a vmem_context argument. That lets us
express how the vmemmap is going to be initialized, e.g. passing
flags and a page size for reusing pages upon initializing the
vmemmap.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 include/linux/memory_hotplug.h |  6 +++++-
 include/linux/mm.h             |  2 +-
 mm/memory_hotplug.c            |  3 ++-
 mm/sparse-vmemmap.c            |  6 +++++-
 mm/sparse.c                    | 16 ++++++++--------
 5 files changed, 21 insertions(+), 12 deletions(-)

Comments

John Hubbard Dec. 9, 2020, 6:16 a.m. UTC | #1
On 12/8/20 9:28 AM, Joao Martins wrote:
> Replace vmem_altmap with a vmem_context argument. That lets us
> express how the vmemmap is going to be initialized, e.g. passing
> flags and a page size for reusing pages upon initializing the
> vmemmap.

How about this instead:

Replace the vmem_altmap argument with a vmem_context argument that
contains vmem_altmap for now. Subsequent patches will add additional
member elements to vmem_context, such as flags and page size.

No behavior changes are intended.

?
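(If it helps, the commit log could even sketch the end state. Judging from
the cover letter, I'd guess something like this -- member names entirely
made up on my end:

	struct vmem_context {
		struct vmem_altmap *altmap;
		unsigned long flags;		/* e.g. reuse vmemmap pages */
		unsigned long page_size;	/* compound page geometry */
	};
)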

> 
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> ---
>   include/linux/memory_hotplug.h |  6 +++++-
>   include/linux/mm.h             |  2 +-
>   mm/memory_hotplug.c            |  3 ++-
>   mm/sparse-vmemmap.c            |  6 +++++-
>   mm/sparse.c                    | 16 ++++++++--------
>   5 files changed, 21 insertions(+), 12 deletions(-)
> 
> diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
> index 551093b74596..73f8bcbb58a4 100644
> --- a/include/linux/memory_hotplug.h
> +++ b/include/linux/memory_hotplug.h
> @@ -81,6 +81,10 @@ struct mhp_params {
>   	pgprot_t pgprot;
>   };
>   
> +struct vmem_context {
> +	struct vmem_altmap *altmap;
> +};
> +
>   /*
>    * Zone resizing functions
>    *
> @@ -353,7 +357,7 @@ extern void remove_pfn_range_from_zone(struct zone *zone,
>   				       unsigned long nr_pages);
>   extern bool is_memblock_offlined(struct memory_block *mem);
>   extern int sparse_add_section(int nid, unsigned long pfn,
> -		unsigned long nr_pages, struct vmem_altmap *altmap);
> +		unsigned long nr_pages, struct vmem_context *ctx);
>   extern void sparse_remove_section(struct mem_section *ms,
>   		unsigned long pfn, unsigned long nr_pages,
>   		unsigned long map_offset, struct vmem_altmap *altmap);
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index db6ae4d3fb4e..2eb44318bb2d 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -3000,7 +3000,7 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
>   
>   void *sparse_buffer_alloc(unsigned long size);
>   struct page * __populate_section_memmap(unsigned long pfn,
> -		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
> +		unsigned long nr_pages, int nid, struct vmem_context *ctx);
>   pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
>   p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
>   pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 63b2e46b6555..f8870c53fe5e 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -313,6 +313,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
>   	unsigned long cur_nr_pages;
>   	int err;
>   	struct vmem_altmap *altmap = params->altmap;
> +	struct vmem_context ctx = { .altmap = params->altmap };

OK, so this is the one place I can see where ctx is set up: __add_pages()
stack-allocates it and passes &ctx down through sparse_add_section() ->
section_activate() -> populate_section_memmap(). And it's never null.
Let's remember that point...

>   
>   	if (WARN_ON_ONCE(!params->pgprot.pgprot))
>   		return -EINVAL;
> @@ -341,7 +342,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
>   		/* Select all remaining pages up to the next section boundary */
>   		cur_nr_pages = min(end_pfn - pfn,
>   				   SECTION_ALIGN_UP(pfn + 1) - pfn);
> -		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
> +		err = sparse_add_section(nid, pfn, cur_nr_pages, &ctx);
>   		if (err)
>   			break;
>   		cond_resched();
> diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
> index 16183d85a7d5..bcda68ba1381 100644
> --- a/mm/sparse-vmemmap.c
> +++ b/mm/sparse-vmemmap.c
> @@ -249,15 +249,19 @@ int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
>   }
>   
>   struct page * __meminit __populate_section_memmap(unsigned long pfn,
> -		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
> +		unsigned long nr_pages, int nid, struct vmem_context *ctx)
>   {
>   	unsigned long start = (unsigned long) pfn_to_page(pfn);
>   	unsigned long end = start + nr_pages * sizeof(struct page);
> +	struct vmem_altmap *altmap = NULL;
>   
>   	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
>   		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
>   		return NULL;
>   
> +	if (ctx)

But...ctx can never be null, right?

I didn't spot any other issues, though.

thanks,
Joao Martins Dec. 9, 2020, 1:51 p.m. UTC | #2
On 12/9/20 6:16 AM, John Hubbard wrote:
> On 12/8/20 9:28 AM, Joao Martins wrote:
>> Replace vmem_altmap with a vmem_context argument. That lets us
>> express how the vmemmap is going to be initialized, e.g. passing
>> flags and a page size for reusing pages upon initializing the
>> vmemmap.
> 
> How about this instead:
> 
> Replace the vmem_altmap argument with a vmem_context argument that
> contains vmem_altmap for now. Subsequent patches will add additional
> member elements to vmem_context, such as flags and page size.
> 
> No behavior changes are intended.
> 
> ?
> 
Yeap, it's better that way. Thanks.

>>
>> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
>> ---
>>   include/linux/memory_hotplug.h |  6 +++++-
>>   include/linux/mm.h             |  2 +-
>>   mm/memory_hotplug.c            |  3 ++-
>>   mm/sparse-vmemmap.c            |  6 +++++-
>>   mm/sparse.c                    | 16 ++++++++--------
>>   5 files changed, 21 insertions(+), 12 deletions(-)
>>
>> diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
>> index 551093b74596..73f8bcbb58a4 100644
>> --- a/include/linux/memory_hotplug.h
>> +++ b/include/linux/memory_hotplug.h
>> @@ -81,6 +81,10 @@ struct mhp_params {
>>   	pgprot_t pgprot;
>>   };
>>   
>> +struct vmem_context {
>> +	struct vmem_altmap *altmap;
>> +};
>> +
>>   /*
>>    * Zone resizing functions
>>    *
>> @@ -353,7 +357,7 @@ extern void remove_pfn_range_from_zone(struct zone *zone,
>>   				       unsigned long nr_pages);
>>   extern bool is_memblock_offlined(struct memory_block *mem);
>>   extern int sparse_add_section(int nid, unsigned long pfn,
>> -		unsigned long nr_pages, struct vmem_altmap *altmap);
>> +		unsigned long nr_pages, struct vmem_context *ctx);
>>   extern void sparse_remove_section(struct mem_section *ms,
>>   		unsigned long pfn, unsigned long nr_pages,
>>   		unsigned long map_offset, struct vmem_altmap *altmap);
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index db6ae4d3fb4e..2eb44318bb2d 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -3000,7 +3000,7 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
>>   
>>   void *sparse_buffer_alloc(unsigned long size);
>>   struct page * __populate_section_memmap(unsigned long pfn,
>> -		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
>> +		unsigned long nr_pages, int nid, struct vmem_context *ctx);
>>   pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
>>   p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
>>   pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
>> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
>> index 63b2e46b6555..f8870c53fe5e 100644
>> --- a/mm/memory_hotplug.c
>> +++ b/mm/memory_hotplug.c
>> @@ -313,6 +313,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
>>   	unsigned long cur_nr_pages;
>>   	int err;
>>   	struct vmem_altmap *altmap = params->altmap;
>> +	struct vmem_context ctx = { .altmap = params->altmap };
> 
> OK, so this is the one place I can see where ctx is set up: __add_pages()
> stack-allocates it and passes &ctx down through sparse_add_section() ->
> section_activate() -> populate_section_memmap(). And it's never null.
> Let's remember that point...
> 

(...)

>>   
>>   	if (WARN_ON_ONCE(!params->pgprot.pgprot))
>>   		return -EINVAL;
>> @@ -341,7 +342,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
>>   		/* Select all remaining pages up to the next section boundary */
>>   		cur_nr_pages = min(end_pfn - pfn,
>>   				   SECTION_ALIGN_UP(pfn + 1) - pfn);
>> -		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
>> +		err = sparse_add_section(nid, pfn, cur_nr_pages, &ctx);
>>   		if (err)
>>   			break;
>>   		cond_resched();
>> diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
>> index 16183d85a7d5..bcda68ba1381 100644
>> --- a/mm/sparse-vmemmap.c
>> +++ b/mm/sparse-vmemmap.c
>> @@ -249,15 +249,19 @@ int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
>>   }
>>   
>>   struct page * __meminit __populate_section_memmap(unsigned long pfn,
>> -		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
>> +		unsigned long nr_pages, int nid, struct vmem_context *ctx)
>>   {
>>   	unsigned long start = (unsigned long) pfn_to_page(pfn);
>>   	unsigned long end = start + nr_pages * sizeof(struct page);
>> +	struct vmem_altmap *altmap = NULL;
>>   
>>   	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
>>   		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
>>   		return NULL;
>>   
>> +	if (ctx)
> 
> But...ctx can never be null, right?
> 
Indeed.

This is an artifact of an older version of this patch, where the passed
parameter could be null.
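
With ctx guaranteed non-null, the tail of the function collapses to
something like (untested):

	struct vmem_altmap *altmap = ctx->altmap;

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

I'll drop the check in the next version.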

> I didn't spot any other issues, though.
> 
> thanks,
>
Dan Williams Feb. 20, 2021, 1:49 a.m. UTC | #3
On Tue, Dec 8, 2020 at 9:31 AM Joao Martins <joao.m.martins@oracle.com> wrote:
>
> Replace vmem_altmap with a vmem_context argument. That lets us
> express how the vmemmap is going to be initialized, e.g. passing
> flags and a page size for reusing pages upon initializing the
> vmemmap.
>

Per the comment on the last patch, if compound dev_pagemap never
collides with vmem_altmap then I don't think this patch is needed.
Joao Martins Feb. 22, 2021, 11:26 a.m. UTC | #4
On 2/20/21 1:49 AM, Dan Williams wrote:
> On Tue, Dec 8, 2020 at 9:31 AM Joao Martins <joao.m.martins@oracle.com> wrote:
>>
>> Replace vmem_altmap with a vmem_context argument. That lets us
>> express how the vmemmap is going to be initialized, e.g. passing
>> flags and a page size for reusing pages upon initializing the
>> vmemmap.
>>
> 
> Per the comment on the last patch, if compound dev_pagemap never
> collides with vmem_altmap then I don't think this patch is needed.
> 
See my reply to the previous patch. It *might* be worth keeping this around.

And since the RFC, nvdimm is going to need a slight adjustment for the
altmap reserved pfn range, should we keep altmap around.
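
For reference, that reserve is the one nvdimm computes when setting up the
pfn superblock -- from memory, roughly (not exact code):

	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
	};

so any vmemmap page reuse would need to skip over that reserved pfn range.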

Patch

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 551093b74596..73f8bcbb58a4 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -81,6 +81,10 @@  struct mhp_params {
 	pgprot_t pgprot;
 };
 
+struct vmem_context {
+	struct vmem_altmap *altmap;
+};
+
 /*
  * Zone resizing functions
  *
@@ -353,7 +357,7 @@  extern void remove_pfn_range_from_zone(struct zone *zone,
 				       unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern int sparse_add_section(int nid, unsigned long pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap);
+		unsigned long nr_pages, struct vmem_context *ctx);
 extern void sparse_remove_section(struct mem_section *ms,
 		unsigned long pfn, unsigned long nr_pages,
 		unsigned long map_offset, struct vmem_altmap *altmap);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index db6ae4d3fb4e..2eb44318bb2d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3000,7 +3000,7 @@  static inline void print_vma_addr(char *prefix, unsigned long rip)
 
 void *sparse_buffer_alloc(unsigned long size);
 struct page * __populate_section_memmap(unsigned long pfn,
-		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
+		unsigned long nr_pages, int nid, struct vmem_context *ctx);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 63b2e46b6555..f8870c53fe5e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -313,6 +313,7 @@  int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 	unsigned long cur_nr_pages;
 	int err;
 	struct vmem_altmap *altmap = params->altmap;
+	struct vmem_context ctx = { .altmap = params->altmap };
 
 	if (WARN_ON_ONCE(!params->pgprot.pgprot))
 		return -EINVAL;
@@ -341,7 +342,7 @@  int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 		/* Select all remaining pages up to the next section boundary */
 		cur_nr_pages = min(end_pfn - pfn,
 				   SECTION_ALIGN_UP(pfn + 1) - pfn);
-		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
+		err = sparse_add_section(nid, pfn, cur_nr_pages, &ctx);
 		if (err)
 			break;
 		cond_resched();
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 16183d85a7d5..bcda68ba1381 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -249,15 +249,19 @@  int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
 }
 
 struct page * __meminit __populate_section_memmap(unsigned long pfn,
-		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
+		unsigned long nr_pages, int nid, struct vmem_context *ctx)
 {
 	unsigned long start = (unsigned long) pfn_to_page(pfn);
 	unsigned long end = start + nr_pages * sizeof(struct page);
+	struct vmem_altmap *altmap = NULL;
 
 	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
 		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
 		return NULL;
 
+	if (ctx)
+		altmap = ctx->altmap;
+
 	if (vmemmap_populate(start, end, nid, altmap))
 		return NULL;
 
diff --git a/mm/sparse.c b/mm/sparse.c
index 7bd23f9d6cef..47ca494398a7 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -443,7 +443,7 @@  static unsigned long __init section_map_size(void)
 }
 
 struct page __init *__populate_section_memmap(unsigned long pfn,
-		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
+		unsigned long nr_pages, int nid, struct vmem_context *ctx)
 {
 	unsigned long size = section_map_size();
 	struct page *map = sparse_buffer_alloc(size);
@@ -648,9 +648,9 @@  void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 static struct page * __meminit populate_section_memmap(unsigned long pfn,
-		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
+		unsigned long nr_pages, int nid, struct vmem_context *ctx)
 {
-	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
+	return __populate_section_memmap(pfn, nr_pages, nid, ctx);
 }
 
 static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
@@ -842,7 +842,7 @@  static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 }
 
 static struct page * __meminit section_activate(int nid, unsigned long pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap)
+		unsigned long nr_pages, struct vmem_context *ctx)
 {
 	struct mem_section *ms = __pfn_to_section(pfn);
 	struct mem_section_usage *usage = NULL;
@@ -874,9 +874,9 @@  static struct page * __meminit section_activate(int nid, unsigned long pfn,
 	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
 		return pfn_to_page(pfn);
 
-	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
+	memmap = populate_section_memmap(pfn, nr_pages, nid, ctx);
 	if (!memmap) {
-		section_deactivate(pfn, nr_pages, altmap);
+		section_deactivate(pfn, nr_pages, ctx->altmap);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -902,7 +902,7 @@  static struct page * __meminit section_activate(int nid, unsigned long pfn,
  * * -ENOMEM	- Out of memory.
  */
 int __meminit sparse_add_section(int nid, unsigned long start_pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap)
+		unsigned long nr_pages, struct vmem_context *ctx)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct mem_section *ms;
@@ -913,7 +913,7 @@  int __meminit sparse_add_section(int nid, unsigned long start_pfn,
 	if (ret < 0)
 		return ret;
 
-	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
+	memmap = section_activate(nid, start_pfn, nr_pages, ctx);
 	if (IS_ERR(memmap))
 		return PTR_ERR(memmap);