
[1/7] mm/sparse.c: Introduce new function fill_subsection_map()

Message ID 20200209104826.3385-2-bhe@redhat.com (mailing list archive)
State New, archived
Series mm/hotplug: Only use subsection in VMEMMAP case and fix hot add/remove failure in SPARSEMEM|!VMEMMAP case

Commit Message

Baoquan He Feb. 9, 2020, 10:48 a.m. UTC
Wrap the code that fills the subsection map in section_activate() into
fill_subsection_map(). This makes section_activate() cleaner and
easier to follow.

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/sparse.c | 45 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 34 insertions(+), 11 deletions(-)

Comments

Wei Yang Feb. 9, 2020, 11:05 p.m. UTC | #1
On Sun, Feb 09, 2020 at 06:48:20PM +0800, Baoquan He wrote:
>Wrap the code that fills the subsection map in section_activate() into
>fill_subsection_map(). This makes section_activate() cleaner and
>easier to follow.
>

This looks like a preparation for #ifdef'ing the code for VMEMMAP, so why
not take the usage handling into this function too?
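
For illustration, a rough sketch of that suggestion (hypothetical; the
posted patch keeps the ms->usage allocation in section_activate()) could
look like:

	static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
	{
		struct mem_section *ms = __pfn_to_section(pfn);
		DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
		unsigned long *subsection_map;
		int rc = 0;

		/* Hypothetical: allocate ms->usage here, not in the caller. */
		if (!ms->usage) {
			ms->usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
			if (!ms->usage)
				return -ENOMEM;
		}

		subsection_mask_set(map, pfn, nr_pages);
		subsection_map = &ms->usage->subsection_map[0];

		if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
			rc = -EINVAL;
		else if (bitmap_intersects(map, subsection_map,
					   SUBSECTIONS_PER_SECTION))
			rc = -EEXIST;
		else
			bitmap_or(subsection_map, map, subsection_map,
					SUBSECTIONS_PER_SECTION);

		return rc;
	}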

>[...]
>+/**
>+ * fill_subsection_map - fill subsection map of a memory region
>+ * @pfn - start pfn of the memory range
>+ * @nr_pages - number of pfns to add in the region
>+ *
>+ * This clears the related subsection map inside one section, and only

s/clears/fills/ ?

>+ * intended for hotplug.
>[...]
Wei Yang Feb. 9, 2020, 11:11 p.m. UTC | #2
On Mon, Feb 10, 2020 at 07:05:56AM +0800, Wei Yang wrote:
>On Sun, Feb 09, 2020 at 06:48:20PM +0800, Baoquan He wrote:
>>[...]
>
>This looks like a preparation for #ifdef'ing the code for VMEMMAP, so why
>not take the usage handling into this function too?
>

Oops, you are right. My mistake.

Baoquan He Feb. 10, 2020, 3:25 a.m. UTC | #3
On 02/10/20 at 07:05am, Wei Yang wrote:
> >-static struct page * __meminit section_activate(int nid, unsigned long pfn,
> >-		unsigned long nr_pages, struct vmem_altmap *altmap)
> >+/**
> >+ * fill_subsection_map - fill subsection map of a memory region
> >+ * @pfn - start pfn of the memory range
> >+ * @nr_pages - number of pfns to add in the region
> >+ *
> >+ * This clears the related subsection map inside one section, and only
> 
> s/clears/fills/ ?

Good catch, thanks for your careful review.

I will wait a while to see if there's any input from other reviewers,
then update the patch accordingly.

David Hildenbrand Feb. 10, 2020, 9:49 a.m. UTC | #4
On 09.02.20 11:48, Baoquan He wrote:
> Wrap the code that fills the subsection map in section_activate() into
> fill_subsection_map(). This makes section_activate() cleaner and
> easier to follow.
> 
> [...]

What about having two variants of
section_activate()/section_deactivate() instead? Then we don't have any
subsection-related stuff in the !subsection code.
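
A rough sketch of that layout (hypothetical, not part of the posted
series, and glossing over the populate failure cleanup) could be:

	#ifdef CONFIG_SPARSEMEM_VMEMMAP
	static struct page * __meminit section_activate(int nid, unsigned long pfn,
			unsigned long nr_pages, struct vmem_altmap *altmap)
	{
		struct mem_section *ms = __pfn_to_section(pfn);
		struct mem_section_usage *usage = NULL;
		struct page *memmap;
		int rc;

		if (!ms->usage) {
			usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
			if (!usage)
				return ERR_PTR(-ENOMEM);
			ms->usage = usage;
		}

		/* The subsection map only exists in the VMEMMAP case. */
		rc = fill_subsection_map(pfn, nr_pages);
		if (rc) {
			if (usage) {
				ms->usage = NULL;
				kfree(usage);
			}
			return ERR_PTR(rc);
		}

		/* An early section's memmap is fully populated already. */
		if (nr_pages < PAGES_PER_SECTION && early_section(ms))
			return pfn_to_page(pfn);

		memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
		if (!memmap)
			return ERR_PTR(-ENOMEM);
		return memmap;
	}
	#else
	static struct page * __meminit section_activate(int nid, unsigned long pfn,
			unsigned long nr_pages, struct vmem_altmap *altmap)
	{
		struct mem_section *ms = __pfn_to_section(pfn);
		struct page *memmap;

		/* Classic SPARSEMEM: whole sections only, no subsection map. */
		if (!ms->usage) {
			ms->usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
			if (!ms->usage)
				return ERR_PTR(-ENOMEM);
		}

		memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
		if (!memmap)
			return ERR_PTR(-ENOMEM);
		return memmap;
	}
	#endif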
Baoquan He Feb. 11, 2020, 12:46 p.m. UTC | #5
On 02/10/20 at 10:49am, David Hildenbrand wrote:
> [...]
> 
> What about having two variants of
> section_activate()/section_deactivate() instead? Then we don't have any
> subsection-related stuff in the !subsection code.

Thanks for looking into this, David.

Having two variants of section_activate()/section_deactivate() would also
work. But unlike the memmap handling, which differs so much between classic
sparse and vmemmap that two variants are very attractive there, the code
and logic in section_activate()/section_deactivate() is small and mostly
shared between the two cases, so separate variants don't seem necessary.
I personally prefer the current way; what do you think?

Thanks
Baoquan
David Hildenbrand Feb. 11, 2020, 2:44 p.m. UTC | #6
On 11.02.20 13:46, Baoquan He wrote:
> On 02/10/20 at 10:49am, David Hildenbrand wrote:
>> [...]
>>
>> What about having two variants of
>> section_activate()/section_deactivate() instead? Then we don't have any
>> subsection-related stuff in the !subsection code.
> 
> Thanks for looking into this, David.
> 
> Having two variants of section_activate()/section_deactivate() would also
> work. But unlike the memmap handling, which differs so much between classic
> sparse and vmemmap that two variants are very attractive there, the code
> and logic in section_activate()/section_deactivate() is small and mostly
> shared between the two cases, so separate variants don't seem necessary.
> I personally prefer the current way; what do you think?

I was looking at

if (nr_pages < PAGES_PER_SECTION && early_section(ms))
	return pfn_to_page(pfn);

and thought that it is also specific to sub-section handling. I wonder
if we can simply move that into the VMEMMAP variant of
populate_section_memmap()?
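
For instance, a hypothetical sketch, assuming the VMEMMAP variant simply
wraps the existing __populate_section_memmap():

	static struct page * __meminit populate_section_memmap(unsigned long pfn,
			unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
	{
		/*
		 * An early section's memmap was fully populated at boot, so
		 * a sub-section hot-add can hand back the existing pages.
		 */
		if (nr_pages < PAGES_PER_SECTION &&
		    early_section(__pfn_to_section(pfn)))
			return pfn_to_page(pfn);

		return __populate_section_memmap(pfn, nr_pages, nid, altmap);
	}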

But apart from that I agree that the end result with the current
approach is also nice.

Can you reshuffle the patches, moving the fixes to the very front so we
can backport more easily?
Baoquan He Feb. 12, 2020, 11:21 a.m. UTC | #7
On 02/11/20 at 03:44pm, David Hildenbrand wrote:
> [...]
> 
> Can you reshuffle the patches, moving the fixes to the very front so we
> can backport more easily?

Sure, I will move it to be the first one. Thanks.

Patch

diff --git a/mm/sparse.c b/mm/sparse.c
index c184b69460b7..9ad741ccbeb6 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -788,24 +788,28 @@  static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 		depopulate_section_memmap(pfn, nr_pages, altmap);
 }
 
-static struct page * __meminit section_activate(int nid, unsigned long pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap)
+/**
+ * fill_subsection_map - fill subsection map of a memory region
+ * @pfn - start pfn of the memory range
+ * @nr_pages - number of pfns to add in the region
+ *
+ * This clears the related subsection map inside one section, and only
+ * intended for hotplug.
+ *
+ * Return:
+ * * 0		- On success.
+ * * -EINVAL	- Invalid memory region.
+ * * -EEXIST	- Subsection map has been set.
+ */
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
-	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
 	struct mem_section *ms = __pfn_to_section(pfn);
-	struct mem_section_usage *usage = NULL;
+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
 	unsigned long *subsection_map;
-	struct page *memmap;
 	int rc = 0;
 
 	subsection_mask_set(map, pfn, nr_pages);
 
-	if (!ms->usage) {
-		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
-		if (!usage)
-			return ERR_PTR(-ENOMEM);
-		ms->usage = usage;
-	}
 	subsection_map = &ms->usage->subsection_map[0];
 
 	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
@@ -816,6 +820,25 @@  static struct page * __meminit section_activate(int nid, unsigned long pfn,
 		bitmap_or(subsection_map, map, subsection_map,
 				SUBSECTIONS_PER_SECTION);
 
+	return rc;
+}
+
+static struct page * __meminit section_activate(int nid, unsigned long pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
+{
+	struct mem_section *ms = __pfn_to_section(pfn);
+	struct mem_section_usage *usage = NULL;
+	struct page *memmap;
+	int rc = 0;
+
+	if (!ms->usage) {
+		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
+		if (!usage)
+			return ERR_PTR(-ENOMEM);
+		ms->usage = usage;
+	}
+
+	rc = fill_subsection_map(pfn, nr_pages);
 	if (rc) {
 		if (usage)
 			ms->usage = NULL;