[v6,04/12] mm/hotplug: Prepare shrink_{zone,pgdat}_span for sub-section removal

Message ID 155552635609.2015392.6246305135559796835.stgit@dwillia2-desk3.amr.corp.intel.com (mailing list archive)
State Superseded
Series mm: Sub-section memory hotplug support

Commit Message

Dan Williams April 17, 2019, 6:39 p.m. UTC
Sub-section hotplug support reduces the unit of operation of hotplug
from section-sized-units (PAGES_PER_SECTION) to sub-section-sized units
(PAGES_PER_SUBSECTION). Teach shrink_{zone,pgdat}_span() to consider
PAGES_PER_SUBSECTION boundaries as the points where pfn_valid(), not
valid_section(), can toggle.

Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 include/linux/mmzone.h |    2 ++
 mm/memory_hotplug.c    |   16 ++++++++--------
 2 files changed, 10 insertions(+), 8 deletions(-)
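
For context, the arithmetic behind the two new defines: SECTION_ACTIVE_SIZE
splits a section's byte size across the BITS_PER_LONG bits of the per-section
subsection map, and PAGES_PER_SUB_SECTION converts that to pages. Below is a
minimal userspace sketch of the math, assuming the common x86_64 parameters
(SECTION_SIZE_BITS = 27, 64-bit longs, 4 KiB pages), which the patch itself
does not spell out:

/*
 * Back-of-the-envelope sketch, not kernel code. The parameter values
 * below are assumptions matching a typical x86_64 configuration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long section_size_bits = 27;	/* assumed x86_64 default */
	unsigned long bits_per_long = 64;
	unsigned long page_size = 4096;		/* assumed 4 KiB pages */

	/* One subsection per bit of the subsection bitmap */
	unsigned long active_size =
		(1UL << section_size_bits) / bits_per_long;
	unsigned long pages_per_sub_section = active_size / page_size;

	printf("subsection: %lu bytes (%lu MiB), %lu pages\n",
	       active_size, active_size >> 20, pages_per_sub_section);
	/* -> 2097152 bytes (2 MiB), 512 pages */
	return 0;
}

With those values, hotplug granularity drops from a 128 MiB section (32768
pages) to a 2 MiB subsection (512 pages), matching the 2 MiB alignment of
the pmem namespaces that motivate the series.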

Comments

Ralph Campbell April 19, 2019, 11:09 p.m. UTC | #1
Just noticed this by inspection.
I can't say I'm very familiar with the code.

On 4/17/19 11:39 AM, Dan Williams wrote:
> Sub-section hotplug support reduces the unit of operation of hotplug
> from section-sized-units (PAGES_PER_SECTION) to sub-section-sized units
> (PAGES_PER_SUBSECTION). Teach shrink_{zone,pgdat}_span() to consider
> PAGES_PER_SUBSECTION boundaries as the points where pfn_valid(), not
> valid_section(), can toggle.
> 
> Cc: Michal Hocko <mhocko@suse.com>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Logan Gunthorpe <logang@deltatee.com>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> ---
>   include/linux/mmzone.h |    2 ++
>   mm/memory_hotplug.c    |   16 ++++++++--------
>   2 files changed, 10 insertions(+), 8 deletions(-)
> 
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index cffde898e345..b13f0cddf75e 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -1164,6 +1164,8 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
>   
>   #define SECTION_ACTIVE_SIZE ((1UL << SECTION_SIZE_BITS) / BITS_PER_LONG)
>   #define SECTION_ACTIVE_MASK (~(SECTION_ACTIVE_SIZE - 1))
> +#define PAGES_PER_SUB_SECTION (SECTION_ACTIVE_SIZE / PAGE_SIZE)
> +#define PAGE_SUB_SECTION_MASK (~(PAGES_PER_SUB_SECTION-1))
>   
>   struct mem_section_usage {
>   	/*
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 8b7415736d21..d5874f9d4043 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -327,10 +327,10 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
>   {
>   	struct mem_section *ms;
>   
> -	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
> +	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUB_SECTION) {
>   		ms = __pfn_to_section(start_pfn);
>   
> -		if (unlikely(!valid_section(ms)))
> +		if (unlikely(!pfn_valid(start_pfn)))
>   			continue;

Note that "struct mem_section *ms;" is now set but not used.
You can remove the definition and initialization of "ms".

>   		if (unlikely(pfn_to_nid(start_pfn) != nid))
> @@ -355,10 +355,10 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
>   
>   	/* pfn is the end pfn of a memory section. */
>   	pfn = end_pfn - 1;
> -	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
> +	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUB_SECTION) {
>   		ms = __pfn_to_section(pfn);
>   
> -		if (unlikely(!valid_section(ms)))
> +		if (unlikely(!pfn_valid(pfn)))
>   			continue;

Ditto about "ms".

>   		if (unlikely(pfn_to_nid(pfn) != nid))
> @@ -417,10 +417,10 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
>   	 * it check the zone has only hole or not.
>   	 */
>   	pfn = zone_start_pfn;
> -	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
> +	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUB_SECTION) {
>   		ms = __pfn_to_section(pfn);
>   
> -		if (unlikely(!valid_section(ms)))
> +		if (unlikely(!pfn_valid(pfn)))
>   			continue;

Ditto about "ms".

>   		if (page_zone(pfn_to_page(pfn)) != zone)
> @@ -485,10 +485,10 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
>   	 * has only hole or not.
>   	 */
>   	pfn = pgdat_start_pfn;
> -	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
> +	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUB_SECTION) {
>   		ms = __pfn_to_section(pfn);
>   
> -		if (unlikely(!valid_section(ms)))
> +		if (unlikely(!pfn_valid(pfn)))
>   			continue;

Ditto about "ms".

>   		if (pfn_to_nid(pfn) != nid)
>
Dan Williams April 19, 2019, 11:13 p.m. UTC | #2
On Fri, Apr 19, 2019 at 4:09 PM Ralph Campbell <rcampbell@nvidia.com> wrote:
[..]
> > diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> > index 8b7415736d21..d5874f9d4043 100644
> > --- a/mm/memory_hotplug.c
> > +++ b/mm/memory_hotplug.c
> > @@ -327,10 +327,10 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
> >   {
> >       struct mem_section *ms;
> >
> > -     for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
> > +     for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUB_SECTION) {
> >               ms = __pfn_to_section(start_pfn);
> >
> > -             if (unlikely(!valid_section(ms)))
> > +             if (unlikely(!pfn_valid(start_pfn)))
> >                       continue;
>
> Note that "struct mem_section *ms;" is now set but not used.
> You can remove the definition and initialization of "ms".

Good eye, yes, will clean up.
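
For illustration, once the unused "ms" is dropped the first helper would
look roughly like this; a sketch of the eventual cleanup, not the posted
patch:

static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
					       unsigned long start_pfn,
					       unsigned long end_pfn)
{
	/* Sketch with Ralph's cleanup applied: no more __pfn_to_section() */
	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUB_SECTION) {
		if (unlikely(!pfn_valid(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}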
Oscar Salvador April 26, 2019, 1:59 p.m. UTC | #3
On Wed, Apr 17, 2019 at 11:39:16AM -0700, Dan Williams wrote:
> @@ -417,10 +417,10 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
>  	 * it check the zone has only hole or not.
>  	 */
>  	pfn = zone_start_pfn;
> -	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
> +	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUB_SECTION) {
>  		ms = __pfn_to_section(pfn);
>  
> -		if (unlikely(!valid_section(ms)))
> +		if (unlikely(!pfn_valid(pfn)))
>  			continue;
>  
>  		if (page_zone(pfn_to_page(pfn)) != zone)
> @@ -485,10 +485,10 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
>  	 * has only hole or not.
>  	 */
>  	pfn = pgdat_start_pfn;
> -	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
> +	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUB_SECTION) {
>  		ms = __pfn_to_section(pfn);
>  
> -		if (unlikely(!valid_section(ms)))
> +		if (unlikely(!pfn_valid(pfn)))
>  			continue;
>  
>  		if (pfn_to_nid(pfn) != nid)

The last loop from shrink_{pgdat,zone}_span can be reworked to unify both
in one function, and both functions can be factored out a bit.
Actually, I do have a patch that does that, I might dig it up.

The rest looks good:

Reviewed-by: Oscar Salvador <osalvador@suse.de>

>
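
For illustration, the unification Oscar describes might look roughly like
the sketch below: a single "is this span only holes?" scan shared by
shrink_zone_span() and shrink_pgdat_span(), with the pgdat caller passing
zone == NULL. The helper name is hypothetical, this is not Oscar's actual
patch, and it elides the "skip the range currently being removed" check
that the real loops also perform:

static bool span_has_only_holes(int nid, struct zone *zone,
				unsigned long start_pfn,
				unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SUB_SECTION) {
		if (unlikely(!pfn_valid(pfn)))
			continue;

		/* zone == NULL means "any zone", for the pgdat caller */
		if (zone && page_zone(pfn_to_page(pfn)) != zone)
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* Backing memory found, so the span cannot be emptied */
		return false;
	}

	return true;
}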
Oscar Salvador April 26, 2019, 2 p.m. UTC | #4
On Fri, Apr 26, 2019 at 03:59:12PM +0200, Oscar Salvador wrote:
> On Wed, Apr 17, 2019 at 11:39:16AM -0700, Dan Williams wrote:
> > @@ -417,10 +417,10 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
> >  	 * it check the zone has only hole or not.
> >  	 */
> >  	pfn = zone_start_pfn;
> > -	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
> > +	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUB_SECTION) {
> >  		ms = __pfn_to_section(pfn);
> >  
> > -		if (unlikely(!valid_section(ms)))
> > +		if (unlikely(!pfn_valid(pfn)))
> >  			continue;
> >  
> >  		if (page_zone(pfn_to_page(pfn)) != zone)
> > @@ -485,10 +485,10 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
> >  	 * has only hole or not.
> >  	 */
> >  	pfn = pgdat_start_pfn;
> > -	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
> > +	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUB_SECTION) {
> >  		ms = __pfn_to_section(pfn);
> >  
> > -		if (unlikely(!valid_section(ms)))
> > +		if (unlikely(!pfn_valid(pfn)))
> >  			continue;
> >  
> >  		if (pfn_to_nid(pfn) != nid)
> 
> The last loop from shrink_{pgdat,zone}_span can be reworked to unify both
> in one function, and both functions can be factored out a bit.
> Actually, I do have a patch that does that, I might dig it up.
> 
> The rest looks good:
> 
> Reviewed-by: Oscar Salvador <osalvador@suse.de>

I mean of course besides Ralph's comment.
Pasha Tatashin May 2, 2019, 7:18 p.m. UTC | #5
On Wed, Apr 17, 2019 at 2:53 PM Dan Williams <dan.j.williams@intel.com> wrote:
>
> Sub-section hotplug support reduces the unit of operation of hotplug
> from section-sized-units (PAGES_PER_SECTION) to sub-section-sized units
> (PAGES_PER_SUBSECTION). Teach shrink_{zone,pgdat}_span() to consider
> PAGES_PER_SUBSECTION boundaries as the points where pfn_valid(), not
> valid_section(), can toggle.
>
> Cc: Michal Hocko <mhocko@suse.com>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Logan Gunthorpe <logang@deltatee.com>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> ---
>  include/linux/mmzone.h |    2 ++
>  mm/memory_hotplug.c    |   16 ++++++++--------
>  2 files changed, 10 insertions(+), 8 deletions(-)

Given that all the unused "*ms" are removed:

Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>

Patch

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cffde898e345..b13f0cddf75e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1164,6 +1164,8 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
 
 #define SECTION_ACTIVE_SIZE ((1UL << SECTION_SIZE_BITS) / BITS_PER_LONG)
 #define SECTION_ACTIVE_MASK (~(SECTION_ACTIVE_SIZE - 1))
+#define PAGES_PER_SUB_SECTION (SECTION_ACTIVE_SIZE / PAGE_SIZE)
+#define PAGE_SUB_SECTION_MASK (~(PAGES_PER_SUB_SECTION-1))
 
 struct mem_section_usage {
 	/*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 8b7415736d21..d5874f9d4043 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -327,10 +327,10 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
 {
 	struct mem_section *ms;
 
-	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
+	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUB_SECTION) {
 		ms = __pfn_to_section(start_pfn);
 
-		if (unlikely(!valid_section(ms)))
+		if (unlikely(!pfn_valid(start_pfn)))
 			continue;
 
 		if (unlikely(pfn_to_nid(start_pfn) != nid))
@@ -355,10 +355,10 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 
 	/* pfn is the end pfn of a memory section. */
 	pfn = end_pfn - 1;
-	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
+	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUB_SECTION) {
 		ms = __pfn_to_section(pfn);
 
-		if (unlikely(!valid_section(ms)))
+		if (unlikely(!pfn_valid(pfn)))
 			continue;
 
 		if (unlikely(pfn_to_nid(pfn) != nid))
@@ -417,10 +417,10 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 	 * it check the zone has only hole or not.
 	 */
 	pfn = zone_start_pfn;
-	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
+	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUB_SECTION) {
 		ms = __pfn_to_section(pfn);
 
-		if (unlikely(!valid_section(ms)))
+		if (unlikely(!pfn_valid(pfn)))
 			continue;
 
 		if (page_zone(pfn_to_page(pfn)) != zone)
@@ -485,10 +485,10 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
 	 * has only hole or not.
 	 */
 	pfn = pgdat_start_pfn;
-	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
+	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUB_SECTION) {
 		ms = __pfn_to_section(pfn);
 
-		if (unlikely(!valid_section(ms)))
+		if (unlikely(!pfn_valid(pfn)))
 			continue;
 
 		if (pfn_to_nid(pfn) != nid)