
[v9,02/12] mm/sparsemem: Add helpers track active portions of a section at boot

Message ID 155977187919.2443951.8925592545929008845.stgit@dwillia2-desk3.amr.corp.intel.com
State Superseded
Series mm: Sub-section memory hotplug support

Commit Message

Dan Williams June 5, 2019, 9:57 p.m. UTC
Prepare for hot{plug,remove} of sub-ranges of a section by tracking a
sub-section active bitmask, with each bit representing a PMD_SIZE span
within the architecture's memory hotplug section.

The implication of a partially populated section is that pfn_valid()
needs to go beyond a valid_section() check and read the sub-section
active ranges from the bitmask. The expectation is that the bitmask
(subsection_map) fits in the same cacheline as the valid_section() data,
so the incremental performance overhead to pfn_valid() should be
negligible.
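
For concreteness, here is a minimal userspace sketch of the geometry this
implies, assuming x86_64 constants (4KiB pages, SECTION_SIZE_BITS=27, i.e.
128MiB sections, and 2MiB PMDs); the constants below are illustrative, only
the index math mirrors the patch's subsection_map_index():

#include <stdio.h>

#define PAGE_SHIFT		12	/* 4KiB pages */
#define SECTION_SIZE_BITS	27	/* 128MiB sections on x86_64 */
#define SUBSECTION_SHIFT	21	/* 2MiB (PMD_SIZE) subsections */

#define PAGES_PER_SECTION	(1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))
#define PAGES_PER_SUBSECTION	(1UL << (SUBSECTION_SHIFT - PAGE_SHIFT))
#define SUBSECTIONS_PER_SECTION	(PAGES_PER_SECTION / PAGES_PER_SUBSECTION)

/* same math as the patch's subsection_map_index() */
static int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

int main(void)
{
	/* 64 subsections per section: one unsigned long bitmap on 64-bit */
	printf("subsections per section: %lu\n", SUBSECTIONS_PER_SECTION);
	/* pfn 0x8200 is 0x200 pages into its section => subsection 1 */
	printf("index of pfn 0x8200: %d\n", subsection_map_index(0x8200));
	return 0;
}

With these numbers the subsection_map is a single 64-bit word, which is
what makes the cacheline claim above plausible.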

Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Tested-by: Jane Chu <jane.chu@oracle.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 include/linux/mmzone.h |   29 ++++++++++++++++++++++++++++-
 mm/page_alloc.c        |    4 +++-
 mm/sparse.c            |   35 +++++++++++++++++++++++++++++++++++
 3 files changed, 66 insertions(+), 2 deletions(-)

Comments

Oscar Salvador June 6, 2019, 4:55 p.m. UTC | #1
On Wed, Jun 05, 2019 at 02:57:59PM -0700, Dan Williams wrote:
> [..]
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Reviewed-by: Oscar Salvador <osalvador@suse.de>
Wei Yang June 17, 2019, 10:21 p.m. UTC | #2
On Wed, Jun 05, 2019 at 02:57:59PM -0700, Dan Williams wrote:
>[..]
>diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>index c6d8224d792e..bd773efe5b82 100644
>--- a/mm/page_alloc.c
>+++ b/mm/page_alloc.c
>@@ -7292,10 +7292,12 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
> 
> 	/* Print out the early node map */
> 	pr_info("Early memory node ranges\n");
>-	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
>+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
> 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
> 			(u64)start_pfn << PAGE_SHIFT,
> 			((u64)end_pfn << PAGE_SHIFT) - 1);
>+		subsection_map_init(start_pfn, end_pfn - start_pfn);
>+	}

Just curious: why do we set the subsection map here?

free_area_init_nodes() mostly handles pgdat, if I am correct. Setting up
the subsection map here looks like touching a lower-level data structure.

>[..]
Dan Williams June 17, 2019, 10:32 p.m. UTC | #3
On Mon, Jun 17, 2019 at 3:22 PM Wei Yang <richard.weiyang@gmail.com> wrote:
>
> On Wed, Jun 05, 2019 at 02:57:59PM -0700, Dan Williams wrote:
> >[..]
> >diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> >index c6d8224d792e..bd773efe5b82 100644
> >--- a/mm/page_alloc.c
> >+++ b/mm/page_alloc.c
> >@@ -7292,10 +7292,12 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
> >
> >       /* Print out the early node map */
> >       pr_info("Early memory node ranges\n");
> >-      for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
> >+      for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
> >               pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
> >                       (u64)start_pfn << PAGE_SHIFT,
> >                       ((u64)end_pfn << PAGE_SHIFT) - 1);
> >+              subsection_map_init(start_pfn, end_pfn - start_pfn);
> >+      }
>
> > Just curious: why do we set the subsection map here?
> >
> > free_area_init_nodes() mostly handles pgdat, if I am correct. Setting up
> > the subsection map here looks like touching a lower-level data structure.

Correct, I'm not sure how it ended up there, but it was the source of
a bug that was fixed with this change:

https://lore.kernel.org/lkml/CAPcyv4hjvBPDYKpp2Gns3-cc2AQ0AVS1nLk-K3fwXeRUvvzQLg@mail.gmail.com/
Wei Yang June 18, 2019, 1:03 a.m. UTC | #4
On Mon, Jun 17, 2019 at 03:32:45PM -0700, Dan Williams wrote:
>On Mon, Jun 17, 2019 at 3:22 PM Wei Yang <richard.weiyang@gmail.com> wrote:
>>
>> On Wed, Jun 05, 2019 at 02:57:59PM -0700, Dan Williams wrote:
>> >[..]
>>
>> Just curious: why do we set the subsection map here?
>>
>> free_area_init_nodes() mostly handles pgdat, if I am correct. Setting up
>> the subsection map here looks like touching a lower-level data structure.
>
>Correct, I'm not sure how it ended up there, but it was the source of
>a bug that was fixed with this change:
>
>https://lore.kernel.org/lkml/CAPcyv4hjvBPDYKpp2Gns3-cc2AQ0AVS1nLk-K3fwXeRUvvzQLg@mail.gmail.com/

So this call is moved to sparse_init_nid().

The bug is strange, but the code is more reasonable to me now.

Thanks :-)

Dan Williams June 19, 2019, 3:15 a.m. UTC | #5
On Mon, Jun 17, 2019 at 3:32 PM Dan Williams <dan.j.williams@intel.com> wrote:
>
> On Mon, Jun 17, 2019 at 3:22 PM Wei Yang <richard.weiyang@gmail.com> wrote:
> >
> > On Wed, Jun 05, 2019 at 02:57:59PM -0700, Dan Williams wrote:
> > >[..]
> >
> > Just curious: why do we set the subsection map here?
> >
> > free_area_init_nodes() mostly handles pgdat, if I am correct. Setting up
> > the subsection map here looks like touching a lower-level data structure.
>
> Correct, I'm not sure how it ended up there, but it was the source of
> a bug that was fixed with this change:
>
> https://lore.kernel.org/lkml/CAPcyv4hjvBPDYKpp2Gns3-cc2AQ0AVS1nLk-K3fwXeRUvvzQLg@mail.gmail.com/

On second thought I'm going to keep subsection_map_init() in
free_area_init_nodes(), but instead teach pfn_valid() to return true
for all "early" sections. There are code paths that use pfn_valid() as
a coarse check before validating against pgdat for real validity of
online memory. It is sufficient and safe for those to assume that all
early sections are fully pfn_valid, while ZONE_DEVICE hotplug can see
the more precise subsection_map.
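
For reference, the eventual shape of that change could look something like
the following sketch, assuming a SECTION_IS_EARLY section_mem_map flag and
an early_section() helper, neither of which exists in this v9 posting:

/* sketch: flag boot-time sections, short-circuit pfn_valid() for them */
static inline int early_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	ms = __nr_to_section(pfn_to_section_nr(pfn));
	if (!valid_section(ms))
		return 0;
	/*
	 * Early sections are always fully populated, so only hotplugged
	 * (e.g. ZONE_DEVICE) sections need the precise subsection check.
	 */
	return early_section(ms) || pfn_section_valid(ms, pfn);
}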

Patch

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ac163f2f274f..6dd52d544857 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1199,6 +1199,8 @@ struct mem_section_usage {
 	unsigned long pageblock_flags[0];
 };
 
+void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
+
 struct page;
 struct page_ext;
 struct mem_section {
@@ -1336,12 +1338,36 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
 
 extern int __highest_present_section_nr;
 
+static inline int subsection_map_index(unsigned long pfn)
+{
+	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+	int idx = subsection_map_index(pfn);
+
+	return test_bit(idx, ms->usage->subsection_map);
+}
+#else
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+	return 1;
+}
+#endif
+
 #ifndef CONFIG_HAVE_ARCH_PFN_VALID
 static inline int pfn_valid(unsigned long pfn)
 {
+	struct mem_section *ms;
+
 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
 		return 0;
-	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
+	ms = __nr_to_section(pfn_to_section_nr(pfn));
+	if (!valid_section(ms))
+		return 0;
+	return pfn_section_valid(ms, pfn);
 }
 #endif
 
@@ -1373,6 +1399,7 @@ void sparse_init(void);
 #define sparse_init()	do {} while (0)
 #define sparse_index_init(_sec, _nid)  do {} while (0)
 #define pfn_present pfn_valid
+#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c6d8224d792e..bd773efe5b82 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7292,10 +7292,12 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 
 	/* Print out the early node map */
 	pr_info("Early memory node ranges\n");
-	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
 			(u64)start_pfn << PAGE_SHIFT,
 			((u64)end_pfn << PAGE_SHIFT) - 1);
+		subsection_map_init(start_pfn, end_pfn - start_pfn);
+	}
 
 	/* Initialise every node */
 	mminit_verify_pageflags_layout();
diff --git a/mm/sparse.c b/mm/sparse.c
index 71da15cc7432..0baa2e55cfdd 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -210,6 +210,41 @@ static inline unsigned long first_present_section_nr(void)
 	return next_present_section_nr(-1);
 }
 
+void subsection_mask_set(unsigned long *map, unsigned long pfn,
+		unsigned long nr_pages)
+{
+	int idx = subsection_map_index(pfn);
+	int end = subsection_map_index(pfn + nr_pages - 1);
+
+	bitmap_set(map, idx, end - idx + 1);
+}
+
+void subsection_map_init(unsigned long pfn, unsigned long nr_pages)
+{
+	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
+	int i, start_sec = pfn_to_section_nr(pfn);
+
+	if (!nr_pages)
+		return;
+
+	for (i = start_sec; i <= end_sec; i++) {
+		struct mem_section *ms;
+		unsigned long pfns;
+
+		pfns = min(nr_pages, PAGES_PER_SECTION
+				- (pfn & ~PAGE_SECTION_MASK));
+		ms = __nr_to_section(i);
+		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
+
+		pr_debug("%s: sec: %d pfns: %ld set(%d, %d)\n", __func__, i,
+				pfns, subsection_map_index(pfn),
+				subsection_map_index(pfn + pfns - 1));
+
+		pfn += pfns;
+		nr_pages -= pfns;
+	}
+}
+
 /* Record a memory area against a node. */
 void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
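
To see the new mm/sparse.c helpers' semantics in isolation, here is a small
userspace sketch using the same illustrative x86_64 constants as above, with
the kernel's bitmap_set() stubbed by a plain loop. Note the inclusive end
index: a range that only partially covers its last subsection still marks
that subsection active.

#include <stdio.h>

#define PAGES_PER_SUBSECTION	512UL	/* 2MiB of 4KiB pages */
#define PAGES_PER_SECTION	32768UL	/* 128MiB section */
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

static int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

/* stand-in for the kernel's bitmap_set() */
static void bitmap_set(unsigned long *map, int start, int nr)
{
	while (nr-- > 0)
		*map |= 1UL << (start + nr);
}

static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	/* inclusive range: the partially covered last subsection is set too */
	bitmap_set(map, idx, end - idx + 1);
}

int main(void)
{
	unsigned long map = 0;

	/* 3MiB (768 pages) starting 2MiB into a section: subsections 1-2 */
	subsection_mask_set(&map, 512, 768);
	printf("map = %#lx\n", map);	/* prints 0x6 */
	return 0;
}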