
[v2,2/7] mm/sparse.c: introduce new function fill_subsection_map()

Message ID 20200220043316.19668-3-bhe@redhat.com (mailing list archive)
State New, archived
Series mm/hotplug: Only use subsection map in VMEMMAP case

Commit Message

Baoquan He Feb. 20, 2020, 4:33 a.m. UTC
Wrap the code that fills the subsection map in section_activate() into a
new function, fill_subsection_map(). This makes section_activate() cleaner
and easier to follow.

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/sparse.c | 45 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 34 insertions(+), 11 deletions(-)
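
For orientation, the resulting shape of section_activate() after this patch is
roughly the following (a simplified excerpt of the diff shown further down;
the rest of the error path is omitted here):

	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	int rc = 0;

	/* Allocation of the usage struct stays in section_activate(). */
	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	/* The subsection-map bookkeeping moves into the new helper. */
	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		/* Roll back the freshly allocated usage on failure. */
		if (usage)
			ms->usage = NULL;
		/* ... remainder of the error path as in the diff below ... */
	}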

Comments

Wei Yang Feb. 20, 2020, 6:14 a.m. UTC | #1
On Thu, Feb 20, 2020 at 12:33:11PM +0800, Baoquan He wrote:
>Wrap the codes filling subsection map from section_activate() into
>fill_subsection_map(), this makes section_activate() cleaner and
>easier to follow.
>
>Signed-off-by: Baoquan He <bhe@redhat.com>

Reviewed-by: Wei Yang <richardw.yang@linux.intel.com>

>---
> mm/sparse.c | 45 ++++++++++++++++++++++++++++++++++-----------
> 1 file changed, 34 insertions(+), 11 deletions(-)
>
>diff --git a/mm/sparse.c b/mm/sparse.c
>index b8e52c8fed7f..977b47acd38d 100644
>--- a/mm/sparse.c
>+++ b/mm/sparse.c
>@@ -790,24 +790,28 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
> 		ms->section_mem_map = (unsigned long)NULL;
> }
> 
>-static struct page * __meminit section_activate(int nid, unsigned long pfn,
>-		unsigned long nr_pages, struct vmem_altmap *altmap)
>+/**
>+ * fill_subsection_map - fill subsection map of a memory region
>+ * @pfn - start pfn of the memory range
>+ * @nr_pages - number of pfns to add in the region
>+ *
>+ * This fills the related subsection map inside one section, and only
>+ * intended for hotplug.
>+ *
>+ * Return:
>+ * * 0		- On success.
>+ * * -EINVAL	- Invalid memory region.
>+ * * -EEXIST	- Subsection map has been set.
>+ */
>+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
> {
>-	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
> 	struct mem_section *ms = __pfn_to_section(pfn);
>-	struct mem_section_usage *usage = NULL;
>+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
> 	unsigned long *subsection_map;
>-	struct page *memmap;
> 	int rc = 0;
> 
> 	subsection_mask_set(map, pfn, nr_pages);
> 
>-	if (!ms->usage) {
>-		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
>-		if (!usage)
>-			return ERR_PTR(-ENOMEM);
>-		ms->usage = usage;
>-	}
> 	subsection_map = &ms->usage->subsection_map[0];
> 
> 	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
>@@ -818,6 +822,25 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
> 		bitmap_or(subsection_map, map, subsection_map,
> 				SUBSECTIONS_PER_SECTION);
> 
>+	return rc;
>+}
>+
>+static struct page * __meminit section_activate(int nid, unsigned long pfn,
>+		unsigned long nr_pages, struct vmem_altmap *altmap)
>+{
>+	struct mem_section *ms = __pfn_to_section(pfn);
>+	struct mem_section_usage *usage = NULL;
>+	struct page *memmap;
>+	int rc = 0;
>+
>+	if (!ms->usage) {
>+		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
>+		if (!usage)
>+			return ERR_PTR(-ENOMEM);
>+		ms->usage = usage;
>+	}
>+
>+	rc = fill_subsection_map(pfn, nr_pages);
> 	if (rc) {
> 		if (usage)
> 			ms->usage = NULL;
>-- 
>2.17.2

David Hildenbrand Feb. 28, 2020, 2:27 p.m. UTC | #2
On 20.02.20 05:33, Baoquan He wrote:
> Wrap the codes filling subsection map from section_activate() into

"Factor out the code that fills the subsection" ...

> fill_subsection_map(), this makes section_activate() cleaner and
> easier to follow.
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>
> ---
>  mm/sparse.c | 45 ++++++++++++++++++++++++++++++++++-----------
>  1 file changed, 34 insertions(+), 11 deletions(-)
> 
> diff --git a/mm/sparse.c b/mm/sparse.c
> index b8e52c8fed7f..977b47acd38d 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -790,24 +790,28 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
>  		ms->section_mem_map = (unsigned long)NULL;
>  }
>  
> -static struct page * __meminit section_activate(int nid, unsigned long pfn,
> -		unsigned long nr_pages, struct vmem_altmap *altmap)
> +/**
> + * fill_subsection_map - fill subsection map of a memory region
> + * @pfn - start pfn of the memory range
> + * @nr_pages - number of pfns to add in the region
> + *
> + * This fills the related subsection map inside one section, and only
> + * intended for hotplug.
> + *
> + * Return:
> + * * 0		- On success.
> + * * -EINVAL	- Invalid memory region.
> + * * -EEXIST	- Subsection map has been set.
> + */

Without this comment (or a massively reduced one :) )

Reviewed-by: David Hildenbrand <david@redhat.com>

Baoquan He March 1, 2020, 4:59 a.m. UTC | #3
On 02/28/20 at 03:27pm, David Hildenbrand wrote:
> On 20.02.20 05:33, Baoquan He wrote:
> > Wrap the codes filling subsection map from section_activate() into
> 
> "Factor out the code that fills the subsection" ...

Fine to me, I will replace it with this. Thanks.

> 
> > fill_subsection_map(), this makes section_activate() cleaner and
> > easier to follow.
> > 
> > Signed-off-by: Baoquan He <bhe@redhat.com>
> > ---
> >  mm/sparse.c | 45 ++++++++++++++++++++++++++++++++++-----------
> >  1 file changed, 34 insertions(+), 11 deletions(-)
> > 
> > diff --git a/mm/sparse.c b/mm/sparse.c
> > index b8e52c8fed7f..977b47acd38d 100644
> > --- a/mm/sparse.c
> > +++ b/mm/sparse.c
> > @@ -790,24 +790,28 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
> >  		ms->section_mem_map = (unsigned long)NULL;
> >  }
> >  
> > -static struct page * __meminit section_activate(int nid, unsigned long pfn,
> > -		unsigned long nr_pages, struct vmem_altmap *altmap)
> > +/**
> > + * fill_subsection_map - fill subsection map of a memory region
> > + * @pfn - start pfn of the memory range
> > + * @nr_pages - number of pfns to add in the region
> > + *
> > + * This fills the related subsection map inside one section, and only
> > + * intended for hotplug.
> > + *
> > + * Return:
> > + * * 0		- On success.
> > + * * -EINVAL	- Invalid memory region.
> > + * * -EEXIST	- Subsection map has been set.
> > + */
> 
> Without this comment (or a massively reduced one :) )

Yeah, as we discussed, I will remove it.

> 
> Reviewed-by: David Hildenbrand <david@redhat.com>
> 
> -- 
> Thanks,
> 
> David / dhildenb

Patch

diff --git a/mm/sparse.c b/mm/sparse.c
index b8e52c8fed7f..977b47acd38d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -790,24 +790,28 @@  static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 		ms->section_mem_map = (unsigned long)NULL;
 }
 
-static struct page * __meminit section_activate(int nid, unsigned long pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap)
+/**
+ * fill_subsection_map - fill subsection map of a memory region
+ * @pfn - start pfn of the memory range
+ * @nr_pages - number of pfns to add in the region
+ *
+ * This fills the related subsection map inside one section, and only
+ * intended for hotplug.
+ *
+ * Return:
+ * * 0		- On success.
+ * * -EINVAL	- Invalid memory region.
+ * * -EEXIST	- Subsection map has been set.
+ */
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
-	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
 	struct mem_section *ms = __pfn_to_section(pfn);
-	struct mem_section_usage *usage = NULL;
+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
 	unsigned long *subsection_map;
-	struct page *memmap;
 	int rc = 0;
 
 	subsection_mask_set(map, pfn, nr_pages);
 
-	if (!ms->usage) {
-		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
-		if (!usage)
-			return ERR_PTR(-ENOMEM);
-		ms->usage = usage;
-	}
 	subsection_map = &ms->usage->subsection_map[0];
 
 	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
@@ -818,6 +822,25 @@  static struct page * __meminit section_activate(int nid, unsigned long pfn,
 		bitmap_or(subsection_map, map, subsection_map,
 				SUBSECTIONS_PER_SECTION);
 
+	return rc;
+}
+
+static struct page * __meminit section_activate(int nid, unsigned long pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
+{
+	struct mem_section *ms = __pfn_to_section(pfn);
+	struct mem_section_usage *usage = NULL;
+	struct page *memmap;
+	int rc = 0;
+
+	if (!ms->usage) {
+		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
+		if (!usage)
+			return ERR_PTR(-ENOMEM);
+		ms->usage = usage;
+	}
+
+	rc = fill_subsection_map(pfn, nr_pages);
 	if (rc) {
 		if (usage)
 			ms->usage = NULL;
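
As a rough, self-contained illustration of what the subsection map records, the
following standalone sketch mimics the bookkeeping with hypothetical constants
and helpers; these are stand-ins for illustration only, not the kernel's
SUBSECTIONS_PER_SECTION machinery or the real fill_subsection_map():

	#include <stdio.h>
	#include <stdint.h>

	/* Hypothetical geometry: a section split into 64 equal subsections. */
	#define MODEL_SECTION_PFNS      32768UL
	#define MODEL_SUBSECTIONS       64UL
	#define MODEL_SUBSECTION_PFNS   (MODEL_SECTION_PFNS / MODEL_SUBSECTIONS)

	/*
	 * Set one bit for every subsection touched by [pfn, pfn + nr_pages).
	 * The range is assumed to lie within a single section.
	 */
	static void model_fill_subsection_map(uint64_t *map, unsigned long pfn,
					      unsigned long nr_pages)
	{
		unsigned long first = (pfn % MODEL_SECTION_PFNS) /
				      MODEL_SUBSECTION_PFNS;
		unsigned long last  = ((pfn % MODEL_SECTION_PFNS) + nr_pages - 1) /
				      MODEL_SUBSECTION_PFNS;
		unsigned long i;

		for (i = first; i <= last; i++)
			*map |= 1ULL << i;
	}

	int main(void)
	{
		uint64_t map = 0;

		/* Hot-add a range covering the first two subsections. */
		model_fill_subsection_map(&map, 0, 2 * MODEL_SUBSECTION_PFNS);
		printf("model subsection map: %#llx\n",
		       (unsigned long long)map);

		return 0;
	}

Running this prints 0x3: bits 0 and 1 set, one per populated subsection, which
is the kind of state the real helper maintains in ms->usage->subsection_map.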