diff mbox series

[2/2] mm, page_alloc: cleanup usemap_size() when SPARSEMEM is not set

Message ID 20181205091905.27727-2-richard.weiyang@gmail.com (mailing list archive)
State New, archived
Headers show
Series [1/2] mm, pageblock: make sure pageblock won't exceed mem_section | expand

Commit Message

Wei Yang Dec. 5, 2018, 9:19 a.m. UTC
Two cleanups in this patch:

  * since pageblock_nr_pages == (1 << pageblock_order), the roundup()
    and right shift pageblock_order could be replaced with
    DIV_ROUND_UP()
  * use BITS_TO_LONGS() to get the number of longs needed for the bitmap,
    then multiply by sizeof(unsigned long) to get the size in bytes

This patch also fixes one typo in a comment.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
---
 mm/page_alloc.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

Comments

Wei Yang Dec. 7, 2018, 9:58 a.m. UTC | #1
On Wed, Dec 05, 2018 at 05:19:05PM +0800, Wei Yang wrote:
>Two cleanups in this patch:
>
>  * since pageblock_nr_pages == (1 << pageblock_order), the roundup()
>    and right shift pageblock_order could be replaced with
>    DIV_ROUND_UP()
>  * use BITS_TO_LONGS() to get the number of longs needed for the bitmap,
>    then multiply by sizeof(unsigned long) to get the size in bytes
>
>This patch also fixes one typo in a comment.

Patch 1 may be controversial; how about this one? :-)

Looking forward to some comments.

>
>Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>---
> mm/page_alloc.c | 9 +++------
> 1 file changed, 3 insertions(+), 6 deletions(-)
>
>diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>index 7c745c305332..baf473f80800 100644
>--- a/mm/page_alloc.c
>+++ b/mm/page_alloc.c
>@@ -6204,7 +6204,7 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
> /*
>  * Calculate the size of the zone->blockflags rounded to an unsigned long
>  * Start by making sure zonesize is a multiple of pageblock_order by rounding
>- * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
>+ * up. Then use 1 NR_PAGEBLOCK_BITS width of bits per pageblock, finally
>  * round what is now in bits to nearest long in bits, then return it in
>  * bytes.
>  */
>@@ -6213,12 +6213,9 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
> 	unsigned long usemapsize;
> 
> 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
>-	usemapsize = roundup(zonesize, pageblock_nr_pages);
>-	usemapsize = usemapsize >> pageblock_order;
>+	usemapsize = DIV_ROUND_UP(zonesize, pageblock_nr_pages);
> 	usemapsize *= NR_PAGEBLOCK_BITS;
>-	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
>-
>-	return usemapsize / 8;
>+	return BITS_TO_LONGS(usemapsize) * sizeof(unsigned long);
> }
> 
> static void __ref setup_usemap(struct pglist_data *pgdat,
>-- 
>2.15.1
diff mbox series

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7c745c305332..baf473f80800 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6204,7 +6204,7 @@  static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
 /*
  * Calculate the size of the zone->blockflags rounded to an unsigned long
  * Start by making sure zonesize is a multiple of pageblock_order by rounding
- * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
+ * up. Then use 1 NR_PAGEBLOCK_BITS width of bits per pageblock, finally
  * round what is now in bits to nearest long in bits, then return it in
  * bytes.
  */
@@ -6213,12 +6213,9 @@  static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
 	unsigned long usemapsize;
 
 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
-	usemapsize = roundup(zonesize, pageblock_nr_pages);
-	usemapsize = usemapsize >> pageblock_order;
+	usemapsize = DIV_ROUND_UP(zonesize, pageblock_nr_pages);
 	usemapsize *= NR_PAGEBLOCK_BITS;
-	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
-
-	return usemapsize / 8;
+	return BITS_TO_LONGS(usemapsize) * sizeof(unsigned long);
 }
 
 static void __ref setup_usemap(struct pglist_data *pgdat,