
[5/6] mm/memremap: Rename and consolidate SECTION_SIZE

Message ID 1554265806-11501-6-git-send-email-anshuman.khandual@arm.com (mailing list archive)
State New, archived
Series arm64/mm: Enable memory hot remove and ZONE_DEVICE

Commit Message

Anshuman Khandual April 3, 2019, 4:30 a.m. UTC
From: Robin Murphy <robin.murphy@arm.com>

Enabling ZONE_DEVICE (through ARCH_HAS_ZONE_DEVICE) for arm64 reveals that
memremap's internal helpers for sparsemem sections conflict with arm64's
definitions for hugepages which inherit the name of "sections" from earlier
versions of the ARM architecture.

Disambiguate memremap by propagating sparsemem's PA_ prefix, to clarify
that these values are in terms of addresses rather than PFNs (and
because it's a heck of a lot easier than changing all the arch code).
SECTION_MASK is unused, so it can just go. While here, consolidate the
single instance of PA_SECTION_SIZE from mm/hmm.c as well.

[anshuman: Consolidated mm/hmm.c instance and updated the commit message]

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 include/linux/mmzone.h |  1 +
 kernel/memremap.c      | 10 ++++------
 mm/hmm.c               |  2 --
 3 files changed, 5 insertions(+), 8 deletions(-)

Comments

Michal Hocko April 3, 2019, 9:26 a.m. UTC | #1
On Wed 03-04-19 10:00:05, Anshuman Khandual wrote:
> From: Robin Murphy <robin.murphy@arm.com>
> 
> Enabling ZONE_DEVICE (through ARCH_HAS_ZONE_DEVICE) for arm64 reveals that
> memremap's internal helpers for sparsemem sections conflict with arm64's
> definitions for hugepages which inherit the name of "sections" from earlier
> versions of the ARM architecture.
> 
> Disambiguate memremap by propagating sparsemem's PA_ prefix, to clarify
> that these values are in terms of addresses rather than PFNs (and
> because it's a heck of a lot easier than changing all the arch code).
> SECTION_MASK is unused, so it can just go. While here, consolidate the
> single instance of PA_SECTION_SIZE from mm/hmm.c as well.
> 
> [anshuman: Consolidated mm/hmm.c instance and updated the commit message]

Agreed. memremap shouldn't have redefined SECTION_SIZE in the first place.
This just adds confusion.

> Signed-off-by: Robin Murphy <robin.murphy@arm.com>
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>

Acked-by: Michal Hocko <mhocko@suse.com>

David Hildenbrand April 3, 2019, 9:30 a.m. UTC | #2
On 03.04.19 06:30, Anshuman Khandual wrote:
> From: Robin Murphy <robin.murphy@arm.com>
> 
> Enabling ZONE_DEVICE (through ARCH_HAS_ZONE_DEVICE) for arm64 reveals that
> memremap's internal helpers for sparsemem sections conflict with arm64's
> definitions for hugepages which inherit the name of "sections" from earlier
> versions of the ARM architecture.
> 
> Disambiguate memremap by propagating sparsemem's PA_ prefix, to clarify
> that these values are in terms of addresses rather than PFNs (and
> because it's a heck of a lot easier than changing all the arch code).
> SECTION_MASK is unused, so it can just go. While here, consolidate the
> single instance of PA_SECTION_SIZE from mm/hmm.c as well.
> 
> [anshuman: Consolidated mm/hmm.c instance and updated the commit message]
> 
> Signed-off-by: Robin Murphy <robin.murphy@arm.com>
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>

Reviewed-by: David Hildenbrand <david@redhat.com>

Patch

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fba7741..ed7dd27 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1081,6 +1081,7 @@  static inline unsigned long early_pfn_to_nid(unsigned long pfn)
  * PFN_SECTION_SHIFT		pfn to/from section number
  */
 #define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
+#define PA_SECTION_SIZE		(1UL << PA_SECTION_SHIFT)
 #define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
 
 #define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)
diff --git a/kernel/memremap.c b/kernel/memremap.c
index a856cb5..dda1367 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -14,8 +14,6 @@ 
 #include <linux/hmm.h>
 
 static DEFINE_XARRAY(pgmap_array);
-#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
-#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
 
 #if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
 vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
@@ -98,8 +96,8 @@  static void devm_memremap_pages_release(void *data)
 		put_page(pfn_to_page(pfn));
 
 	/* pages are dead and unused, undo the arch mapping */
-	align_start = res->start & ~(SECTION_SIZE - 1);
-	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
+	align_start = res->start & ~(PA_SECTION_SIZE - 1);
+	align_size = ALIGN(res->start + resource_size(res), PA_SECTION_SIZE)
 		- align_start;
 
 	nid = page_to_nid(pfn_to_page(align_start >> PAGE_SHIFT));
@@ -154,8 +152,8 @@  void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	if (!pgmap->ref || !pgmap->kill)
 		return ERR_PTR(-EINVAL);
 
-	align_start = res->start & ~(SECTION_SIZE - 1);
-	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
+	align_start = res->start & ~(PA_SECTION_SIZE - 1);
+	align_size = ALIGN(res->start + resource_size(res), PA_SECTION_SIZE)
 		- align_start;
 	align_end = align_start + align_size - 1;
 
diff --git a/mm/hmm.c b/mm/hmm.c
index fe1cd87..ef9e4e6 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -33,8 +33,6 @@ 
 #include <linux/mmu_notifier.h>
 #include <linux/memory_hotplug.h>
 
-#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
-
 #if IS_ENABLED(CONFIG_HMM_MIRROR)
 static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
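
To make the effect of the rename concrete, here is a small userspace sketch of the alignment arithmetic that devm_memremap_pages() performs, now spelled with the unambiguous PA_SECTION_SIZE. The resource start, size and SECTION_SIZE_BITS value are made-up examples, and ALIGN() is a simplified stand-in for the kernel macro.

/*
 * Illustration only (userspace, 64-bit): rounding a resource range outward
 * to sparsemem section boundaries, as devm_memremap_pages() does above.
 */
#include <stdio.h>

#define SECTION_SIZE_BITS	30			/* assumed: 1 GiB sections */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PA_SECTION_SIZE		(1UL << PA_SECTION_SHIFT)

/* simplified stand-in for the kernel's ALIGN() (power-of-two alignment) */
#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	unsigned long start = 0x150000000UL;	/* hypothetical res->start (5.25 GiB) */
	unsigned long size  = 0x020000000UL;	/* hypothetical resource size (512 MiB) */

	/* round the range outward to PA_SECTION_SIZE boundaries */
	unsigned long align_start = start & ~(PA_SECTION_SIZE - 1);
	unsigned long align_size  = ALIGN(start + size, PA_SECTION_SIZE) - align_start;

	/* prints align_start = 0x140000000, align_size = 0x40000000 (one 1 GiB section) */
	printf("align_start = %#lx, align_size = %#lx\n", align_start, align_size);
	return 0;
}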