
[v4,5/6] powerpc/book3s64/memhotplug: Enable memmap on memory for radix

Message ID 20230718024409.95742-6-aneesh.kumar@linux.ibm.com (mailing list archive)
State New
Series Add support for memmap on memory feature on ppc64

Commit Message

Aneesh Kumar K.V July 18, 2023, 2:44 a.m. UTC
Radix vmemmap mapping can map things correctly at the PMD level or PTE
level based on different device boundary checks. Hence we skip the
restriction that the vmemmap size must be a multiple of PMD_SIZE. This
also makes the feature more widely useful, because using a PMD_SIZE
vmemmap area would require a memory block size of 2GiB.

We can also use MHP_RESERVE_PAGES_MEMMAP_ON_MEMORY so that the feature
can work with a memory block size of 256MiB, using the altmap.reserve
feature to align things correctly at pageblock granularity. We can end
up losing some pages of memory with this. For example: with a 256MiB
memory block size, we require 4 pages to map the vmemmap pages; in
order to align things correctly we end up adding a reserve of 28 pages,
i.e. for every 4096 pages, 28 pages get reserved.
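
As a rough standalone illustration of that arithmetic (a sketch assuming
a 64-byte struct page, 64K base pages and a 2MiB pageblock; not part of
the patch):

/* Userspace sketch: reserve needed for memmap_on_memory on a 256MiB block */
#include <stdio.h>

int main(void)
{
        unsigned long page_size      = 64UL * 1024;             /* 64K base pages */
        unsigned long block_size     = 256UL << 20;             /* 256MiB memory block */
        unsigned long struct_page_sz = 64;                      /* assumed sizeof(struct page) */
        unsigned long pageblock      = (2UL << 20) / page_size; /* 32 pages per pageblock */

        unsigned long nr_pages      = block_size / page_size;   /* 4096 pages */
        unsigned long vmemmap_pages = (nr_pages * struct_page_sz + page_size - 1) / page_size; /* 4 */
        unsigned long aligned       = (vmemmap_pages + pageblock - 1) / pageblock * pageblock; /* 32 */

        printf("vmemmap pages: %lu, reserved: %lu\n",
               vmemmap_pages, aligned - vmemmap_pages);         /* prints 4 and 28 */
        return 0;
}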

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 arch/powerpc/Kconfig                          |  1 +
 arch/powerpc/include/asm/pgtable.h            | 24 +++++++++++++++++++
 .../platforms/pseries/hotplug-memory.c        |  3 ++-
 mm/memory_hotplug.c                           |  2 ++
 4 files changed, 29 insertions(+), 1 deletion(-)

Comments

David Hildenbrand July 24, 2023, 2:34 p.m. UTC | #1
On 18.07.23 04:44, Aneesh Kumar K.V wrote:
> Radix vmemmap mapping can map things correctly at the PMD level or PTE
> level based on different device boundary checks. Hence we skip the
> restriction that the vmemmap size must be a multiple of PMD_SIZE. This
> also makes the feature more widely useful, because using a PMD_SIZE
> vmemmap area would require a memory block size of 2GiB.
> 
> We can also use MHP_RESERVE_PAGES_MEMMAP_ON_MEMORY so that the feature
> can work with a memory block size of 256MiB, using the altmap.reserve
> feature to align things correctly at pageblock granularity. We can end
> up losing some pages of memory with this. For example: with a 256MiB
> memory block size, we require 4 pages to map the vmemmap pages; in
> order to align things correctly we end up adding a reserve of 28 pages,
> i.e. for every 4096 pages, 28 pages get reserved.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
>   arch/powerpc/Kconfig                          |  1 +
>   arch/powerpc/include/asm/pgtable.h            | 24 +++++++++++++++++++
>   .../platforms/pseries/hotplug-memory.c        |  3 ++-
>   mm/memory_hotplug.c                           |  2 ++
>   4 files changed, 29 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 116d6add0bb0..f890907e5bbf 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -157,6 +157,7 @@ config PPC
>   	select ARCH_HAS_UBSAN_SANITIZE_ALL
>   	select ARCH_HAVE_NMI_SAFE_CMPXCHG
>   	select ARCH_KEEP_MEMBLOCK
> +	select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE	if PPC_RADIX_MMU
>   	select ARCH_MIGHT_HAVE_PC_PARPORT
>   	select ARCH_MIGHT_HAVE_PC_SERIO
>   	select ARCH_OPTIONAL_KERNEL_RWX		if ARCH_HAS_STRICT_KERNEL_RWX
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> index 68817ea7f994..3d35371395a9 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -169,6 +169,30 @@ static inline bool is_ioremap_addr(const void *x)
>   int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
>   bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
>   			   unsigned long page_size);
> +/*
> + * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into details
> + * some of the restrictions. We don't check for PMD_SIZE because our
> + * vmemmap allocation code can fallback correctly. The pageblock
> + * alignment requirement is met using altmap->reserve blocks.
> + */
> +#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
> +static inline bool arch_supports_memmap_on_memory(unsigned long size)
> +{
> +	unsigned long nr_pages = size >> PAGE_SHIFT;
> +	unsigned long vmemmap_size = nr_pages * sizeof(struct page);
> +
> +	if (!radix_enabled())
> +		return false;
> +
> +	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
> +		return IS_ALIGNED(vmemmap_size, PMD_SIZE);

Can you add a comment why we care about that in the 4K case only?

> +	/*
> +	 * The pageblock alignment requirement is met by using
> +	 * reserve blocks in altmap.
> +	 */

Just drop that comment, that's handled by common code now.

> +	return true;
> +}
> +
>   #endif /* CONFIG_PPC64 */
>   
>   #endif /* __ASSEMBLY__ */
> diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
> index 9c62c2c3b3d0..1447509357a7 100644
> --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
> +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
> @@ -617,6 +617,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
>   
>   static int dlpar_add_lmb(struct drmem_lmb *lmb)
>   {
> +	mhp_t mhp_flags = MHP_NONE | MHP_MEMMAP_ON_MEMORY;
>   	unsigned long block_sz;
>   	int nid, rc;
>   
> @@ -637,7 +638,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
>   		nid = first_online_node;
>   
>   	/* Add the memory */
> -	rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE);
> +	rc = __add_memory(nid, lmb->base_addr, block_sz, mhp_flags);
>   	if (rc) {
>   		invalidate_lmb_associativity_index(lmb);
>   		return rc;
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index c409f5ff6a59..6da063c80733 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -2174,6 +2174,8 @@ static int __ref try_remove_memory(u64 start, u64 size)
>   			 * right thing if we used vmem_altmap when hot-adding
>   			 * the range.
>   			 */
> +			mhp_altmap.base_pfn = PHYS_PFN(start);
> +			mhp_altmap.free = PHYS_PFN(size) - nr_vmemmap_pages;


That change does not belong into this patch.

>   			mhp_altmap.alloc = nr_vmemmap_pages;
>   			altmap = &mhp_altmap;
>   		}
Aneesh Kumar K.V July 24, 2023, 2:46 p.m. UTC | #2
On 7/24/23 8:04 PM, David Hildenbrand wrote:
> On 18.07.23 04:44, Aneesh Kumar K.V wrote:
>> Radix vmemmap mapping can map things correctly at the PMD level or PTE
>> level based on different device boundary checks. Hence we skip the
>> restriction that the vmemmap size must be a multiple of PMD_SIZE. This
>> also makes the feature more widely useful, because using a PMD_SIZE
>> vmemmap area would require a memory block size of 2GiB.
>>
>> We can also use MHP_RESERVE_PAGES_MEMMAP_ON_MEMORY so that the feature
>> can work with a memory block size of 256MiB, using the altmap.reserve
>> feature to align things correctly at pageblock granularity. We can end
>> up losing some pages of memory with this. For example: with a 256MiB
>> memory block size, we require 4 pages to map the vmemmap pages; in
>> order to align things correctly we end up adding a reserve of 28 pages,
>> i.e. for every 4096 pages, 28 pages get reserved.
>>
>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>> ---
>>   arch/powerpc/Kconfig                          |  1 +
>>   arch/powerpc/include/asm/pgtable.h            | 24 +++++++++++++++++++
>>   .../platforms/pseries/hotplug-memory.c        |  3 ++-
>>   mm/memory_hotplug.c                           |  2 ++
>>   4 files changed, 29 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>> index 116d6add0bb0..f890907e5bbf 100644
>> --- a/arch/powerpc/Kconfig
>> +++ b/arch/powerpc/Kconfig
>> @@ -157,6 +157,7 @@ config PPC
>>       select ARCH_HAS_UBSAN_SANITIZE_ALL
>>       select ARCH_HAVE_NMI_SAFE_CMPXCHG
>>       select ARCH_KEEP_MEMBLOCK
>> +    select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE    if PPC_RADIX_MMU
>>       select ARCH_MIGHT_HAVE_PC_PARPORT
>>       select ARCH_MIGHT_HAVE_PC_SERIO
>>       select ARCH_OPTIONAL_KERNEL_RWX        if ARCH_HAS_STRICT_KERNEL_RWX
>> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
>> index 68817ea7f994..3d35371395a9 100644
>> --- a/arch/powerpc/include/asm/pgtable.h
>> +++ b/arch/powerpc/include/asm/pgtable.h
>> @@ -169,6 +169,30 @@ static inline bool is_ioremap_addr(const void *x)
>>   int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
>>   bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
>>                  unsigned long page_size);
>> +/*
>> + * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into details
>> + * some of the restrictions. We don't check for PMD_SIZE because our
>> + * vmemmap allocation code can fallback correctly. The pageblock
>> + * alignment requirement is met using altmap->reserve blocks.
>> + */
>> +#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
>> +static inline bool arch_supports_memmap_on_memory(unsigned long size)
>> +{
>> +    unsigned long nr_pages = size >> PAGE_SHIFT;
>> +    unsigned long vmemmap_size = nr_pages * sizeof(struct page);
>> +
>> +    if (!radix_enabled())
>> +        return false;
>> +
>> +    if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
>> +        return IS_ALIGNED(vmemmap_size, PMD_SIZE);
> 
> Can you add a comment why we care about that in the 4K case only?


Sure. We keep the PMD_SIZE alignment for the same reason we have it on x86. With a 4K page size and 2M hugepage size,
things get properly aligned and the feature stays useful even with this alignment restriction. With a 64K page size and
2M hugepage size, keeping this alignment restriction would make the feature more or less useless for a large number of
the memory block sizes we support. I will add a comment explaining that here.
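
Concretely, as a rough illustration (assuming sizeof(struct page) == 64 and PMD_SIZE == 2MiB on radix): with 4K pages
the vmemmap for a memory block is block_size / 4096 * 64 = block_size / 64 bytes, which is PMD_SIZE-aligned for any
block size that is a multiple of 128MiB, so the common 256MiB block size qualifies. With 64K pages it is
block_size / 1024 bytes, which only reaches a PMD_SIZE multiple once the block size is 2GiB.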

> 
>> +    /*
>> +     * The pageblock alignment requirement is met by using
>> +     * reserve blocks in altmap.
>> +     */
> 
> Just drop that comment, that's handled by common code now.
> 

Ok. 

>> +    return true;
>> +}
>> +
>>   #endif /* CONFIG_PPC64 */
>>     #endif /* __ASSEMBLY__ */
>> diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
>> index 9c62c2c3b3d0..1447509357a7 100644
>> --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
>> +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
>> @@ -617,6 +617,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
>>     static int dlpar_add_lmb(struct drmem_lmb *lmb)
>>   {
>> +    mhp_t mhp_flags = MHP_NONE | MHP_MEMMAP_ON_MEMORY;
>>       unsigned long block_sz;
>>       int nid, rc;
>>   @@ -637,7 +638,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
>>           nid = first_online_node;
>>         /* Add the memory */
>> -    rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE);
>> +    rc = __add_memory(nid, lmb->base_addr, block_sz, mhp_flags);
>>       if (rc) {
>>           invalidate_lmb_associativity_index(lmb);
>>           return rc;
>> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
>> index c409f5ff6a59..6da063c80733 100644
>> --- a/mm/memory_hotplug.c
>> +++ b/mm/memory_hotplug.c
>> @@ -2174,6 +2174,8 @@ static int __ref try_remove_memory(u64 start, u64 size)
>>                * right thing if we used vmem_altmap when hot-adding
>>                * the range.
>>                */
>> +            mhp_altmap.base_pfn = PHYS_PFN(start);
>> +            mhp_altmap.free = PHYS_PFN(size) - nr_vmemmap_pages;
> 
> 
> That change does not belong into this patch.
> 


I kept that change with the ppc64 enablement because ppc64 is the only arch that checks
those values in the free path.

>>               mhp_altmap.alloc = nr_vmemmap_pages;
>>               altmap = &mhp_altmap;
>>           }
> 

-aneesh
David Hildenbrand July 24, 2023, 3:52 p.m. UTC | #3
>>> +            mhp_altmap.base_pfn = PHYS_PFN(start);
>>> +            mhp_altmap.free = PHYS_PFN(size) - nr_vmemmap_pages;
>>
>>
>> That change does not belong into this patch.
>>
> 
> 
> I kept that change with the ppc64 enablement because ppc64 is the only arch that checks
> those values in the free path.

Let's make that accounting consistent in patch #4. I think it really 
belongs in there, especially once we clarify the "free vs. reserved" 
handling.

Patch

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 116d6add0bb0..f890907e5bbf 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -157,6 +157,7 @@  config PPC
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_KEEP_MEMBLOCK
+	select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE	if PPC_RADIX_MMU
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ARCH_OPTIONAL_KERNEL_RWX		if ARCH_HAS_STRICT_KERNEL_RWX
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 68817ea7f994..3d35371395a9 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -169,6 +169,30 @@  static inline bool is_ioremap_addr(const void *x)
 int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
 bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
 			   unsigned long page_size);
+/*
+ * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into details
+ * some of the restrictions. We don't check for PMD_SIZE because our
+ * vmemmap allocation code can fallback correctly. The pageblock
+ * alignment requirement is met using altmap->reserve blocks.
+ */
+#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
+static inline bool arch_supports_memmap_on_memory(unsigned long size)
+{
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	unsigned long vmemmap_size = nr_pages * sizeof(struct page);
+
+	if (!radix_enabled())
+		return false;
+
+	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
+		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
+	/*
+	 * The pageblock alignment requirement is met by using
+	 * reserve blocks in altmap.
+	 */
+	return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 9c62c2c3b3d0..1447509357a7 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -617,6 +617,7 @@  static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
 
 static int dlpar_add_lmb(struct drmem_lmb *lmb)
 {
+	mhp_t mhp_flags = MHP_NONE | MHP_MEMMAP_ON_MEMORY;
 	unsigned long block_sz;
 	int nid, rc;
 
@@ -637,7 +638,7 @@  static int dlpar_add_lmb(struct drmem_lmb *lmb)
 		nid = first_online_node;
 
 	/* Add the memory */
-	rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE);
+	rc = __add_memory(nid, lmb->base_addr, block_sz, mhp_flags);
 	if (rc) {
 		invalidate_lmb_associativity_index(lmb);
 		return rc;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c409f5ff6a59..6da063c80733 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -2174,6 +2174,8 @@  static int __ref try_remove_memory(u64 start, u64 size)
 			 * right thing if we used vmem_altmap when hot-adding
 			 * the range.
 			 */
+			mhp_altmap.base_pfn = PHYS_PFN(start);
+			mhp_altmap.free = PHYS_PFN(size) - nr_vmemmap_pages;
 			mhp_altmap.alloc = nr_vmemmap_pages;
 			altmap = &mhp_altmap;
 		}