
mm/vmalloc: arm64 support cont-pte huge vmalloc mapping

Message ID: 20240906130919.3765401-1-fuhaiwang@bytedance.com (mailing list archive)
State: New
Series: mm/vmalloc: arm64 support cont-pte huge vmalloc mapping

Commit Message

Haiwang Fu Sept. 6, 2024, 1:09 p.m. UTC
From: fuhaiwang <fuhaiwang@bytedance.com>

Arm64 supports the contiguous bit, which is used to increase the
mapping size at the PMD and PTE levels.

Huge vmalloc now supports PMD- and PTE-level mappings, and supports
multiple mapping sizes at the PTE level.

arm64: implement the following interfaces on arm64 to support
cont-pte huge vmalloc mappings:
arch_vmap_pte_supported_shift(*)
arch_vmap_pte_range_map_size(*)

Signed-off-by: fuhaiwang <fuhaiwang@bytedance.com>
---
 arch/arm64/include/asm/pgtable.h | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
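
For reference, the size-selection rule can be exercised in isolation. The
toy user-space sketch below mirrors the logic of the two hooks from the
patch; the 4K page size, the 16-entry contiguous block (CONT_PTE_SIZE ==
64K) and the local helper names are assumptions made for the example, not
values taken from the kernel headers. Compiled as plain C it prints 65536,
4096, then "16 12".

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define CONT_PTE_SHIFT	(PAGE_SHIFT + 4)	/* 16 contiguous PTEs (assumed) */
#define CONT_PTE_SIZE	(1UL << CONT_PTE_SHIFT)	/* 64K */
#define PFN_PHYS(pfn)	((uint64_t)(pfn) << PAGE_SHIFT)
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Mirrors arch_vmap_pte_range_map_size(): step by 64K only when the
 * remaining range, the virtual address and the physical address are all
 * 64K-aligned and the caller permits mappings that large. */
static unsigned long pte_range_map_size(unsigned long addr, unsigned long end,
					uint64_t pfn, unsigned int max_page_shift)
{
	if (end - addr < CONT_PTE_SIZE)
		return PAGE_SIZE;
	if ((1UL << max_page_shift) < CONT_PTE_SIZE)
		return PAGE_SIZE;
	if (!IS_ALIGNED(addr, CONT_PTE_SIZE))
		return PAGE_SIZE;
	if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
		return PAGE_SIZE;
	return CONT_PTE_SIZE;
}

/* Mirrors arch_vmap_pte_supported_shift(): allocations of at least 64K may
 * use cont-pte blocks, anything smaller falls back to base pages. */
static int pte_supported_shift(unsigned long size)
{
	return size >= CONT_PTE_SIZE ? CONT_PTE_SHIFT : PAGE_SHIFT;
}

int main(void)
{
	/* VA, PA and remaining length all 64K-aligned: expect 65536. */
	printf("%lu\n", pte_range_map_size(0x10000, 0x10000 + 2 * CONT_PTE_SIZE,
					   0x100, CONT_PTE_SHIFT));
	/* Misaligned PFN (PA = 0x101000): expect 4096. */
	printf("%lu\n", pte_range_map_size(0x10000, 0x10000 + 2 * CONT_PTE_SIZE,
					   0x101, CONT_PTE_SHIFT));
	/* 128K allocation gets the cont-pte shift, an 8K one does not. */
	printf("%d %d\n", pte_supported_shift(2 * CONT_PTE_SIZE),
	       pte_supported_shift(2 * PAGE_SIZE));
	return 0;
}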

Comments

Ryan Roberts Sept. 6, 2024, 1:45 p.m. UTC | #1
On 06/09/2024 14:09, Haiwang Fu wrote:
> From: fuhaiwang <fuhaiwang@bytedance.com>
> 
> Arm64 supports the contiguous bit, which is used to increase the
> mapping size at the PMD and PTE levels.
> 
> Huge vmalloc now supports PMD- and PTE-level mappings, and supports
> multiple mapping sizes at the PTE level.
> 
> arm64: implement the following interfaces on arm64 to support
> cont-pte huge vmalloc mappings:
> arch_vmap_pte_supported_shift(*)
> arch_vmap_pte_range_map_size(*)

I believe that riscv tried to do the same thing and had to revert it because
it's possible to unmap a portion of what was allocated, and there was no easy
way to fix up the mapping safely. See [1].

I believe arm64 might suffer from a similar problem; I'm guessing the contpte
code would attempt to repaint the ptes with or without the PTE_CONT bit, as
needed. But that isn't safe on kernel mappings because there is no way to
recover if another thread tries to access the mapping concurrently. The code
is only safe for user mappings, where the racing fault will get serialized
behind the PTL.

[1] https://lore.kernel.org/linux-riscv/20240227205016.121901-2-alexghiti@rivosinc.com/

Thanks,
Ryan
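
To make the point above concrete: repainting a contiguous block means tearing
the 16 entries down and rewriting them, so there is a window in which the
range is simply not mapped. The stand-alone sketch below only illustrates that
ordering; the PTE_CONT bit position, the table layout and the helper are
invented for the example and are not the arm64 contpte implementation.

#include <stdint.h>
#include <stdio.h>

#define CONT_PTES	16
#define PTE_CONT	(1ULL << 52)	/* made-up bit position for the example */

static uint64_t table[CONT_PTES];	/* stand-in for the 16 PTEs of one block */

/* Repaint a contiguous block with or without the "contiguous" bit: clear the
 * entries, invalidate the (elided) TLB range, then write the new entries. */
static void repaint_cont_block(uint64_t pa_base, int make_cont)
{
	int i;

	for (i = 0; i < CONT_PTES; i++)		/* 1. tear down the old entries */
		table[i] = 0;
	/* ... TLB invalidation for the 64K range would go here ... */

	/* 2. Window: the range is unmapped.  A user access faulting here is
	 *    retried once the fault handler serializes on the page-table lock;
	 *    a kernel vmalloc access faulting here has no such retry path,
	 *    which is the problem described above. */

	for (i = 0; i < CONT_PTES; i++)		/* 3. write the new entries */
		table[i] = (pa_base + i * 0x1000ULL) | (make_cont ? PTE_CONT : 0);
}

int main(void)
{
	repaint_cont_block(0x40000000ULL, 1);
	printf("pte[0] = %#llx\n", (unsigned long long)table[0]);
	return 0;
}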


> 
> Signed-off-by: fuhaiwang <fuhaiwang@bytedance.com>
> ---
>  arch/arm64/include/asm/pgtable.h | 28 ++++++++++++++++++++++++++++
>  1 file changed, 28 insertions(+)
> 
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index c329ea061dc9..3f32e3150680 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -1814,6 +1814,34 @@ static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
>  
>  #endif /* CONFIG_ARM64_CONTPTE */
>  
> +static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
> +					u64 pfn, unsigned int max_page_shift)
> +{
> +	if (end - addr < CONT_PTE_SIZE)
> +		return PAGE_SIZE;
> +
> +	if ((1UL << max_page_shift) < CONT_PTE_SIZE)
> +		return PAGE_SIZE;
> +
> +	if (!IS_ALIGNED(addr, CONT_PTE_SIZE))
> +		return PAGE_SIZE;
> +
> +	if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
> +		return PAGE_SIZE;
> +
> +	return CONT_PTE_SIZE;
> +}
> +#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
> +
> +static inline int arch_vmap_pte_supported_shift(unsigned long size)
> +{
> +	if (size >= CONT_PTE_SIZE)
> +		return CONT_PTE_SHIFT;
> +	else
> +		return PAGE_SHIFT;
> +}
> +#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
> +
>  #endif /* !__ASSEMBLY__ */
>  
>  #endif /* __ASM_PGTABLE_H */

Patch

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c329ea061dc9..3f32e3150680 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1814,6 +1814,34 @@  static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
 
 #endif /* CONFIG_ARM64_CONTPTE */
 
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+					u64 pfn, unsigned int max_page_shift)
+{
+	if (end - addr < CONT_PTE_SIZE)
+		return PAGE_SIZE;
+
+	if ((1UL << max_page_shift) < CONT_PTE_SIZE)
+		return PAGE_SIZE;
+
+	if (!IS_ALIGNED(addr, CONT_PTE_SIZE))
+		return PAGE_SIZE;
+
+	if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
+		return PAGE_SIZE;
+
+	return CONT_PTE_SIZE;
+}
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+	if (size >= CONT_PTE_SIZE)
+		return CONT_PTE_SHIFT;
+	else
+		return PAGE_SHIFT;
+}
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */