diff mbox series

[v9] mm,kfence: decouple kfence from page granularity mapping judgement

Message ID 1678956620-26103-1-git-send-email-quic_zhenhuah@quicinc.com (mailing list archive)
State New
Headers show
Series [v9] mm,kfence: decouple kfence from page granularity mapping judgement | expand

Commit Message

Zhenhua Huang March 16, 2023, 8:50 a.m. UTC
Kfence only needs its pool to be mapped at page granularity, if it is
initialized early. The previous judgement was a bit over-protective. From [1],
Mark suggested to "just map the KFENCE region a page granularity". So I
decouple it from that judgement and do page-granularity mapping for the
kfence pool only. Note that late initialization of the kfence pool still
requires page-granularity mapping.

Page-granularity mapping in theory costs more (2M per 1GB) memory on the
arm64 platform. Here is what I tested on QEMU (emulated 1GB RAM) with
gki_defconfig, with rodata protection turned off:
Before:
[root@liebao ]# cat /proc/meminfo
MemTotal:         999484 kB
After:
[root@liebao ]# cat /proc/meminfo
MemTotal:        1001480 kB

To implement this, the kfence pool allocation is also relocated to before
the linear mapping is set up: arm64_kfence_alloc_pool allocates the phys
addr, and __kfence_pool is set after the linear mapping has been set up.

LINK: [1] https://lore.kernel.org/linux-arm-kernel/Y+IsdrvDNILA59UN@FVFF77S0Q05N/
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
---
 arch/arm64/include/asm/kfence.h | 16 +++++++++++
 arch/arm64/mm/mmu.c             | 59 +++++++++++++++++++++++++++++++++++++++++
 arch/arm64/mm/pageattr.c        |  9 +++++--
 include/linux/kfence.h          |  1 +
 mm/kfence/core.c                |  4 +++
 5 files changed, 87 insertions(+), 2 deletions(-)

Comments

Pavan Kondeti March 16, 2023, 9:58 a.m. UTC | #1
On Thu, Mar 16, 2023 at 04:50:20PM +0800, Zhenhua Huang wrote:
> Kfence only needs its pool to be mapped as page granularity, if it is
> inited early. Previous judgement was a bit over protected. From [1], Mark
> suggested to "just map the KFENCE region a page granularity". So I
> decouple it from judgement and do page granularity mapping for kfence
> pool only. Need to be noticed that late init of kfence pool still requires
> page granularity mapping.
> 
> Page granularity mapping in theory cost more(2M per 1GB) memory on arm64
> platform. Like what I've tested on QEMU(emulated 1GB RAM) with
> gki_defconfig, also turning off rodata protection:
> Before:
> [root@liebao ]# cat /proc/meminfo
> MemTotal:         999484 kB
> After:
> [root@liebao ]# cat /proc/meminfo
> MemTotal:        1001480 kB
> 
> To implement this, also relocate the kfence pool allocation before the
> linear mapping setting up, arm64_kfence_alloc_pool is to allocate phys
> addr, __kfence_pool is to be set after linear mapping set up.
> 
> LINK: [1] https://lore.kernel.org/linux-arm-kernel/Y+IsdrvDNILA59UN@FVFF77S0Q05N/
> Suggested-by: Mark Rutland <mark.rutland@arm.com>
> Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
> ---
>  arch/arm64/include/asm/kfence.h | 16 +++++++++++
>  arch/arm64/mm/mmu.c             | 59 +++++++++++++++++++++++++++++++++++++++++
>  arch/arm64/mm/pageattr.c        |  9 +++++--
>  include/linux/kfence.h          |  1 +
>  mm/kfence/core.c                |  4 +++
>  5 files changed, 87 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
> index aa855c6..8143c91 100644
> --- a/arch/arm64/include/asm/kfence.h
> +++ b/arch/arm64/include/asm/kfence.h
> @@ -10,6 +10,22 @@
>  
>  #include <asm/set_memory.h>
>  
> +extern phys_addr_t early_kfence_pool;
> +
> +#ifdef CONFIG_KFENCE
> +
> +extern char *__kfence_pool;
> +static inline void kfence_set_pool(phys_addr_t addr)
> +{
> +	__kfence_pool = phys_to_virt(addr);
> +}
> +
> +#else
> +
> +static inline void kfence_set_pool(phys_addr_t addr) { }
> +
> +#endif
> +
>  static inline bool arch_kfence_init_pool(void) { return true; }
>  
>  static inline bool kfence_protect_page(unsigned long addr, bool protect)
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 6f9d889..61944c70 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -24,6 +24,7 @@
>  #include <linux/mm.h>
>  #include <linux/vmalloc.h>
>  #include <linux/set_memory.h>
> +#include <linux/kfence.h>
>  
>  #include <asm/barrier.h>
>  #include <asm/cputype.h>
> @@ -38,6 +39,7 @@
>  #include <asm/ptdump.h>
>  #include <asm/tlbflush.h>
>  #include <asm/pgalloc.h>
> +#include <asm/kfence.h>
>  
>  #define NO_BLOCK_MAPPINGS	BIT(0)
>  #define NO_CONT_MAPPINGS	BIT(1)
> @@ -525,6 +527,48 @@ static int __init enable_crash_mem_map(char *arg)
>  }
>  early_param("crashkernel", enable_crash_mem_map);
>  
> +#ifdef CONFIG_KFENCE
> +
> +static bool kfence_early_init __initdata = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
> +/*
> + * early_param can be parsed before linear mapping
> + * set up
> + */
> +static int __init parse_kfence_early_init(char *p)
> +{
> +	int val;
> +
> +	if (get_option(&p, &val))
> +		kfence_early_init = !!val;
> +	return 0;
> +}
> +early_param("kfence.sample_interval", parse_kfence_early_init);
> +
> +static phys_addr_t arm64_kfence_alloc_pool(void)
> +{
> +	phys_addr_t kfence_pool;
> +
> +	if (!kfence_early_init)
> +		return 0;
> +
> +	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
> +	if (!kfence_pool)
> +		pr_err("failed to allocate kfence pool\n");
> +
> +	return kfence_pool;
> +}
> +
> +#else
> +
> +static phys_addr_t arm64_kfence_alloc_pool(void)
> +{
> +	return 0;
> +}
> +
> +#endif
> +
> +phys_addr_t early_kfence_pool;
> +
>  static void __init map_mem(pgd_t *pgdp)
>  {
>  	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
> @@ -543,6 +587,10 @@ static void __init map_mem(pgd_t *pgdp)
>  	 */
>  	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
>  
> +	early_kfence_pool = arm64_kfence_alloc_pool();
> +	if (early_kfence_pool)
> +		memblock_mark_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
> +
>  	if (can_set_direct_map())
>  		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>  
> @@ -608,6 +656,17 @@ static void __init map_mem(pgd_t *pgdp)
>  		}
>  	}
>  #endif
> +
> +	/* Kfence pool needs page-level mapping */
> +	if (early_kfence_pool) {
> +		__map_memblock(pgdp, early_kfence_pool,
> +			early_kfence_pool + KFENCE_POOL_SIZE,
> +			pgprot_tagged(PAGE_KERNEL),
> +			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
> +		memblock_clear_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
> +		/* kfence_pool really mapped now */
> +		kfence_set_pool(early_kfence_pool);
> +	}

Why not wrap this under CONFIG_KFENCE ? early_kfence_pool can also go in
there?

Thanks,
Pavan
Marco Elver March 16, 2023, 10:06 a.m. UTC | #2
On Thu, Mar 16, 2023 at 04:50PM +0800, Zhenhua Huang wrote:
> Kfence only needs its pool to be mapped as page granularity, if it is
> inited early. Previous judgement was a bit over protected. From [1], Mark
> suggested to "just map the KFENCE region a page granularity". So I
> decouple it from judgement and do page granularity mapping for kfence
> pool only. Need to be noticed that late init of kfence pool still requires
> page granularity mapping.
> 
> Page granularity mapping in theory cost more(2M per 1GB) memory on arm64
> platform. Like what I've tested on QEMU(emulated 1GB RAM) with
> gki_defconfig, also turning off rodata protection:
> Before:
> [root@liebao ]# cat /proc/meminfo
> MemTotal:         999484 kB
> After:
> [root@liebao ]# cat /proc/meminfo
> MemTotal:        1001480 kB
> 
> To implement this, also relocate the kfence pool allocation before the
> linear mapping setting up, arm64_kfence_alloc_pool is to allocate phys
> addr, __kfence_pool is to be set after linear mapping set up.
> 
> LINK: [1] https://lore.kernel.org/linux-arm-kernel/Y+IsdrvDNILA59UN@FVFF77S0Q05N/
> Suggested-by: Mark Rutland <mark.rutland@arm.com>
> Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
> ---
>  arch/arm64/include/asm/kfence.h | 16 +++++++++++
>  arch/arm64/mm/mmu.c             | 59 +++++++++++++++++++++++++++++++++++++++++
>  arch/arm64/mm/pageattr.c        |  9 +++++--
>  include/linux/kfence.h          |  1 +
>  mm/kfence/core.c                |  4 +++
>  5 files changed, 87 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
> index aa855c6..8143c91 100644
> --- a/arch/arm64/include/asm/kfence.h
> +++ b/arch/arm64/include/asm/kfence.h
> @@ -10,6 +10,22 @@
>  
>  #include <asm/set_memory.h>
>  
> +extern phys_addr_t early_kfence_pool;

This should not be accessible if !CONFIG_KFENCE.

> +#ifdef CONFIG_KFENCE
> +
> +extern char *__kfence_pool;
> +static inline void kfence_set_pool(phys_addr_t addr)
> +{
> +	__kfence_pool = phys_to_virt(addr);
> +}

kfence_set_pool() is redundant if it's for arm64 only, because we know
where it's needed, and there you could just access __kfence_pool
directly. So let's just remove this function. (Initially I thought you
want to provide it generally, also for other architectures.)

> +#else
> +
> +static inline void kfence_set_pool(phys_addr_t addr) { }
> +
> +#endif
> +
>  static inline bool arch_kfence_init_pool(void) { return true; }
[...]
> +#endif
> +
> +phys_addr_t early_kfence_pool;

This variable now exists in non-KFENCE builds, which is wrong.

>  static void __init map_mem(pgd_t *pgdp)
>  {
>  	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
> @@ -543,6 +587,10 @@ static void __init map_mem(pgd_t *pgdp)
>  	 */
>  	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
>  
> +	early_kfence_pool = arm64_kfence_alloc_pool();
> +	if (early_kfence_pool)
> +		memblock_mark_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
> +
>  	if (can_set_direct_map())
>  		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>  
> @@ -608,6 +656,17 @@ static void __init map_mem(pgd_t *pgdp)
>  		}
>  	}
>  #endif
> +
> +	/* Kfence pool needs page-level mapping */
> +	if (early_kfence_pool) {
> +		__map_memblock(pgdp, early_kfence_pool,
> +			early_kfence_pool + KFENCE_POOL_SIZE,
> +			pgprot_tagged(PAGE_KERNEL),
> +			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
> +		memblock_clear_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
> +		/* kfence_pool really mapped now */
> +		kfence_set_pool(early_kfence_pool);
> +	}

This whole piece of code could also be wrapped in another function,
which becomes a no-op if !CONFIG_KFENCE. Then you also don't need to
provide the KFENCE_POOL_SIZE define for 0 if !CONFIG_KFENCE.

[...]
> +	 *
> +	 * Kfence pool requires page granularity mapping also if we init it
> +	 * late.
>  	 */
>  	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
> -		IS_ENABLED(CONFIG_KFENCE);
> +	    (IS_ENABLED(CONFIG_KFENCE) && !early_kfence_pool);

Accessing a non-existent variable if !CONFIG_KFENCE works because the
compiler optimizes out the access, but is generally bad style.


I think the only issue that I have is that the separation between KFENCE
and non-KFENCE builds is not great.

At the end of this email is a diff against your patch with my suggested
changes (while at it, I fixed up a bunch of other issues). It is untested,
so if you decide to adopt these changes, please test.

Thanks,
-- Marco

------ >8 ------


diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index 8143c91854e1..a81937fae9f6 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -10,22 +10,6 @@
 
 #include <asm/set_memory.h>
 
-extern phys_addr_t early_kfence_pool;
-
-#ifdef CONFIG_KFENCE
-
-extern char *__kfence_pool;
-static inline void kfence_set_pool(phys_addr_t addr)
-{
-	__kfence_pool = phys_to_virt(addr);
-}
-
-#else
-
-static inline void kfence_set_pool(phys_addr_t addr) { }
-
-#endif
-
 static inline bool arch_kfence_init_pool(void) { return true; }
 
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
@@ -35,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 	return true;
 }
 
+#ifdef CONFIG_KFENCE
+extern bool kfence_early_init;
+static inline bool arm64_kfence_can_set_direct_map(void)
+{
+	return !kfence_early_init;
+}
+#else /* CONFIG_KFENCE */
+static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
+#endif /* CONFIG_KFENCE */
+
 #endif /* __ASM_KFENCE_H */
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 61944c7091f0..683958616ac1 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -528,17 +528,14 @@ static int __init enable_crash_mem_map(char *arg)
 early_param("crashkernel", enable_crash_mem_map);
 
 #ifdef CONFIG_KFENCE
+bool kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
 
-static bool kfence_early_init __initdata = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
-/*
- * early_param can be parsed before linear mapping
- * set up
- */
-static int __init parse_kfence_early_init(char *p)
+/* early_param() will be parsed before map_mem() below. */
+static int __init parse_kfence_early_init(char *arg)
 {
 	int val;
 
-	if (get_option(&p, &val))
+	if (get_option(&arg, &val))
 		kfence_early_init = !!val;
 	return 0;
 }
@@ -552,22 +549,34 @@ static phys_addr_t arm64_kfence_alloc_pool(void)
 		return 0;
 
 	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
-	if (!kfence_pool)
+	if (!kfence_pool) {
 		pr_err("failed to allocate kfence pool\n");
+		kfence_early_init = false;
+		return 0;
+	}
+
+	/* Temporarily mark as NOMAP. */
+	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
 
 	return kfence_pool;
 }
 
-#else
-
-static phys_addr_t arm64_kfence_alloc_pool(void)
+static void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
 {
-	return 0;
-}
-
-#endif
+	if (!kfence_pool)
+		return;
 
-phys_addr_t early_kfence_pool;
+	/* KFENCE pool needs page-level mapping. */
+	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
+		       pgprot_tagged(PAGE_KERNEL),
+		       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+	__kfence_pool = phys_to_virt(kfence_pool);
+}
+#else /* CONFIG_KFENCE */
+static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
+static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
+#endif /* CONFIG_KFENCE */
 
 static void __init map_mem(pgd_t *pgdp)
 {
@@ -575,6 +584,7 @@ static void __init map_mem(pgd_t *pgdp)
 	phys_addr_t kernel_start = __pa_symbol(_stext);
 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
 	phys_addr_t start, end;
+	phys_addr_t early_kfence_pool;
 	int flags = NO_EXEC_MAPPINGS;
 	u64 i;
 
@@ -588,8 +598,6 @@ static void __init map_mem(pgd_t *pgdp)
 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
 	early_kfence_pool = arm64_kfence_alloc_pool();
-	if (early_kfence_pool)
-		memblock_mark_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
 
 	if (can_set_direct_map())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
@@ -656,17 +664,7 @@ static void __init map_mem(pgd_t *pgdp)
 		}
 	}
 #endif
-
-	/* Kfence pool needs page-level mapping */
-	if (early_kfence_pool) {
-		__map_memblock(pgdp, early_kfence_pool,
-			early_kfence_pool + KFENCE_POOL_SIZE,
-			pgprot_tagged(PAGE_KERNEL),
-			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
-		memblock_clear_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
-		/* kfence_pool really mapped now */
-		kfence_set_pool(early_kfence_pool);
-	}
+	arm64_kfence_map_pool(early_kfence_pool, pgdp);
 }
 
 void mark_rodata_ro(void)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 7ce5295cc6fb..aa8fd12cc96f 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -7,7 +7,6 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
-#include <linux/kfence.h>
 
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
@@ -28,11 +27,10 @@ bool can_set_direct_map(void)
 	 * mapped at page granularity, so that it is possible to
 	 * protect/unprotect single pages.
 	 *
-	 * Kfence pool requires page granularity mapping also if we init it
-	 * late.
+	 * KFENCE pool requires page-granular mapping if initialized late.
 	 */
 	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
-	    (IS_ENABLED(CONFIG_KFENCE) && !early_kfence_pool);
+	       arm64_kfence_can_set_direct_map();
 }
 
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 91cbcc98e293..726857a4b680 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -222,7 +222,6 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
 
 #else /* CONFIG_KFENCE */
 
-#define KFENCE_POOL_SIZE 0
 static inline bool is_kfence_address(const void *addr) { return false; }
 static inline void kfence_alloc_pool(void) { }
 static inline void kfence_init(void) { }
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index fab087d39633..e7f22af5e710 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -818,7 +818,7 @@ void __init kfence_alloc_pool(void)
 	if (!kfence_sample_interval)
 		return;
 
-	/* if the pool has already been initialized by arch, skip the below */
+	/* If the pool has already been initialized by arch, skip the below. */
 	if (__kfence_pool)
 		return;
Zhenhua Huang March 16, 2023, 10:36 a.m. UTC | #3
On 2023/3/16 18:06, Marco Elver wrote:
> On Thu, Mar 16, 2023 at 04:50PM +0800, Zhenhua Huang wrote:
>> Kfence only needs its pool to be mapped as page granularity, if it is
>> inited early. Previous judgement was a bit over protected. From [1], Mark
>> suggested to "just map the KFENCE region a page granularity". So I
>> decouple it from judgement and do page granularity mapping for kfence
>> pool only. Need to be noticed that late init of kfence pool still requires
>> page granularity mapping.
>>
>> Page granularity mapping in theory cost more(2M per 1GB) memory on arm64
>> platform. Like what I've tested on QEMU(emulated 1GB RAM) with
>> gki_defconfig, also turning off rodata protection:
>> Before:
>> [root@liebao ]# cat /proc/meminfo
>> MemTotal:         999484 kB
>> After:
>> [root@liebao ]# cat /proc/meminfo
>> MemTotal:        1001480 kB
>>
>> To implement this, also relocate the kfence pool allocation before the
>> linear mapping setting up, arm64_kfence_alloc_pool is to allocate phys
>> addr, __kfence_pool is to be set after linear mapping set up.
>>
>> LINK: [1] https://lore.kernel.org/linux-arm-kernel/Y+IsdrvDNILA59UN@FVFF77S0Q05N/
>> Suggested-by: Mark Rutland <mark.rutland@arm.com>
>> Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
>> ---
>>   arch/arm64/include/asm/kfence.h | 16 +++++++++++
>>   arch/arm64/mm/mmu.c             | 59 +++++++++++++++++++++++++++++++++++++++++
>>   arch/arm64/mm/pageattr.c        |  9 +++++--
>>   include/linux/kfence.h          |  1 +
>>   mm/kfence/core.c                |  4 +++
>>   5 files changed, 87 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
>> index aa855c6..8143c91 100644
>> --- a/arch/arm64/include/asm/kfence.h
>> +++ b/arch/arm64/include/asm/kfence.h
>> @@ -10,6 +10,22 @@
>>   
>>   #include <asm/set_memory.h>
>>   
>> +extern phys_addr_t early_kfence_pool;
> 
> This should not be accessible if !CONFIG_KFENCE.
> 
>> +#ifdef CONFIG_KFENCE
>> +
>> +extern char *__kfence_pool;
>> +static inline void kfence_set_pool(phys_addr_t addr)
>> +{
>> +	__kfence_pool = phys_to_virt(addr);
>> +}
> 
> kfence_set_pool() is redundant if it's for arm64 only, because we know
> where it's needed, and there you could just access __kfence_pool
> directly. So let's just remove this function. (Initially I thought you
> want to provide it generally, also for other architectures.)
> 
>> +#else
>> +
>> +static inline void kfence_set_pool(phys_addr_t addr) { }
>> +
>> +#endif
>> +
>>   static inline bool arch_kfence_init_pool(void) { return true; }
> [...]
>> +#endif
>> +
>> +phys_addr_t early_kfence_pool;
> 
> This variable now exists in non-KFENCE builds, which is wrong.
> 
>>   static void __init map_mem(pgd_t *pgdp)
>>   {
>>   	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
>> @@ -543,6 +587,10 @@ static void __init map_mem(pgd_t *pgdp)
>>   	 */
>>   	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
>>   
>> +	early_kfence_pool = arm64_kfence_alloc_pool();
>> +	if (early_kfence_pool)
>> +		memblock_mark_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
>> +
>>   	if (can_set_direct_map())
>>   		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>>   
>> @@ -608,6 +656,17 @@ static void __init map_mem(pgd_t *pgdp)
>>   		}
>>   	}
>>   #endif
>> +
>> +	/* Kfence pool needs page-level mapping */
>> +	if (early_kfence_pool) {
>> +		__map_memblock(pgdp, early_kfence_pool,
>> +			early_kfence_pool + KFENCE_POOL_SIZE,
>> +			pgprot_tagged(PAGE_KERNEL),
>> +			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
>> +		memblock_clear_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
>> +		/* kfence_pool really mapped now */
>> +		kfence_set_pool(early_kfence_pool);
>> +	}
> 
> This whole piece of code could also be wrapped in another function,
> which becomes a no-op if !CONFIG_KFENCE. Then you also don't need to
> provide the KFENCE_POOL_SIZE define for 0 if !CONFIG_KFENCE.
> 
> [...]
>> +	 *
>> +	 * Kfence pool requires page granularity mapping also if we init it
>> +	 * late.
>>   	 */
>>   	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
>> -		IS_ENABLED(CONFIG_KFENCE);
>> +	    (IS_ENABLED(CONFIG_KFENCE) && !early_kfence_pool);
> 
> Accessing a non-existent variable if !CONFIG_KFENCE works because the
> compiler optimizes out the access, but is generally bad style.

Hi Marco,

Actually, my previous intention was not to separate the KFENCE and
non-KFENCE builds, but instead to ensure early_kfence_pool is always NULL
in a non-KFENCE build. That works well on my side both with and without
CONFIG_KFENCE, but yes, it is not clean to keep this variable in a
non-KFENCE build.

Sure, I will follow your suggestion below; I have tested it on my side. Thanks.

Thanks,
Zhenhua

> 
> 
> I think the only issue that I have is that the separation between KFENCE
> and non-KFENCE builds is not great.
> 
> At the end of the email are is a diff against your patch which would be
> my suggested changes (while at it, I fixed up a bunch of other issues).
> Untested, so if you decide to adopt these changes, please test.
> 
> Thanks,
> -- Marco
> 
> ------ >8 ------
> 
> 
> diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
> index 8143c91854e1..a81937fae9f6 100644
> --- a/arch/arm64/include/asm/kfence.h
> +++ b/arch/arm64/include/asm/kfence.h
> @@ -10,22 +10,6 @@
>   
>   #include <asm/set_memory.h>
>   
> -extern phys_addr_t early_kfence_pool;
> -
> -#ifdef CONFIG_KFENCE
> -
> -extern char *__kfence_pool;
> -static inline void kfence_set_pool(phys_addr_t addr)
> -{
> -	__kfence_pool = phys_to_virt(addr);
> -}
> -
> -#else
> -
> -static inline void kfence_set_pool(phys_addr_t addr) { }
> -
> -#endif
> -
>   static inline bool arch_kfence_init_pool(void) { return true; }
>   
>   static inline bool kfence_protect_page(unsigned long addr, bool protect)
> @@ -35,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
>   	return true;
>   }
>   
> +#ifdef CONFIG_KFENCE
> +extern bool kfence_early_init;
> +static inline bool arm64_kfence_can_set_direct_map(void)
> +{
> +	return !kfence_early_init;
> +}
> +#else /* CONFIG_KFENCE */
> +static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
> +#endif /* CONFIG_KFENCE */
> +
>   #endif /* __ASM_KFENCE_H */
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 61944c7091f0..683958616ac1 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -528,17 +528,14 @@ static int __init enable_crash_mem_map(char *arg)
>   early_param("crashkernel", enable_crash_mem_map);
>   
>   #ifdef CONFIG_KFENCE
> +bool kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
>   
> -static bool kfence_early_init __initdata = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
> -/*
> - * early_param can be parsed before linear mapping
> - * set up
> - */
> -static int __init parse_kfence_early_init(char *p)
> +/* early_param() will be parsed before map_mem() below. */
> +static int __init parse_kfence_early_init(char *arg)
>   {
>   	int val;
>   
> -	if (get_option(&p, &val))
> +	if (get_option(&arg, &val))
>   		kfence_early_init = !!val;
>   	return 0;
>   }
> @@ -552,22 +549,34 @@ static phys_addr_t arm64_kfence_alloc_pool(void)
>   		return 0;
>   
>   	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
> -	if (!kfence_pool)
> +	if (!kfence_pool) {
>   		pr_err("failed to allocate kfence pool\n");
> +		kfence_early_init = false;
> +		return 0;
> +	}
> +
> +	/* Temporarily mark as NOMAP. */
> +	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
>   
>   	return kfence_pool;
>   }
>   
> -#else
> -
> -static phys_addr_t arm64_kfence_alloc_pool(void)
> +static void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
>   {
> -	return 0;
> -}
> -
> -#endif
> +	if (!kfence_pool)
> +		return;
>   
> -phys_addr_t early_kfence_pool;
> +	/* KFENCE pool needs page-level mapping. */
> +	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
> +		       pgprot_tagged(PAGE_KERNEL),
> +		       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
> +	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
> +	__kfence_pool = phys_to_virt(kfence_pool);
> +}
> +#else /* CONFIG_KFENCE */
> +static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
> +static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
> +#endif /* CONFIG_KFENCE */
>   
>   static void __init map_mem(pgd_t *pgdp)
>   {
> @@ -575,6 +584,7 @@ static void __init map_mem(pgd_t *pgdp)
>   	phys_addr_t kernel_start = __pa_symbol(_stext);
>   	phys_addr_t kernel_end = __pa_symbol(__init_begin);
>   	phys_addr_t start, end;
> +	phys_addr_t early_kfence_pool;
>   	int flags = NO_EXEC_MAPPINGS;
>   	u64 i;
>   
> @@ -588,8 +598,6 @@ static void __init map_mem(pgd_t *pgdp)
>   	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
>   
>   	early_kfence_pool = arm64_kfence_alloc_pool();
> -	if (early_kfence_pool)
> -		memblock_mark_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
>   
>   	if (can_set_direct_map())
>   		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
> @@ -656,17 +664,7 @@ static void __init map_mem(pgd_t *pgdp)
>   		}
>   	}
>   #endif
> -
> -	/* Kfence pool needs page-level mapping */
> -	if (early_kfence_pool) {
> -		__map_memblock(pgdp, early_kfence_pool,
> -			early_kfence_pool + KFENCE_POOL_SIZE,
> -			pgprot_tagged(PAGE_KERNEL),
> -			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
> -		memblock_clear_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
> -		/* kfence_pool really mapped now */
> -		kfence_set_pool(early_kfence_pool);
> -	}
> +	arm64_kfence_map_pool(early_kfence_pool, pgdp);
>   }
>   
>   void mark_rodata_ro(void)
> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> index 7ce5295cc6fb..aa8fd12cc96f 100644
> --- a/arch/arm64/mm/pageattr.c
> +++ b/arch/arm64/mm/pageattr.c
> @@ -7,7 +7,6 @@
>   #include <linux/module.h>
>   #include <linux/sched.h>
>   #include <linux/vmalloc.h>
> -#include <linux/kfence.h>
>   
>   #include <asm/cacheflush.h>
>   #include <asm/set_memory.h>
> @@ -28,11 +27,10 @@ bool can_set_direct_map(void)
>   	 * mapped at page granularity, so that it is possible to
>   	 * protect/unprotect single pages.
>   	 *
> -	 * Kfence pool requires page granularity mapping also if we init it
> -	 * late.
> +	 * KFENCE pool requires page-granular mapping if initialized late.
>   	 */
>   	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
> -	    (IS_ENABLED(CONFIG_KFENCE) && !early_kfence_pool);
> +	       arm64_kfence_can_set_direct_map();
>   }
>   
>   static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> diff --git a/include/linux/kfence.h b/include/linux/kfence.h
> index 91cbcc98e293..726857a4b680 100644
> --- a/include/linux/kfence.h
> +++ b/include/linux/kfence.h
> @@ -222,7 +222,6 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
>   
>   #else /* CONFIG_KFENCE */
>   
> -#define KFENCE_POOL_SIZE 0
>   static inline bool is_kfence_address(const void *addr) { return false; }
>   static inline void kfence_alloc_pool(void) { }
>   static inline void kfence_init(void) { }
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index fab087d39633..e7f22af5e710 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -818,7 +818,7 @@ void __init kfence_alloc_pool(void)
>   	if (!kfence_sample_interval)
>   		return;
>   
> -	/* if the pool has already been initialized by arch, skip the below */
> +	/* If the pool has already been initialized by arch, skip the below. */
>   	if (__kfence_pool)
>   		return;
>
Zhenhua Huang March 16, 2023, 10:44 a.m. UTC | #4
On 2023/3/16 17:58, Pavan Kondeti wrote:
> On Thu, Mar 16, 2023 at 04:50:20PM +0800, Zhenhua Huang wrote:
>> Kfence only needs its pool to be mapped as page granularity, if it is
>> inited early. Previous judgement was a bit over protected. From [1], Mark
>> suggested to "just map the KFENCE region a page granularity". So I
>> decouple it from judgement and do page granularity mapping for kfence
>> pool only. Need to be noticed that late init of kfence pool still requires
>> page granularity mapping.
>>
>> Page granularity mapping in theory cost more(2M per 1GB) memory on arm64
>> platform. Like what I've tested on QEMU(emulated 1GB RAM) with
>> gki_defconfig, also turning off rodata protection:
>> Before:
>> [root@liebao ]# cat /proc/meminfo
>> MemTotal:         999484 kB
>> After:
>> [root@liebao ]# cat /proc/meminfo
>> MemTotal:        1001480 kB
>>
>> To implement this, also relocate the kfence pool allocation before the
>> linear mapping setting up, arm64_kfence_alloc_pool is to allocate phys
>> addr, __kfence_pool is to be set after linear mapping set up.
>>
>> LINK: [1] https://lore.kernel.org/linux-arm-kernel/Y+IsdrvDNILA59UN@FVFF77S0Q05N/
>> Suggested-by: Mark Rutland <mark.rutland@arm.com>
>> Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
>> ---
>>   arch/arm64/include/asm/kfence.h | 16 +++++++++++
>>   arch/arm64/mm/mmu.c             | 59 +++++++++++++++++++++++++++++++++++++++++
>>   arch/arm64/mm/pageattr.c        |  9 +++++--
>>   include/linux/kfence.h          |  1 +
>>   mm/kfence/core.c                |  4 +++
>>   5 files changed, 87 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
>> index aa855c6..8143c91 100644
>> --- a/arch/arm64/include/asm/kfence.h
>> +++ b/arch/arm64/include/asm/kfence.h
>> @@ -10,6 +10,22 @@
>>   
>>   #include <asm/set_memory.h>
>>   
>> +extern phys_addr_t early_kfence_pool;
>> +
>> +#ifdef CONFIG_KFENCE
>> +
>> +extern char *__kfence_pool;
>> +static inline void kfence_set_pool(phys_addr_t addr)
>> +{
>> +	__kfence_pool = phys_to_virt(addr);
>> +}
>> +
>> +#else
>> +
>> +static inline void kfence_set_pool(phys_addr_t addr) { }
>> +
>> +#endif
>> +
>>   static inline bool arch_kfence_init_pool(void) { return true; }
>>   
>>   static inline bool kfence_protect_page(unsigned long addr, bool protect)
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index 6f9d889..61944c70 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -24,6 +24,7 @@
>>   #include <linux/mm.h>
>>   #include <linux/vmalloc.h>
>>   #include <linux/set_memory.h>
>> +#include <linux/kfence.h>
>>   
>>   #include <asm/barrier.h>
>>   #include <asm/cputype.h>
>> @@ -38,6 +39,7 @@
>>   #include <asm/ptdump.h>
>>   #include <asm/tlbflush.h>
>>   #include <asm/pgalloc.h>
>> +#include <asm/kfence.h>
>>   
>>   #define NO_BLOCK_MAPPINGS	BIT(0)
>>   #define NO_CONT_MAPPINGS	BIT(1)
>> @@ -525,6 +527,48 @@ static int __init enable_crash_mem_map(char *arg)
>>   }
>>   early_param("crashkernel", enable_crash_mem_map);
>>   
>> +#ifdef CONFIG_KFENCE
>> +
>> +static bool kfence_early_init __initdata = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
>> +/*
>> + * early_param can be parsed before linear mapping
>> + * set up
>> + */
>> +static int __init parse_kfence_early_init(char *p)
>> +{
>> +	int val;
>> +
>> +	if (get_option(&p, &val))
>> +		kfence_early_init = !!val;
>> +	return 0;
>> +}
>> +early_param("kfence.sample_interval", parse_kfence_early_init);
>> +
>> +static phys_addr_t arm64_kfence_alloc_pool(void)
>> +{
>> +	phys_addr_t kfence_pool;
>> +
>> +	if (!kfence_early_init)
>> +		return 0;
>> +
>> +	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
>> +	if (!kfence_pool)
>> +		pr_err("failed to allocate kfence pool\n");
>> +
>> +	return kfence_pool;
>> +}
>> +
>> +#else
>> +
>> +static phys_addr_t arm64_kfence_alloc_pool(void)
>> +{
>> +	return 0;
>> +}
>> +
>> +#endif
>> +
>> +phys_addr_t early_kfence_pool;
>> +
>>   static void __init map_mem(pgd_t *pgdp)
>>   {
>>   	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
>> @@ -543,6 +587,10 @@ static void __init map_mem(pgd_t *pgdp)
>>   	 */
>>   	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
>>   
>> +	early_kfence_pool = arm64_kfence_alloc_pool();
>> +	if (early_kfence_pool)
>> +		memblock_mark_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
>> +
>>   	if (can_set_direct_map())
>>   		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>>   
>> @@ -608,6 +656,17 @@ static void __init map_mem(pgd_t *pgdp)
>>   		}
>>   	}
>>   #endif
>> +
>> +	/* Kfence pool needs page-level mapping */
>> +	if (early_kfence_pool) {
>> +		__map_memblock(pgdp, early_kfence_pool,
>> +			early_kfence_pool + KFENCE_POOL_SIZE,
>> +			pgprot_tagged(PAGE_KERNEL),
>> +			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
>> +		memblock_clear_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
>> +		/* kfence_pool really mapped now */
>> +		kfence_set_pool(early_kfence_pool);
>> +	}
> 
> Why not wrap this under CONFIG_KFENCE ? early_kfence_pool can also go in
> there?

Because I didn't want to add #ifdef CONFIG_KFENCE inside the function. In
the case without CONFIG_KFENCE, early_kfence_pool should always be NULL.

Thanks,
Zhenhua

> 
> Thanks,
> Pavan
Alexander Potapenko March 16, 2023, 10:56 a.m. UTC | #5
>
>
>
> >> +    /* Kfence pool needs page-level mapping */
> >> +    if (early_kfence_pool) {
> >> +            __map_memblock(pgdp, early_kfence_pool,
> >> +                    early_kfence_pool + KFENCE_POOL_SIZE,
> >> +                    pgprot_tagged(PAGE_KERNEL),
> >> +                    NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
> >> +            memblock_clear_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
> >> +            /* kfence_pool really mapped now */
> >> +            kfence_set_pool(early_kfence_pool);
> >> +    }
> >
> > Why not wrap this under CONFIG_KFENCE ? early_kfence_pool can also go in
> > there?
>
> Because I didn't want to add CONFIG_KFENCE in function.. in the case of
> w/o CONFIG_KFENCE, early_kfence_pool should be always NULL.
>
> Please no. If the code is not used in non-KFENCE build, it should not be
compiled. Same holds for the variables that only exist in KFENCE builds.
Zhenhua Huang March 16, 2023, 11:17 a.m. UTC | #6
On 2023/3/16 18:56, Alexander Potapenko wrote:
> 
> 
>      >> +    /* Kfence pool needs page-level mapping */
>      >> +    if (early_kfence_pool) {
>      >> +            __map_memblock(pgdp, early_kfence_pool,
>      >> +                    early_kfence_pool + KFENCE_POOL_SIZE,
>      >> +                    pgprot_tagged(PAGE_KERNEL),
>      >> +                    NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
>      >> +            memblock_clear_nomap(early_kfence_pool,
>     KFENCE_POOL_SIZE);
>      >> +            /* kfence_pool really mapped now */
>      >> +            kfence_set_pool(early_kfence_pool);
>      >> +    }
>      >
>      > Why not wrap this under CONFIG_KFENCE ? early_kfence_pool can
>     also go in
>      > there?
> 
>     Because I didn't want to add CONFIG_KFENCE in function.. in the case of
>     w/o CONFIG_KFENCE, early_kfence_pool should be always NULL.
> 
> Please no. If the code is not used in non-KFENCE build, it should not be 
> compiled. Same holds for the variables that only exist in KFENCE builds.

Got it, yeah — it doesn't make sense to keep this variable without
CONFIG_KFENCE.

Thanks,
Zhenhua
Zhenhua Huang March 16, 2023, 12:19 p.m. UTC | #7
On 2023/3/16 18:36, Zhenhua Huang wrote:
> 
> 
> On 2023/3/16 18:06, Marco Elver wrote:
>> On Thu, Mar 16, 2023 at 04:50PM +0800, Zhenhua Huang wrote:
>>> Kfence only needs its pool to be mapped as page granularity, if it is
>>> inited early. Previous judgement was a bit over protected. From [1], 
>>> Mark
>>> suggested to "just map the KFENCE region a page granularity". So I
>>> decouple it from judgement and do page granularity mapping for kfence
>>> pool only. Need to be noticed that late init of kfence pool still 
>>> requires
>>> page granularity mapping.
>>>
>>> Page granularity mapping in theory cost more(2M per 1GB) memory on arm64
>>> platform. Like what I've tested on QEMU(emulated 1GB RAM) with
>>> gki_defconfig, also turning off rodata protection:
>>> Before:
>>> [root@liebao ]# cat /proc/meminfo
>>> MemTotal:         999484 kB
>>> After:
>>> [root@liebao ]# cat /proc/meminfo
>>> MemTotal:        1001480 kB
>>>
>>> To implement this, also relocate the kfence pool allocation before the
>>> linear mapping setting up, arm64_kfence_alloc_pool is to allocate phys
>>> addr, __kfence_pool is to be set after linear mapping set up.
>>>
>>> LINK: [1] 
>>> https://lore.kernel.org/linux-arm-kernel/Y+IsdrvDNILA59UN@FVFF77S0Q05N/
>>> Suggested-by: Mark Rutland <mark.rutland@arm.com>
>>> Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
>>> ---
>>>   arch/arm64/include/asm/kfence.h | 16 +++++++++++
>>>   arch/arm64/mm/mmu.c             | 59 
>>> +++++++++++++++++++++++++++++++++++++++++
>>>   arch/arm64/mm/pageattr.c        |  9 +++++--
>>>   include/linux/kfence.h          |  1 +
>>>   mm/kfence/core.c                |  4 +++
>>>   5 files changed, 87 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/arch/arm64/include/asm/kfence.h 
>>> b/arch/arm64/include/asm/kfence.h
>>> index aa855c6..8143c91 100644
>>> --- a/arch/arm64/include/asm/kfence.h
>>> +++ b/arch/arm64/include/asm/kfence.h
>>> @@ -10,6 +10,22 @@
>>>   #include <asm/set_memory.h>
>>> +extern phys_addr_t early_kfence_pool;
>>
>> This should not be accessible if !CONFIG_KFENCE.
>>
>>> +#ifdef CONFIG_KFENCE
>>> +
>>> +extern char *__kfence_pool;
>>> +static inline void kfence_set_pool(phys_addr_t addr)
>>> +{
>>> +    __kfence_pool = phys_to_virt(addr);
>>> +}
>>
>> kfence_set_pool() is redundant if it's for arm64 only, because we know
>> where it's needed, and there you could just access __kfence_pool
>> directly. So let's just remove this function. (Initially I thought you
>> want to provide it generally, also for other architectures.)
>>
>>> +#else
>>> +
>>> +static inline void kfence_set_pool(phys_addr_t addr) { }
>>> +
>>> +#endif
>>> +
>>>   static inline bool arch_kfence_init_pool(void) { return true; }
>> [...]
>>> +#endif
>>> +
>>> +phys_addr_t early_kfence_pool;
>>
>> This variable now exists in non-KFENCE builds, which is wrong.
>>
>>>   static void __init map_mem(pgd_t *pgdp)
>>>   {
>>>       static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
>>> @@ -543,6 +587,10 @@ static void __init map_mem(pgd_t *pgdp)
>>>        */
>>>       BUILD_BUG_ON(pgd_index(direct_map_end - 1) == 
>>> pgd_index(direct_map_end));
>>> +    early_kfence_pool = arm64_kfence_alloc_pool();
>>> +    if (early_kfence_pool)
>>> +        memblock_mark_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
>>> +
>>>       if (can_set_direct_map())
>>>           flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>>> @@ -608,6 +656,17 @@ static void __init map_mem(pgd_t *pgdp)
>>>           }
>>>       }
>>>   #endif
>>> +
>>> +    /* Kfence pool needs page-level mapping */
>>> +    if (early_kfence_pool) {
>>> +        __map_memblock(pgdp, early_kfence_pool,
>>> +            early_kfence_pool + KFENCE_POOL_SIZE,
>>> +            pgprot_tagged(PAGE_KERNEL),
>>> +            NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
>>> +        memblock_clear_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
>>> +        /* kfence_pool really mapped now */
>>> +        kfence_set_pool(early_kfence_pool);
>>> +    }
>>
>> This whole piece of code could also be wrapped in another function,
>> which becomes a no-op if !CONFIG_KFENCE. Then you also don't need to
>> provide the KFENCE_POOL_SIZE define for 0 if !CONFIG_KFENCE.
>>
>> [...]
>>> +     *
>>> +     * Kfence pool requires page granularity mapping also if we init it
>>> +     * late.
>>>        */
>>>       return (rodata_enabled && rodata_full) || 
>>> debug_pagealloc_enabled() ||
>>> -        IS_ENABLED(CONFIG_KFENCE);
>>> +        (IS_ENABLED(CONFIG_KFENCE) && !early_kfence_pool);
>>
>> Accessing a non-existent variable if !CONFIG_KFENCE works because the
>> compiler optimizes out the access, but is generally bad style.
> 
> Hi Marco,
> 
> Actually my previous intention is not to do separation between KFENCE 
> and non-KFENCE, instead to ensure early_kfence_pool always to be NULL in 
> non-KFENCE build. That works well from my side w/ and w/o 
> CONFIG_KFENCE.. but Yes that not clear to have this variable still in 
> non-Kfence build.
> 
> Sure, I will follow your suggestion below and tested on my side. Thanks.
> 
> Thanks,
> Zhenhua
> 
>>
>>
>> I think the only issue that I have is that the separation between KFENCE
>> and non-KFENCE builds is not great.
>>
>> At the end of the email are is a diff against your patch which would be
>> my suggested changes (while at it, I fixed up a bunch of other issues).
>> Untested, so if you decide to adopt these changes, please test.

Hi Marco,

Everything below looks good, except for one minor change: we can't define
kfence_early_init as __initdata, because it is used by a non-init function
(can_set_direct_map). Warning reported:
WARNING: modpost: vmlinux.o: section mismatch in reference: 
can_set_direct_map (section: .text) -> kfence_early_init (section: 
.init.data)

I have modified and sent out a new patchset. Please help review.

Thanks,
Zhenhua

>>
>> Thanks,
>> -- Marco
>>
>> ------ >8 ------
>>
>>
>> diff --git a/arch/arm64/include/asm/kfence.h 
>> b/arch/arm64/include/asm/kfence.h
>> index 8143c91854e1..a81937fae9f6 100644
>> --- a/arch/arm64/include/asm/kfence.h
>> +++ b/arch/arm64/include/asm/kfence.h
>> @@ -10,22 +10,6 @@
>>   #include <asm/set_memory.h>
>> -extern phys_addr_t early_kfence_pool;
>> -
>> -#ifdef CONFIG_KFENCE
>> -
>> -extern char *__kfence_pool;
>> -static inline void kfence_set_pool(phys_addr_t addr)
>> -{
>> -    __kfence_pool = phys_to_virt(addr);
>> -}
>> -
>> -#else
>> -
>> -static inline void kfence_set_pool(phys_addr_t addr) { }
>> -
>> -#endif
>> -
>>   static inline bool arch_kfence_init_pool(void) { return true; }
>>   static inline bool kfence_protect_page(unsigned long addr, bool 
>> protect)
>> @@ -35,4 +19,14 @@ static inline bool kfence_protect_page(unsigned 
>> long addr, bool protect)
>>       return true;
>>   }
>> +#ifdef CONFIG_KFENCE
>> +extern bool kfence_early_init;
>> +static inline bool arm64_kfence_can_set_direct_map(void)
>> +{
>> +    return !kfence_early_init;
>> +}
>> +#else /* CONFIG_KFENCE */
>> +static inline bool arm64_kfence_can_set_direct_map(void) { return 
>> false; }
>> +#endif /* CONFIG_KFENCE */
>> +
>>   #endif /* __ASM_KFENCE_H */
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index 61944c7091f0..683958616ac1 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -528,17 +528,14 @@ static int __init enable_crash_mem_map(char *arg)
>>   early_param("crashkernel", enable_crash_mem_map);
>>   #ifdef CONFIG_KFENCE
>> +bool kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
>> -static bool kfence_early_init __initdata = 
>> !!CONFIG_KFENCE_SAMPLE_INTERVAL;
>> -/*
>> - * early_param can be parsed before linear mapping
>> - * set up
>> - */
>> -static int __init parse_kfence_early_init(char *p)
>> +/* early_param() will be parsed before map_mem() below. */
>> +static int __init parse_kfence_early_init(char *arg)
>>   {
>>       int val;
>> -    if (get_option(&p, &val))
>> +    if (get_option(&arg, &val))
>>           kfence_early_init = !!val;
>>       return 0;
>>   }
>> @@ -552,22 +549,34 @@ static phys_addr_t arm64_kfence_alloc_pool(void)
>>           return 0;
>>       kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
>> -    if (!kfence_pool)
>> +    if (!kfence_pool) {
>>           pr_err("failed to allocate kfence pool\n");
>> +        kfence_early_init = false;
>> +        return 0;
>> +    }
>> +
>> +    /* Temporarily mark as NOMAP. */
>> +    memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
>>       return kfence_pool;
>>   }
>> -#else
>> -
>> -static phys_addr_t arm64_kfence_alloc_pool(void)
>> +static void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
>>   {
>> -    return 0;
>> -}
>> -
>> -#endif
>> +    if (!kfence_pool)
>> +        return;
>> -phys_addr_t early_kfence_pool;
>> +    /* KFENCE pool needs page-level mapping. */
>> +    __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
>> +               pgprot_tagged(PAGE_KERNEL),
>> +               NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
>> +    memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
>> +    __kfence_pool = phys_to_virt(kfence_pool);
>> +}
>> +#else /* CONFIG_KFENCE */
>> +static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
>> +static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, 
>> pgd_t *pgdp) { }
>> +#endif /* CONFIG_KFENCE */
>>   static void __init map_mem(pgd_t *pgdp)
>>   {
>> @@ -575,6 +584,7 @@ static void __init map_mem(pgd_t *pgdp)
>>       phys_addr_t kernel_start = __pa_symbol(_stext);
>>       phys_addr_t kernel_end = __pa_symbol(__init_begin);
>>       phys_addr_t start, end;
>> +    phys_addr_t early_kfence_pool;
>>       int flags = NO_EXEC_MAPPINGS;
>>       u64 i;
>> @@ -588,8 +598,6 @@ static void __init map_mem(pgd_t *pgdp)
>>       BUILD_BUG_ON(pgd_index(direct_map_end - 1) == 
>> pgd_index(direct_map_end));
>>       early_kfence_pool = arm64_kfence_alloc_pool();
>> -    if (early_kfence_pool)
>> -        memblock_mark_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
>>       if (can_set_direct_map())
>>           flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>> @@ -656,17 +664,7 @@ static void __init map_mem(pgd_t *pgdp)
>>           }
>>       }
>>   #endif
>> -
>> -    /* Kfence pool needs page-level mapping */
>> -    if (early_kfence_pool) {
>> -        __map_memblock(pgdp, early_kfence_pool,
>> -            early_kfence_pool + KFENCE_POOL_SIZE,
>> -            pgprot_tagged(PAGE_KERNEL),
>> -            NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
>> -        memblock_clear_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
>> -        /* kfence_pool really mapped now */
>> -        kfence_set_pool(early_kfence_pool);
>> -    }
>> +    arm64_kfence_map_pool(early_kfence_pool, pgdp);
>>   }
>>   void mark_rodata_ro(void)
>> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
>> index 7ce5295cc6fb..aa8fd12cc96f 100644
>> --- a/arch/arm64/mm/pageattr.c
>> +++ b/arch/arm64/mm/pageattr.c
>> @@ -7,7 +7,6 @@
>>   #include <linux/module.h>
>>   #include <linux/sched.h>
>>   #include <linux/vmalloc.h>
>> -#include <linux/kfence.h>
>>   #include <asm/cacheflush.h>
>>   #include <asm/set_memory.h>
>> @@ -28,11 +27,10 @@ bool can_set_direct_map(void)
>>        * mapped at page granularity, so that it is possible to
>>        * protect/unprotect single pages.
>>        *
>> -     * Kfence pool requires page granularity mapping also if we init it
>> -     * late.
>> +     * KFENCE pool requires page-granular mapping if initialized late.
>>        */
>>       return (rodata_enabled && rodata_full) || 
>> debug_pagealloc_enabled() ||
>> -        (IS_ENABLED(CONFIG_KFENCE) && !early_kfence_pool);
>> +           arm64_kfence_can_set_direct_map();
>>   }
>>   static int change_page_range(pte_t *ptep, unsigned long addr, void 
>> *data)
>> diff --git a/include/linux/kfence.h b/include/linux/kfence.h
>> index 91cbcc98e293..726857a4b680 100644
>> --- a/include/linux/kfence.h
>> +++ b/include/linux/kfence.h
>> @@ -222,7 +222,6 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, 
>> void *object, struct slab *sla
>>   #else /* CONFIG_KFENCE */
>> -#define KFENCE_POOL_SIZE 0
>>   static inline bool is_kfence_address(const void *addr) { return 
>> false; }
>>   static inline void kfence_alloc_pool(void) { }
>>   static inline void kfence_init(void) { }
>> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
>> index fab087d39633..e7f22af5e710 100644
>> --- a/mm/kfence/core.c
>> +++ b/mm/kfence/core.c
>> @@ -818,7 +818,7 @@ void __init kfence_alloc_pool(void)
>>       if (!kfence_sample_interval)
>>           return;
>> -    /* if the pool has already been initialized by arch, skip the 
>> below */
>> +    /* If the pool has already been initialized by arch, skip the 
>> below. */
>>       if (__kfence_pool)
>>           return;
>
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index aa855c6..8143c91 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -10,6 +10,22 @@ 
 
 #include <asm/set_memory.h>
 
+extern phys_addr_t early_kfence_pool;
+
+#ifdef CONFIG_KFENCE
+
+extern char *__kfence_pool;
+static inline void kfence_set_pool(phys_addr_t addr)
+{
+	__kfence_pool = phys_to_virt(addr);
+}
+
+#else
+
+static inline void kfence_set_pool(phys_addr_t addr) { }
+
+#endif
+
 static inline bool arch_kfence_init_pool(void) { return true; }
 
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6f9d889..61944c70 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -24,6 +24,7 @@ 
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/set_memory.h>
+#include <linux/kfence.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -38,6 +39,7 @@ 
 #include <asm/ptdump.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>
 
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
@@ -525,6 +527,48 @@  static int __init enable_crash_mem_map(char *arg)
 }
 early_param("crashkernel", enable_crash_mem_map);
 
+#ifdef CONFIG_KFENCE
+
+static bool kfence_early_init __initdata = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
+/*
+ * early_param can be parsed before linear mapping
+ * set up
+ */
+static int __init parse_kfence_early_init(char *p)
+{
+	int val;
+
+	if (get_option(&p, &val))
+		kfence_early_init = !!val;
+	return 0;
+}
+early_param("kfence.sample_interval", parse_kfence_early_init);
+
+static phys_addr_t arm64_kfence_alloc_pool(void)
+{
+	phys_addr_t kfence_pool;
+
+	if (!kfence_early_init)
+		return 0;
+
+	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+	if (!kfence_pool)
+		pr_err("failed to allocate kfence pool\n");
+
+	return kfence_pool;
+}
+
+#else
+
+static phys_addr_t arm64_kfence_alloc_pool(void)
+{
+	return 0;
+}
+
+#endif
+
+phys_addr_t early_kfence_pool;
+
 static void __init map_mem(pgd_t *pgdp)
 {
 	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
@@ -543,6 +587,10 @@  static void __init map_mem(pgd_t *pgdp)
 	 */
 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
+	early_kfence_pool = arm64_kfence_alloc_pool();
+	if (early_kfence_pool)
+		memblock_mark_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
+
 	if (can_set_direct_map())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
@@ -608,6 +656,17 @@  static void __init map_mem(pgd_t *pgdp)
 		}
 	}
 #endif
+
+	/* Kfence pool needs page-level mapping */
+	if (early_kfence_pool) {
+		__map_memblock(pgdp, early_kfence_pool,
+			early_kfence_pool + KFENCE_POOL_SIZE,
+			pgprot_tagged(PAGE_KERNEL),
+			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+		memblock_clear_nomap(early_kfence_pool, KFENCE_POOL_SIZE);
+		/* kfence_pool really mapped now */
+		kfence_set_pool(early_kfence_pool);
+	}
 }
 
 void mark_rodata_ro(void)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 79dd201..7ce5295 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -7,10 +7,12 @@ 
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
+#include <linux/kfence.h>
 
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
+#include <asm/kfence.h>
 
 struct page_change_data {
 	pgprot_t set_mask;
@@ -22,12 +24,15 @@  bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
 bool can_set_direct_map(void)
 {
 	/*
-	 * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+	 * rodata_full and DEBUG_PAGEALLOC require linear map to be
 	 * mapped at page granularity, so that it is possible to
 	 * protect/unprotect single pages.
+	 *
+	 * Kfence pool requires page granularity mapping also if we init it
+	 * late.
 	 */
 	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
-		IS_ENABLED(CONFIG_KFENCE);
+	    (IS_ENABLED(CONFIG_KFENCE) && !early_kfence_pool);
 }
 
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 726857a..91cbcc9 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -222,6 +222,7 @@  bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
 
 #else /* CONFIG_KFENCE */
 
+#define KFENCE_POOL_SIZE 0
 static inline bool is_kfence_address(const void *addr) { return false; }
 static inline void kfence_alloc_pool(void) { }
 static inline void kfence_init(void) { }
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 5349c37..e05ccf1 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -814,6 +814,10 @@  void __init kfence_alloc_pool(void)
 	if (!kfence_sample_interval)
 		return;
 
+	/* if the pool has already been initialized by arch, skip the below */
+	if (__kfence_pool)
+		return;
+
 	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
 
 	if (!__kfence_pool)