[3/6] mm: kfence: make kfence_protect_page() void

Message ID: 20230328095807.7014-4-songmuchun@bytedance.com (mailing list archive)
State: New
Series: Simplify kfence code

Commit Message

Muchun Song March 28, 2023, 9:58 a.m. UTC
arch_kfence_init_pool() makes sure the KFENCE pool is mapped at base page
size (e.g. 4KB), so the subsequent PTE lookup in kfence_protect_page() will
always succeed. kfence_protect_page() therefore always returns true, so
make it void to simplify the code.
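
For context, the x86 implementation splits any huge mapping covering the
pool down to base pages up front; a trimmed sketch of what
arch/x86/include/asm/kfence.h does:

	static inline bool arch_kfence_init_pool(void)
	{
		unsigned long addr;

		/* Walk the pool and force every page down to PG_LEVEL_4K. */
		for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
		     addr += PAGE_SIZE) {
			unsigned int level;

			if (!lookup_address(addr, &level))
				return false;

			if (level != PG_LEVEL_4K)
				set_memory_4k(addr, 1);
		}

		return true;
	}

Once this has succeeded, every pool page is mapped at PTE level, which is
why the later PTE lookup in kfence_protect_page() cannot fail.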

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 arch/arm/include/asm/kfence.h     |   4 +-
 arch/arm64/include/asm/kfence.h   |   4 +-
 arch/parisc/include/asm/kfence.h  |   7 +-
 arch/powerpc/include/asm/kfence.h |   8 +--
 arch/riscv/include/asm/kfence.h   |   4 +-
 arch/s390/include/asm/kfence.h    |   3 +-
 arch/x86/include/asm/kfence.h     |   9 +--
 mm/kfence/core.c                  | 142 +++++++++++++++++---------------------
 8 files changed, 73 insertions(+), 108 deletions(-)

Comments

Marco Elver March 28, 2023, 10:32 a.m. UTC | #1
On Tue, 28 Mar 2023 at 11:58, Muchun Song <songmuchun@bytedance.com> wrote:
>
> arch_kfence_init_pool() makes sure the KFENCE pool is mapped at base page
> size (e.g. 4KB), so the subsequent PTE lookup in kfence_protect_page() will
> always succeed. kfence_protect_page() therefore always returns true, so
> make it void to simplify the code.
>
> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
> ---
> [...]
>
> diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
> index ff5c7134a37a..6ffd4a078a71 100644
> --- a/arch/x86/include/asm/kfence.h
> +++ b/arch/x86/include/asm/kfence.h
> @@ -38,13 +38,9 @@ static inline bool arch_kfence_init_pool(void)
>  }
>
>  /* Protect the given page and flush TLB. */
> -static inline bool kfence_protect_page(unsigned long addr, bool protect)
> +static inline void kfence_protect_page(unsigned long addr, bool protect)
>  {
> -       unsigned int level;
> -       pte_t *pte = lookup_address(addr, &level);
> -
> -       if (WARN_ON(!pte || level != PG_LEVEL_4K))
> -               return false;
> +       pte_t *pte = virt_to_kpte(addr);

This WARN and bailing here has helped us catch an issue early before
[1] - and because KFENCE ought to be enabled as a debugging tool, the
philosophy is to be failure tolerant and not crash the system here,
hence the "return false".

[1] https://lore.kernel.org/lkml/Y3bCV6VckVUEF7Pq@elver.google.com/

We're relying on the architecture doing the "right thing", but it's
not entirely unlikely that the arch ends up doing the wrong thing due
to some bug like above (i.e. arch_kfence_init_pool() is faulty).
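
For reference, the core side already turns a false return into a graceful
shutdown rather than a crash; KFENCE_WARN_ON() in mm/kfence/core.c is
roughly:

	#define KFENCE_WARN_ON(cond)                               \
		({                                                 \
			const bool __cond = WARN_ON(cond);         \
			if (unlikely(__cond)) {                    \
				/* Disable KFENCE, don't crash. */ \
				WRITE_ONCE(kfence_enabled, false); \
				disabled_by_warn = true;           \
			}                                          \
			__cond;                                    \
		})

so keeping the "return false" path costs very little and preserves that
safety net.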

Nack.
Muchun Song March 28, 2023, 1:04 p.m. UTC | #2
> On Mar 28, 2023, at 18:32, Marco Elver <elver@google.com> wrote:
> 
> On Tue, 28 Mar 2023 at 11:58, Muchun Song <songmuchun@bytedance.com> wrote:
>> 
>> arch_kfence_init_pool() makes sure the KFENCE pool is mapped at base page
>> size (e.g. 4KB), so the subsequent PTE lookup in kfence_protect_page() will
>> always succeed. kfence_protect_page() therefore always returns true, so
>> make it void to simplify the code.
>> 
>> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
>> ---
>> [...]
>>
>> diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
>> index ff5c7134a37a..6ffd4a078a71 100644
>> --- a/arch/x86/include/asm/kfence.h
>> +++ b/arch/x86/include/asm/kfence.h
>> @@ -38,13 +38,9 @@ static inline bool arch_kfence_init_pool(void)
>> }
>> 
>> /* Protect the given page and flush TLB. */
>> -static inline bool kfence_protect_page(unsigned long addr, bool protect)
>> +static inline void kfence_protect_page(unsigned long addr, bool protect)
>> {
>> -       unsigned int level;
>> -       pte_t *pte = lookup_address(addr, &level);
>> -
>> -       if (WARN_ON(!pte || level != PG_LEVEL_4K))
>> -               return false;
>> +       pte_t *pte = virt_to_kpte(addr);
> 
> This WARN and bailing here has helped us catch an issue early before
> [1] - and because KFENCE ought to be enabled as a debugging tool, the
> philosophy is to be failure tolerant and not crash the system here,
> hence the "return false".
> 
> [1] https://lore.kernel.org/lkml/Y3bCV6VckVUEF7Pq@elver.google.com/

A good example.

> 
> We're relying on the architecture doing the "right thing", but it's
> not entirely unlikely that the arch ends up doing the wrong thing due
> to some bug like above (i.e. arch_kfence_init_pool() is faulty).

Got it. I’ll drop this one in the next version.

Thanks

> 
> Nack.

Patch

diff --git a/arch/arm/include/asm/kfence.h b/arch/arm/include/asm/kfence.h
index 7980d0f2271f..c30a5f8125e8 100644
--- a/arch/arm/include/asm/kfence.h
+++ b/arch/arm/include/asm/kfence.h
@@ -43,11 +43,9 @@  static inline bool arch_kfence_init_pool(void)
 	return true;
 }
 
-static inline bool kfence_protect_page(unsigned long addr, bool protect)
+static inline void kfence_protect_page(unsigned long addr, bool protect)
 {
 	set_memory_valid(addr, 1, !protect);
-
-	return true;
 }
 
 #endif /* __ASM_ARM_KFENCE_H */
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index a81937fae9f6..7717c6d98b6f 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -12,11 +12,9 @@ 
 
 static inline bool arch_kfence_init_pool(void) { return true; }
 
-static inline bool kfence_protect_page(unsigned long addr, bool protect)
+static inline void kfence_protect_page(unsigned long addr, bool protect)
 {
 	set_memory_valid(addr, 1, !protect);
-
-	return true;
 }
 
 #ifdef CONFIG_KFENCE
diff --git a/arch/parisc/include/asm/kfence.h b/arch/parisc/include/asm/kfence.h
index 6259e5ac1fea..290792009315 100644
--- a/arch/parisc/include/asm/kfence.h
+++ b/arch/parisc/include/asm/kfence.h
@@ -19,13 +19,10 @@  static inline bool arch_kfence_init_pool(void)
 }
 
 /* Protect the given page and flush TLB. */
-static inline bool kfence_protect_page(unsigned long addr, bool protect)
+static inline void kfence_protect_page(unsigned long addr, bool protect)
 {
 	pte_t *pte = virt_to_kpte(addr);
 
-	if (WARN_ON(!pte))
-		return false;
-
 	/*
 	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
 	 * with interrupts disabled.
@@ -37,8 +34,6 @@  static inline bool kfence_protect_page(unsigned long addr, bool protect)
 		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
 
 	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
-
-	return true;
 }
 
 #endif /* _ASM_PARISC_KFENCE_H */
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
index 6fd2b4d486c5..9d8502a7d0a4 100644
--- a/arch/powerpc/include/asm/kfence.h
+++ b/arch/powerpc/include/asm/kfence.h
@@ -21,16 +21,14 @@  static inline bool arch_kfence_init_pool(void)
 }
 
 #ifdef CONFIG_PPC64
-static inline bool kfence_protect_page(unsigned long addr, bool protect)
+static inline void kfence_protect_page(unsigned long addr, bool protect)
 {
 	struct page *page = virt_to_page(addr);
 
 	__kernel_map_pages(page, 1, !protect);
-
-	return true;
 }
 #else
-static inline bool kfence_protect_page(unsigned long addr, bool protect)
+static inline void kfence_protect_page(unsigned long addr, bool protect)
 {
 	pte_t *kpte = virt_to_kpte(addr);
 
@@ -40,8 +38,6 @@  static inline bool kfence_protect_page(unsigned long addr, bool protect)
 	} else {
 		pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
 	}
-
-	return true;
 }
 #endif
 
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
index d887a54042aa..1299f47170b5 100644
--- a/arch/riscv/include/asm/kfence.h
+++ b/arch/riscv/include/asm/kfence.h
@@ -46,7 +46,7 @@  static inline bool arch_kfence_init_pool(void)
 	return true;
 }
 
-static inline bool kfence_protect_page(unsigned long addr, bool protect)
+static inline void kfence_protect_page(unsigned long addr, bool protect)
 {
 	pte_t *pte = virt_to_kpte(addr);
 
@@ -56,8 +56,6 @@  static inline bool kfence_protect_page(unsigned long addr, bool protect)
 		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
 
 	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
-
-	return true;
 }
 
 #endif /* _ASM_RISCV_KFENCE_H */
diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h
index d55ba878378b..6d7b3632d79c 100644
--- a/arch/s390/include/asm/kfence.h
+++ b/arch/s390/include/asm/kfence.h
@@ -33,10 +33,9 @@  static __always_inline void kfence_split_mapping(void)
 #endif
 }
 
-static inline bool kfence_protect_page(unsigned long addr, bool protect)
+static inline void kfence_protect_page(unsigned long addr, bool protect)
 {
 	__kernel_map_pages(virt_to_page(addr), 1, !protect);
-	return true;
 }
 
 #endif /* _ASM_S390_KFENCE_H */
diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
index ff5c7134a37a..6ffd4a078a71 100644
--- a/arch/x86/include/asm/kfence.h
+++ b/arch/x86/include/asm/kfence.h
@@ -38,13 +38,9 @@  static inline bool arch_kfence_init_pool(void)
 }
 
 /* Protect the given page and flush TLB. */
-static inline bool kfence_protect_page(unsigned long addr, bool protect)
+static inline void kfence_protect_page(unsigned long addr, bool protect)
 {
-	unsigned int level;
-	pte_t *pte = lookup_address(addr, &level);
-
-	if (WARN_ON(!pte || level != PG_LEVEL_4K))
-		return false;
+	pte_t *pte = virt_to_kpte(addr);
 
 	/*
 	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
@@ -65,7 +61,6 @@  static inline bool kfence_protect_page(unsigned long addr, bool protect)
 	preempt_disable();
 	flush_tlb_one_kernel(addr);
 	preempt_enable();
-	return true;
 }
 
 #endif /* !MODULE */
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 6781af1dfa66..5726bf2ae13c 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -229,14 +229,14 @@  static bool alloc_covered_contains(u32 alloc_stack_hash)
 	return true;
 }
 
-static bool kfence_protect(unsigned long addr)
+static inline void kfence_protect(unsigned long addr)
 {
-	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
+	kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
 }
 
-static bool kfence_unprotect(unsigned long addr)
+static inline void kfence_unprotect(unsigned long addr)
 {
-	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
+	kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
 }
 
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
@@ -531,30 +531,19 @@  static void rcu_guarded_free(struct rcu_head *h)
 	kfence_guarded_free((void *)meta->addr, meta, false);
 }
 
-/*
- * Initialization of the KFENCE pool after its allocation.
- * Returns 0 on success; otherwise returns the address up to
- * which partial initialization succeeded.
- */
-static unsigned long kfence_init_pool(void)
+static void kfence_init_pool(void)
 {
 	unsigned long addr = (unsigned long)__kfence_pool;
 	int i;
 
-	if (!arch_kfence_init_pool())
-		return addr;
 	/*
 	 * Protect the first 2 pages. The first page is mostly unnecessary, and
 	 * merely serves as an extended guard page. However, adding one
 	 * additional page in the beginning gives us an even number of pages,
 	 * which simplifies the mapping of address to metadata index.
 	 */
-	for (i = 0; i < 2; i++) {
-		if (unlikely(!kfence_protect(addr)))
-			return addr;
-
-		addr += PAGE_SIZE;
-	}
+	for (i = 0; i < 2; i++, addr += PAGE_SIZE)
+		kfence_protect(addr);
 
 	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE) {
 		struct kfence_metadata *meta = &kfence_metadata[i];
@@ -568,38 +557,33 @@  static unsigned long kfence_init_pool(void)
 		list_add_tail(&meta->list, &kfence_freelist);
 
 		/* Protect the right redzone. */
-		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
-			return addr;
+		kfence_protect(addr + PAGE_SIZE);
 
 		__folio_set_slab(slab_folio(slab));
 #ifdef CONFIG_MEMCG
 		slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS;
 #endif
 	}
-
-	return 0;
 }
 
 static bool __init kfence_init_pool_early(void)
 {
-	unsigned long addr;
-
 	if (!__kfence_pool)
 		return false;
 
-	addr = kfence_init_pool();
-
-	if (!addr) {
-		/*
-		 * The pool is live and will never be deallocated from this point on.
-		 * Ignore the pool object from the kmemleak phys object tree, as it would
-		 * otherwise overlap with allocations returned by kfence_alloc(), which
-		 * are registered with kmemleak through the slab post-alloc hook.
-		 */
-		kmemleak_ignore_phys(__pa(__kfence_pool));
-		return true;
-	}
+	if (!arch_kfence_init_pool())
+		goto free;
 
+	kfence_init_pool();
+	/*
+	 * The pool is live and will never be deallocated from this point on.
+	 * Ignore the pool object from the kmemleak phys object tree, as it would
+	 * otherwise overlap with allocations returned by kfence_alloc(), which
+	 * are registered with kmemleak through the slab post-alloc hook.
+	 */
+	kmemleak_ignore_phys(__pa(__kfence_pool));
+	return true;
+free:
 	/*
 	 * Only release unprotected pages, and do not try to go back and change
 	 * page attributes due to risk of failing to do so as well. If changing
@@ -607,27 +591,7 @@  static bool __init kfence_init_pool_early(void)
 	 * fails for the first page, and therefore expect addr==__kfence_pool in
 	 * most failure cases.
 	 */
-	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
-	__kfence_pool = NULL;
-	return false;
-}
-
-static bool kfence_init_pool_late(void)
-{
-	unsigned long addr, free_size;
-
-	addr = kfence_init_pool();
-
-	if (!addr)
-		return true;
-
-	/* Same as above. */
-	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
-#ifdef CONFIG_CONTIG_ALLOC
-	free_contig_range(page_to_pfn(virt_to_page((void *)addr)), free_size / PAGE_SIZE);
-#else
-	free_pages_exact((void *)addr, free_size);
-#endif
+	memblock_free_late(__pa(__kfence_pool), KFENCE_POOL_SIZE);
 	__kfence_pool = NULL;
 	return false;
 }
@@ -830,30 +794,50 @@  void __init kfence_init(void)
 	kfence_init_enable();
 }
 
-static int kfence_init_late(void)
-{
-	const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
 #ifdef CONFIG_CONTIG_ALLOC
-	struct page *pages;
+static inline void *kfence_pool_alloc(void)
+{
+	struct page *page = alloc_contig_pages(KFENCE_POOL_SIZE / PAGE_SIZE,
+					       GFP_KERNEL, first_online_node, NULL);
 
-	pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
-	if (!pages)
-		return -ENOMEM;
-	__kfence_pool = page_to_virt(pages);
+	return page ? page_to_virt(page) : NULL;
+}
+
+static inline void kfence_pool_free(const void *ptr)
+{
+	free_contig_range(page_to_pfn(virt_to_page(ptr)), KFENCE_POOL_SIZE / PAGE_SIZE);
+}
 #else
+static inline void *kfence_pool_alloc(void)
+{
 	BUILD_BUG_ON_MSG(get_order(KFENCE_POOL_SIZE) > MAX_ORDER,
 			 "CONFIG_KFENCE_NUM_OBJECTS is too large for buddy allocator");
 
-	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
+	return alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
+}
+
+static inline void kfence_pool_free(const void *ptr)
+{
+	free_pages_exact((void *)ptr, KFENCE_POOL_SIZE);
+}
+#endif
+
+static int kfence_init_late(void)
+{
+	if (__kfence_pool)
+		return 0;
+
+	__kfence_pool = kfence_pool_alloc();
 	if (!__kfence_pool)
 		return -ENOMEM;
-#endif
 
-	if (!kfence_init_pool_late()) {
-		pr_err("%s failed\n", __func__);
+	if (!arch_kfence_init_pool()) {
+		kfence_pool_free(__kfence_pool);
+		__kfence_pool = NULL;
 		return -EBUSY;
 	}
 
+	kfence_init_pool();
 	kfence_init_enable();
 	kfence_debugfs_init();
 
@@ -862,8 +846,8 @@  static int kfence_init_late(void)
 
 static int kfence_enable_late(void)
 {
-	if (!__kfence_pool)
-		return kfence_init_late();
+	if (kfence_init_late())
+		return -ENOMEM;
 
 	WRITE_ONCE(kfence_enabled, true);
 	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
@@ -1054,8 +1038,9 @@  bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 	if (!is_kfence_address((void *)addr))
 		return false;
 
-	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
-		return kfence_unprotect(addr); /* ... unprotect and proceed. */
+	/* If disabled at runtime ... unprotect and proceed. */
+	if (!READ_ONCE(kfence_enabled))
+		goto out;
 
 	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
 
@@ -1079,7 +1064,7 @@  bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 		}
 
 		if (!to_report)
-			goto out;
+			goto report;
 
 		raw_spin_lock_irqsave(&to_report->lock, flags);
 		to_report->unprotected_page = addr;
@@ -1093,7 +1078,7 @@  bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 	} else {
 		to_report = addr_to_metadata(addr);
 		if (!to_report)
-			goto out;
+			goto report;
 
 		raw_spin_lock_irqsave(&to_report->lock, flags);
 		error_type = KFENCE_ERROR_UAF;
@@ -1105,7 +1090,7 @@  bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 		 */
 	}
 
-out:
+report:
 	if (to_report) {
 		kfence_report_error(addr, is_write, regs, to_report, error_type);
 		raw_spin_unlock_irqrestore(&to_report->lock, flags);
@@ -1113,6 +1098,7 @@  bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 		/* This may be a UAF or OOB access, but we can't be sure. */
 		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
 	}
-
-	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
+out:
+	kfence_unprotect(addr);
+	return true;
 }