[RFC,1/2] arm64: Support execute-only permissions with Enhanced PAN

Message ID 20201113152023.102855-2-vladimir.murzin@arm.com (mailing list archive)
State New, archived
Series arm64: Support Enhanced PAN

Commit Message

Vladimir Murzin Nov. 13, 2020, 3:20 p.m. UTC
Enhanced Privileged Access Never (EPAN) allows Privileged Access Never
to be used with Execute-only mappings.

The absence of such support was the reason for 24cecc377463 ("arm64:
Revert support for execute-only user mappings"). With EPAN, execute-only
user mappings can now be revisited and re-enabled.

Cc: Kees Cook <keescook@chromium.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
---
 arch/arm64/Kconfig                    | 17 +++++++++++++++++
 arch/arm64/include/asm/cpucaps.h      |  3 ++-
 arch/arm64/include/asm/pgtable-prot.h |  5 +++--
 arch/arm64/include/asm/pgtable.h      | 23 +++++++++++++++++++++--
 arch/arm64/include/asm/sysreg.h       |  1 +
 arch/arm64/kernel/cpufeature.c        | 20 ++++++++++++++++++++
 arch/arm64/mm/fault.c                 |  3 +++
 7 files changed, 67 insertions(+), 5 deletions(-)

Comments

Catalin Marinas Nov. 17, 2020, 4:47 p.m. UTC | #1
On Fri, Nov 13, 2020 at 03:20:22PM +0000, Vladimir Murzin wrote:
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 4ff12a7..d1f68d2 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -113,8 +113,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
>  #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
>  
>  #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
> -#define pte_valid_not_user(pte) \
> -	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
> +#define pte_valid_not_user(pte)										\
> +({													\
> +	int __val;											\
> +	if (cpus_have_const_cap(ARM64_HAS_EPAN))							\
> +		__val = (pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN);	\
> +	else												\
> +		__val = (pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID;				\
> +	__val;												\

Is it worth having the cap check here? I'd go with the PTE_VALID|PTE_UXN
check only.

> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index dcc165b..2033e0b 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -1602,6 +1602,13 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
>  }
>  #endif /* CONFIG_ARM64_PAN */
>  
> +#ifdef CONFIG_ARM64_EPAN
> +static void cpu_enable_epan(const struct arm64_cpu_capabilities *__unused)
> +{
> +	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_EPAN);
> +}
> +#endif /* CONFIG_ARM64_EPAN */

I checked the spec (2020 arch updates) and the EPAN bit is permitted to
be cached in the TLB. I think we get away with this because this
function is called before cnp is enabled. Maybe we should make it
explicit and move the CnP entry last with a comment.
Vladimir Murzin Nov. 18, 2020, 12:37 p.m. UTC | #2
On 11/17/20 4:48 PM, Catalin Marinas wrote:
> On Fri, Nov 13, 2020 at 03:20:22PM +0000, Vladimir Murzin wrote:
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index 4ff12a7..d1f68d2 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -113,8 +113,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
>>  #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
>>  
>>  #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
>> -#define pte_valid_not_user(pte) \
>> -	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
>> +#define pte_valid_not_user(pte)										\
>> +({													\
>> +	int __val;											\
>> +	if (cpus_have_const_cap(ARM64_HAS_EPAN))							\
>> +		__val = (pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN);	\
>> +	else												\
>> +		__val = (pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID;				\
>> +	__val;												\
> 
> Is it worth having the cap check here? I'd go with the PTE_VALID|PTE_UXN
> check only.
> 

I do not know, to be honest. I do not have the full picture in mind, nor what the side effects of the
change could be (that's why this is an RFC). In 24cecc377463 the PTE_VALID|PTE_UXN check was moved
back to PTE_VALID, so I decided better safe than sorry...

>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>> index dcc165b..2033e0b 100644
>> --- a/arch/arm64/kernel/cpufeature.c
>> +++ b/arch/arm64/kernel/cpufeature.c
>> @@ -1602,6 +1602,13 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
>>  }
>>  #endif /* CONFIG_ARM64_PAN */
>>  
>> +#ifdef CONFIG_ARM64_EPAN
>> +static void cpu_enable_epan(const struct arm64_cpu_capabilities *__unused)
>> +{
>> +	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_EPAN);
>> +}
>> +#endif /* CONFIG_ARM64_EPAN */
> 
> I checked the spec (2020 arch updates) and the EPAN bit is permitted to
> be cached in the TLB. I think we get away with this because this
> function is called before cnp is enabled. Maybe we should make it
> explicit and move the CnP entry last with a comment.
> 

Hmm, so we rely on CnP's enable method to (indirectly) involve local_flush_tlb_all()? It doesn't seem
robust since CONFIG_ARM64_CNP could be unset. I can add local_flush_tlb_all() into cpu_enable_epan()
or we can have something like

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index bb2016c..0f0a27b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2416,6 +2416,8 @@ static int cpu_enable_non_boot_scope_capabilities(void *__unused)
                if (cap->cpu_enable)
                        cap->cpu_enable(cap);
        }
+
+       local_flush_tlb_all();
        return 0;
 }
 
@@ -2467,6 +2469,8 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
        if (!boot_scope)
                stop_machine(cpu_enable_non_boot_scope_capabilities,
                             NULL, cpu_online_mask);
+       else
+               local_flush_tlb_all();
 }
 
 /*

What would be your preference?

Cheers
Vladimir
Catalin Marinas Nov. 18, 2020, 4:04 p.m. UTC | #3
On Wed, Nov 18, 2020 at 12:37:40PM +0000, Vladimir Murzin wrote:
> On 11/17/20 4:48 PM, Catalin Marinas wrote:
> > On Fri, Nov 13, 2020 at 03:20:22PM +0000, Vladimir Murzin wrote:
> >> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> >> index 4ff12a7..d1f68d2 100644
> >> --- a/arch/arm64/include/asm/pgtable.h
> >> +++ b/arch/arm64/include/asm/pgtable.h
> >> @@ -113,8 +113,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> >>  #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
> >>  
> >>  #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
> >> -#define pte_valid_not_user(pte) \
> >> -	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
> >> +#define pte_valid_not_user(pte)										\
> >> +({													\
> >> +	int __val;											\
> >> +	if (cpus_have_const_cap(ARM64_HAS_EPAN))							\
> >> +		__val = (pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN);	\
> >> +	else												\
> >> +		__val = (pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID;				\
> >> +	__val;												\
> > 
> > Is it worth having the cap check here? I'd go with the PTE_VALID|PTE_UXN
> > check only.
> 
> I do not know, to be honest. I do not have the full picture in mind,
> nor what the side effects of the change could be (that's why this is
> an RFC). In 24cecc377463 the PTE_VALID|PTE_UXN check was moved back
> to PTE_VALID, so I decided better safe than sorry...

A user has access to a page if it has PTE_VALID && (PTE_USER || !PTE_UXN)
(wrong use of the logic operators, but you get the idea). So negating
the user part in the above expression, pte_valid_not_user() means
PTE_VALID && !PTE_USER && PTE_UXN.

Prior to these patches (or the old exec-only support), we can't have
PTE_UXN and PTE_USER both cleared; that combination is introduced by
PAGE_EXECONLY. IOW, without
EPAN, !PTE_USER implies PTE_UXN, so we can use the same check as for the
EPAN case.
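
Concretely (an untested sketch, not a tested patch), the macro would
collapse to a single unconditional check:

#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))

since without EPAN any valid pte with PTE_USER clear is a kernel mapping
with PTE_UXN set, so both variants return the same value.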

> >> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> >> index dcc165b..2033e0b 100644
> >> --- a/arch/arm64/kernel/cpufeature.c
> >> +++ b/arch/arm64/kernel/cpufeature.c
> >> @@ -1602,6 +1602,13 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
> >>  }
> >>  #endif /* CONFIG_ARM64_PAN */
> >>  
> >> +#ifdef CONFIG_ARM64_EPAN
> >> +static void cpu_enable_epan(const struct arm64_cpu_capabilities *__unused)
> >> +{
> >> +	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_EPAN);
> >> +}
> >> +#endif /* CONFIG_ARM64_EPAN */
> > 
> > I checked the spec (2020 arch updates) and the EPAN bit is permitted to
> > be cached in the TLB. I think we get away with this because this
> > function is called before cnp is enabled. Maybe we should make it
> > explicit and move the CnP entry last with a comment.
> 
> Hmm, so we rely on CnP's enable method to (indirectly) involve
> local_flush_tlb_all()? It doesn't seem robust since CONFIG_ARM64_CNP
> could be unset. I can add local_flush_tlb_all() into cpu_enable_epan()
> or we can have something like

A local_flush_tlb_all() in cpu_enable_epan() would be fine before user
space starts. However, a late CPU bring-up may cause a temporary
disabling of EPAN in the sibling core if CnP is enabled first.
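
For the boot path, that would be something like (untested sketch):

#ifdef CONFIG_ARM64_EPAN
static void cpu_enable_epan(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_EPAN);
	/* SCTLR_EL1.EPAN is permitted to be cached in the TLB */
	local_flush_tlb_all();
}
#endif /* CONFIG_ARM64_EPAN */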

> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index bb2016c..0f0a27b 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -2416,6 +2416,8 @@ static int cpu_enable_non_boot_scope_capabilities(void *__unused)
>                 if (cap->cpu_enable)
>                         cap->cpu_enable(cap);
>         }
> +
> +       local_flush_tlb_all();
>         return 0;
>  }
>  
> @@ -2467,6 +2469,8 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
>         if (!boot_scope)
>                 stop_machine(cpu_enable_non_boot_scope_capabilities,
>                              NULL, cpu_online_mask);
> +       else
> +               local_flush_tlb_all();
>  }

Any local TLBI would clear the mismatch but it doesn't solve the
temporary difference between sibling cores. I think the only guarantee
here is if CnP is turned on after the feature in question.
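
i.e. something like this as the final entry of arm64_features[] (a
sketch; the entry fields are assumed to match the current cpufeature.c):

	/*
	 * Keep CnP last: its cpu_enable() implies a local TLB
	 * invalidation, which also publishes any TLB-cacheable
	 * SCTLR_EL1 bits (e.g. EPAN) set by the entries above.
	 */
	{
		.desc = "Common not Private translations",
		.capability = ARM64_HAS_CNP,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_useable_cnp,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64MMFR2_CNP_SHIFT,
		.min_field_value = 1,
		.cpu_enable = cpu_enable_cnp,
	},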
Vladimir Murzin Nov. 19, 2020, 1:39 p.m. UTC | #4
On 11/18/20 4:04 PM, Catalin Marinas wrote:
> On Wed, Nov 18, 2020 at 12:37:40PM +0000, Vladimir Murzin wrote:
>> On 11/17/20 4:48 PM, Catalin Marinas wrote:
>>> On Fri, Nov 13, 2020 at 03:20:22PM +0000, Vladimir Murzin wrote:
>>>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>>>> index 4ff12a7..d1f68d2 100644
>>>> --- a/arch/arm64/include/asm/pgtable.h
>>>> +++ b/arch/arm64/include/asm/pgtable.h
>>>> @@ -113,8 +113,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
>>>>  #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
>>>>  
>>>>  #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
>>>> -#define pte_valid_not_user(pte) \
>>>> -	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
>>>> +#define pte_valid_not_user(pte)										\
>>>> +({													\
>>>> +	int __val;											\
>>>> +	if (cpus_have_const_cap(ARM64_HAS_EPAN))							\
>>>> +		__val = (pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN);	\
>>>> +	else												\
>>>> +		__val = (pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID;				\
>>>> +	__val;												\
>>>
>>> Is it worth having the cap check here? I'd go with the PTE_VALID|PTE_UXN
>>> check only.
>>
>> I do not know, to be honest. I do not have the full picture in mind,
>> nor what the side effects of the change could be (that's why this is
>> an RFC). In 24cecc377463 the PTE_VALID|PTE_UXN check was moved back
>> to PTE_VALID, so I decided better safe than sorry...
> 
> A user has access to a page if it has PTE_VALID && (PTE_USER || !PTE_UXN)
> (wrong use of the logic operators, but you get the idea). So negating
> the user part in the above expression, pte_valid_not_user() means
> PTE_VALID && !PTE_USER && PTE_UXN.
> 
> Prior to these patches (or the old exec-only support), we can't have
> PTE_UXN and PTE_USER both cleared; that combination is introduced by
> PAGE_EXECONLY. IOW, without
> EPAN, !PTE_USER implies PTE_UXN, so we can use the same check as for the
> EPAN case.
> 
>>>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>>>> index dcc165b..2033e0b 100644
>>>> --- a/arch/arm64/kernel/cpufeature.c
>>>> +++ b/arch/arm64/kernel/cpufeature.c
>>>> @@ -1602,6 +1602,13 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
>>>>  }
>>>>  #endif /* CONFIG_ARM64_PAN */
>>>>  
>>>> +#ifdef CONFIG_ARM64_EPAN
>>>> +static void cpu_enable_epan(const struct arm64_cpu_capabilities *__unused)
>>>> +{
>>>> +	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_EPAN);
>>>> +}
>>>> +#endif /* CONFIG_ARM64_EPAN */
>>>
>>> I checked the spec (2020 arch updates) and the EPAN bit is permitted to
>>> be cached in the TLB. I think we get away with this because this
>>> function is called before cnp is enabled. Maybe we should make it
>>> explicit and move the CnP entry last with a comment.
>>
>> Hmm, so we rely on CnP's enable method to (indirectly) involve
>> local_flush_tlb_all()? It doesn't seem robust since CONFIG_ARM64_CNP
>> could be unset. I can add local_flush_tlb_all() into cpu_enable_epan()
>> or we can have something like
> 
> A local_flush_tlb_all() in cpu_enable_epan() would be fine before user
> space starts. However, a late CPU bring-up may cause a temporary
> disabling of EPAN in the sibling core if CnP is enabled first.
> 
>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>> index bb2016c..0f0a27b 100644
>> --- a/arch/arm64/kernel/cpufeature.c
>> +++ b/arch/arm64/kernel/cpufeature.c
>> @@ -2416,6 +2416,8 @@ static int cpu_enable_non_boot_scope_capabilities(void *__unused)
>>                 if (cap->cpu_enable)
>>                         cap->cpu_enable(cap);
>>         }
>> +
>> +       local_flush_tlb_all();
>>         return 0;
>>  }
>>  
>> @@ -2467,6 +2469,8 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
>>         if (!boot_scope)
>>                 stop_machine(cpu_enable_non_boot_scope_capabilities,
>>                              NULL, cpu_online_mask);
>> +       else
>> +               local_flush_tlb_all();
>>  }
> 
> Any local TLBI would clear the mismatch but it doesn't solve the
> temporary difference between sibling cores. I think the only guarantee
> here is if CnP is turned on after the feature in question.
> 

Thanks for the explanation, I'll send an updated version shortly!

Cheers
Vladimir

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1515f6f..6639244 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1056,6 +1056,9 @@  config ARCH_WANT_HUGE_PMD_SHARE
 config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
+config ARCH_HAS_FILTER_PGPROT
+	def_bool y
+
 config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 	def_bool y if PGTABLE_LEVELS > 2
 
@@ -1688,6 +1691,20 @@  config ARM64_MTE
 
 endmenu
 
+menu "ARMv8.7 architectural features"
+
+config ARM64_EPAN
+	bool "Enable support for Enhanced Privileged Access Never (EPAN)"
+	default y
+	depends on ARM64_PAN
+	help
+	 Enhanced Privileged Access Never (EPAN) allows Privileged
+	 Access Never to be used with Execute-only mappings.
+
+	 The feature is detected at runtime, and will remain disabled
+	 if the CPU does not implement the feature.
+endmenu
+
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index e7d9899..046202f 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -66,7 +66,8 @@ 
 #define ARM64_HAS_TLB_RANGE			56
 #define ARM64_MTE				57
 #define ARM64_WORKAROUND_1508412		58
+#define ARM64_HAS_EPAN				59
 
-#define ARM64_NCAPS				59
+#define ARM64_NCAPS				60
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 046be78..f91c2aa 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -88,12 +88,13 @@  extern bool arm64_use_ng_mappings;
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_READONLY_EXEC
+#define __P100  PAGE_EXECONLY
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC
@@ -102,7 +103,7 @@  extern bool arm64_use_ng_mappings;
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_READONLY_EXEC
+#define __S100  PAGE_EXECONLY
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 4ff12a7..d1f68d2 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -113,8 +113,15 @@  extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_not_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+#define pte_valid_not_user(pte)										\
+({													\
+	int __val;											\
+	if (cpus_have_const_cap(ARM64_HAS_EPAN))							\
+		__val = (pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN);	\
+	else												\
+		__val = (pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID;				\
+	__val;												\
+})
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 #define pte_valid_user(pte) \
@@ -974,6 +981,18 @@  static inline bool arch_faults_on_old_pte(void)
 }
 #define arch_faults_on_old_pte arch_faults_on_old_pte
 
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+	if (cpus_have_const_cap(ARM64_HAS_EPAN))
+		return prot;
+
+	if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
+		return prot;
+
+	return PAGE_READONLY_EXEC;
+}
+
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 174817b..19147b6 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -579,6 +579,7 @@ 
 #endif
 
 /* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_EPAN		(BIT(57))
 #define SCTLR_EL1_ATA0		(BIT(42))
 
 #define SCTLR_EL1_TCF0_SHIFT	38
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index dcc165b..2033e0b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1602,6 +1602,13 @@  static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 }
 #endif /* CONFIG_ARM64_PAN */
 
+#ifdef CONFIG_ARM64_EPAN
+static void cpu_enable_epan(const struct arm64_cpu_capabilities *__unused)
+{
+	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_EPAN);
+}
+#endif /* CONFIG_ARM64_EPAN */
+
 #ifdef CONFIG_ARM64_RAS_EXTN
 static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 {
@@ -1750,6 +1757,19 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_pan,
 	},
 #endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_ARM64_EPAN
+	{
+		.desc = "Enhanced Privileged Access Never",
+		.capability = ARM64_HAS_EPAN,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64MMFR1_EL1,
+		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 3,
+		.cpu_enable = cpu_enable_epan,
+	},
+#endif /* CONFIG_ARM64_EPAN */
 #ifdef CONFIG_ARM64_LSE_ATOMICS
 	{
 		.desc = "LSE atomic instructions",
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1ee9400..b93222e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -467,6 +467,9 @@  static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
+	if (cpus_have_const_cap(ARM64_HAS_EPAN))
+		vm_flags &= ~VM_EXEC;
+
 	if (user_mode(regs))
 		mm_flags |= FAULT_FLAG_USER;
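
To see the user-visible effect of the series, here is a minimal,
hypothetical userspace check (not part of the patch): on EPAN-capable
hardware with these patches applied, a data read from a PROT_EXEC-only
mapping should raise SIGSEGV, whereas older kernels map PROT_EXEC to
PAGE_READONLY_EXEC (see __P100 above) and the read succeeds.

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>

static sigjmp_buf env;

static void segv_handler(int sig)
{
	siglongjmp(env, 1);
}

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	signal(SIGSEGV, segv_handler);

	if (sigsetjmp(env, 1)) {
		puts("read faulted: execute-only is enforced");
		return 0;
	}

	/* A data read from the exec-only page; expected to fault. */
	printf("read succeeded (byte=%d): exec-only not enforced\n", *p);
	return 0;
}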