Message ID | 20230606145859.697944-14-joey.gouly@arm.com (mailing list archive)
---|---
State | New, archived
Series | Permission Indirection Extension
On Tue, 6 Jun 2023 at 17:00, Joey Gouly <joey.gouly@arm.com> wrote:
>
> Make these macros available to assembly code, so they can be re-used by the
> PIE initialisation code.
>
> This involves adding some extra macros, prepended with _ that are the raw
> values not `pgprot` values.
>
> A dummy value for PTE_MAYBE_NG is also provided, for use in assembly.
>
...
> +
> +#ifdef __ASSEMBLY__
> +#define PTE_MAYBE_NG	0
> +#endif
> +

I am struggling a bit to understand why this is ok. I get that the PIE
index macros mask off the nG bit even if it is set, but this exposes a
definition of PROT_DEFAULT and everything based on it to asm code that
deviates from the one observed by C code.

I am running into this because I am adding PTE_MAYBE_SHARED for LPA2
support (which repurposes the shareability bits as output address
bits), and I could just #define it to 0x0 as well for assembly, but I
am not sure this is the right approach.
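[Editor's note: a minimal, self-contained illustration of the masking Ard refers to: the PI index is derived from a fixed set of permission bits, so a spurious (or missing) nG bit cannot change it. The bit positions and the shape of pte_pi_index() below mirror the PIE series but are assumptions for this sketch, not copied kernel code.]

#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions, following the PIE series' PTE_PI_IDX_* values:
 * AP[1]=6, DBM=51, PXN=53, UXN=54.  nG is PTE bit 11. */
#define BIT(n)		(1ULL << (n))
#define PTE_NG		BIT(11)
#define PTE_PI_IDX_0	6
#define PTE_PI_IDX_1	51
#define PTE_PI_IDX_2	53
#define PTE_PI_IDX_3	54

/* The PI index is built from four fixed bits only; every other bit,
 * including nG, is masked away. */
#define pte_pi_index(pte) ( \
	(((pte) & BIT(PTE_PI_IDX_3)) >> (PTE_PI_IDX_3 - 3)) | \
	(((pte) & BIT(PTE_PI_IDX_2)) >> (PTE_PI_IDX_2 - 2)) | \
	(((pte) & BIT(PTE_PI_IDX_1)) >> (PTE_PI_IDX_1 - 1)) | \
	(((pte) & BIT(PTE_PI_IDX_0)) >> PTE_PI_IDX_0))

int main(void)
{
	uint64_t prot = BIT(PTE_PI_IDX_0) | BIT(PTE_PI_IDX_2);

	/* Prints the same index (5) with and without nG set. */
	printf("index without nG: %llu\n",
	       (unsigned long long)pte_pi_index(prot));
	printf("index with nG:    %llu\n",
	       (unsigned long long)pte_pi_index(prot | PTE_NG));
	return 0;
}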
Hi Ard,

On Tue, Aug 22, 2023 at 04:10:35PM +0200, Ard Biesheuvel wrote:
> On Tue, 6 Jun 2023 at 17:00, Joey Gouly <joey.gouly@arm.com> wrote:
> >
> > Make these macros available to assembly code, so they can be re-used by the
> > PIE initialisation code.
> >
> > This involves adding some extra macros, prepended with _ that are the raw
> > values not `pgprot` values.
> >
> > A dummy value for PTE_MAYBE_NG is also provided, for use in assembly.
> >
> ...
> > +
> > +#ifdef __ASSEMBLY__
> > +#define PTE_MAYBE_NG	0
> > +#endif
> > +
>
> I am struggling a bit to understand why this is ok. I get that the PIE
> index macros mask off the nG bit even if it is set, but this exposes a
> definition of PROT_DEFAULT and everything based on it to asm code that
> deviates from the one observed by C code.

Yes, it's a bit of a hack to share as much as possible, and it's "ok" because,
as you said, PIE masks that bit out.

>
> I am running into this because I am adding PTE_MAYBE_SHARED for LPA2
> support (which repurposes the shareability bits as output address
> bits), and I could just #define it to 0x0 as well for assembly, but I
> am not sure this is the right approach.

Happy to do this differently, if there is a better approach.

I reverted this patch (fa4cdccaa582), and applied something like (just compile
tested):

diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index c7d77333ce1e..8fceeb111ad1 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -20,6 +20,17 @@
 #define PTE_DEVMAP		(_AT(pteval_t, 1) << 57)
 #define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
+#define PIE_PAGE_SHARED		(PTE_USER | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PIE_PAGE_SHARED_EXEC	(PTE_USER | PTE_PXN | PTE_WRITE)
+#define PIE_PAGE_READONLY	(PTE_USER | PTE_PXN | PTE_UXN)
+#define PIE_PAGE_READONLY_EXEC	(PTE_USER | PTE_PXN)
+#define PIE_PAGE_EXECONLY	(PTE_PXN)
+
+#define PIE_PAGE_KERNEL		(PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PIE_PAGE_KERNEL_RO	(PTE_PXN | PTE_UXN)
+#define PIE_PAGE_KERNEL_ROX	(PTE_UXN)
+#define PIE_PAGE_KERNEL_EXEC	(PTE_UXN | PTE_WRITE)
+
 /*
  * This bit indicates that the entry is present i.e. pmd_page()
  * still points to a valid huge page in memory even if the pmd
@@ -83,11 +94,11 @@ extern bool arm64_use_ng_mappings;
 
 #define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 /* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
-#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
-#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PIE_PAGE_SHARED)
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PIE_PAGE_SHARED_EXEC)
+#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PIE_PAGE_READONLY)
+#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PIE_PAGE_READONLY_EXEC)
+#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PIE_PAGE_EXECONLY)
 
 #endif /* __ASSEMBLY__ */
 
@@ -124,21 +135,21 @@ extern bool arm64_use_ng_mappings;
 /* f: PAGE_SHARED         PTE_UXN | PTE_PXN | PTE_WRITE | PTE_USER */
 
 #define PIE_E0	( \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY),      PIE_X_O) | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX)  | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC),   PIE_RWX) | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY),      PIE_R)   | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED),        PIE_RW))
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_EXECONLY),      PIE_X_O) | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_READONLY_EXEC), PIE_RX)  | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_SHARED_EXEC),   PIE_RWX) | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_READONLY),      PIE_R)   | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_SHARED),        PIE_RW))
 
 #define PIE_E1	( \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY),      PIE_NONE_O) | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_R)      | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC),   PIE_RW)     | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY),      PIE_R)      | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED),        PIE_RW)     | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_ROX),    PIE_RX)     | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_EXEC),   PIE_RWX)    | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_RO),     PIE_R)      | \
-	PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL),        PIE_RW))
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_EXECONLY),      PIE_NONE_O) | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_READONLY_EXEC), PIE_R)      | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_SHARED_EXEC),   PIE_RW)     | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_READONLY),      PIE_R)      | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_SHARED),        PIE_RW)     | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_KERNEL_ROX),    PIE_RX)     | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_KERNEL_EXEC),   PIE_RWX)    | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_KERNEL_RO),     PIE_R)      | \
+	PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_KERNEL),        PIE_RW))
 
 #endif /* __ASM_PGTABLE_PROT_H */

The PAGE_KERNEL bits are harder to share, because they are based on
PROT_NORMAL. But maybe this bit of duplication is better than the #define 0x0
hack I had. Could maybe add a BUILD_BUG_ON somewhere to check that
PIE_PAGE_KERNEL* and PAGE_KERNEL have matching bits?

Thanks,
Joey
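[Editor's note: a sketch of the BUILD_BUG_ON check Joey floats here, assuming the PIE_PAGE_* definitions from the diff above. The PIE_PERM_MASK macro and the function name are invented for the example, and it relies on BUILD_BUG_ON only needing the condition to constant-fold, since pgprot_val(PAGE_KERNEL) contains the runtime PTE_MAYBE_* ternary.]

#include <linux/build_bug.h>
#include <asm/pgtable.h>

/* Hypothetical mask of the permission bits PIE actually indexes on. */
#define PIE_PERM_MASK	(PTE_USER | PTE_PXN | PTE_UXN | PTE_WRITE)

static inline void pie_prot_sanity_check(void)
{
	/* The PTE_MAYBE_* ternaries fold away under the mask (neither arm
	 * intersects PIE_PERM_MASK), so the optimizer can still evaluate
	 * these conditions at build time. */
	BUILD_BUG_ON((pgprot_val(PAGE_KERNEL) & PIE_PERM_MASK) !=
		     PIE_PAGE_KERNEL);
	BUILD_BUG_ON((pgprot_val(PAGE_KERNEL_RO) & PIE_PERM_MASK) !=
		     PIE_PAGE_KERNEL_RO);
	BUILD_BUG_ON((pgprot_val(PAGE_KERNEL_ROX) & PIE_PERM_MASK) !=
		     PIE_PAGE_KERNEL_ROX);
	BUILD_BUG_ON((pgprot_val(PAGE_KERNEL_EXEC) & PIE_PERM_MASK) !=
		     PIE_PAGE_KERNEL_EXEC);
}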
On Thu, 24 Aug 2023 at 12:16, Joey Gouly <joey.gouly@arm.com> wrote:
>
> Hi Ard,
>
[...]
>
> Happy to do this differently, if there is a better approach.
>
> I reverted this patch (fa4cdccaa582), and applied something like (just compile
> tested):
>
[...]
>
> The PAGE_KERNEL bits are harder to share, because they are based on
> PROT_NORMAL. But maybe this bit of duplication is better than the #define 0x0
> hack I had. Could maybe add a BUILD_BUG_ON somewhere to check that
> PIE_PAGE_KERNEL* and PAGE_KERNEL have matching bits?
>

That seems rather invasive. I was about to send out a patch that does
the below instead. Would that work for you?

diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index eed814b00a389..282e0ba658f03 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -57,10 +57,6 @@
 #define _PAGE_READONLY_EXEC	(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
 #define _PAGE_EXECONLY		(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
-#ifdef __ASSEMBLY__
-#define PTE_MAYBE_NG	0
-#endif
-
 #ifndef __ASSEMBLY__
 
 #include <asm/cpufeature.h>
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 22201066749e9..069265a8c4384 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -457,11 +457,24 @@ alternative_else_nop_endif
 	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
 	cbz	x1, .Lskip_indirection
 
+	/*
+	 * The PROT_* macros describing the various memory types may resolve to
+	 * C expressions if they include the PTE_MAYBE_* macros, and so they
+	 * can only be used from C code. The PIE_E* constants below are also
+	 * defined in terms of those macros, but will mask out those
+	 * PTE_MAYBE_* constants, whether they are set or not. So #define them
+	 * as 0x0 here so we can evaluate the PIE_E* constants in asm context.
+	 */
+
+#define PTE_MAYBE_NG	0
+
 	mov_q	x0, PIE_E0
 	msr	REG_PIRE0_EL1, x0
 	mov_q	x0, PIE_E1
 	msr	REG_PIR_EL1, x0
 
+#undef PTE_MAYBE_NG
+
 	mov	x0, TCR2_EL1x_PIE
 	msr	REG_TCR2_EL1, x0
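[Editor's note: for reference, these are the C-side definitions the comment above alludes to; they are runtime ternaries the assembler cannot evaluate, quoted verbatim from the patch at the bottom of this page.]

/* From arch/arm64/include/asm/pgtable-prot.h, C-only context: */
#define PTE_MAYBE_NG		(arm64_use_ng_mappings ? PTE_NG : 0)
#define PMD_MAYBE_NG		(arm64_use_ng_mappings ? PMD_SECT_NG : 0)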
On Thu, Aug 24, 2023 at 12:18:51PM +0200, Ard Biesheuvel wrote:
> On Thu, 24 Aug 2023 at 12:16, Joey Gouly <joey.gouly@arm.com> wrote:
> >
[...]
> > The PAGE_KERNEL bits are harder to share, because they are based on
> > PROT_NORMAL. But maybe this bit of duplication is better than the #define 0x0
> > hack I had. Could maybe add a BUILD_BUG_ON somewhere to check that
> > PIE_PAGE_KERNEL* and PAGE_KERNEL have matching bits?
> >
>
> That seems rather invasive. I was about to send out a patch that does
> the below instead. Would that work for you?
>
[...]

Seems like a way to localise the 'hack', I'm fine with it. We can always take a
similar approach to what I suggested, if it becomes a problem later.

Thanks,
Joey
On Thu, 24 Aug 2023 at 15:10, Joey Gouly <joey.gouly@arm.com> wrote:
>
[...]
>
> Seems like a way to localise the 'hack', I'm fine with it. We can always take a
> similar approach to what I suggested, if it becomes a problem later.
>

OK
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index d26d0b427c0a..a45af0a22b25 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -27,6 +27,40 @@
  */
 #define PMD_PRESENT_INVALID	(_AT(pteval_t, 1) << 59) /* only when !PMD_SECT_VALID */
 
+#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
+#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
+
+#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_NORMAL_TAGGED	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))
+
+#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PTE_WRITE | PMD_ATTRINDX(MT_NORMAL))
+#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+
+#define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+
+#define _PAGE_KERNEL		(PROT_NORMAL)
+#define _PAGE_KERNEL_RO		((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
+#define _PAGE_KERNEL_ROX	((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
+#define _PAGE_KERNEL_EXEC	(PROT_NORMAL & ~PTE_PXN)
+#define _PAGE_KERNEL_EXEC_CONT	((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
+
+#define _PAGE_SHARED		(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define _PAGE_SHARED_EXEC	(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
+#define _PAGE_READONLY		(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+#define _PAGE_READONLY_EXEC	(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define _PAGE_EXECONLY		(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
+
+#ifdef __ASSEMBLY__
+#define PTE_MAYBE_NG	0
+#endif
+
 #ifndef __ASSEMBLY__
 
 #include <asm/cpufeature.h>
@@ -34,9 +68,6 @@
 
 extern bool arm64_use_ng_mappings;
 
-#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-
 #define PTE_MAYBE_NG		(arm64_use_ng_mappings ? PTE_NG : 0)
 #define PMD_MAYBE_NG		(arm64_use_ng_mappings ? PMD_SECT_NG : 0)
 
@@ -50,26 +81,11 @@ extern bool arm64_use_ng_mappings;
 #define PTE_MAYBE_GP		0
 #endif
 
-#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
-#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
-
-#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
-#define PROT_NORMAL_TAGGED	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))
-
-#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PTE_WRITE | PMD_ATTRINDX(MT_NORMAL))
-#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
-
-#define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-
-#define PAGE_KERNEL		__pgprot(PROT_NORMAL)
-#define PAGE_KERNEL_RO		__pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
-#define PAGE_KERNEL_ROX	__pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
-#define PAGE_KERNEL_EXEC	__pgprot(PROT_NORMAL & ~PTE_PXN)
-#define PAGE_KERNEL_EXEC_CONT	__pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
+#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_RO		__pgprot(_PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_KERNEL_ROX)
+#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_KERNEL_EXEC_CONT)
 
 #define PAGE_S2_MEMATTR(attr, has_fwb)					\
 	({								\
@@ -83,11 +99,11 @@ extern bool arm64_use_ng_mappings;
 
 #define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 /* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
-#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
-#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define PAGE_SHARED		__pgprot(_PAGE_SHARED)
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_SHARED_EXEC)
+#define PAGE_READONLY		__pgprot(_PAGE_READONLY)
+#define PAGE_READONLY_EXEC	__pgprot(_PAGE_READONLY_EXEC)
+#define PAGE_EXECONLY		__pgprot(_PAGE_EXECONLY)
 
 #endif /* __ASSEMBLY__ */
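[Editor's note: to make concrete the problem that this patch's #ifdef __ASSEMBLY__ dummy solves, the comment below writes out what PROT_DEFAULT would otherwise look like to a .S file; the expansion is derived from the definitions in the diff above and is purely illustrative.]

/* Without the dummy, assembly code including pgtable-prot.h would see
 * PROT_DEFAULT expand to:
 *
 *	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED |
 *	 (arm64_use_ng_mappings ? PTE_NG : 0))
 *
 * which GNU as cannot evaluate: arm64_use_ng_mappings is a C variable
 * that only exists at run time.  Defining PTE_MAYBE_NG as 0 under
 * __ASSEMBLY__ keeps the whole expression a build-time constant, and
 * PIE masks the nG bit out of its index in any case. */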