Message ID | 20241022092734.59984-3-yangyicong@huawei.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Support Armv8.9/v9.4 FEAT_HAFT | expand |
On Tue, Oct 22, 2024 at 05:27:31PM +0800, Yicong Yang wrote: > From: Yicong Yang <yangyicong@hisilicon.com> > > TCR2_EL1 introduced some additional controls besides TCR_EL1. Currently > only PIE is supported and enabled by writing TCR2_EL1 directly if PIE > detected. > > Introduce a named register 'tcr2' just like 'tcr' we've already had. > It'll be initialized to 0 and updated if certain feature detected and > needs to be enabled. Touch the TCR2_EL1 registers at last with the > updated 'tcr2' value if FEAT_TCR2 supported by checking > ID_AA64MMFR3_EL1.TCRX. Then we can extend the support of other features > controlled by TCR2_EL1. > > Signed-off-by: Yicong Yang <yangyicong@hisilicon.com> > --- > arch/arm64/mm/proc.S | 12 ++++++++++-- > 1 file changed, 10 insertions(+), 2 deletions(-) > > diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S > index 8abdc7fed321..ccbae4525891 100644 > --- a/arch/arm64/mm/proc.S > +++ b/arch/arm64/mm/proc.S > @@ -465,10 +465,12 @@ SYM_FUNC_START(__cpu_setup) > */ > mair .req x17 > tcr .req x16 > + tcr2 .req x15 > mov_q mair, MAIR_EL1_SET > mov_q tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \ > TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ > TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS > + mov tcr2, xzr > > tcr_clear_errata_bits tcr, x9, x5 > > @@ -525,11 +527,16 @@ alternative_else_nop_endif > #undef PTE_MAYBE_NG > #undef PTE_MAYBE_SHARED > > - mov x0, TCR2_EL1x_PIE > - msr REG_TCR2_EL1, x0 > + orr tcr2, tcr2, TCR2_EL1x_PIE > > .Lskip_indirection: > > + mrs_s x1, SYS_ID_AA64MMFR3_EL1 > + ubfx x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4 > + cbz x1, 1f > + msr REG_TCR2_EL1, tcr2 > +1: It makes sense to mimic the TCR_EL1 configuration here with a single MSR at the end. I was wondering whether to simply check if the tcr2 reg (x15) is non-zero under the assumption that bits in it would only be set if the features are present (and those features imply TCRX). 
However, we can set RES0 bits here even if the feature is not supported in hardware (more on this in the next patch). So I think this patch is OK as is. Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
On 2024/10/23 0:54, Catalin Marinas wrote: > On Tue, Oct 22, 2024 at 05:27:31PM +0800, Yicong Yang wrote: >> From: Yicong Yang <yangyicong@hisilicon.com> >> >> TCR2_EL1 introduced some additional controls besides TCR_EL1. Currently >> only PIE is supported and enabled by writing TCR2_EL1 directly if PIE >> detected. >> >> Introduce a named register 'tcr2' just like 'tcr' we've already had. >> It'll be initialized to 0 and updated if certain feature detected and >> needs to be enabled. Touch the TCR2_EL1 registers at last with the >> updated 'tcr2' value if FEAT_TCR2 supported by checking >> ID_AA64MMFR3_EL1.TCRX. Then we can extend the support of other features >> controlled by TCR2_EL1. >> >> Signed-off-by: Yicong Yang <yangyicong@hisilicon.com> >> --- >> arch/arm64/mm/proc.S | 12 ++++++++++-- >> 1 file changed, 10 insertions(+), 2 deletions(-) >> >> diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S >> index 8abdc7fed321..ccbae4525891 100644 >> --- a/arch/arm64/mm/proc.S >> +++ b/arch/arm64/mm/proc.S >> @@ -465,10 +465,12 @@ SYM_FUNC_START(__cpu_setup) >> */ >> mair .req x17 >> tcr .req x16 >> + tcr2 .req x15 >> mov_q mair, MAIR_EL1_SET >> mov_q tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \ >> TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ >> TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS >> + mov tcr2, xzr >> >> tcr_clear_errata_bits tcr, x9, x5 >> >> @@ -525,11 +527,16 @@ alternative_else_nop_endif >> #undef PTE_MAYBE_NG >> #undef PTE_MAYBE_SHARED >> >> - mov x0, TCR2_EL1x_PIE >> - msr REG_TCR2_EL1, x0 >> + orr tcr2, tcr2, TCR2_EL1x_PIE >> >> .Lskip_indirection: >> >> + mrs_s x1, SYS_ID_AA64MMFR3_EL1 >> + ubfx x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4 >> + cbz x1, 1f >> + msr REG_TCR2_EL1, tcr2 >> +1: > > It makes sense to mimic the TCR_EL1 configuration here with a single MSR > at the end. 
> > I was wondering whether to simply check if the tcr2 reg (x15) is > non-zero under the assumption that bits in it would only be set if the > features are present (and those features imply TCRX). However, we can > set RES0 bits in here even if the feature is not supported in hardware > (more on the next patch). > > So I think this patch is ok as is. > > Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> > . Thanks.
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 8abdc7fed321..ccbae4525891 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -465,10 +465,12 @@ SYM_FUNC_START(__cpu_setup) */ mair .req x17 tcr .req x16 + tcr2 .req x15 mov_q mair, MAIR_EL1_SET mov_q tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \ TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS + mov tcr2, xzr tcr_clear_errata_bits tcr, x9, x5 @@ -525,11 +527,16 @@ alternative_else_nop_endif #undef PTE_MAYBE_NG #undef PTE_MAYBE_SHARED - mov x0, TCR2_EL1x_PIE - msr REG_TCR2_EL1, x0 + orr tcr2, tcr2, TCR2_EL1x_PIE .Lskip_indirection: + mrs_s x1, SYS_ID_AA64MMFR3_EL1 + ubfx x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4 + cbz x1, 1f + msr REG_TCR2_EL1, tcr2 +1: + /* * Prepare SCTLR */ @@ -538,4 +545,5 @@ alternative_else_nop_endif .unreq mair .unreq tcr + .unreq tcr2 SYM_FUNC_END(__cpu_setup)