diff mbox series

[v3,1/3] arm64: Add BBM Level 2 cpu feature

Message ID 20250313104111.24196-3-miko.lenczewski@arm.com (mailing list archive)
State New
Headers show
Series Initial BBML2 support for contpte_convert() | expand

Commit Message

Mikołaj Lenczewski March 13, 2025, 10:41 a.m. UTC
The Break-Before-Make cpu feature supports multiple levels (levels 0-2),
and this commit adds a dedicated BBML2 cpufeature to test against
support for, as well as a kernel commandline parameter to optionally
disable BBML2 altogether.

This is a system feature as we might have a big.LITTLE architecture
where some cores support BBML2 and some don't, but we want all cores to
be available and BBM to default to level 0 (as opposed to having cores
without BBML2 not coming online).

To support BBML2 in as wide a range of contexts as we can, we want not
only the architectural guarantees that BBML2 makes, but additionally
want BBML2 to not create TLB conflict aborts. Not causing aborts avoids
us having to prove that no recursive faults can be induced in any path
that uses BBML2, allowing its use for arbitrary kernel mappings.
Support detection of such CPUs.

Signed-off-by: Mikołaj Lenczewski <miko.lenczewski@arm.com>
---
 .../admin-guide/kernel-parameters.txt         |  3 +
 arch/arm64/Kconfig                            | 11 +++
 arch/arm64/include/asm/cpucaps.h              |  2 +
 arch/arm64/include/asm/cpufeature.h           |  6 ++
 arch/arm64/kernel/cpufeature.c                | 76 +++++++++++++++++++
 arch/arm64/kernel/pi/idreg-override.c         |  2 +
 arch/arm64/tools/cpucaps                      |  1 +
 7 files changed, 101 insertions(+)

Comments

Ryan Roberts March 13, 2025, 4:13 p.m. UTC | #1
On 13/03/2025 10:41, Mikołaj Lenczewski wrote:
> The Break-Before-Make cpu feature supports multiple levels (levels 0-2),
> and this commit adds a dedicated BBML2 cpufeature to test against
> support for, as well as a kernel commandline parameter to optionally
> disable BBML2 altogether.
> 
> This is a system feature as we might have a big.LITTLE architecture
> where some cores support BBML2 and some don't, but we want all cores to
> be available and BBM to default to level 0 (as opposed to having cores
> without BBML2 not coming online).
> 
> To support BBML2 in as wide a range of contexts as we can, we want not
> only the architectural guarantees that BBML2 makes, but additionally
> want BBML2 to not create TLB conflict aborts. Not causing aborts avoids
> us having to prove that no recursive faults can be induced in any path
> that uses BBML2, allowing its use for arbitrary kernel mappings.
> Support detection of such CPUs.
> 
> Signed-off-by: Mikołaj Lenczewski <miko.lenczewski@arm.com>

I have 2 nits below, but with those resolved:

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

> ---
>  .../admin-guide/kernel-parameters.txt         |  3 +
>  arch/arm64/Kconfig                            | 11 +++
>  arch/arm64/include/asm/cpucaps.h              |  2 +
>  arch/arm64/include/asm/cpufeature.h           |  6 ++
>  arch/arm64/kernel/cpufeature.c                | 76 +++++++++++++++++++
>  arch/arm64/kernel/pi/idreg-override.c         |  2 +
>  arch/arm64/tools/cpucaps                      |  1 +
>  7 files changed, 101 insertions(+)
> 
> diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
> index fb8752b42ec8..3e4cc917a07e 100644
> --- a/Documentation/admin-guide/kernel-parameters.txt
> +++ b/Documentation/admin-guide/kernel-parameters.txt
> @@ -453,6 +453,9 @@
>  	arm64.no32bit_el0 [ARM64] Unconditionally disable the execution of
>  			32 bit applications.
>  
> +	arm64.nobbml2	[ARM64] Unconditionally disable Break-Before-Make Level
> +			2 support
> +
>  	arm64.nobti	[ARM64] Unconditionally disable Branch Target
>  			Identification support
>  
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 940343beb3d4..49deda2b22ae 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -2057,6 +2057,17 @@ config ARM64_TLB_RANGE
>  	  The feature introduces new assembly instructions, and they were
>  	  support when binutils >= 2.30.
>  
> +config ARM64_BBML2_NOABORT
> +	bool "Enable support for Break-Before-Make Level 2 detection and usage"
> +	default y
> +	help
> +	  FEAT_BBM provides detection of support levels for break-before-make
> +	  sequences. If BBM level 2 is supported, some TLB maintenance requirements
> +	  can be relaxed to improve performance. We additionally require the
> +	  property that the implementation cannot ever raise TLB Conflict Aborts.
> +	  Selecting N causes the kernel to fall back to BBM level 0 behaviour
> +	  even if the system supports BBM level 2.
> +
>  endmenu # "ARMv8.4 architectural features"
>  
>  menu "ARMv8.5 architectural features"
> diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> index 0b5ca6e0eb09..2d6db33d4e45 100644
> --- a/arch/arm64/include/asm/cpucaps.h
> +++ b/arch/arm64/include/asm/cpucaps.h
> @@ -23,6 +23,8 @@ cpucap_is_possible(const unsigned int cap)
>  		return IS_ENABLED(CONFIG_ARM64_PAN);
>  	case ARM64_HAS_EPAN:
>  		return IS_ENABLED(CONFIG_ARM64_EPAN);
> +	case ARM64_HAS_BBML2_NOABORT:
> +		return IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT);
>  	case ARM64_SVE:
>  		return IS_ENABLED(CONFIG_ARM64_SVE);
>  	case ARM64_SME:
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index e0e4478f5fb5..7f5b220dacde 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -18,6 +18,7 @@
>  #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
>  #define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
>  #define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF	8
> +#define ARM64_SW_FEATURE_OVERRIDE_NOBBML2	12
>  
>  #ifndef __ASSEMBLY__
>  
> @@ -866,6 +867,11 @@ static __always_inline bool system_supports_mpam_hcr(void)
>  	return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
>  }
>  
> +static inline bool system_supports_bbml2_noabort(void)
> +{
> +	return alternative_has_cap_unlikely(ARM64_HAS_BBML2_NOABORT);
> +}
> +
>  int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
>  bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
>  
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index d561cf3b8ac7..b936e0805161 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -2176,6 +2176,76 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
>  	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
>  }
>  
> +static inline bool bbml2_possible(void)
> +{
> +	return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOBBML2);

If you're going to keep this helper, I think it really needs to be:

return IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT) &&
       !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOBBML2);

Then you would simplify the caller to remove its own
IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT) check.

But personally I would remove the helper and just fold the test into
has_bbml2_noabort().

Thanks,
Ryan

> +}
> +
> +static bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
> +{
> +	/* We want to allow usage of bbml2 in as wide a range of kernel contexts
> +	 * as possible. This list is therefore an allow-list of known-good
> +	 * implementations that both support bbml2 and additionally, fulfill the
> +	 * extra constraint of never generating TLB conflict aborts when using
> +	 * the relaxed bbml2 semantics (such aborts make use of bbml2 in certain
> +	 * kernel contexts difficult to prove safe against recursive aborts).
> +	 *
> +	 * Note that implementations can only be considered "known-good" if their
> +	 * implementors attest to the fact that the implementation never raises
> +	 * TLBI conflict aborts for bbml2 mapping granularity changes.
> +	 */
> +	static const struct midr_range supports_bbml2_noabort_list[] = {
> +		MIDR_REV_RANGE(MIDR_CORTEX_X4, 0, 3, 0xf),
> +		MIDR_REV_RANGE(MIDR_NEOVERSE_V3, 0, 2, 0xf),
> +		{}
> +	};
> +
> +	return is_midr_in_range_list(cpu_midr, supports_bbml2_noabort_list);
> +}
> +
> +static inline unsigned int __cpu_read_midr(int cpu)

nit: why the double underscore prefix?

> +{
> +	WARN_ON_ONCE(!cpu_online(cpu));
> +
> +	return per_cpu(cpu_data, cpu).reg_midr;
> +}
> +
> +static bool has_bbml2_noabort(const struct arm64_cpu_capabilities *caps, int scope)
> +{
> +	if (!IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT))
> +		return false;
> +
> +	if (!bbml2_possible())
> +		return false;
> +
> +	if (scope & SCOPE_SYSTEM) {
> +		int cpu;
> +
> +		/* We are a boot CPU, and must verify that all enumerated boot
> +		 * CPUs have MIDR values within our allowlist. Otherwise, we do
> +		 * not allow the BBML2 feature to avoid potential faults when
> +		 * the insufficient CPUs access memory regions using BBML2
> +		 * semantics.
> +		 */
> +		for_each_online_cpu(cpu) {
> +			if (!cpu_has_bbml2_noabort(__cpu_read_midr(cpu)))
> +				return false;
> +		}
> +
> +		return true;
> +	} else if (scope & SCOPE_LOCAL_CPU) {
> +		/* We are a hot-plugged CPU, so only need to check our MIDR.
> +		 * If we have the correct MIDR, but the kernel booted on an
> +		 * insufficient CPU, we will not use BBML2 (this is safe). If
> +		 * we have an incorrect MIDR, but the kernel booted on a
> +		 * sufficient CPU, we will not bring up this CPU.
> +		 */
> +		return cpu_has_bbml2_noabort(read_cpuid_id());
> +	}
> +
> +	return false;
> +}
> +
>  #ifdef CONFIG_ARM64_PAN
>  static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
>  {
> @@ -2926,6 +2996,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>  		.matches = has_cpuid_feature,
>  		ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
>  	},
> +	{
> +		.desc = "BBM Level 2 without conflict abort",
> +		.capability = ARM64_HAS_BBML2_NOABORT,
> +		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
> +		.matches = has_bbml2_noabort,
> +	},
>  	{
>  		.desc = "52-bit Virtual Addressing for KVM (LPA2)",
>  		.capability = ARM64_HAS_LPA2,
> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> index c6b185b885f7..9728faa10390 100644
> --- a/arch/arm64/kernel/pi/idreg-override.c
> +++ b/arch/arm64/kernel/pi/idreg-override.c
> @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
>  		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
>  		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
>  		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
> +		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
>  		{}
>  	},
>  };
> @@ -246,6 +247,7 @@ static const struct {
>  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
>  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
>  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
> +	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },
>  };
>  
>  static int __init parse_hexdigit(const char *p, u64 *v)
> diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
> index 1e65f2fb45bd..b03a375e5507 100644
> --- a/arch/arm64/tools/cpucaps
> +++ b/arch/arm64/tools/cpucaps
> @@ -14,6 +14,7 @@ HAS_ADDRESS_AUTH_ARCH_QARMA5
>  HAS_ADDRESS_AUTH_IMP_DEF
>  HAS_AMU_EXTN
>  HAS_ARMv8_4_TTL
> +HAS_BBML2_NOABORT
>  HAS_CACHE_DIC
>  HAS_CACHE_IDC
>  HAS_CNP
Yang Shi March 13, 2025, 5:21 p.m. UTC | #2
On 3/13/25 3:41 AM, Mikołaj Lenczewski wrote:
> The Break-Before-Make cpu feature supports multiple levels (levels 0-2),
> and this commit adds a dedicated BBML2 cpufeature to test against
> support for, as well as a kernel commandline parameter to optionally
> disable BBML2 altogether.
>
> This is a system feature as we might have a big.LITTLE architecture
> where some cores support BBML2 and some don't, but we want all cores to
> be available and BBM to default to level 0 (as opposed to having cores
> without BBML2 not coming online).
>
> To support BBML2 in as wide a range of contexts as we can, we want not
> only the architectural guarantees that BBML2 makes, but additionally
> want BBML2 to not create TLB conflict aborts. Not causing aborts avoids
> us having to prove that no recursive faults can be induced in any path
> that uses BBML2, allowing its use for arbitrary kernel mappings.
> Support detection of such CPUs.
>
> Signed-off-by: Mikołaj Lenczewski <miko.lenczewski@arm.com>
> ---
>   .../admin-guide/kernel-parameters.txt         |  3 +
>   arch/arm64/Kconfig                            | 11 +++
>   arch/arm64/include/asm/cpucaps.h              |  2 +
>   arch/arm64/include/asm/cpufeature.h           |  6 ++
>   arch/arm64/kernel/cpufeature.c                | 76 +++++++++++++++++++
>   arch/arm64/kernel/pi/idreg-override.c         |  2 +
>   arch/arm64/tools/cpucaps                      |  1 +
>   7 files changed, 101 insertions(+)
>
> diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
> index fb8752b42ec8..3e4cc917a07e 100644
> --- a/Documentation/admin-guide/kernel-parameters.txt
> +++ b/Documentation/admin-guide/kernel-parameters.txt
> @@ -453,6 +453,9 @@
>   	arm64.no32bit_el0 [ARM64] Unconditionally disable the execution of
>   			32 bit applications.
>   
> +	arm64.nobbml2	[ARM64] Unconditionally disable Break-Before-Make Level
> +			2 support

Hi Miko,

A question about the kernel boot parameter. Can this parameter be used 
in early boot stage? A quick look at the code shows it should be ok, for 
example, cpu_has_bti() is called in map_kernel(). But I'd like to double 
check because my patchset needs to check this parameter in map_mem() to 
determine whether large block mapping can be used or not.

And a nit below.

> +
>   	arm64.nobti	[ARM64] Unconditionally disable Branch Target
>   			Identification support
>   
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 940343beb3d4..49deda2b22ae 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -2057,6 +2057,17 @@ config ARM64_TLB_RANGE
>   	  The feature introduces new assembly instructions, and they were
>   	  support when binutils >= 2.30.
>   
> +config ARM64_BBML2_NOABORT
> +	bool "Enable support for Break-Before-Make Level 2 detection and usage"
> +	default y
> +	help
> +	  FEAT_BBM provides detection of support levels for break-before-make
> +	  sequences. If BBM level 2 is supported, some TLB maintenance requirements
> +	  can be relaxed to improve performance. We additionally require the
> +	  property that the implementation cannot ever raise TLB Conflict Aborts.
> +	  Selecting N causes the kernel to fall back to BBM level 0 behaviour
> +	  even if the system supports BBM level 2.
> +
>   endmenu # "ARMv8.4 architectural features"
>   
>   menu "ARMv8.5 architectural features"
> diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> index 0b5ca6e0eb09..2d6db33d4e45 100644
> --- a/arch/arm64/include/asm/cpucaps.h
> +++ b/arch/arm64/include/asm/cpucaps.h
> @@ -23,6 +23,8 @@ cpucap_is_possible(const unsigned int cap)
>   		return IS_ENABLED(CONFIG_ARM64_PAN);
>   	case ARM64_HAS_EPAN:
>   		return IS_ENABLED(CONFIG_ARM64_EPAN);
> +	case ARM64_HAS_BBML2_NOABORT:
> +		return IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT);
>   	case ARM64_SVE:
>   		return IS_ENABLED(CONFIG_ARM64_SVE);
>   	case ARM64_SME:
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index e0e4478f5fb5..7f5b220dacde 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -18,6 +18,7 @@
>   #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
>   #define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
>   #define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF	8
> +#define ARM64_SW_FEATURE_OVERRIDE_NOBBML2	12
>   
>   #ifndef __ASSEMBLY__
>   
> @@ -866,6 +867,11 @@ static __always_inline bool system_supports_mpam_hcr(void)
>   	return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
>   }
>   
> +static inline bool system_supports_bbml2_noabort(void)
> +{
> +	return alternative_has_cap_unlikely(ARM64_HAS_BBML2_NOABORT);
> +}
> +
>   int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
>   bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
>   
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index d561cf3b8ac7..b936e0805161 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -2176,6 +2176,76 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
>   	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
>   }
>   
> +static inline bool bbml2_possible(void)
> +{
> +	return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOBBML2);
> +}

Can this be moved to cpufeature.h? My patch will use this, anyway I can 
do it in my patchset.

Thanks,
Yang

> +
> +static bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
> +{
> +	/* We want to allow usage of bbml2 in as wide a range of kernel contexts
> +	 * as possible. This list is therefore an allow-list of known-good
> +	 * implementations that both support bbml2 and additionally, fulfill the
> +	 * extra constraint of never generating TLB conflict aborts when using
> +	 * the relaxed bbml2 semantics (such aborts make use of bbml2 in certain
> +	 * kernel contexts difficult to prove safe against recursive aborts).
> +	 *
> +	 * Note that implementations can only be considered "known-good" if their
> +	 * implementors attest to the fact that the implementation never raises
> +	 * TLBI conflict aborts for bbml2 mapping granularity changes.
> +	 */
> +	static const struct midr_range supports_bbml2_noabort_list[] = {
> +		MIDR_REV_RANGE(MIDR_CORTEX_X4, 0, 3, 0xf),
> +		MIDR_REV_RANGE(MIDR_NEOVERSE_V3, 0, 2, 0xf),
> +		{}
> +	};
> +
> +	return is_midr_in_range_list(cpu_midr, supports_bbml2_noabort_list);
> +}
> +
> +static inline unsigned int __cpu_read_midr(int cpu)
> +{
> +	WARN_ON_ONCE(!cpu_online(cpu));
> +
> +	return per_cpu(cpu_data, cpu).reg_midr;
> +}
> +
> +static bool has_bbml2_noabort(const struct arm64_cpu_capabilities *caps, int scope)
> +{
> +	if (!IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT))
> +		return false;
> +
> +	if (!bbml2_possible())
> +		return false;
> +
> +	if (scope & SCOPE_SYSTEM) {
> +		int cpu;
> +
> +		/* We are a boot CPU, and must verify that all enumerated boot
> +		 * CPUs have MIDR values within our allowlist. Otherwise, we do
> +		 * not allow the BBML2 feature to avoid potential faults when
> +		 * the insufficient CPUs access memory regions using BBML2
> +		 * semantics.
> +		 */
> +		for_each_online_cpu(cpu) {
> +			if (!cpu_has_bbml2_noabort(__cpu_read_midr(cpu)))
> +				return false;
> +		}
> +
> +		return true;
> +	} else if (scope & SCOPE_LOCAL_CPU) {
> +		/* We are a hot-plugged CPU, so only need to check our MIDR.
> +		 * If we have the correct MIDR, but the kernel booted on an
> +		 * insufficient CPU, we will not use BBML2 (this is safe). If
> +		 * we have an incorrect MIDR, but the kernel booted on a
> +		 * sufficient CPU, we will not bring up this CPU.
> +		 */
> +		return cpu_has_bbml2_noabort(read_cpuid_id());
> +	}
> +
> +	return false;
> +}
> +
>   #ifdef CONFIG_ARM64_PAN
>   static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
>   {
> @@ -2926,6 +2996,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>   		.matches = has_cpuid_feature,
>   		ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
>   	},
> +	{
> +		.desc = "BBM Level 2 without conflict abort",
> +		.capability = ARM64_HAS_BBML2_NOABORT,
> +		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
> +		.matches = has_bbml2_noabort,
> +	},
>   	{
>   		.desc = "52-bit Virtual Addressing for KVM (LPA2)",
>   		.capability = ARM64_HAS_LPA2,
> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> index c6b185b885f7..9728faa10390 100644
> --- a/arch/arm64/kernel/pi/idreg-override.c
> +++ b/arch/arm64/kernel/pi/idreg-override.c
> @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
>   		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
>   		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
>   		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
> +		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
>   		{}
>   	},
>   };
> @@ -246,6 +247,7 @@ static const struct {
>   	{ "rodata=off",			"arm64_sw.rodataoff=1" },
>   	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
>   	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
> +	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },
>   };
>   
>   static int __init parse_hexdigit(const char *p, u64 *v)
> diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
> index 1e65f2fb45bd..b03a375e5507 100644
> --- a/arch/arm64/tools/cpucaps
> +++ b/arch/arm64/tools/cpucaps
> @@ -14,6 +14,7 @@ HAS_ADDRESS_AUTH_ARCH_QARMA5
>   HAS_ADDRESS_AUTH_IMP_DEF
>   HAS_AMU_EXTN
>   HAS_ARMv8_4_TTL
> +HAS_BBML2_NOABORT
>   HAS_CACHE_DIC
>   HAS_CACHE_IDC
>   HAS_CNP
Marc Zyngier March 13, 2025, 5:34 p.m. UTC | #3
On Thu, 13 Mar 2025 10:41:10 +0000,
Mikołaj Lenczewski <miko.lenczewski@arm.com> wrote:
> 
> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> index c6b185b885f7..9728faa10390 100644
> --- a/arch/arm64/kernel/pi/idreg-override.c
> +++ b/arch/arm64/kernel/pi/idreg-override.c
> @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
>  		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
>  		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
>  		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
> +		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
>  		{}
>  	},
>  };
> @@ -246,6 +247,7 @@ static const struct {
>  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
>  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
>  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
> +	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },

Why is that a SW feature? This looks very much like a HW feature to
me, and you should instead mask out ID_AA64MMFR2_EL1.BBM, and be done
with it. Something like:

diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
index c6b185b885f70..803a0c99f7b46 100644
--- a/arch/arm64/kernel/pi/idreg-override.c
+++ b/arch/arm64/kernel/pi/idreg-override.c
@@ -102,6 +102,7 @@ static const struct ftr_set_desc mmfr2 __prel64_initconst = {
 	.override	= &id_aa64mmfr2_override,
 	.fields		= {
 		FIELD("varange", ID_AA64MMFR2_EL1_VARange_SHIFT, mmfr2_varange_filter),
+		FIELD("bbm", ID_AA64MMFR2_EL1_BBM_SHIFT, NULL),
 		{}
 	},
 };
@@ -246,6 +247,7 @@ static const struct {
 	{ "rodata=off",			"arm64_sw.rodataoff=1" },
 	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
 	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
+	{ "arm64.nobbml2",		"id_aa64mmfr2.bbm=0" },
 };
 
 static int __init parse_hexdigit(const char *p, u64 *v)


Thanks,

	M.
Mikołaj Lenczewski March 13, 2025, 6:08 p.m. UTC | #4
On Thu, Mar 13, 2025 at 04:13:22PM +0000, Ryan Roberts wrote:
> On 13/03/2025 10:41, Mikołaj Lenczewski wrote: 
> > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> > index d561cf3b8ac7..b936e0805161 100644
> > --- a/arch/arm64/kernel/cpufeature.c
> > +++ b/arch/arm64/kernel/cpufeature.c
> > @@ -2176,6 +2176,76 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
> >  	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
> >  }
> >  
> > +static inline bool bbml2_possible(void)
> > +{
> > +	return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOBBML2);
> 
> If you're going to keep this helper, I think it really needs to be:
> 
> return IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT) &&
>        !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOBBML2);
> 
> Then you would simplify the caller to remove its own
> IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT) check.
> 
> But personally I would remove the helper and just fold the test into
> has_bbml2_noabort().
> 
> Thanks,
> Ryan

I was debating folding it into has_bbml2_noabort(), but went ahead and
implemented it separately to match hvhe_possible(), which was another sw
feature helper.

But I agree, folding it will be simpler and read just as easily (if not
easier). Will do so.

> > +}
> > +
> > +static bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
> > +{
> > +	/* We want to allow usage of bbml2 in as wide a range of kernel contexts
> > +	 * as possible. This list is therefore an allow-list of known-good
> > +	 * implementations that both support bbml2 and additionally, fulfill the
> > +	 * extra constraint of never generating TLB conflict aborts when using
> > +	 * the relaxed bbml2 semantics (such aborts make use of bbml2 in certain
> > +	 * kernel contexts difficult to prove safe against recursive aborts).
> > +	 *
> > +	 * Note that implementations can only be considered "known-good" if their
> > +	 * implementors attest to the fact that the implementation never raises
> > +	 * TLBI conflict aborts for bbml2 mapping granularity changes.
> > +	 */
> > +	static const struct midr_range supports_bbml2_noabort_list[] = {
> > +		MIDR_REV_RANGE(MIDR_CORTEX_X4, 0, 3, 0xf),
> > +		MIDR_REV_RANGE(MIDR_NEOVERSE_V3, 0, 2, 0xf),
> > +		{}
> > +	};
> > +
> > +	return is_midr_in_range_list(cpu_midr, supports_bbml2_noabort_list);
> > +}
> > +
> > +static inline unsigned int __cpu_read_midr(int cpu)
> 
> nit: why the double underscore prefix?

Again copying other helpers I saw that seemed to do similar things.
Didn't know if this was the expected style, so did as other helpers did.
Will remove.

Thank you for the review.
Mikołaj Lenczewski March 13, 2025, 6:13 p.m. UTC | #5
On Thu, Mar 13, 2025 at 10:21:51AM -0700, Yang Shi wrote:
> On 3/13/25 3:41 AM, Mikołaj Lenczewski wrote:
> > diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
> > index fb8752b42ec8..3e4cc917a07e 100644
> > --- a/Documentation/admin-guide/kernel-parameters.txt
> > +++ b/Documentation/admin-guide/kernel-parameters.txt
> > @@ -453,6 +453,9 @@
> >   	arm64.no32bit_el0 [ARM64] Unconditionally disable the execution of
> >   			32 bit applications.
> >   
> > +	arm64.nobbml2	[ARM64] Unconditionally disable Break-Before-Make Level
> > +			2 support
> 
> Hi Miko,
> 
> A question about the kernel boot parameter. Can this parameter be used 
> in early boot stage? A quick look at the code shows it should be ok, for 
> example, cpu_has_bti() is called in map_kernel(). But I'd like to double 
> check because my patchset needs to check this parameter in map_mem() to 
> determine whether large block mapping can be used or not.
> 
> And a nit below.

I will need to double check exactly when the arm64 software overrides
are finalised, but as long as those values are finalised in / before (?)
the early boot stage then it should be fine? Will reply again once I
check and have an answer.
   
> > +static inline bool bbml2_possible(void)
> > +{
> > +	return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOBBML2);
> > +}
> 
> Can this be moved to cpufeature.h? My patch will use this, anyway I can 
> do it in my patchset.
> 
> Thanks,
> Yang

I can do so. In fact, on second thought, I will probably extend this to
also include the `IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT)` check as well,
and then move it to cpufeature.h, instead of folding said check into
has_bbml2_noabort().
Ryan Roberts March 13, 2025, 6:17 p.m. UTC | #6
On 13/03/2025 18:13, Mikołaj Lenczewski wrote:
> On Thu, Mar 13, 2025 at 10:21:51AM -0700, Yang Shi wrote:
>> On 3/13/25 3:41 AM, Mikołaj Lenczewski wrote:
>>> diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
>>> index fb8752b42ec8..3e4cc917a07e 100644
>>> --- a/Documentation/admin-guide/kernel-parameters.txt
>>> +++ b/Documentation/admin-guide/kernel-parameters.txt
>>> @@ -453,6 +453,9 @@
>>>   	arm64.no32bit_el0 [ARM64] Unconditionally disable the execution of
>>>   			32 bit applications.
>>>   
>>> +	arm64.nobbml2	[ARM64] Unconditionally disable Break-Before-Make Level
>>> +			2 support
>>
>> Hi Miko,
>>
>> A question about the kernel boot parameter. Can this parameter be used 
>> in early boot stage? A quick look at the code shows it should be ok, for 
>> example, cpu_has_bti() is called in map_kernel(). But I'd like to double 
>> check because my patchset needs to check this parameter in map_mem() to 
>> determine whether large block mapping can be used or not.
>>
>> And a nit below.
> 
> I will need to double check exactly when the arm64 software overrides
> are finalised, but as long as those values are finalised in / before (?)
> the early boot stage then it should be fine? Will reply again once I
> check and have an answer.

This will work fine. The override is setup in the PI code before start_kernel().

>    
>>> +static inline bool bbml2_possible(void)
>>> +{
>>> +	return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOBBML2);
>>> +}
>>
>> Can this be moved to cpufeature.h? My patch will use this, anyway I can 
>> do it in my patchset.

I'd prefer to do the moving as part of the series that needs it moved.

Thanks,
Ryan

>>
>> Thanks,
>> Yang
> 
> I can do so. In fact, on second thought, I will probably extend this to
> also include the `IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT)` check as well,
> and then move it to cpufeature.h, instead of folding said check into
> has_bbml2_noabort().
>
Mikołaj Lenczewski March 13, 2025, 6:20 p.m. UTC | #7
On Thu, Mar 13, 2025 at 05:34:46PM +0000, Marc Zyngier wrote:
> On Thu, 13 Mar 2025 10:41:10 +0000,
> Mikołaj Lenczewski <miko.lenczewski@arm.com> wrote:
> > 
> > diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> > index c6b185b885f7..9728faa10390 100644
> > --- a/arch/arm64/kernel/pi/idreg-override.c
> > +++ b/arch/arm64/kernel/pi/idreg-override.c
> > @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
> >  		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
> >  		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
> >  		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
> > +		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
> >  		{}
> >  	},
> >  };
> > @@ -246,6 +247,7 @@ static const struct {
> >  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
> >  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
> >  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
> > +	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },
> 
> Why is that a SW feature? This looks very much like a HW feature to
> me, and you should instead mask out ID_AA64MMFR2_EL1.BBM, and be done
> with it. Something like:
> 
> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> index c6b185b885f70..803a0c99f7b46 100644
> --- a/arch/arm64/kernel/pi/idreg-override.c
> +++ b/arch/arm64/kernel/pi/idreg-override.c
> @@ -102,6 +102,7 @@ static const struct ftr_set_desc mmfr2 __prel64_initconst = {
>  	.override	= &id_aa64mmfr2_override,
>  	.fields		= {
>  		FIELD("varange", ID_AA64MMFR2_EL1_VARange_SHIFT, mmfr2_varange_filter),
> +		FIELD("bbm", ID_AA64MMFR2_EL1_BBM_SHIFT, NULL),
>  		{}
>  	},
>  };
> @@ -246,6 +247,7 @@ static const struct {
>  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
>  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
>  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
> +	{ "arm64.nobbml2",		"id_aa64mmfr2.bbm=0" },
>  };
>  
>  static int __init parse_hexdigit(const char *p, u64 *v)
> 
> 
> Thanks,
> 
> 	M.
> 
> -- 
> Without deviation from the norm, progress is not possible.

Thanks for the review.

I think part of this confusion is due to me not including a changelog
(definitely something for the next respin!), but the discussion this
change is based on is found here:

https://lore.kernel.org/all/b46dc626-edc9-4d20-99d2-6cd08a01346c@os.amperecomputing.com/

Essentially, this is a SW feature because we do not check the
id_aa64mmfr2.bbm register as part of the has_bbml2_noabort() cpucap
matches filter. This is because certain hardware implementations
do not actually declare bbml2 via the hardware feature register, despite
implementing our bbml2_noabort feature, and certain hypervisor setups
might result in issues so we want to have an override to allow
potentially disabling the feature for generic kernels.
Ryan Roberts March 13, 2025, 6:22 p.m. UTC | #8
On 13/03/2025 17:34, Marc Zyngier wrote:
> On Thu, 13 Mar 2025 10:41:10 +0000,
> Mikołaj Lenczewski <miko.lenczewski@arm.com> wrote:
>>
>> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
>> index c6b185b885f7..9728faa10390 100644
>> --- a/arch/arm64/kernel/pi/idreg-override.c
>> +++ b/arch/arm64/kernel/pi/idreg-override.c
>> @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
>>  		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
>>  		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
>>  		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
>> +		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
>>  		{}
>>  	},
>>  };
>> @@ -246,6 +247,7 @@ static const struct {
>>  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
>>  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
>>  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
>> +	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },
> 
> Why is that a SW feature? This looks very much like a HW feature to
> me, and you should instead mask out ID_AA64MMFR2_EL1.BBM, and be done
> with it. Something like:

I think this implies that we would expect the BBM field to be advertising BBML2
support normally and we would check for that as part of the cpufeature
detection. That's how Miko was doing it in v2, but Yang pointed out that
AmpereOne, which supports BBML2+NOABORT semantics, doesn't actually advertise
BBML2 in its MMFR2. So we don't want to check that field, and instead rely
solely on the MIDR allow-list + a command line override. It was me that
suggested putting that in the SW feature register, and I think that still sounds
like the right solution for this situation?

Thanks,
Ryan

> 
> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> index c6b185b885f70..803a0c99f7b46 100644
> --- a/arch/arm64/kernel/pi/idreg-override.c
> +++ b/arch/arm64/kernel/pi/idreg-override.c
> @@ -102,6 +102,7 @@ static const struct ftr_set_desc mmfr2 __prel64_initconst = {
>  	.override	= &id_aa64mmfr2_override,
>  	.fields		= {
>  		FIELD("varange", ID_AA64MMFR2_EL1_VARange_SHIFT, mmfr2_varange_filter),
> +		FIELD("bbm", ID_AA64MMFR2_EL1_BBM_SHIFT, NULL),
>  		{}
>  	},
>  };
> @@ -246,6 +247,7 @@ static const struct {
>  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
>  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
>  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
> +	{ "arm64.nobbml2",		"id_aa64mmfr2.bbm=0" },
>  };
>  
>  static int __init parse_hexdigit(const char *p, u64 *v)
> 
> 
> Thanks,
> 
> 	M.
>
Marc Zyngier March 13, 2025, 6:36 p.m. UTC | #9
On Thu, 13 Mar 2025 18:22:00 +0000,
Ryan Roberts <ryan.roberts@arm.com> wrote:
> 
> On 13/03/2025 17:34, Marc Zyngier wrote:
> > On Thu, 13 Mar 2025 10:41:10 +0000,
> > Mikołaj Lenczewski <miko.lenczewski@arm.com> wrote:
> >>
> >> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> >> index c6b185b885f7..9728faa10390 100644
> >> --- a/arch/arm64/kernel/pi/idreg-override.c
> >> +++ b/arch/arm64/kernel/pi/idreg-override.c
> >> @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
> >>  		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
> >>  		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
> >>  		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
> >> +		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
> >>  		{}
> >>  	},
> >>  };
> >> @@ -246,6 +247,7 @@ static const struct {
> >>  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
> >>  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
> >>  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
> >> +	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },
> > 
> > Why is that a SW feature? This looks very much like a HW feature to
> > me, and you should instead mask out ID_AA64MMFR2_EL1.BBM, and be done
> > with it. Something like:
> 
> I think this implies that we would expect the BBM field to be advertising BBML2
> support normally and we would check for that as part of the cpufeature
> detection. That's how Miko was doing it in v2, but Yang pointed out that
> AmpereOne, which supports BBML2+NOABORT semantics, doesn't actually advertise
> BBML2 in its MMFR2. So we don't want to check that field, and instead rely
> solely on the MIDR allow-list + a command line override. It was me that
> suggested putting that in the SW feature register, and I think that still sounds
> like the right solution for this situation?

I think this is mixing two different things:

- preventing BBM-L2 from being visible to the kernel: this is what my
  suggestion is doing by nuking an architectural feature in the
  relevant register

- random HW not correctly advertising what they are doing: this is an
  erratum workaround

I'd rather we don't conflate the two things, and make them very
explicitly distinct.

Thanks,

	M.
Marc Zyngier March 13, 2025, 6:39 p.m. UTC | #10
On Thu, 13 Mar 2025 18:20:26 +0000,
Mikołaj Lenczewski <miko.lenczewski@arm.com> wrote:
> 
> On Thu, Mar 13, 2025 at 05:34:46PM +0000, Marc Zyngier wrote:
> > On Thu, 13 Mar 2025 10:41:10 +0000,
> > Mikołaj Lenczewski <miko.lenczewski@arm.com> wrote:
> > > 
> > > diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> > > index c6b185b885f7..9728faa10390 100644
> > > --- a/arch/arm64/kernel/pi/idreg-override.c
> > > +++ b/arch/arm64/kernel/pi/idreg-override.c
> > > @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
> > >  		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
> > >  		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
> > >  		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
> > > +		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
> > >  		{}
> > >  	},
> > >  };
> > > @@ -246,6 +247,7 @@ static const struct {
> > >  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
> > >  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
> > >  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
> > > +	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },
> > 
> > Why is that a SW feature? This looks very much like a HW feature to
> > me, and you should instead mask out ID_AA64MMFR2_EL1.BBM, and be done
> > with it. Something like:
> > 
> > diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> > index c6b185b885f70..803a0c99f7b46 100644
> > --- a/arch/arm64/kernel/pi/idreg-override.c
> > +++ b/arch/arm64/kernel/pi/idreg-override.c
> > @@ -102,6 +102,7 @@ static const struct ftr_set_desc mmfr2 __prel64_initconst = {
> >  	.override	= &id_aa64mmfr2_override,
> >  	.fields		= {
> >  		FIELD("varange", ID_AA64MMFR2_EL1_VARange_SHIFT, mmfr2_varange_filter),
> > +		FIELD("bbm", ID_AA64MMFR2_EL1_BBM_SHIFT, NULL),
> >  		{}
> >  	},
> >  };
> > @@ -246,6 +247,7 @@ static const struct {
> >  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
> >  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
> >  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
> > +	{ "arm64.nobbml2",		"id_aa64mmfr2.bbm=0" },
> >  };
> >  
> >  static int __init parse_hexdigit(const char *p, u64 *v)
> > 
> > 
> > Thanks,
> > 
> > 	M.
> > 
> > -- 
> > Without deviation from the norm, progress is not possible.
> 
> Thanks for the review.
> 
> I think part of this confusion is due to me not including a changelog
> (definitely something for the next respin!), but the discussion this
> change is based on is found here:
> 
> https://lore.kernel.org/all/b46dc626-edc9-4d20-99d2-6cd08a01346c@os.amperecomputing.com/
> 
> Essentially, this is a SW feature because we do not check the
> id_aa64mmfr2.bbm register as part of the has_bbml2_noabort() cpucap
> matches filter. This is because certain hardware implementations
> do not actually declare bbml2 via the hardware feature register, despite
> implementing our bbml2_noabort feature, and certain hypervisor setups
> might result in issues so we want to have an override to allow
> potentially disabling the feature for generic kernels.

I replied to Ryan on the same subject: not advertising a feature that
is actually supported is very much an erratum, and we should not
conflate feature control of an architecture feature (which is what the
ID override horror is doing) with implementation-specific workarounds.

Thanks,

	M.
Ryan Roberts March 14, 2025, 9:18 a.m. UTC | #11
On 13/03/2025 18:36, Marc Zyngier wrote:
> On Thu, 13 Mar 2025 18:22:00 +0000,
> Ryan Roberts <ryan.roberts@arm.com> wrote:
>>
>> On 13/03/2025 17:34, Marc Zyngier wrote:
>>> On Thu, 13 Mar 2025 10:41:10 +0000,
>>> Mikołaj Lenczewski <miko.lenczewski@arm.com> wrote:
>>>>
>>>> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
>>>> index c6b185b885f7..9728faa10390 100644
>>>> --- a/arch/arm64/kernel/pi/idreg-override.c
>>>> +++ b/arch/arm64/kernel/pi/idreg-override.c
>>>> @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
>>>>  		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
>>>>  		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
>>>>  		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
>>>> +		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
>>>>  		{}
>>>>  	},
>>>>  };
>>>> @@ -246,6 +247,7 @@ static const struct {
>>>>  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
>>>>  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
>>>>  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
>>>> +	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },
>>>
>>> Why is that a SW feature? This looks very much like a HW feature to
>>> me, and you should instead mask out ID_AA64MMFR2_EL1.BBM, and be done
>>> with it. Something like:
>>
>> I think this implies that we would expect the BBM field to be advertising BBML2
>> support normally and we would check for that as part of the cpufeature
>> detection. That's how Miko was doing it in v2, but Yang pointed out that
>> AmpereOne, which supports BBML2+NOABORT semantics, doesn't actually advertise
>> BBML2 in its MMFR2. So we don't want to check that field, and instead rely
>> solely on the MIDR allow-list + a command line override. It was me that
>> suggested putting that in the SW feature register, and I think that still sounds
>> like the right solution for this situation?
> 
> I think this is mixing two different things:
> 
> - preventing BBM-L2 from being visible to the kernel: this is what my
>   suggestion is doing by nuking an architectural feature in the
>   relevant register
> 
> - random HW not correctly advertising what they are doing: this is an
>   erratum workaround
> 
> I'd rather we don't conflate the two things, and make them very
> explicitly distinct.

It all sounds so obvious when you put it like that! :)

I'm guessing there is a layer where the workaround can be applied to the
sanitised feature registers on a per-cpu basis and that won't affect this global
override which will remain as an overlay on top? If so then that sounds perfect
(you can probably tell I find the whole feature management framework rather
impenetrable). That workaround would be added as part of Yang's series anyway.

So sounds like we are back to testing MMFR2.BBM in the matches function, with
the addition of Maz's proposal above. Sorry for sending you round the houses, Miko.

Thanks,
Ryan

> 
> Thanks,
> 
> 	M.
>
Ryan Roberts March 14, 2025, 9:26 a.m. UTC | #12
On 13/03/2025 18:08, Mikołaj Lenczewski wrote:
> On Thu, Mar 13, 2025 at 04:13:22PM +0000, Ryan Roberts wrote:
>> On 13/03/2025 10:41, Mikołaj Lenczewski wrote: 
>>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>>> index d561cf3b8ac7..b936e0805161 100644
>>> --- a/arch/arm64/kernel/cpufeature.c
>>> +++ b/arch/arm64/kernel/cpufeature.c
>>> @@ -2176,6 +2176,76 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
>>>  	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
>>>  }
>>>  
>>> +static inline bool bbml2_possible(void)
>>> +{
>>> +	return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOBBML2);
>>
>> If you're going to keep this helper, I think it really needs to be:
>>
>> return IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT) &&
>>        !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOBBML2);
>>
>> Then you would simplify the caller to remove it's own
>> IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT) check.
>>
>> But personally I would remove the helper and just fold the test into
>> has_bbml2_noabort().
>>
>> Thanks,
>> Ryan
> 
> I was debating folding it into has_bbml2_noabort(), but went ahead and
> implemented it separately to match hvhe_possible(), which was another sw
> feature helper.

hvhe_possible() is a .matches function, so there is nothing to fold it into.

> 
> But I agree, folding it will be simpler and read just as easily (if not
> easier). Will do so.
> 
>>> +}
>>> +
>>> +static bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
>>> +{
>>> +	/* We want to allow usage of bbml2 in as wide a range of kernel contexts
>>> +	 * as possible. This list is therefore an allow-list of known-good
>>> +	 * implementations that both support bbml2 and additionally, fulfill the
>>> +	 * extra constraint of never generating TLB conflict aborts when using
>>> +	 * the relaxed bbml2 semantics (such aborts make use of bbml2 in certain
>>> +	 * kernel contexts difficult to prove safe against recursive aborts).
>>> +	 *
>>> +	 * Note that implementations can only be considered "known-good" if their
>>> +	 * implementors attest to the fact that the implementation never raises
>>> +	 * TLBI conflict aborts for bbml2 mapping granularity changes.
>>> +	 */
>>> +	static const struct midr_range supports_bbml2_noabort_list[] = {
>>> +		MIDR_REV_RANGE(MIDR_CORTEX_X4, 0, 3, 0xf),
>>> +		MIDR_REV_RANGE(MIDR_NEOVERSE_V3, 0, 2, 0xf),
>>> +		{}
>>> +	};
>>> +
>>> +	return is_midr_in_range_list(cpu_midr, supports_bbml2_noabort_list);
>>> +}
>>> +
>>> +static inline unsigned int __cpu_read_midr(int cpu)
>>
>> nit: why the double underscrore prefix?
> 
> Again copying other helpers I saw that seemed to do similar things.
> Didn't know if this was the expected style, so did as other helpers did.
> Will remove.

Often those double underscores are used when you have a public function wrapping
into a private function, like this:

static void __do_a_thing(bool modify_behaviour_in_some_way);

void do_a_thing(void)
{
	__do_a_thing(false);
}

I'm sure the coding style offers a better explanation.

Thanks,
Ryan

> 
> Thank you for the review.
>
Marc Zyngier March 14, 2025, 10:11 a.m. UTC | #13
On Fri, 14 Mar 2025 09:18:43 +0000,
Ryan Roberts <ryan.roberts@arm.com> wrote:
> 
> On 13/03/2025 18:36, Marc Zyngier wrote:
> > On Thu, 13 Mar 2025 18:22:00 +0000,
> > Ryan Roberts <ryan.roberts@arm.com> wrote:
> >>
> >> On 13/03/2025 17:34, Marc Zyngier wrote:
> >>> On Thu, 13 Mar 2025 10:41:10 +0000,
> >>> Mikołaj Lenczewski <miko.lenczewski@arm.com> wrote:
> >>>>
> >>>> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> >>>> index c6b185b885f7..9728faa10390 100644
> >>>> --- a/arch/arm64/kernel/pi/idreg-override.c
> >>>> +++ b/arch/arm64/kernel/pi/idreg-override.c
> >>>> @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
> >>>>  		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
> >>>>  		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
> >>>>  		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
> >>>> +		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
> >>>>  		{}
> >>>>  	},
> >>>>  };
> >>>> @@ -246,6 +247,7 @@ static const struct {
> >>>>  	{ "rodata=off",			"arm64_sw.rodataoff=1" },
> >>>>  	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
> >>>>  	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
> >>>> +	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },
> >>>
> >>> Why is that a SW feature? This looks very much like a HW feature to
> >>> me, and you should instead mask out ID_AA64MMFR2_EL1.BBM, and be done
> >>> with it. Something like:
> >>
> >> I think this implies that we would expect the BBM field to be advertising BBML2
> >> support normally and we would check for that as part of the cpufeature
> >> detection. That's how Miko was doing it in v2, but Yang pointed out that
> >> AmpereOne, which supports BBML2+NOABORT semantics, doesn't actually advertise
> >> BBML2 in its MMFR2. So we don't want to check that field, and instead rely
> >> solely on the MIDR allow-list + a command line override. It was me that
> >> suggested putting that in the SW feature register, and I think that still sounds
> >> like the right solution for this situation?
> > 
> > I think this is mixing two different things:
> > 
> > - preventing BBM-L2 from being visible to the kernel: this is what my
> >   suggestion is doing by nuking an architectural feature in the
> >   relevant register
> > 
> > - random HW not correctly advertising what they are doing: this is an
> >   erratum workaround
> > 
> > I'd rather we don't conflate the two things, and make them very
> > explicitly distinct.
> 
> It all sounds so obvious when you put it like that! :)
> 
> I'm guessing there is a layer where the workaround can be applied to the
> sanitised feature registers on a per-cpu basis and that won't affect this global
> override which will remain as an overlay on top? If so then that sounds perfect
> (you can probably tell I find the whole feature management framework rather
> inpeneterable).

You and I, brother... The only person who actually understands what's
in that file is Suzuki.

> That workaround would be added as part of Yang's series anyway.

Yup, that's what I'd expect. Ideally tied to an erratum number so that
we have an actual promise from the vendor that their implementation is
actually BBM-L2 compliant despite the idreg breakage.

Thanks,

	M.
Suzuki K Poulose March 14, 2025, 12:33 p.m. UTC | #14
On 14/03/2025 09:18, Ryan Roberts wrote:
> On 13/03/2025 18:36, Marc Zyngier wrote:
>> On Thu, 13 Mar 2025 18:22:00 +0000,
>> Ryan Roberts <ryan.roberts@arm.com> wrote:
>>>
>>> On 13/03/2025 17:34, Marc Zyngier wrote:
>>>> On Thu, 13 Mar 2025 10:41:10 +0000,
>>>> Mikołaj Lenczewski <miko.lenczewski@arm.com> wrote:
>>>>>
>>>>> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
>>>>> index c6b185b885f7..9728faa10390 100644
>>>>> --- a/arch/arm64/kernel/pi/idreg-override.c
>>>>> +++ b/arch/arm64/kernel/pi/idreg-override.c
>>>>> @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
>>>>>   		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
>>>>>   		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
>>>>>   		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
>>>>> +		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
>>>>>   		{}
>>>>>   	},
>>>>>   };
>>>>> @@ -246,6 +247,7 @@ static const struct {
>>>>>   	{ "rodata=off",			"arm64_sw.rodataoff=1" },
>>>>>   	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
>>>>>   	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
>>>>> +	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },
>>>>
>>>> Why is that a SW feature? This looks very much like a HW feature to
>>>> me, and you should instead mask out ID_AA64MMFR2_EL1.BBM, and be done
>>>> with it. Something like:
>>>
>>> I think this implies that we would expect the BBM field to be advertising BBML2
>>> support normally and we would check for that as part of the cpufeature
>>> detection. That's how Miko was doing it in v2, but Yang pointed out that
>>> AmpereOne, which supports BBML2+NOABORT semantics, doesn't actually advertise
>>> BBML2 in its MMFR2. So we don't want to check that field, and instead rely
>>> solely on the MIDR allow-list + a command line override. It was me that
>>> suggested putting that in the SW feature register, and I think that still sounds
>>> like the right solution for this situation?
>>
>> I think this is mixing two different things:
>>
>> - preventing BBM-L2 from being visible to the kernel: this is what my
>>    suggestion is doing by nuking an architectural feature in the
>>    relevant register
>>
>> - random HW not correctly advertising what they are doing: this is an
>>    erratum workaround
>>
>> I'd rather we don't conflate the two things, and make them very
>> explicitly distinct.
> 
> It all sounds so obvious when you put it like that! :)
> 
> I'm guessing there is a layer where the workaround can be applied to the
> sanitised feature registers on a per-cpu basis and that won't affect this global
> override which will remain as an overlay on top? If so then that sounds perfect
> (you can probably tell I find the whole feature management framework rather
> inpeneterable). That workaround would be added as part of Yang's series anyway.

Unfortunately, there is no easy way to fix this via the normal erratum
workaround "capability". The sanitised feature registers are handled
separately (initialised via init_cpu_features() for boot CPU and
sanitised eachtime via update_cpu_features).

Also we do not "enable" any capability (i.e. calling cpu_enable())
until the very end, after the CPUs are all brought up (except for boot 
CPUs).

But it may be possible to "fix up" the BBML2 feature in
cpuinfo_store_*cpu(), without using the "enable" call back.

Something like:

diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 285d7d538342..8c23adbe29f8 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -479,7 +479,11 @@ static void __cpuinfo_store_cpu(struct 
cpuinfo_arm64 *info)
         info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
         info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
         info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);

+	/*
+	 * The CPU cap is not detected system wide, but we are able to
+	 * check if this CPU is affected by the Erratum.
+	 */
+       if (this_cpu_has_cap(AMPERE_ONE_ERRATUM_BBML2))
+               // Fixup  info->reg_id_aa64_mmfr2 with BBML2.
+
         info->reg_id_aa64mmfr3 = read_cpuid(ID_AA64MMFR3_EL1);
         info->reg_id_aa64mmfr4 = read_cpuid(ID_AA64MMFR4_EL1);
         info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
         info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);

Suzuki

> 
> So sounds like we are back to testing MMFR2.BBM in the matches function, with
> the addition of Maz's proposal above. Sorry for sending you round the houses, Miko.
> 
> Thanks,
> Ryan
> 
>>
>> Thanks,
>>
>> 	M.
>>
>
Ryan Roberts March 14, 2025, 1:12 p.m. UTC | #15
On 14/03/2025 12:33, Suzuki K Poulose wrote:
> On 14/03/2025 09:18, Ryan Roberts wrote:
>> On 13/03/2025 18:36, Marc Zyngier wrote:
>>> On Thu, 13 Mar 2025 18:22:00 +0000,
>>> Ryan Roberts <ryan.roberts@arm.com> wrote:
>>>>
>>>> On 13/03/2025 17:34, Marc Zyngier wrote:
>>>>> On Thu, 13 Mar 2025 10:41:10 +0000,
>>>>> Mikołaj Lenczewski <miko.lenczewski@arm.com> wrote:
>>>>>>
>>>>>> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/
>>>>>> idreg-override.c
>>>>>> index c6b185b885f7..9728faa10390 100644
>>>>>> --- a/arch/arm64/kernel/pi/idreg-override.c
>>>>>> +++ b/arch/arm64/kernel/pi/idreg-override.c
>>>>>> @@ -209,6 +209,7 @@ static const struct ftr_set_desc sw_features
>>>>>> __prel64_initconst = {
>>>>>>           FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
>>>>>>           FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
>>>>>>           FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
>>>>>> +        FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
>>>>>>           {}
>>>>>>       },
>>>>>>   };
>>>>>> @@ -246,6 +247,7 @@ static const struct {
>>>>>>       { "rodata=off",            "arm64_sw.rodataoff=1" },
>>>>>>       { "arm64.nolva",        "id_aa64mmfr2.varange=0" },
>>>>>>       { "arm64.no32bit_el0",        "id_aa64pfr0.el0=1" },
>>>>>> +    { "arm64.nobbml2",        "arm64_sw.nobbml2=1" },
>>>>>
>>>>> Why is that a SW feature? This looks very much like a HW feature to
>>>>> me, and you should instead mask out ID_AA64MMFR2_EL1.BBM, and be done
>>>>> with it. Something like:
>>>>
>>>> I think this implies that we would expect the BBM field to be advertising BBML2
>>>> support normally and we would check for that as part of the cpufeature
>>>> detection. That's how Miko was doing it in v2, but Yang pointed out that
>>>> AmpereOne, which supports BBML2+NOABORT semantics, doesn't actually advertise
>>>> BBML2 in its MMFR2. So we don't want to check that field, and instead rely
>>>> solely on the MIDR allow-list + a command line override. It was me that
>>>> suggested putting that in the SW feature register, and I think that still
>>>> sounds
>>>> like the right solution for this situation?
>>>
>>> I think this is mixing two different things:
>>>
>>> - preventing BBM-L2 from being visible to the kernel: this is what my
>>>    suggestion is doing by nuking an architectural feature in the
>>>    relevant register
>>>
>>> - random HW not correctly advertising what they are doing: this is an
>>>    erratum workaround
>>>
>>> I'd rather we don't conflate the two things, and make them very
>>> explicitly distinct.
>>
>> It all sounds so obvious when you put it like that! :)
>>
>> I'm guessing there is a layer where the workaround can be applied to the
>> sanitised feature registers on a per-cpu basis and that won't affect this global
>> override which will remain as an overlay on top? If so then that sounds perfect
>> (you can probably tell I find the whole feature management framework rather
>> inpeneterable). That workaround would be added as part of Yang's series anyway.
> 
> Unfortunately, there is no easy way to fix this via the normal erratum
> workaround "capability". The sanitised feature registers are handled
> separately (initialised via init_cpu_features() for boot CPU and
> sanitised eachtime via update_cpu_features).
> 
> Also we do not "enable" any capability (i.e. calling cpu_enable())
> until the very end, after the CPUs are all brought up (except for boot CPUs).
> 
> But it may be possible to "fix up" the BBML2 feature in
> cpuinfo_store_*cpu(), without using the "enable" call back.
> 
> Something like:
> 
> diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
> index 285d7d538342..8c23adbe29f8 100644
> --- a/arch/arm64/kernel/cpuinfo.c
> +++ b/arch/arm64/kernel/cpuinfo.c
> @@ -479,7 +479,11 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
>         info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
>         info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
>         info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
> 
> +    /*
> +     * The CPU cap is not detected system wide, but we are able to
> +     * check if this CPU is affected by the Erratum.
> +     */
> +       if (this_cpu_has_cap(AMPERE_ONE_ERRATUM_BBML2))
> +               // Fixup  info->reg_id_aa64_mmfr2 with BBML2.
> +
>         info->reg_id_aa64mmfr3 = read_cpuid(ID_AA64MMFR3_EL1);
>         info->reg_id_aa64mmfr4 = read_cpuid(ID_AA64MMFR4_EL1);
>         info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
>         info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
> 

This is the type of thing I was imagining, thanks!

> Suzuki
> 
>>
>> So sounds like we are back to testing MMFR2.BBM in the matches function, with
>> the addition of Maz's proposal above. Sorry for sending you round the houses,
>> Miko.
>>
>> Thanks,
>> Ryan
>>
>>>
>>> Thanks,
>>>
>>>     M.
>>>
>>
>
diff mbox series

Patch

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fb8752b42ec8..3e4cc917a07e 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -453,6 +453,9 @@ 
 	arm64.no32bit_el0 [ARM64] Unconditionally disable the execution of
 			32 bit applications.
 
+	arm64.nobbml2	[ARM64] Unconditionally disable Break-Before-Make Level
+			2 support
+
 	arm64.nobti	[ARM64] Unconditionally disable Branch Target
 			Identification support
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 940343beb3d4..49deda2b22ae 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2057,6 +2057,17 @@  config ARM64_TLB_RANGE
 	  The feature introduces new assembly instructions, and they were
 	  support when binutils >= 2.30.
 
+config ARM64_BBML2_NOABORT
+	bool "Enable support for Break-Before-Make Level 2 detection and usage"
+	default y
+	help
+	  FEAT_BBM provides detection of support levels for break-before-make
+	  sequences. If BBM level 2 is supported, some TLB maintenance requirements
+	  can be relaxed to improve performance. We additionally require the
+	  property that the implementation cannot ever raise TLB Conflict Aborts.
+	  Selecting N causes the kernel to fall back to BBM level 0 behaviour
+	  even if the system supports BBM level 2.
+
 endmenu # "ARMv8.4 architectural features"
 
 menu "ARMv8.5 architectural features"
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 0b5ca6e0eb09..2d6db33d4e45 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -23,6 +23,8 @@  cpucap_is_possible(const unsigned int cap)
 		return IS_ENABLED(CONFIG_ARM64_PAN);
 	case ARM64_HAS_EPAN:
 		return IS_ENABLED(CONFIG_ARM64_EPAN);
+	case ARM64_HAS_BBML2_NOABORT:
+		return IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT);
 	case ARM64_SVE:
 		return IS_ENABLED(CONFIG_ARM64_SVE);
 	case ARM64_SME:
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index e0e4478f5fb5..7f5b220dacde 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -18,6 +18,7 @@ 
 #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
 #define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
 #define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF	8
+#define ARM64_SW_FEATURE_OVERRIDE_NOBBML2	12
 
 #ifndef __ASSEMBLY__
 
@@ -866,6 +867,11 @@  static __always_inline bool system_supports_mpam_hcr(void)
 	return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
 }
 
+static inline bool system_supports_bbml2_noabort(void)
+{
+	return alternative_has_cap_unlikely(ARM64_HAS_BBML2_NOABORT);
+}
+
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d561cf3b8ac7..b936e0805161 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2176,6 +2176,76 @@  static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
 	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
 }
 
+static inline bool bbml2_possible(void)
+{
+	return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOBBML2);
+}
+
+static bool cpu_has_bbml2_noabort(unsigned int cpu_midr)
+{
+	/* We want to allow usage of bbml2 in as wide a range of kernel contexts
+	 * as possible. This list is therefore an allow-list of known-good
+	 * implementations that both support bbml2 and additionally, fulfill the
+	 * extra constraint of never generating TLB conflict aborts when using
+	 * the relaxed bbml2 semantics (such aborts make use of bbml2 in certain
+	 * kernel contexts difficult to prove safe against recursive aborts).
+	 *
+	 * Note that implementations can only be considered "known-good" if their
+	 * implementors attest to the fact that the implementation never raises
+	 * TLB conflict aborts for bbml2 mapping granularity changes.
+	 */
+	static const struct midr_range supports_bbml2_noabort_list[] = {
+		MIDR_REV_RANGE(MIDR_CORTEX_X4, 0, 3, 0xf),
+		MIDR_REV_RANGE(MIDR_NEOVERSE_V3, 0, 2, 0xf),
+		{}
+	};
+
+	return is_midr_in_range_list(cpu_midr, supports_bbml2_noabort_list);
+}
+
+static inline unsigned int __cpu_read_midr(int cpu)
+{
+	WARN_ON_ONCE(!cpu_online(cpu));
+
+	return per_cpu(cpu_data, cpu).reg_midr;
+}
+
+static bool has_bbml2_noabort(const struct arm64_cpu_capabilities *caps, int scope)
+{
+	if (!IS_ENABLED(CONFIG_ARM64_BBML2_NOABORT))
+		return false;
+
+	if (!bbml2_possible())
+		return false;
+
+	if (scope & SCOPE_SYSTEM) {
+		int cpu;
+
+		/* We are a boot CPU, and must verify that all enumerated boot
+		 * CPUs have MIDR values within our allowlist. Otherwise, we do
+		 * not allow the BBML2 feature to avoid potential faults when
+		 * the insufficient CPUs access memory regions using BBML2
+		 * semantics.
+		 */
+		for_each_online_cpu(cpu) {
+			if (!cpu_has_bbml2_noabort(__cpu_read_midr(cpu)))
+				return false;
+		}
+
+		return true;
+	} else if (scope & SCOPE_LOCAL_CPU) {
+		/* We are a hot-plugged CPU, so only need to check our MIDR.
+		 * If we have the correct MIDR, but the kernel booted on an
+		 * insufficient CPU, we will not use BBML2 (this is safe). If
+		 * we have an incorrect MIDR, but the kernel booted on a
+		 * sufficient CPU, we will not bring up this CPU.
+		 */
+		return cpu_has_bbml2_noabort(read_cpuid_id());
+	}
+
+	return false;
+}
+
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 {
@@ -2926,6 +2996,12 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
 	},
+	{
+		.desc = "BBM Level 2 without conflict abort",
+		.capability = ARM64_HAS_BBML2_NOABORT,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_bbml2_noabort,
+	},
 	{
 		.desc = "52-bit Virtual Addressing for KVM (LPA2)",
 		.capability = ARM64_HAS_LPA2,
diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
index c6b185b885f7..9728faa10390 100644
--- a/arch/arm64/kernel/pi/idreg-override.c
+++ b/arch/arm64/kernel/pi/idreg-override.c
@@ -209,6 +209,7 @@  static const struct ftr_set_desc sw_features __prel64_initconst = {
 		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
 		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
 		FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
+		FIELD("nobbml2", ARM64_SW_FEATURE_OVERRIDE_NOBBML2, NULL),
 		{}
 	},
 };
@@ -246,6 +247,7 @@  static const struct {
 	{ "rodata=off",			"arm64_sw.rodataoff=1" },
 	{ "arm64.nolva",		"id_aa64mmfr2.varange=0" },
 	{ "arm64.no32bit_el0",		"id_aa64pfr0.el0=1" },
+	{ "arm64.nobbml2",		"arm64_sw.nobbml2=1" },
 };
 
 static int __init parse_hexdigit(const char *p, u64 *v)
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 1e65f2fb45bd..b03a375e5507 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -14,6 +14,7 @@  HAS_ADDRESS_AUTH_ARCH_QARMA5
 HAS_ADDRESS_AUTH_IMP_DEF
 HAS_AMU_EXTN
 HAS_ARMv8_4_TTL
+HAS_BBML2_NOABORT
 HAS_CACHE_DIC
 HAS_CACHE_IDC
 HAS_CNP