diff mbox series

[RFC/RFT,01/16] arm64: cpufeature: Detect SSBS and advertise to userspace

Message ID 20191004120430.11929-2-ard.biesheuvel@linaro.org (mailing list archive)
State New, archived
Headers show
Series arm64: backport SSBS handling to v4.19-stable | expand

Commit Message

Ard Biesheuvel Oct. 4, 2019, 12:04 p.m. UTC
From: Will Deacon <will.deacon@arm.com>

Armv8.5 introduces a new PSTATE bit known as Speculative Store Bypass
Safe (SSBS) which can be used as a mitigation against Spectre variant 4.

Additionally, a CPU may provide instructions to manipulate PSTATE.SSBS
directly, so that userspace can toggle the SSBS control without trapping
to the kernel.

This patch probes for the existence of SSBS and advertises the new instructions
to userspace if they exist.

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
(cherry picked from commit d71be2b6c0e19180b5f80a6d42039cc074a693a2)
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/include/asm/cpucaps.h    |  3 ++-
 arch/arm64/include/asm/sysreg.h     | 16 ++++++++++++----
 arch/arm64/include/uapi/asm/hwcap.h |  1 +
 arch/arm64/kernel/cpufeature.c      | 19 +++++++++++++++++--
 arch/arm64/kernel/cpuinfo.c         |  1 +
 5 files changed, 33 insertions(+), 7 deletions(-)

Comments

Mark Rutland Oct. 8, 2019, 2:35 p.m. UTC | #1
Hi Ard, 

I have a few general backport notes below, and one issue with this
patch/series.

On Fri, Oct 04, 2019 at 02:04:15PM +0200, Ard Biesheuvel wrote:
> From: Will Deacon <will.deacon@arm.com>
> 
> Armv8.5 introduces a new PSTATE bit known as Speculative Store Bypass
> Safe (SSBS) which can be used as a mitigation against Spectre variant 4.
> 
> Additionally, a CPU may provide instructions to manipulate PSTATE.SSBS
> directly, so that userspace can toggle the SSBS control without trapping
> to the kernel.
> 
> This patch probes for the existence of SSBS and advertises the new instructions
> to userspace if they exist.
> 
> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
> Signed-off-by: Will Deacon <will.deacon@arm.com>
> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> (cherry picked from commit d71be2b6c0e19180b5f80a6d42039cc074a693a2)

This should be the first line, formatted like:

commit d71be2b6c0e19180b5f80a6d42039cc074a693a2 upstream.

... as documented in Documentation/process/stable-kernel-rules.rst.
I see that's sometimes not followed, but I think we should stick to that
unless there's a strong reason not to.

If you had to make any substantial changes, I'd recommend a note above
your S-o-B, e.g.:

[v4.9: Renamed foo() to bar() to match xxyyzz()]

... and regardless I usually add a [vX.Y backport] trailing my S-o-B to
make it clear from the log where that happened.

For this patch specifically, I believe that you also need to backport
commit:

  f54dada8274643e3 ("arm64: fix SSBS sanitization")

... which fixes the SSBS bit being cleared across sigreturns.

Otherwise, this patch looks good to me.

Thanks,
Mark.


> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> ---
>  arch/arm64/include/asm/cpucaps.h    |  3 ++-
>  arch/arm64/include/asm/sysreg.h     | 16 ++++++++++++----
>  arch/arm64/include/uapi/asm/hwcap.h |  1 +
>  arch/arm64/kernel/cpufeature.c      | 19 +++++++++++++++++--
>  arch/arm64/kernel/cpuinfo.c         |  1 +
>  5 files changed, 33 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> index 25ce9056cf64..c3de0bbf0e9a 100644
> --- a/arch/arm64/include/asm/cpucaps.h
> +++ b/arch/arm64/include/asm/cpucaps.h
> @@ -52,7 +52,8 @@
>  #define ARM64_MISMATCHED_CACHE_TYPE		31
>  #define ARM64_HAS_STAGE2_FWB			32
>  #define ARM64_WORKAROUND_1463225		33
> +#define ARM64_SSBS				34
>  
> -#define ARM64_NCAPS				34
> +#define ARM64_NCAPS				35
>  
>  #endif /* __ASM_CPUCAPS_H */
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index c1470931b897..2fc6242baf11 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -419,6 +419,7 @@
>  #define SYS_ICH_LR15_EL2		__SYS__LR8_EL2(7)
>  
>  /* Common SCTLR_ELx flags. */
> +#define SCTLR_ELx_DSSBS	(1UL << 44)
>  #define SCTLR_ELx_EE    (1 << 25)
>  #define SCTLR_ELx_IESB	(1 << 21)
>  #define SCTLR_ELx_WXN	(1 << 19)
> @@ -439,7 +440,7 @@
>  			 (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
>  			 (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
>  			 (1 << 27) | (1 << 30) | (1 << 31) | \
> -			 (0xffffffffUL << 32))
> +			 (0xffffefffUL << 32))
>  
>  #ifdef CONFIG_CPU_BIG_ENDIAN
>  #define ENDIAN_SET_EL2		SCTLR_ELx_EE
> @@ -453,7 +454,7 @@
>  #define SCTLR_EL2_SET	(SCTLR_ELx_IESB   | ENDIAN_SET_EL2   | SCTLR_EL2_RES1)
>  #define SCTLR_EL2_CLEAR	(SCTLR_ELx_M      | SCTLR_ELx_A    | SCTLR_ELx_C   | \
>  			 SCTLR_ELx_SA     | SCTLR_ELx_I    | SCTLR_ELx_WXN | \
> -			 ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
> +			 SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
>  
>  #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
>  #error "Inconsistent SCTLR_EL2 set/clear bits"
> @@ -477,7 +478,7 @@
>  			 (1 << 29))
>  #define SCTLR_EL1_RES0  ((1 << 6)  | (1 << 10) | (1 << 13) | (1 << 17) | \
>  			 (1 << 27) | (1 << 30) | (1 << 31) | \
> -			 (0xffffffffUL << 32))
> +			 (0xffffefffUL << 32))
>  
>  #ifdef CONFIG_CPU_BIG_ENDIAN
>  #define ENDIAN_SET_EL1		(SCTLR_EL1_E0E | SCTLR_ELx_EE)
> @@ -494,7 +495,7 @@
>  			 ENDIAN_SET_EL1 | SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
>  #define SCTLR_EL1_CLEAR	(SCTLR_ELx_A   | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD    |\
>  			 SCTLR_EL1_UMA | SCTLR_ELx_WXN     | ENDIAN_CLEAR_EL1 |\
> -			 SCTLR_EL1_RES0)
> +			 SCTLR_ELx_DSSBS | SCTLR_EL1_RES0)
>  
>  #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
>  #error "Inconsistent SCTLR_EL1 set/clear bits"
> @@ -544,6 +545,13 @@
>  #define ID_AA64PFR0_EL0_64BIT_ONLY	0x1
>  #define ID_AA64PFR0_EL0_32BIT_64BIT	0x2
>  
> +/* id_aa64pfr1 */
> +#define ID_AA64PFR1_SSBS_SHIFT		4
> +
> +#define ID_AA64PFR1_SSBS_PSTATE_NI	0
> +#define ID_AA64PFR1_SSBS_PSTATE_ONLY	1
> +#define ID_AA64PFR1_SSBS_PSTATE_INSNS	2
> +
>  /* id_aa64mmfr0 */
>  #define ID_AA64MMFR0_TGRAN4_SHIFT	28
>  #define ID_AA64MMFR0_TGRAN64_SHIFT	24
> diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
> index 17c65c8f33cb..2bcd6e4f3474 100644
> --- a/arch/arm64/include/uapi/asm/hwcap.h
> +++ b/arch/arm64/include/uapi/asm/hwcap.h
> @@ -48,5 +48,6 @@
>  #define HWCAP_USCAT		(1 << 25)
>  #define HWCAP_ILRCPC		(1 << 26)
>  #define HWCAP_FLAGM		(1 << 27)
> +#define HWCAP_SSBS		(1 << 28)
>  
>  #endif /* _UAPI__ASM_HWCAP_H */
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 859d63cc99a3..58146d636a83 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -164,6 +164,11 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
>  	ARM64_FTR_END,
>  };
>  
> +static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
> +	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
> +	ARM64_FTR_END,
> +};
> +
>  static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
>  	/*
>  	 * We already refuse to boot CPUs that don't support our configured
> @@ -379,7 +384,7 @@ static const struct __ftr_reg_entry {
>  
>  	/* Op1 = 0, CRn = 0, CRm = 4 */
>  	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
> -	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
> +	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
>  	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
>  
>  	/* Op1 = 0, CRn = 0, CRm = 5 */
> @@ -669,7 +674,6 @@ void update_cpu_features(int cpu,
>  
>  	/*
>  	 * EL3 is not our concern.
> -	 * ID_AA64PFR1 is currently RES0.
>  	 */
>  	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
>  				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
> @@ -1254,6 +1258,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>  		.cpu_enable = cpu_enable_hw_dbm,
>  	},
>  #endif
> +	{
> +		.desc = "Speculative Store Bypassing Safe (SSBS)",
> +		.capability = ARM64_SSBS,
> +		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
> +		.matches = has_cpuid_feature,
> +		.sys_reg = SYS_ID_AA64PFR1_EL1,
> +		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
> +		.sign = FTR_UNSIGNED,
> +		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
> +	},
>  	{},
>  };
>  
> @@ -1299,6 +1313,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
>  #ifdef CONFIG_ARM64_SVE
>  	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
>  #endif
> +	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
>  	{},
>  };
>  
> diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
> index e9ab7b3ed317..dce971f2c167 100644
> --- a/arch/arm64/kernel/cpuinfo.c
> +++ b/arch/arm64/kernel/cpuinfo.c
> @@ -81,6 +81,7 @@ static const char *const hwcap_str[] = {
>  	"uscat",
>  	"ilrcpc",
>  	"flagm",
> +	"ssbs",
>  	NULL
>  };
>  
> -- 
> 2.20.1
>
Ard Biesheuvel Oct. 8, 2019, 2:39 p.m. UTC | #2
On Tue, 8 Oct 2019 at 16:35, Mark Rutland <mark.rutland@arm.com> wrote:
>
> Hi Ard,
>
> I have a few general backport notes below, and one issue with this
> patch/series.
>
> On Fri, Oct 04, 2019 at 02:04:15PM +0200, Ard Biesheuvel wrote:
> > From: Will Deacon <will.deacon@arm.com>
> >
> > Armv8.5 introduces a new PSTATE bit known as Speculative Store Bypass
> > Safe (SSBS) which can be used as a mitigation against Spectre variant 4.
> >
> > Additionally, a CPU may provide instructions to manipulate PSTATE.SSBS
> > directly, so that userspace can toggle the SSBS control without trapping
> > to the kernel.
> >
> > This patch probes for the existence of SSBS and advertises the new instructions
> > to userspace if they exist.
> >
> > Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
> > Signed-off-by: Will Deacon <will.deacon@arm.com>
> > Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> > (cherry picked from commit d71be2b6c0e19180b5f80a6d42039cc074a693a2)
>
> This should be the first line, formatted like:
>
> commit d71be2b6c0e19180b5f80a6d42039cc074a693a2 upstream.
>
> ... as documented in Documentation/process/stable-kernel-rules.rst.
> I see that's sometimes not followed, but I think we should stick to that
> unless there's a strong reason not to.
>

Indeed, but I have adopted the

[ Upstream commit xxx ]

style in the meantime in [0] (and [1])


> If you had to make any substantial changes, I'd recommend a note above
> your S-o-B, e.g
>
> [v4.9: Renamed foo() to bar() to match xxyyzz()]
>

Sure.

> ... and regardless I usually add a [vX.Y backport] trailing my S-o-B to
> make it clear from the log where that happened.
>
> For this patch specifically, I believe that you also need to backport
> commit:
>
>   f54dada8274643e3 ("arm64: fix SSBS sanitization")
>
> ... which fixes the SSBS bit being cleared across sigreturns.
>

Coming up in 05/16


> Otherwise, this patch looks good to me.
>


Thanks.


[0] https://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux.git/log/?h=ssbs-v4.19-stable
[1] https://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux.git/log/?h=ssbs-v4.14-stable
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 25ce9056cf64..c3de0bbf0e9a 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -52,7 +52,8 @@ 
 #define ARM64_MISMATCHED_CACHE_TYPE		31
 #define ARM64_HAS_STAGE2_FWB			32
 #define ARM64_WORKAROUND_1463225		33
+#define ARM64_SSBS				34
 
-#define ARM64_NCAPS				34
+#define ARM64_NCAPS				35
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index c1470931b897..2fc6242baf11 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -419,6 +419,7 @@ 
 #define SYS_ICH_LR15_EL2		__SYS__LR8_EL2(7)
 
 /* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_DSSBS	(1UL << 44)
 #define SCTLR_ELx_EE    (1 << 25)
 #define SCTLR_ELx_IESB	(1 << 21)
 #define SCTLR_ELx_WXN	(1 << 19)
@@ -439,7 +440,7 @@ 
 			 (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
 			 (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
 			 (1 << 27) | (1 << 30) | (1 << 31) | \
-			 (0xffffffffUL << 32))
+			 (0xffffefffUL << 32))
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define ENDIAN_SET_EL2		SCTLR_ELx_EE
@@ -453,7 +454,7 @@ 
 #define SCTLR_EL2_SET	(SCTLR_ELx_IESB   | ENDIAN_SET_EL2   | SCTLR_EL2_RES1)
 #define SCTLR_EL2_CLEAR	(SCTLR_ELx_M      | SCTLR_ELx_A    | SCTLR_ELx_C   | \
 			 SCTLR_ELx_SA     | SCTLR_ELx_I    | SCTLR_ELx_WXN | \
-			 ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+			 SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
 
 #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
 #error "Inconsistent SCTLR_EL2 set/clear bits"
@@ -477,7 +478,7 @@ 
 			 (1 << 29))
 #define SCTLR_EL1_RES0  ((1 << 6)  | (1 << 10) | (1 << 13) | (1 << 17) | \
 			 (1 << 27) | (1 << 30) | (1 << 31) | \
-			 (0xffffffffUL << 32))
+			 (0xffffefffUL << 32))
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define ENDIAN_SET_EL1		(SCTLR_EL1_E0E | SCTLR_ELx_EE)
@@ -494,7 +495,7 @@ 
 			 ENDIAN_SET_EL1 | SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
 #define SCTLR_EL1_CLEAR	(SCTLR_ELx_A   | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD    |\
 			 SCTLR_EL1_UMA | SCTLR_ELx_WXN     | ENDIAN_CLEAR_EL1 |\
-			 SCTLR_EL1_RES0)
+			 SCTLR_ELx_DSSBS | SCTLR_EL1_RES0)
 
 #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
 #error "Inconsistent SCTLR_EL1 set/clear bits"
@@ -544,6 +545,13 @@ 
 #define ID_AA64PFR0_EL0_64BIT_ONLY	0x1
 #define ID_AA64PFR0_EL0_32BIT_64BIT	0x2
 
+/* id_aa64pfr1 */
+#define ID_AA64PFR1_SSBS_SHIFT		4
+
+#define ID_AA64PFR1_SSBS_PSTATE_NI	0
+#define ID_AA64PFR1_SSBS_PSTATE_ONLY	1
+#define ID_AA64PFR1_SSBS_PSTATE_INSNS	2
+
 /* id_aa64mmfr0 */
 #define ID_AA64MMFR0_TGRAN4_SHIFT	28
 #define ID_AA64MMFR0_TGRAN64_SHIFT	24
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 17c65c8f33cb..2bcd6e4f3474 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -48,5 +48,6 @@ 
 #define HWCAP_USCAT		(1 << 25)
 #define HWCAP_ILRCPC		(1 << 26)
 #define HWCAP_FLAGM		(1 << 27)
+#define HWCAP_SSBS		(1 << 28)
 
 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 859d63cc99a3..58146d636a83 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -164,6 +164,11 @@  static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 	ARM64_FTR_END,
 };
 
+static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+	ARM64_FTR_END,
+};
+
 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 	/*
 	 * We already refuse to boot CPUs that don't support our configured
@@ -379,7 +384,7 @@  static const struct __ftr_reg_entry {
 
 	/* Op1 = 0, CRn = 0, CRm = 4 */
 	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
-	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
+	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
 	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
 
 	/* Op1 = 0, CRn = 0, CRm = 5 */
@@ -669,7 +674,6 @@  void update_cpu_features(int cpu,
 
 	/*
 	 * EL3 is not our concern.
-	 * ID_AA64PFR1 is currently RES0.
 	 */
 	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
 				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
@@ -1254,6 +1258,16 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_hw_dbm,
 	},
 #endif
+	{
+		.desc = "Speculative Store Bypassing Safe (SSBS)",
+		.capability = ARM64_SSBS,
+		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64PFR1_EL1,
+		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
+	},
 	{},
 };
 
@@ -1299,6 +1313,7 @@  static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 #ifdef CONFIG_ARM64_SVE
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
 #endif
+	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
 	{},
 };
 
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index e9ab7b3ed317..dce971f2c167 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -81,6 +81,7 @@  static const char *const hwcap_str[] = {
 	"uscat",
 	"ilrcpc",
 	"flagm",
+	"ssbs",
 	NULL
 };