@@ -1508,15 +1508,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
- /*
- * Nothing required here.
- *
- * In case of arm64, we rely on the firmware mitigation of
- * Speculative Store Bypass as controlled via the ssbd kernel
- * parameter. Whenever the mitigation is enabled, it works
- * for all of the kernel code with no need to provide any
- * additional instructions.
- */
+ /* TODO: emit(A64_SB) */
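+ /*
+  * Sketch of what that TODO could expand to, kept as a comment for now:
+  * mirror the spec_bar() sequence from asm/barrier.h, i.e. emit the SB
+  * instruction where ARM64_HAS_SB is available and fall back to
+  * DSB NSH + ISB otherwise. A64_SB, A64_DSB_NSH and A64_ISB are assumed
+  * encodings that would still have to be added to bpf_jit.h:
+  *
+  *	if (cpus_have_final_cap(ARM64_HAS_SB)) {
+  *		emit(A64_SB, ctx);
+  *	} else {
+  *		emit(A64_DSB_NSH, ctx);
+  *		emit(A64_ISB, ctx);
+  *	}
+  */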
break;
/* ST: *(size *)(dst + off) = imm */
@@ -2423,7 +2423,19 @@ static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
{
- return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
+#ifdef CONFIG_ARM64
+ /* On arm64, we rely on the firmware mitigation of Speculative
+ * Store Bypass as controlled via the ssbd kernel parameter. Whenever
+ * the mitigation is enabled, it works for all of the kernel code with
+ * no need to provide any additional instructions. Therefore, skip
+ * inserting nospec insns against Spectre v4 if arm64
+ * spectre_v4_mitigations_on/dynamic() is true.
+ *
+ * arm64_get_spectre_v4_state() and SPECTRE_VULNERABLE come from
+ * asm/spectre.h, which this header needs to pull in for arm64 builds.
+ */
+ bool spec_v4_vulnerable =
+ 	arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE;
+#else
+ bool spec_v4_vulnerable = true;
+#endif
+ return !spec_v4_vulnerable || cpu_mitigations_off() ||
+        bpf_token_capable(token, CAP_PERFMON);
}
int bpf_map_new_fd(struct bpf_map *map, int flags);
@@ -561,7 +561,7 @@ struct bpf_insn_aux_data {
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
- bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
+ bool nospec_result; /* prevent following insns from executing speculatively */
bool zext_dst; /* this insn zero extends dst reg */
bool needs_zext; /* alu op needs to clear upper bits */
bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
@@ -4904,7 +4904,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
}
if (sanitize)
- env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
+ env->insn_aux_data[insn_idx].nospec_result = true;
}
err = destroy_if_dynptr_stack_slot(env, state, spi);
@@ -20445,7 +20445,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
if (type == BPF_WRITE &&
- env->insn_aux_data[i + delta].sanitize_stack_spill) {
+ env->insn_aux_data[i + delta].nospec_result) {
struct bpf_insn patch[] = {
*insn,
BPF_ST_NOSPEC(),