@@ -2011,8 +2011,10 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
int err;
elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
- if (!elem)
- goto err;
+ if (!elem) {
+ err = -ENOMEM;
+ goto unrecoverable_err;
+ }
elem->insn_idx = insn_idx;
elem->prev_insn_idx = prev_insn_idx;
@@ -2022,12 +2024,19 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
env->stack_size++;
err = copy_verifier_state(&elem->st, cur);
if (err)
- goto err;
+ goto unrecoverable_err;
elem->st.speculative |= speculative;
if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
verbose(env, "The sequence of %d jumps is too complex.\n",
env->stack_size);
- goto err;
+ /* Do not return -EINVAL here: on a speculative path, that would
+ * make the main loop try to mitigate this with a nospec. Were that
+ * tried anyway, we would shortly hit this limit again on the next
+ * non-speculative path that has to be checked, and fail there with
+ * the same unrecoverable -ENOMEM.
+ */
+ err = -ENOMEM;
+ goto unrecoverable_err;
}
if (elem->st.parent) {
++elem->st.parent->branches;
@@ -2042,12 +2051,14 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
*/
}
return &elem->st;
-err:
+unrecoverable_err:
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
/* pop all elements and return */
while (!pop_stack(env, NULL, NULL, false));
- return NULL;
+ WARN_ON_ONCE(err >= 0);
+ WARN_ON_ONCE(error_recoverable_with_nospec(err));
+ return ERR_PTR(err);
}
#define CALLER_SAVED_REGS 6
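
The two new WARN_ON_ONCE() assertions pin down push_stack()'s changed contract: it now fails only with a genuine negative errno, and only with one that the main verification loop will not try to paper over with a speculation barrier. For orientation, a sketch of the shape error_recoverable_with_nospec() is assumed to have; the authoritative errno set lives elsewhere in verifier.c:

/* Sketch, not verbatim kernel code: "recoverable" means the error only
 * proves the *speculative* path unsafe, so the main loop may insert a
 * speculation barrier (nospec) and continue instead of rejecting the
 * program. Resource errors such as -ENOMEM are never in this set.
 */
static bool error_recoverable_with_nospec(int err)
{
	return err == -EPERM || err == -EACCES || err == -EINVAL;
}
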
@@ -8856,8 +8867,8 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
prev_st = find_prev_entry(env, cur_st->parent, insn_idx);
/* branch out active iter state */
queued_st = push_stack(env, insn_idx + 1, insn_idx, false);
- if (!queued_st)
- return -ENOMEM;
+ if (IS_ERR(queued_st))
+ return PTR_ERR(queued_st);
queued_iter = get_iter_from_state(queued_st, meta);
queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
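
This caller conversion leans on the standard <linux/err.h> encoding: the negative errno is cast into the pointer itself, which lands in the topmost, never-mapped page of the address space. Below is a minimal, self-contained userspace mirror of the idiom; the macro definitions are copied in only so the example runs outside the kernel, and make_state() is a hypothetical stand-in for push_stack():

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095	/* mirrors <linux/err.h> */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical callee: the failure cause travels in the returned
 * pointer instead of being collapsed to NULL and lost.
 */
static void *make_state(int fail_with)
{
	static int state;

	return fail_with ? ERR_PTR(fail_with) : (void *)&state;
}

int main(void)
{
	void *st = make_state(-ENOMEM);

	if (IS_ERR(st))
		printf("propagated err = %ld\n", PTR_ERR(st)); /* -12 */
	return 0;
}
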
@@ -10440,8 +10451,8 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
* proceed with next instruction within current frame.
*/
callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false);
- if (!callback_state)
- return -ENOMEM;
+ if (IS_ERR(callback_state))
+ return PTR_ERR(callback_state);
err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb,
callback_state);
@@ -13892,7 +13903,7 @@ sanitize_speculative_path(struct bpf_verifier_env *env,
struct bpf_reg_state *regs;
branch = push_stack(env, next_idx, curr_idx, true);
- if (branch && insn) {
+ if (!IS_ERR(branch) && insn) {
regs = branch->frame[branch->curframe]->regs;
if (BPF_SRC(insn->code) == BPF_K) {
mark_reg_unknown(env, regs, insn->dst_reg);
@@ -13920,7 +13931,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
u8 opcode = BPF_OP(insn->code);
u32 alu_state, alu_limit;
struct bpf_reg_state tmp;
- bool ret;
+ struct bpf_verifier_state *branch;
int err;
if (can_skip_alu_sanitation(env, insn))
@@ -13993,11 +14004,11 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
tmp = *dst_reg;
copy_register_state(dst_reg, ptr_reg);
}
- ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
- env->insn_idx);
- if (!ptr_is_dst_reg && ret)
+ branch = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
+ env->insn_idx);
+ if (!ptr_is_dst_reg && !IS_ERR(branch))
*dst_reg = tmp;
- return !ret ? REASON_STACK : 0;
+ return IS_ERR(branch) ? REASON_STACK : 0;
}
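
These two hunks address the one real trap in the conversion: an ERR_PTR() value is non-NULL and therefore truthy, so the old tests — `if (branch && insn)` above, and `bool ret; ... if (!ret)` here — would silently treat a failed speculative push as success once push_stack() starts returning error pointers. Every truth-value test on the result has to become an IS_ERR() check. A small standalone illustration, with the helpers mirrored from <linux/err.h> as in the previous example:

#include <assert.h>
#include <errno.h>
#include <stdbool.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *branch = ERR_PTR(-ENOMEM);	/* a failed push */

	bool old_check = (branch != NULL);	/* true: wrongly "succeeds" */
	bool new_check = !IS_ERR(branch);	/* false: correctly fails */

	assert(old_check && !new_check);
	return 0;
}
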
static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
@@ -16246,8 +16257,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
/* branch out 'fallthrough' insn as a new state to explore */
queued_st = push_stack(env, idx + 1, idx, false);
- if (!queued_st)
- return -ENOMEM;
+ if (IS_ERR(queued_st))
+ return PTR_ERR(queued_st);
queued_st->may_goto_depth++;
if (prev_st)
@@ -16311,10 +16322,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
* the fall-through branch for simulation under speculative
* execution.
*/
- if (!env->bypass_spec_v1 &&
- !sanitize_speculative_path(env, insn, *insn_idx + 1,
- *insn_idx))
- return -EFAULT;
+ if (!env->bypass_spec_v1) {
+ struct bpf_verifier_state *branch = sanitize_speculative_path(
+ env, insn, *insn_idx + 1, *insn_idx);
+ if (IS_ERR(branch))
+ return PTR_ERR(branch);
+ }
if (env->log.level & BPF_LOG_LEVEL)
print_insn_state(env, this_branch, this_branch->curframe);
*insn_idx += insn->off;
@@ -16324,11 +16337,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
* program will go. If needed, push the goto branch for
* simulation under speculative execution.
*/
- if (!env->bypass_spec_v1 &&
- !sanitize_speculative_path(env, insn,
- *insn_idx + insn->off + 1,
- *insn_idx))
- return -EFAULT;
+ if (!env->bypass_spec_v1) {
+ struct bpf_verifier_state *branch = sanitize_speculative_path(
+ env, insn, *insn_idx + insn->off + 1, *insn_idx);
+ if (IS_ERR(branch))
+ return PTR_ERR(branch);
+ }
if (env->log.level & BPF_LOG_LEVEL)
print_insn_state(env, this_branch, this_branch->curframe);
return 0;
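
Propagating PTR_ERR(branch) instead of a blanket -EFAULT is the point of the exercise: the main loop can now tell a speculative path that is merely unsafe architecturally (recoverable) apart from internal and resource failures. A hedged sketch of the consuming side follows; do_check_insn() and the nospec flag in the per-insn aux data are assumptions about the surrounding series, not verbatim kernel code:

err = do_check_insn(env, insn);
if (err < 0 && state->speculative &&
    error_recoverable_with_nospec(err)) {
	/* Request a barrier before this insn; behind it, the
	 * speculatively unsafe insn never executes, so the error
	 * can be dropped and this path treated as safe.
	 */
	cur_aux(env)->nospec = true;
	err = 0;
}
if (err < 0)
	return err;
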
@@ -16351,8 +16365,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
false);
- if (!other_branch)
- return -EFAULT;
+ if (IS_ERR(other_branch))
+ return PTR_ERR(other_branch);
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
if (BPF_SRC(insn->code) == BPF_X) {
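
The final hunk applies the same mechanical rule as every call site above. As a summary, the before/after template (hypothetical caller, condensed):

/* before: NULL signalled failure, the real cause was lost, and each
 * caller had to guess an errno (-ENOMEM here, -EFAULT elsewhere)
 */
st = push_stack(env, idx + 1, idx, false);
if (!st)
	return -ENOMEM;

/* after: the errno travels inside the pointer */
st = push_stack(env, idx + 1, idx, false);
if (IS_ERR(st))
	return PTR_ERR(st);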