
[bpf-next,2/3] bpf: refactor checks for range computation

Message ID: 20240411173732.221881-2-cupertino.miranda@oracle.com (mailing list archive)
State: Changes Requested
Delegated to: BPF
Series: [bpf-next,1/3] bpf: fix to XOR and OR range computation

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
netdev/series_format warning Series does not have a cover letter
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 955 this patch: 955
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 10 maintainers not CCed: john.fastabend@gmail.com jolsa@kernel.org sdf@google.com kpsingh@kernel.org daniel@iogearbox.net martin.lau@linux.dev haoluo@google.com andrii@kernel.org eddyz87@gmail.com song@kernel.org
netdev/build_clang success Errors and warnings before: 955 this patch: 955
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 966 this patch: 966
netdev/checkpatch warning CHECK: Using comparison to false is error prone WARNING: Missing a blank line after declarations WARNING: line length of 83 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc

Commit Message

Cupertino Miranda April 11, 2024, 5:37 p.m. UTC
Split range computation checks into their own function, isolating the
pessimistic range setting for dst_reg and the failing return to a single point.

Signed-off-by: Cupertino Miranda <cupertino.miranda@oracle.com>
---
 kernel/bpf/verifier.c | 141 +++++++++++++++++++++++-------------------
 1 file changed, 77 insertions(+), 64 deletions(-)
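
For orientation, abridged from the diff below, the per-opcode bailouts in
adjust_scalar_min_max_vals() collapse into a single early return:

	if (!is_safe_to_compute_dst_reg_ranges(insn, src_reg)) {
		__mark_reg_unknown(env, dst_reg);
		return 0;
	}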

Comments

Yonghong Song April 15, 2024, 6:25 p.m. UTC | #1
On 4/11/24 10:37 AM, Cupertino Miranda wrote:
> Split range computation checks into their own function, isolating the
> pessimistic range setting for dst_reg and the failing return to a single point.
>
> Signed-off-by: Cupertino Miranda <cupertino.miranda@oracle.com>
> ---
>   kernel/bpf/verifier.c | 141 +++++++++++++++++++++++-------------------
>   1 file changed, 77 insertions(+), 64 deletions(-)
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index a219f601569a..7894af2e1bdb 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -13709,6 +13709,82 @@ static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
>   	__update_reg_bounds(dst_reg);
>   }
>   
> +static bool is_const_reg_and_valid(struct bpf_reg_state reg, bool alu32,
> +				   bool *valid)
> +{
> +	s64 smin_val = reg.smin_value;
> +	s64 smax_val = reg.smax_value;
> +	u64 umin_val = reg.umin_value;
> +	u64 umax_val = reg.umax_value;
> +
> +	s32 s32_min_val = reg.s32_min_value;
> +	s32 s32_max_val = reg.s32_max_value;
> +	u32 u32_min_val = reg.u32_min_value;
> +	u32 u32_max_val = reg.u32_max_value;
> +
> +	bool known = alu32 ? tnum_subreg_is_const(reg.var_off) :
> +			     tnum_is_const(reg.var_off);
> +
> +	if (alu32) {
> +		if ((known &&
> +		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
> +		      s32_min_val > s32_max_val || u32_min_val > u32_max_val)
> +			*valid &= false;

*valid = false;

> +	} else {
> +		if ((known &&
> +		     (smin_val != smax_val || umin_val != umax_val)) ||
> +		    smin_val > smax_val || umin_val > umax_val)
> +			*valid &= false;

*valid = false;
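
With a plain assignment, the 64-bit branch would read roughly as below (sketch
only; "*valid &= false" happens to produce the same result because *valid
starts out true in the caller, but the compound operator obscures the intent):

	if ((known &&
	     (smin_val != smax_val || umin_val != umax_val)) ||
	    smin_val > smax_val || umin_val > umax_val)
		*valid = false;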

> +	}
> +
> +	return known;
> +}
> +
> +static bool is_safe_to_compute_dst_reg_ranges(struct bpf_insn *insn,
> +					      struct bpf_reg_state src_reg)
> +{
> +	bool src_known;
> +	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
> +	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
> +	u8 opcode = BPF_OP(insn->code);
> +
> +	bool valid_known = true;
> +	src_known = is_const_reg_and_valid(src_reg, alu32, &valid_known);
> +
> +	/* Taint dst register if offset had invalid bounds
> +	 * derived from e.g. dead branches.
> +	 */
> +	if (valid_known == false)
> +		return false;
> +
> +	switch (opcode) {
> +	case BPF_ADD:
> +	case BPF_SUB:
> +	case BPF_AND:
> +	case BPF_XOR:
> +	case BPF_OR:
> +		return true;
> +
> +	/* Compute range for MUL if the src_reg is known.
> +	 */
> +	case BPF_MUL:
> +		return src_known;
> +
> +	/* Shift operators range is only computable if shift dimension operand
> +	 * is known. Also, shifts greater than 31 or 63 are undefined. This
> +	 * includes shifts by a negative number.
> +	 */
> +	case BPF_LSH:
> +	case BPF_RSH:
> +	case BPF_ARSH:
> +		return src_known && (src_reg.umax_value < insn_bitness);
> +	default:
> +		break;
> +	}
> +
> +	return false;
> +}
> +
>   /* WARNING: This function does calculations on 64-bit values, but the actual
>    * execution may occur on 32-bit values. Therefore, things like bitshifts
>    * need extra checks in the 32-bit case.
> @@ -13720,52 +13796,10 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
>   {
>   	struct bpf_reg_state *regs = cur_regs(env);
>   	u8 opcode = BPF_OP(insn->code);
> -	bool src_known;
> -	s64 smin_val, smax_val;
> -	u64 umin_val, umax_val;
> -	s32 s32_min_val, s32_max_val;
> -	u32 u32_min_val, u32_max_val;
> -	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
>   	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
>   	int ret;
>   
> -	smin_val = src_reg.smin_value;
> -	smax_val = src_reg.smax_value;
> -	umin_val = src_reg.umin_value;
> -	umax_val = src_reg.umax_value;
> -
> -	s32_min_val = src_reg.s32_min_value;
> -	s32_max_val = src_reg.s32_max_value;
> -	u32_min_val = src_reg.u32_min_value;
> -	u32_max_val = src_reg.u32_max_value;
> -
> -	if (alu32) {
> -		src_known = tnum_subreg_is_const(src_reg.var_off);
> -		if ((src_known &&
> -		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
> -		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
> -			/* Taint dst register if offset had invalid bounds
> -			 * derived from e.g. dead branches.
> -			 */
> -			__mark_reg_unknown(env, dst_reg);
> -			return 0;
> -		}
> -	} else {
> -		src_known = tnum_is_const(src_reg.var_off);
> -		if ((src_known &&
> -		     (smin_val != smax_val || umin_val != umax_val)) ||
> -		    smin_val > smax_val || umin_val > umax_val) {
> -			/* Taint dst register if offset had invalid bounds
> -			 * derived from e.g. dead branches.
> -			 */
> -			__mark_reg_unknown(env, dst_reg);
> -			return 0;
> -		}
> -	}
> -
> -	if (!src_known &&
> -	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND &&
> -	    opcode != BPF_XOR && opcode != BPF_OR) {
> +	if (!is_safe_to_compute_dst_reg_ranges(insn, src_reg)) {
>   		__mark_reg_unknown(env, dst_reg);

This is not a precise refactoring. There are some cases like below
which use mark_reg_unknown().

Let us put the refactoring patch as the first patch in the series and all
additional changes after that; this will make it easier to review.
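
For context, the wrapper used by the shift cases and the low-level helper
relate roughly as below (paraphrased; the exact body in verifier.c may differ
by kernel version), the wrapper adding a register-index sanity check before
deferring:

	static void mark_reg_unknown(struct bpf_verifier_env *env,
				     struct bpf_reg_state *regs, u32 regno)
	{
		/* sanity-check regno, then defer to the low-level helper */
		...
		__mark_reg_unknown(env, regs + regno);
	}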

>   		return 0;
>   	}
> @@ -13822,39 +13856,18 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
>   		scalar_min_max_xor(dst_reg, &src_reg);
>   		break;
>   	case BPF_LSH:
> -		if (umax_val >= insn_bitness) {
> -			/* Shifts greater than 31 or 63 are undefined.
> -			 * This includes shifts by a negative number.
> -			 */
> -			mark_reg_unknown(env, regs, insn->dst_reg);
> -			break;
> -		}
>   		if (alu32)
>   			scalar32_min_max_lsh(dst_reg, &src_reg);
>   		else
>   			scalar_min_max_lsh(dst_reg, &src_reg);
>   		break;
>   	case BPF_RSH:
> -		if (umax_val >= insn_bitness) {
> -			/* Shifts greater than 31 or 63 are undefined.
> -			 * This includes shifts by a negative number.
> -			 */
> -			mark_reg_unknown(env, regs, insn->dst_reg);
> -			break;
> -		}
>   		if (alu32)
>   			scalar32_min_max_rsh(dst_reg, &src_reg);
>   		else
>   			scalar_min_max_rsh(dst_reg, &src_reg);
>   		break;
>   	case BPF_ARSH:
> -		if (umax_val >= insn_bitness) {
> -			/* Shifts greater than 31 or 63 are undefined.
> -			 * This includes shifts by a negative number.
> -			 */
> -			mark_reg_unknown(env, regs, insn->dst_reg);
> -			break;
> -		}
>   		if (alu32)
>   			scalar32_min_max_arsh(dst_reg, &src_reg);
>   		else
Cupertino Miranda April 16, 2024, 4:12 p.m. UTC | #2
Yonghong Song writes:

> On 4/11/24 10:37 AM, Cupertino Miranda wrote:
>> Split range computation checks into their own function, isolating the
>> pessimistic range setting for dst_reg and the failing return to a single point.
>>
>> Signed-off-by: Cupertino Miranda <cupertino.miranda@oracle.com>
>> ---
>>   kernel/bpf/verifier.c | 141 +++++++++++++++++++++++-------------------
>>   1 file changed, 77 insertions(+), 64 deletions(-)
>>
>> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
>> index a219f601569a..7894af2e1bdb 100644
>> --- a/kernel/bpf/verifier.c
>> +++ b/kernel/bpf/verifier.c
>> @@ -13709,6 +13709,82 @@ static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
>>   	__update_reg_bounds(dst_reg);
>>   }
>>
>> +static bool is_const_reg_and_valid(struct bpf_reg_state reg, bool alu32,
>> +				   bool *valid)
>> +{
>> +	s64 smin_val = reg.smin_value;
>> +	s64 smax_val = reg.smax_value;
>> +	u64 umin_val = reg.umin_value;
>> +	u64 umax_val = reg.umax_value;
>> +
>> +	s32 s32_min_val = reg.s32_min_value;
>> +	s32 s32_max_val = reg.s32_max_value;
>> +	u32 u32_min_val = reg.u32_min_value;
>> +	u32 u32_max_val = reg.u32_max_value;
>> +
>> +	bool known = alu32 ? tnum_subreg_is_const(reg.var_off) :
>> +			     tnum_is_const(reg.var_off);
>> +
>> +	if (alu32) {
>> +		if ((known &&
>> +		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
>> +		      s32_min_val > s32_max_val || u32_min_val > u32_max_val)
>> +			*valid &= false;
>
> *valid = false;
>
>> +	} else {
>> +		if ((known &&
>> +		     (smin_val != smax_val || umin_val != umax_val)) ||
>> +		    smin_val > smax_val || umin_val > umax_val)
>> +			*valid &= false;
>
> *valid = false;
>
>> +	}
>> +
>> +	return known;
>> +}
>> +
>> +static bool is_safe_to_compute_dst_reg_ranges(struct bpf_insn *insn,
>> +					      struct bpf_reg_state src_reg)
>> +{
>> +	bool src_known;
>> +	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
>> +	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
>> +	u8 opcode = BPF_OP(insn->code);
>> +
>> +	bool valid_known = true;
>> +	src_known = is_const_reg_and_valid(src_reg, alu32, &valid_known);
>> +
>> +	/* Taint dst register if offset had invalid bounds
>> +	 * derived from e.g. dead branches.
>> +	 */
>> +	if (valid_known == false)
>> +		return false;
>> +
>> +	switch (opcode) {
>> +	case BPF_ADD:
>> +	case BPF_SUB:
>> +	case BPF_AND:
>> +	case BPF_XOR:
>> +	case BPF_OR:
>> +		return true;
>> +
>> +	/* Compute range for MUL if the src_reg is known.
>> +	 */
>> +	case BPF_MUL:
>> +		return src_known;
>> +
>> +	/* Shift operators range is only computable if shift dimension operand
>> +	 * is known. Also, shifts greater than 31 or 63 are undefined. This
>> +	 * includes shifts by a negative number.
>> +	 */
>> +	case BPF_LSH:
>> +	case BPF_RSH:
>> +	case BPF_ARSH:
>> +		return src_known && (src_reg.umax_value < insn_bitness);
>> +	default:
>> +		break;
>> +	}
>> +
>> +	return false;
>> +}
>> +
>>   /* WARNING: This function does calculations on 64-bit values, but the actual
>>    * execution may occur on 32-bit values. Therefore, things like bitshifts
>>    * need extra checks in the 32-bit case.
>> @@ -13720,52 +13796,10 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
>>   {
>>   	struct bpf_reg_state *regs = cur_regs(env);
>>   	u8 opcode = BPF_OP(insn->code);
>> -	bool src_known;
>> -	s64 smin_val, smax_val;
>> -	u64 umin_val, umax_val;
>> -	s32 s32_min_val, s32_max_val;
>> -	u32 u32_min_val, u32_max_val;
>> -	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
>>   	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
>>   	int ret;
>>
>> -	smin_val = src_reg.smin_value;
>> -	smax_val = src_reg.smax_value;
>> -	umin_val = src_reg.umin_value;
>> -	umax_val = src_reg.umax_value;
>> -
>> -	s32_min_val = src_reg.s32_min_value;
>> -	s32_max_val = src_reg.s32_max_value;
>> -	u32_min_val = src_reg.u32_min_value;
>> -	u32_max_val = src_reg.u32_max_value;
>> -
>> -	if (alu32) {
>> -		src_known = tnum_subreg_is_const(src_reg.var_off);
>> -		if ((src_known &&
>> -		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
>> -		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
>> -			/* Taint dst register if offset had invalid bounds
>> -			 * derived from e.g. dead branches.
>> -			 */
>> -			__mark_reg_unknown(env, dst_reg);
>> -			return 0;
>> -		}
>> -	} else {
>> -		src_known = tnum_is_const(src_reg.var_off);
>> -		if ((src_known &&
>> -		     (smin_val != smax_val || umin_val != umax_val)) ||
>> -		    smin_val > smax_val || umin_val > umax_val) {
>> -			/* Taint dst register if offset had invalid bounds
>> -			 * derived from e.g. dead branches.
>> -			 */
>> -			__mark_reg_unknown(env, dst_reg);
>> -			return 0;
>> -		}
>> -	}
>> -
>> -	if (!src_known &&
>> -	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND &&
>> -	    opcode != BPF_XOR && opcode != BPF_OR) {
>> +	if (!is_safe_to_compute_dst_reg_ranges(insn, src_reg)) {
>>   		__mark_reg_unknown(env, dst_reg);
>
> This is not a precise refactoring. There are some cases like below
> which use mark_reg_unknown().
Oh, right, I missed those underscores above. :(
Will make sure to cover that.
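
One way to cover it in the respin (sketch only; the actual v2 may differ) is to
keep the single exit point but go through the same wrapper the shift cases used:

	if (!is_safe_to_compute_dst_reg_ranges(insn, src_reg)) {
		mark_reg_unknown(env, regs, insn->dst_reg);
		return 0;
	}
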
>
> Let us put the refactoring patch as the first patch in the series and all
> additional changes after that; this will make it easier to review.
>
>>   		return 0;
>>   	}
>> @@ -13822,39 +13856,18 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
>>   		scalar_min_max_xor(dst_reg, &src_reg);
>>   		break;
>>   	case BPF_LSH:
>> -		if (umax_val >= insn_bitness) {
>> -			/* Shifts greater than 31 or 63 are undefined.
>> -			 * This includes shifts by a negative number.
>> -			 */
>> -			mark_reg_unknown(env, regs, insn->dst_reg);
>> -			break;
>> -		}
>>   		if (alu32)
>>   			scalar32_min_max_lsh(dst_reg, &src_reg);
>>   		else
>>   			scalar_min_max_lsh(dst_reg, &src_reg);
>>   		break;
>>   	case BPF_RSH:
>> -		if (umax_val >= insn_bitness) {
>> -			/* Shifts greater than 31 or 63 are undefined.
>> -			 * This includes shifts by a negative number.
>> -			 */
>> -			mark_reg_unknown(env, regs, insn->dst_reg);
>> -			break;
>> -		}
>>   		if (alu32)
>>   			scalar32_min_max_rsh(dst_reg, &src_reg);
>>   		else
>>   			scalar_min_max_rsh(dst_reg, &src_reg);
>>   		break;
>>   	case BPF_ARSH:
>> -		if (umax_val >= insn_bitness) {
>> -			/* Shifts greater than 31 or 63 are undefined.
>> -			 * This includes shifts by a negative number.
>> -			 */
>> -			mark_reg_unknown(env, regs, insn->dst_reg);
>> -			break;
>> -		}
>>   		if (alu32)
>>   			scalar32_min_max_arsh(dst_reg, &src_reg);
>>   		else

Patch

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a219f601569a..7894af2e1bdb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13709,6 +13709,82 @@  static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
 	__update_reg_bounds(dst_reg);
 }
 
+static bool is_const_reg_and_valid(struct bpf_reg_state reg, bool alu32,
+				   bool *valid)
+{
+	s64 smin_val = reg.smin_value;
+	s64 smax_val = reg.smax_value;
+	u64 umin_val = reg.umin_value;
+	u64 umax_val = reg.umax_value;
+
+	s32 s32_min_val = reg.s32_min_value;
+	s32 s32_max_val = reg.s32_max_value;
+	u32 u32_min_val = reg.u32_min_value;
+	u32 u32_max_val = reg.u32_max_value;
+
+	bool known = alu32 ? tnum_subreg_is_const(reg.var_off) :
+			     tnum_is_const(reg.var_off);
+
+	if (alu32) {
+		if ((known &&
+		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
+		      s32_min_val > s32_max_val || u32_min_val > u32_max_val)
+			*valid &= false;
+	} else {
+		if ((known &&
+		     (smin_val != smax_val || umin_val != umax_val)) ||
+		    smin_val > smax_val || umin_val > umax_val)
+			*valid &= false;
+	}
+
+	return known;
+}
+
+static bool is_safe_to_compute_dst_reg_ranges(struct bpf_insn *insn,
+					      struct bpf_reg_state src_reg)
+{
+	bool src_known;
+	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
+	u8 opcode = BPF_OP(insn->code);
+
+	bool valid_known = true;
+	src_known = is_const_reg_and_valid(src_reg, alu32, &valid_known);
+
+	/* Taint dst register if offset had invalid bounds
+	 * derived from e.g. dead branches.
+	 */
+	if (valid_known == false)
+		return false;
+
+	switch (opcode) {
+	case BPF_ADD:
+	case BPF_SUB:
+	case BPF_AND:
+	case BPF_XOR:
+	case BPF_OR:
+		return true;
+
+	/* Compute range for MUL if the src_reg is known.
+	 */
+	case BPF_MUL:
+		return src_known;
+
+	/* Shift operators range is only computable if shift dimension operand
+	 * is known. Also, shifts greater than 31 or 63 are undefined. This
+	 * includes shifts by a negative number.
+	 */
+	case BPF_LSH:
+	case BPF_RSH:
+	case BPF_ARSH:
+		return src_known && (src_reg.umax_value < insn_bitness);
+	default:
+		break;
+	}
+
+	return false;
+}
+
 /* WARNING: This function does calculations on 64-bit values, but the actual
  * execution may occur on 32-bit values. Therefore, things like bitshifts
  * need extra checks in the 32-bit case.
@@ -13720,52 +13796,10 @@  static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	u8 opcode = BPF_OP(insn->code);
-	bool src_known;
-	s64 smin_val, smax_val;
-	u64 umin_val, umax_val;
-	s32 s32_min_val, s32_max_val;
-	u32 u32_min_val, u32_max_val;
-	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
 	int ret;
 
-	smin_val = src_reg.smin_value;
-	smax_val = src_reg.smax_value;
-	umin_val = src_reg.umin_value;
-	umax_val = src_reg.umax_value;
-
-	s32_min_val = src_reg.s32_min_value;
-	s32_max_val = src_reg.s32_max_value;
-	u32_min_val = src_reg.u32_min_value;
-	u32_max_val = src_reg.u32_max_value;
-
-	if (alu32) {
-		src_known = tnum_subreg_is_const(src_reg.var_off);
-		if ((src_known &&
-		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
-		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
-			/* Taint dst register if offset had invalid bounds
-			 * derived from e.g. dead branches.
-			 */
-			__mark_reg_unknown(env, dst_reg);
-			return 0;
-		}
-	} else {
-		src_known = tnum_is_const(src_reg.var_off);
-		if ((src_known &&
-		     (smin_val != smax_val || umin_val != umax_val)) ||
-		    smin_val > smax_val || umin_val > umax_val) {
-			/* Taint dst register if offset had invalid bounds
-			 * derived from e.g. dead branches.
-			 */
-			__mark_reg_unknown(env, dst_reg);
-			return 0;
-		}
-	}
-
-	if (!src_known &&
-	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND &&
-	    opcode != BPF_XOR && opcode != BPF_OR) {
+	if (!is_safe_to_compute_dst_reg_ranges(insn, src_reg)) {
 		__mark_reg_unknown(env, dst_reg);
 		return 0;
 	}
@@ -13822,39 +13856,18 @@  static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		scalar_min_max_xor(dst_reg, &src_reg);
 		break;
 	case BPF_LSH:
-		if (umax_val >= insn_bitness) {
-			/* Shifts greater than 31 or 63 are undefined.
-			 * This includes shifts by a negative number.
-			 */
-			mark_reg_unknown(env, regs, insn->dst_reg);
-			break;
-		}
 		if (alu32)
 			scalar32_min_max_lsh(dst_reg, &src_reg);
 		else
 			scalar_min_max_lsh(dst_reg, &src_reg);
 		break;
 	case BPF_RSH:
-		if (umax_val >= insn_bitness) {
-			/* Shifts greater than 31 or 63 are undefined.
-			 * This includes shifts by a negative number.
-			 */
-			mark_reg_unknown(env, regs, insn->dst_reg);
-			break;
-		}
 		if (alu32)
 			scalar32_min_max_rsh(dst_reg, &src_reg);
 		else
 			scalar_min_max_rsh(dst_reg, &src_reg);
 		break;
 	case BPF_ARSH:
-		if (umax_val >= insn_bitness) {
-			/* Shifts greater than 31 or 63 are undefined.
-			 * This includes shifts by a negative number.
-			 */
-			mark_reg_unknown(env, regs, insn->dst_reg);
-			break;
-		}
 		if (alu32)
 			scalar32_min_max_arsh(dst_reg, &src_reg);
 		else