diff mbox series

[bpf-next,1/3] bpf: allow ctx writes using BPF_ST_MEM instruction

Message ID 20230302225507.3413720-2-eddyz87@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series bpf: allow ctx writes using BPF_ST_MEM instruction | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 46 this patch: 46
netdev/cc_maintainers warning 15 maintainers not CCed: pabeni@redhat.com linux-kselftest@vger.kernel.org davem@davemloft.net jolsa@kernel.org john.fastabend@gmail.com mykolal@fb.com kpsingh@kernel.org song@kernel.org memxor@gmail.com haoluo@google.com edumazet@google.com kuba@kernel.org netdev@vger.kernel.org shuah@kernel.org sdf@google.com
netdev/build_clang success Errors and warnings before: 14 this patch: 14
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 46 this patch: 46
netdev/checkpatch warning CHECK: Lines should not end with a '(' WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 89 exceeds 80 columns WARNING: line length of 91 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns WARNING: line length of 94 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns WARNING: line length of 96 exceeds 80 columns WARNING: line length of 97 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 12 this patch: 12
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32 on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 fail Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-PR success PR summary

Commit Message

Eduard Zingerman March 2, 2023, 10:55 p.m. UTC
Lift verifier restriction to use BPF_ST_MEM instructions to write to
context data structures. This requires the following changes:
 - verifier.c:do_check() for BPF_ST updated to:
   - no longer forbid writes to registers of type PTR_TO_CTX;
   - track dst_reg type in the env->insn_aux_data[...].ptr_type field
     (same way it is done for BPF_STX and BPF_LDX instructions).
 - verifier.c:convert_ctx_access() and various callbacks invoked by
   it are updated to handle BPF_ST instruction alongside BPF_STX.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 kernel/bpf/cgroup.c                        | 49 ++++++++------
 kernel/bpf/verifier.c                      | 79 +++++++++++-----------
 net/core/filter.c                          | 79 ++++++++++++----------
 tools/testing/selftests/bpf/verifier/ctx.c | 11 ---
 4 files changed, 114 insertions(+), 104 deletions(-)

Comments

Alexei Starovoitov March 3, 2023, 8:21 p.m. UTC | #1
On Fri, Mar 03, 2023 at 12:55:05AM +0200, Eduard Zingerman wrote:
> -			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
> -
> -			if (*prev_src_type == NOT_INIT) {
> -				/* saw a valid insn
> -				 * dst_reg = *(u32 *)(src_reg + off)
> -				 * save type to validate intersecting paths
> -				 */
> -				*prev_src_type = src_reg_type;
> -
> -			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
> -				/* ABuser program is trying to use the same insn
> -				 * dst_reg = *(u32*) (src_reg + off)
> -				 * with different pointer types:
> -				 * src_reg == ctx in one branch and
> -				 * src_reg == stack|map in some other branch.
> -				 * Reject it.
> -				 */
> -				verbose(env, "same insn cannot be used with different pointers\n");
> -				return -EINVAL;

There is a merge conflict with this part.
LDX is now handled slightly differently comparing to STX.

> -			}
> -
> +			err = save_aux_ptr_type(env, src_reg_type);
> +			if (err)
> +				return err;
>  		} else if (class == BPF_STX) {
> -			enum bpf_reg_type *prev_dst_type, dst_reg_type;
> +			enum bpf_reg_type dst_reg_type;
>  
>  			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
>  				err = check_atomic(env, env->insn_idx, insn);
> @@ -14712,16 +14719,12 @@ static int do_check(struct bpf_verifier_env *env)
>  			if (err)
>  				return err;
>  
> -			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
> -
> -			if (*prev_dst_type == NOT_INIT) {
> -				*prev_dst_type = dst_reg_type;
> -			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
> -				verbose(env, "same insn cannot be used with different pointers\n");
> -				return -EINVAL;
> -			}
> -
> +			err = save_aux_ptr_type(env, dst_reg_type);
> +			if (err)
> +				return err;
>  		} else if (class == BPF_ST) {
> +			enum bpf_reg_type dst_reg_type;
> +
>  			if (BPF_MODE(insn->code) != BPF_MEM ||
>  			    insn->src_reg != BPF_REG_0) {
>  				verbose(env, "BPF_ST uses reserved fields\n");
> @@ -14732,12 +14735,7 @@ static int do_check(struct bpf_verifier_env *env)
>  			if (err)
>  				return err;
>  
> -			if (is_ctx_reg(env, insn->dst_reg)) {
> -				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
> -					insn->dst_reg,
> -					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
> -				return -EACCES;
> -			}
> +			dst_reg_type = regs[insn->dst_reg].type;
>  
>  			/* check that memory (dst_reg + off) is writeable */
>  			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
> @@ -14746,6 +14744,9 @@ static int do_check(struct bpf_verifier_env *env)
>  			if (err)
>  				return err;
>  
> +			err = save_aux_ptr_type(env, dst_reg_type);
> +			if (err)
> +				return err;
>  		} else if (class == BPF_JMP || class == BPF_JMP32) {
>  			u8 opcode = BPF_OP(insn->code);
>  
> @@ -15871,7 +15872,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
>  			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
>  			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
>  			type = BPF_WRITE;
> -			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
> +			ctx_access = true;

I think 'ctx_access' variable can be removed, since it will be always true.

>  		} else {
>  			continue;
>  		}
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 1d6f165923bf..8e819b8464e8 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -9264,11 +9264,15 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
>  #endif
>  
>  	/* <store>: skb->tstamp = tstamp */
> -	*insn++ = BPF_STX_MEM(BPF_DW, skb_reg, value_reg,
> -			      offsetof(struct sk_buff, tstamp));
> +	*insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_DW | BPF_MEM,
> +			       skb_reg, value_reg, offsetof(struct sk_buff, tstamp), si->imm);
>  	return insn;
>  }
>  
> +#define BPF_COPY_STORE(size, si, off)					\
> +	BPF_RAW_INSN((si)->code | (size) | BPF_MEM,			\
> +		     (si)->dst_reg, (si)->src_reg, (off), (si)->imm)
> +

Could you explain the "copy store" name?
I don't understand what it means.
It emits either STX or ST insn, right?
Maybe BPF_EMIT_STORE ?

>  static u32 bpf_convert_ctx_access(enum bpf_access_type type,
>  				  const struct bpf_insn *si,
>  				  struct bpf_insn *insn_buf,
> @@ -9298,9 +9302,9 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
>  
>  	case offsetof(struct __sk_buff, priority):
>  		if (type == BPF_WRITE)
> -			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
> -					      bpf_target_off(struct sk_buff, priority, 4,
> -							     target_size));
> +			*insn++ = BPF_COPY_STORE(BPF_W, si,
> +						 bpf_target_off(struct sk_buff, priority, 4,
> +								target_size));
>  		else
>  			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
>  					      bpf_target_off(struct sk_buff, priority, 4,
> @@ -9331,9 +9335,9 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
>  
>  	case offsetof(struct __sk_buff, mark):
>  		if (type == BPF_WRITE)
> -			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
> -					      bpf_target_off(struct sk_buff, mark, 4,
> -							     target_size));
> +			*insn++ = BPF_COPY_STORE(BPF_W, si,
> +						 bpf_target_off(struct sk_buff, mark, 4,
> +								target_size));
>  		else
>  			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
>  					      bpf_target_off(struct sk_buff, mark, 4,
> @@ -9352,11 +9356,16 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
>  
>  	case offsetof(struct __sk_buff, queue_mapping):
>  		if (type == BPF_WRITE) {
> -			*insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
> -			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
> -					      bpf_target_off(struct sk_buff,
> -							     queue_mapping,
> -							     2, target_size));
> +			u32 off = bpf_target_off(struct sk_buff, queue_mapping, 2, target_size);
> +
> +			if (BPF_CLASS(si->code) == BPF_ST && si->imm >= NO_QUEUE_MAPPING) {
> +				*insn++ = BPF_JMP_A(0); /* noop */
> +				break;
> +			}
> +
> +			if (BPF_CLASS(si->code) == BPF_STX)
> +				*insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
> +			*insn++ = BPF_COPY_STORE(BPF_H, si, off);
>  		} else {
>  			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
>  					      bpf_target_off(struct sk_buff,
> @@ -9392,8 +9401,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
>  		off += offsetof(struct sk_buff, cb);
>  		off += offsetof(struct qdisc_skb_cb, data);
>  		if (type == BPF_WRITE)
> -			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
> -					      si->src_reg, off);
> +			*insn++ = BPF_COPY_STORE(BPF_SIZE(si->code), si, off);
>  		else
>  			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
>  					      si->src_reg, off);
> @@ -9408,8 +9416,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
>  		off += offsetof(struct qdisc_skb_cb, tc_classid);
>  		*target_size = 2;
>  		if (type == BPF_WRITE)
> -			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
> -					      si->src_reg, off);
> +			*insn++ = BPF_COPY_STORE(BPF_H, si, off);
>  		else
>  			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
>  					      si->src_reg, off);
> @@ -9442,9 +9449,9 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
>  	case offsetof(struct __sk_buff, tc_index):
>  #ifdef CONFIG_NET_SCHED
>  		if (type == BPF_WRITE)
> -			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
> -					      bpf_target_off(struct sk_buff, tc_index, 2,
> -							     target_size));
> +			*insn++ = BPF_COPY_STORE(BPF_H, si,
> +						 bpf_target_off(struct sk_buff, tc_index, 2,
> +								target_size));
>  		else
>  			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
>  					      bpf_target_off(struct sk_buff, tc_index, 2,
> @@ -9645,8 +9652,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
>  		BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4);
>  
>  		if (type == BPF_WRITE)
> -			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
> -					offsetof(struct sock, sk_bound_dev_if));
> +			*insn++ = BPF_COPY_STORE(BPF_W, si,
> +						 offsetof(struct sock, sk_bound_dev_if));
>  		else
>  			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
>  				      offsetof(struct sock, sk_bound_dev_if));
> @@ -9656,8 +9663,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
>  		BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);
>  
>  		if (type == BPF_WRITE)
> -			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
> -					offsetof(struct sock, sk_mark));
> +			*insn++ = BPF_COPY_STORE(BPF_W, si,
> +						 offsetof(struct sock, sk_mark));
>  		else
>  			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
>  				      offsetof(struct sock, sk_mark));
> @@ -9667,8 +9674,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
>  		BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4);
>  
>  		if (type == BPF_WRITE)
> -			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
> -					offsetof(struct sock, sk_priority));
> +			*insn++ = BPF_COPY_STORE(BPF_W, si,
> +						 offsetof(struct sock, sk_priority));
>  		else
>  			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
>  				      offsetof(struct sock, sk_priority));
> @@ -9933,10 +9940,12 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
>  				      offsetof(S, TF));			       \
>  		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,	       \
>  				      si->dst_reg, offsetof(S, F));	       \
> -		*insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg,	       \
> +		*insn++ = BPF_RAW_INSN(SIZE | BPF_MEM | BPF_CLASS(si->code),   \
> +				       tmp_reg, si->src_reg,		       \

the macro didn't work here because of 'tmp_reg' ?

>  			bpf_target_off(NS, NF, sizeof_field(NS, NF),	       \
>  				       target_size)			       \
> -				+ OFF);					       \
> +				       + OFF,				       \
> +				       si->imm);			       \
>  		*insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,	       \
Eduard Zingerman March 3, 2023, 9:17 p.m. UTC | #2
On Fri, 2023-03-03 at 12:21 -0800, Alexei Starovoitov wrote:
> On Fri, Mar 03, 2023 at 12:55:05AM +0200, Eduard Zingerman wrote:
> > -			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
> > -
> > -			if (*prev_src_type == NOT_INIT) {
> > -				/* saw a valid insn
> > -				 * dst_reg = *(u32 *)(src_reg + off)
> > -				 * save type to validate intersecting paths
> > -				 */
> > -				*prev_src_type = src_reg_type;
> > -
> > -			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
> > -				/* ABuser program is trying to use the same insn
> > -				 * dst_reg = *(u32*) (src_reg + off)
> > -				 * with different pointer types:
> > -				 * src_reg == ctx in one branch and
> > -				 * src_reg == stack|map in some other branch.
> > -				 * Reject it.
> > -				 */
> > -				verbose(env, "same insn cannot be used with different pointers\n");
> > -				return -EINVAL;
> 
> There is a merge conflict with this part.
> LDX is now handled slightly differently comparing to STX.

Merge seems not complicated, will send v2 shortly.

> 
> > -			}
> > -
> > +			err = save_aux_ptr_type(env, src_reg_type);
> > +			if (err)
> > +				return err;
> >  		} else if (class == BPF_STX) {
> > -			enum bpf_reg_type *prev_dst_type, dst_reg_type;
> > +			enum bpf_reg_type dst_reg_type;
> >  
> >  			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
> >  				err = check_atomic(env, env->insn_idx, insn);
> > @@ -14712,16 +14719,12 @@ static int do_check(struct bpf_verifier_env *env)
> >  			if (err)
> >  				return err;
> >  
> > -			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
> > -
> > -			if (*prev_dst_type == NOT_INIT) {
> > -				*prev_dst_type = dst_reg_type;
> > -			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
> > -				verbose(env, "same insn cannot be used with different pointers\n");
> > -				return -EINVAL;
> > -			}
> > -
> > +			err = save_aux_ptr_type(env, dst_reg_type);
> > +			if (err)
> > +				return err;
> >  		} else if (class == BPF_ST) {
> > +			enum bpf_reg_type dst_reg_type;
> > +
> >  			if (BPF_MODE(insn->code) != BPF_MEM ||
> >  			    insn->src_reg != BPF_REG_0) {
> >  				verbose(env, "BPF_ST uses reserved fields\n");
> > @@ -14732,12 +14735,7 @@ static int do_check(struct bpf_verifier_env *env)
> >  			if (err)
> >  				return err;
> >  
> > -			if (is_ctx_reg(env, insn->dst_reg)) {
> > -				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
> > -					insn->dst_reg,
> > -					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
> > -				return -EACCES;
> > -			}
> > +			dst_reg_type = regs[insn->dst_reg].type;
> >  
> >  			/* check that memory (dst_reg + off) is writeable */
> >  			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
> > @@ -14746,6 +14744,9 @@ static int do_check(struct bpf_verifier_env *env)
> >  			if (err)
> >  				return err;
> >  
> > +			err = save_aux_ptr_type(env, dst_reg_type);
> > +			if (err)
> > +				return err;
> >  		} else if (class == BPF_JMP || class == BPF_JMP32) {
> >  			u8 opcode = BPF_OP(insn->code);
> >  
> > @@ -15871,7 +15872,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
> >  			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
> >  			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
> >  			type = BPF_WRITE;
> > -			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
> > +			ctx_access = true;
> 
> I think 'ctx_access' variable can be removed, since it will be always true.

Sorry, missed this, will remove in v2.

> 
> >  		} else {
> >  			continue;
> >  		}
> > diff --git a/net/core/filter.c b/net/core/filter.c
> > index 1d6f165923bf..8e819b8464e8 100644
> > --- a/net/core/filter.c
> > +++ b/net/core/filter.c
> > @@ -9264,11 +9264,15 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
> >  #endif
> >  
> >  	/* <store>: skb->tstamp = tstamp */
> > -	*insn++ = BPF_STX_MEM(BPF_DW, skb_reg, value_reg,
> > -			      offsetof(struct sk_buff, tstamp));
> > +	*insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_DW | BPF_MEM,
> > +			       skb_reg, value_reg, offsetof(struct sk_buff, tstamp), si->imm);
> >  	return insn;
> >  }
> >  
> > +#define BPF_COPY_STORE(size, si, off)					\
> > +	BPF_RAW_INSN((si)->code | (size) | BPF_MEM,			\
> > +		     (si)->dst_reg, (si)->src_reg, (off), (si)->imm)
> > +
> 
> Could you explain the "copy store" name?

I want to replicate registers, code and immediate operand from `si`,
hence the word "copy".
The more descriptive name might be `BPF_CLONE_STORE`.

> I don't understand what it means.
> It emits either STX or ST insn, right?
> Maybe BPF_EMIT_STORE ?

Can use `BPF_EMIT_STORE` one as well. 

> 
> >  static u32 bpf_convert_ctx_access(enum bpf_access_type type,
> >  				  const struct bpf_insn *si,
> >  				  struct bpf_insn *insn_buf,
> > @@ -9298,9 +9302,9 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
> >  
> >  	case offsetof(struct __sk_buff, priority):
> >  		if (type == BPF_WRITE)
> > -			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
> > -					      bpf_target_off(struct sk_buff, priority, 4,
> > -							     target_size));
> > +			*insn++ = BPF_COPY_STORE(BPF_W, si,
> > +						 bpf_target_off(struct sk_buff, priority, 4,
> > +								target_size));
> >  		else
> >  			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
> >  					      bpf_target_off(struct sk_buff, priority, 4,
> > @@ -9331,9 +9335,9 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
> >  
> >  	case offsetof(struct __sk_buff, mark):
> >  		if (type == BPF_WRITE)
> > -			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
> > -					      bpf_target_off(struct sk_buff, mark, 4,
> > -							     target_size));
> > +			*insn++ = BPF_COPY_STORE(BPF_W, si,
> > +						 bpf_target_off(struct sk_buff, mark, 4,
> > +								target_size));
> >  		else
> >  			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
> >  					      bpf_target_off(struct sk_buff, mark, 4,
> > @@ -9352,11 +9356,16 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
> >  
> >  	case offsetof(struct __sk_buff, queue_mapping):
> >  		if (type == BPF_WRITE) {
> > -			*insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
> > -			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
> > -					      bpf_target_off(struct sk_buff,
> > -							     queue_mapping,
> > -							     2, target_size));
> > +			u32 off = bpf_target_off(struct sk_buff, queue_mapping, 2, target_size);
> > +
> > +			if (BPF_CLASS(si->code) == BPF_ST && si->imm >= NO_QUEUE_MAPPING) {
> > +				*insn++ = BPF_JMP_A(0); /* noop */
> > +				break;
> > +			}
> > +
> > +			if (BPF_CLASS(si->code) == BPF_STX)
> > +				*insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
> > +			*insn++ = BPF_COPY_STORE(BPF_H, si, off);
> >  		} else {
> >  			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
> >  					      bpf_target_off(struct sk_buff,
> > @@ -9392,8 +9401,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
> >  		off += offsetof(struct sk_buff, cb);
> >  		off += offsetof(struct qdisc_skb_cb, data);
> >  		if (type == BPF_WRITE)
> > -			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
> > -					      si->src_reg, off);
> > +			*insn++ = BPF_COPY_STORE(BPF_SIZE(si->code), si, off);
> >  		else
> >  			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
> >  					      si->src_reg, off);
> > @@ -9408,8 +9416,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
> >  		off += offsetof(struct qdisc_skb_cb, tc_classid);
> >  		*target_size = 2;
> >  		if (type == BPF_WRITE)
> > -			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
> > -					      si->src_reg, off);
> > +			*insn++ = BPF_COPY_STORE(BPF_H, si, off);
> >  		else
> >  			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
> >  					      si->src_reg, off);
> > @@ -9442,9 +9449,9 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
> >  	case offsetof(struct __sk_buff, tc_index):
> >  #ifdef CONFIG_NET_SCHED
> >  		if (type == BPF_WRITE)
> > -			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
> > -					      bpf_target_off(struct sk_buff, tc_index, 2,
> > -							     target_size));
> > +			*insn++ = BPF_COPY_STORE(BPF_H, si,
> > +						 bpf_target_off(struct sk_buff, tc_index, 2,
> > +								target_size));
> >  		else
> >  			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
> >  					      bpf_target_off(struct sk_buff, tc_index, 2,
> > @@ -9645,8 +9652,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
> >  		BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4);
> >  
> >  		if (type == BPF_WRITE)
> > -			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
> > -					offsetof(struct sock, sk_bound_dev_if));
> > +			*insn++ = BPF_COPY_STORE(BPF_W, si,
> > +						 offsetof(struct sock, sk_bound_dev_if));
> >  		else
> >  			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
> >  				      offsetof(struct sock, sk_bound_dev_if));
> > @@ -9656,8 +9663,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
> >  		BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);
> >  
> >  		if (type == BPF_WRITE)
> > -			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
> > -					offsetof(struct sock, sk_mark));
> > +			*insn++ = BPF_COPY_STORE(BPF_W, si,
> > +						 offsetof(struct sock, sk_mark));
> >  		else
> >  			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
> >  				      offsetof(struct sock, sk_mark));
> > @@ -9667,8 +9674,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
> >  		BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4);
> >  
> >  		if (type == BPF_WRITE)
> > -			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
> > -					offsetof(struct sock, sk_priority));
> > +			*insn++ = BPF_COPY_STORE(BPF_W, si,
> > +						 offsetof(struct sock, sk_priority));
> >  		else
> >  			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
> >  				      offsetof(struct sock, sk_priority));
> > @@ -9933,10 +9940,12 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
> >  				      offsetof(S, TF));			       \
> >  		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,	       \
> >  				      si->dst_reg, offsetof(S, F));	       \
> > -		*insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg,	       \
> > +		*insn++ = BPF_RAW_INSN(SIZE | BPF_MEM | BPF_CLASS(si->code),   \
> > +				       tmp_reg, si->src_reg,		       \
> 
> the macro didn't work here because of 'tmp_reg' ?

Yes, macro uses (si)->dst_reg in this position.
There are 11 places where this macro applies.
There are 4 places where `tmp_reg` is used for destination:
- 2 in cgroup.c
- 2 in filter.c

I opted not to add new macro to common headers (given that it has very
narrow purpose and not very descriptive name) and use BPF_RAW_INSN in
these cases.

[...]
Eduard Zingerman March 3, 2023, 10:56 p.m. UTC | #3
On Fri, 2023-03-03 at 12:21 -0800, Alexei Starovoitov wrote:
> On Fri, Mar 03, 2023 at 12:55:05AM +0200, Eduard Zingerman wrote:
> > -			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
> > -
> > -			if (*prev_src_type == NOT_INIT) {
> > -				/* saw a valid insn
> > -				 * dst_reg = *(u32 *)(src_reg + off)
> > -				 * save type to validate intersecting paths
> > -				 */
> > -				*prev_src_type = src_reg_type;
> > -
> > -			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
> > -				/* ABuser program is trying to use the same insn
> > -				 * dst_reg = *(u32*) (src_reg + off)
> > -				 * with different pointer types:
> > -				 * src_reg == ctx in one branch and
> > -				 * src_reg == stack|map in some other branch.
> > -				 * Reject it.
> > -				 */
> > -				verbose(env, "same insn cannot be used with different pointers\n");
> > -				return -EINVAL;
> 
> There is a merge conflict with this part.
> LDX is now handled slightly differently comparing to STX.

I changed save_aux_ptr_type() as below:

  static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
  			     bool allow_trust_missmatch)
  {
  	enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type;
    ...
  	if (*prev_type == NOT_INIT) {
        ...
  	} else if (reg_type_mismatch(type, *prev_type)) {
  		/* Abuser program is trying to use the same insn
         * ...
  		 */
  		if (allow_trust_missmatch &&
  		    base_type(type) == PTR_TO_BTF_ID &&
  		    base_type(*prev_type) == PTR_TO_BTF_ID) {
  			/*
  			 * Have to support a use case when one path through
  			 * the program yields TRUSTED pointer while another
  			 * is UNTRUSTED. Fallback to UNTRUSTED to generate
  			 * BPF_PROBE_MEM.
  			 */
  			*prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
  		} else {
  			verbose(env, "same insn cannot be used with different pointers\n");
  			return -EINVAL;
  		}
  	}
  
  	return 0;
  }
  
But I don't understand why it is allowed to dereference
pointers for LDX but not for ST/STX.
  
[...]
diff mbox series

Patch

diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index bf2fdb33fb31..a57f1b44dc6c 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -2223,10 +2223,12 @@  static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
 				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
 				treg, si->dst_reg,
 				offsetof(struct bpf_sysctl_kern, ppos));
-			*insn++ = BPF_STX_MEM(
-				BPF_SIZEOF(u32), treg, si->src_reg,
+			*insn++ = BPF_RAW_INSN(
+				BPF_CLASS(si->code) | BPF_MEM | BPF_SIZEOF(u32),
+				treg, si->src_reg,
 				bpf_ctx_narrow_access_offset(
-					0, sizeof(u32), sizeof(loff_t)));
+					0, sizeof(u32), sizeof(loff_t)),
+				si->imm);
 			*insn++ = BPF_LDX_MEM(
 				BPF_DW, treg, si->dst_reg,
 				offsetof(struct bpf_sysctl_kern, tmp_reg));
@@ -2376,10 +2378,17 @@  static bool cg_sockopt_is_valid_access(int off, int size,
 	return true;
 }
 
-#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
-	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
-	  si->dst_reg, si->src_reg,					\
-	  offsetof(struct bpf_sockopt_kern, F))
+#define CG_SOCKOPT_READ_FIELD(F)					\
+	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),	\
+		    si->dst_reg, si->src_reg,				\
+		    offsetof(struct bpf_sockopt_kern, F))
+
+#define CG_SOCKOPT_WRITE_FIELD(F)					\
+	BPF_RAW_INSN((BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F) |	\
+		      BPF_MEM | BPF_CLASS(si->code)),			\
+		     si->dst_reg, si->src_reg,				\
+		     offsetof(struct bpf_sockopt_kern, F),		\
+		     si->imm)
 
 static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
 					 const struct bpf_insn *si,
@@ -2391,25 +2400,25 @@  static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
 
 	switch (si->off) {
 	case offsetof(struct bpf_sockopt, sk):
-		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
+		*insn++ = CG_SOCKOPT_READ_FIELD(sk);
 		break;
 	case offsetof(struct bpf_sockopt, level):
 		if (type == BPF_WRITE)
-			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
+			*insn++ = CG_SOCKOPT_WRITE_FIELD(level);
 		else
-			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
+			*insn++ = CG_SOCKOPT_READ_FIELD(level);
 		break;
 	case offsetof(struct bpf_sockopt, optname):
 		if (type == BPF_WRITE)
-			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
+			*insn++ = CG_SOCKOPT_WRITE_FIELD(optname);
 		else
-			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
+			*insn++ = CG_SOCKOPT_READ_FIELD(optname);
 		break;
 	case offsetof(struct bpf_sockopt, optlen):
 		if (type == BPF_WRITE)
-			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
+			*insn++ = CG_SOCKOPT_WRITE_FIELD(optlen);
 		else
-			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
+			*insn++ = CG_SOCKOPT_READ_FIELD(optlen);
 		break;
 	case offsetof(struct bpf_sockopt, retval):
 		BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);
@@ -2429,9 +2438,11 @@  static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
 					      treg, treg,
 					      offsetof(struct task_struct, bpf_ctx));
-			*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
-					      treg, si->src_reg,
-					      offsetof(struct bpf_cg_run_ctx, retval));
+			*insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_MEM |
+					       BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
+					       treg, si->src_reg,
+					       offsetof(struct bpf_cg_run_ctx, retval),
+					       si->imm);
 			*insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
 					      offsetof(struct bpf_sockopt_kern, tmp_reg));
 		} else {
@@ -2447,10 +2458,10 @@  static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
 		}
 		break;
 	case offsetof(struct bpf_sockopt, optval):
-		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
+		*insn++ = CG_SOCKOPT_READ_FIELD(optval);
 		break;
 	case offsetof(struct bpf_sockopt, optval_end):
-		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
+		*insn++ = CG_SOCKOPT_READ_FIELD(optval_end);
 		break;
 	}
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5cb8b623f639..ee6885703589 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -14524,6 +14524,31 @@  static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
 			       !reg_type_mismatch_ok(prev));
 }
 
+static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type)
+{
+	enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type;
+
+	if (*prev_type == NOT_INIT) {
+		/* Saw a valid insn
+		 * dst_reg = *(u32 *)(src_reg + off)
+		 * save type to validate intersecting paths
+		 */
+		*prev_type = type;
+	} else if (reg_type_mismatch(type, *prev_type)) {
+		/* Abuser program is trying to use the same insn
+		 * dst_reg = *(u32*) (src_reg + off)
+		 * with different pointer types:
+		 * src_reg == ctx in one branch and
+		 * src_reg == stack|map in some other branch.
+		 * Reject it.
+		 */
+		verbose(env, "same insn cannot be used with different pointers\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int do_check(struct bpf_verifier_env *env)
 {
 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
@@ -14633,7 +14658,7 @@  static int do_check(struct bpf_verifier_env *env)
 				return err;
 
 		} else if (class == BPF_LDX) {
-			enum bpf_reg_type *prev_src_type, src_reg_type;
+			enum bpf_reg_type src_reg_type;
 
 			/* check for reserved fields is already done */
 
@@ -14657,29 +14682,11 @@  static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;
 
-			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
-
-			if (*prev_src_type == NOT_INIT) {
-				/* saw a valid insn
-				 * dst_reg = *(u32 *)(src_reg + off)
-				 * save type to validate intersecting paths
-				 */
-				*prev_src_type = src_reg_type;
-
-			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
-				/* ABuser program is trying to use the same insn
-				 * dst_reg = *(u32*) (src_reg + off)
-				 * with different pointer types:
-				 * src_reg == ctx in one branch and
-				 * src_reg == stack|map in some other branch.
-				 * Reject it.
-				 */
-				verbose(env, "same insn cannot be used with different pointers\n");
-				return -EINVAL;
-			}
-
+			err = save_aux_ptr_type(env, src_reg_type);
+			if (err)
+				return err;
 		} else if (class == BPF_STX) {
-			enum bpf_reg_type *prev_dst_type, dst_reg_type;
+			enum bpf_reg_type dst_reg_type;
 
 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
 				err = check_atomic(env, env->insn_idx, insn);
@@ -14712,16 +14719,12 @@  static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;
 
-			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
-
-			if (*prev_dst_type == NOT_INIT) {
-				*prev_dst_type = dst_reg_type;
-			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
-				verbose(env, "same insn cannot be used with different pointers\n");
-				return -EINVAL;
-			}
-
+			err = save_aux_ptr_type(env, dst_reg_type);
+			if (err)
+				return err;
 		} else if (class == BPF_ST) {
+			enum bpf_reg_type dst_reg_type;
+
 			if (BPF_MODE(insn->code) != BPF_MEM ||
 			    insn->src_reg != BPF_REG_0) {
 				verbose(env, "BPF_ST uses reserved fields\n");
@@ -14732,12 +14735,7 @@  static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;
 
-			if (is_ctx_reg(env, insn->dst_reg)) {
-				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
-					insn->dst_reg,
-					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
-				return -EACCES;
-			}
+			dst_reg_type = regs[insn->dst_reg].type;
 
 			/* check that memory (dst_reg + off) is writeable */
 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
@@ -14746,6 +14744,9 @@  static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;
 
+			err = save_aux_ptr_type(env, dst_reg_type);
+			if (err)
+				return err;
 		} else if (class == BPF_JMP || class == BPF_JMP32) {
 			u8 opcode = BPF_OP(insn->code);
 
@@ -15871,7 +15872,7 @@  static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
 			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
 			type = BPF_WRITE;
-			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
+			ctx_access = true;
 		} else {
 			continue;
 		}
diff --git a/net/core/filter.c b/net/core/filter.c
index 1d6f165923bf..8e819b8464e8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -9264,11 +9264,15 @@  static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
 #endif
 
 	/* <store>: skb->tstamp = tstamp */
-	*insn++ = BPF_STX_MEM(BPF_DW, skb_reg, value_reg,
-			      offsetof(struct sk_buff, tstamp));
+	*insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_DW | BPF_MEM,
+			       skb_reg, value_reg, offsetof(struct sk_buff, tstamp), si->imm);
 	return insn;
 }
 
+#define BPF_COPY_STORE(size, si, off)					\
+	BPF_RAW_INSN(BPF_CLASS((si)->code) | (size) | BPF_MEM,		\
+		     (si)->dst_reg, (si)->src_reg, (off), (si)->imm)
+
 static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 				  const struct bpf_insn *si,
 				  struct bpf_insn *insn_buf,
@@ -9298,9 +9302,9 @@  static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 
 	case offsetof(struct __sk_buff, priority):
 		if (type == BPF_WRITE)
-			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
-					      bpf_target_off(struct sk_buff, priority, 4,
-							     target_size));
+			*insn++ = BPF_COPY_STORE(BPF_W, si,
+						 bpf_target_off(struct sk_buff, priority, 4,
+								target_size));
 		else
 			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
 					      bpf_target_off(struct sk_buff, priority, 4,
@@ -9331,9 +9335,9 @@  static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 
 	case offsetof(struct __sk_buff, mark):
 		if (type == BPF_WRITE)
-			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
-					      bpf_target_off(struct sk_buff, mark, 4,
-							     target_size));
+			*insn++ = BPF_COPY_STORE(BPF_W, si,
+						 bpf_target_off(struct sk_buff, mark, 4,
+								target_size));
 		else
 			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
 					      bpf_target_off(struct sk_buff, mark, 4,
@@ -9352,11 +9356,16 @@  static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 
 	case offsetof(struct __sk_buff, queue_mapping):
 		if (type == BPF_WRITE) {
-			*insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
-			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
-					      bpf_target_off(struct sk_buff,
-							     queue_mapping,
-							     2, target_size));
+			u32 off = bpf_target_off(struct sk_buff, queue_mapping, 2, target_size);
+
+			if (BPF_CLASS(si->code) == BPF_ST && si->imm >= NO_QUEUE_MAPPING) {
+				*insn++ = BPF_JMP_A(0); /* noop */
+				break;
+			}
+
+			if (BPF_CLASS(si->code) == BPF_STX)
+				*insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
+			*insn++ = BPF_COPY_STORE(BPF_H, si, off);
 		} else {
 			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
 					      bpf_target_off(struct sk_buff,
@@ -9392,8 +9401,7 @@  static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 		off += offsetof(struct sk_buff, cb);
 		off += offsetof(struct qdisc_skb_cb, data);
 		if (type == BPF_WRITE)
-			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
-					      si->src_reg, off);
+			*insn++ = BPF_COPY_STORE(BPF_SIZE(si->code), si, off);
 		else
 			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
 					      si->src_reg, off);
@@ -9408,8 +9416,7 @@  static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 		off += offsetof(struct qdisc_skb_cb, tc_classid);
 		*target_size = 2;
 		if (type == BPF_WRITE)
-			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
-					      si->src_reg, off);
+			*insn++ = BPF_COPY_STORE(BPF_H, si, off);
 		else
 			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
 					      si->src_reg, off);
@@ -9442,9 +9449,9 @@  static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 	case offsetof(struct __sk_buff, tc_index):
 #ifdef CONFIG_NET_SCHED
 		if (type == BPF_WRITE)
-			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
-					      bpf_target_off(struct sk_buff, tc_index, 2,
-							     target_size));
+			*insn++ = BPF_COPY_STORE(BPF_H, si,
+						 bpf_target_off(struct sk_buff, tc_index, 2,
+								target_size));
 		else
 			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
 					      bpf_target_off(struct sk_buff, tc_index, 2,
@@ -9645,8 +9652,8 @@  u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
 		BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4);
 
 		if (type == BPF_WRITE)
-			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
-					offsetof(struct sock, sk_bound_dev_if));
+			*insn++ = BPF_COPY_STORE(BPF_W, si,
+						 offsetof(struct sock, sk_bound_dev_if));
 		else
 			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
 				      offsetof(struct sock, sk_bound_dev_if));
@@ -9656,8 +9663,8 @@  u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
 		BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);
 
 		if (type == BPF_WRITE)
-			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
-					offsetof(struct sock, sk_mark));
+			*insn++ = BPF_COPY_STORE(BPF_W, si,
+						 offsetof(struct sock, sk_mark));
 		else
 			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
 				      offsetof(struct sock, sk_mark));
@@ -9667,8 +9674,8 @@  u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
 		BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4);
 
 		if (type == BPF_WRITE)
-			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
-					offsetof(struct sock, sk_priority));
+			*insn++ = BPF_COPY_STORE(BPF_W, si,
+						 offsetof(struct sock, sk_priority));
 		else
 			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
 				      offsetof(struct sock, sk_priority));
@@ -9933,10 +9940,12 @@  static u32 xdp_convert_ctx_access(enum bpf_access_type type,
 				      offsetof(S, TF));			       \
 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,	       \
 				      si->dst_reg, offsetof(S, F));	       \
-		*insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg,	       \
+		*insn++ = BPF_RAW_INSN(SIZE | BPF_MEM | BPF_CLASS(si->code),   \
+				       tmp_reg, si->src_reg,		       \
 			bpf_target_off(NS, NF, sizeof_field(NS, NF),	       \
 				       target_size)			       \
-				+ OFF);					       \
+				       + OFF,				       \
+				       si->imm);			       \
 		*insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,	       \
 				      offsetof(S, TF));			       \
 	} while (0)
@@ -10171,9 +10180,11 @@  static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 						struct bpf_sock_ops_kern, sk),\
 				      reg, si->dst_reg,			      \
 				      offsetof(struct bpf_sock_ops_kern, sk));\
-		*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),	      \
-				      reg, si->src_reg,			      \
-				      offsetof(OBJ, OBJ_FIELD));	      \
+		*insn++ = BPF_RAW_INSN(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD) |     \
+				       BPF_MEM | BPF_CLASS(si->code),	      \
+				       reg, si->src_reg,		      \
+				       offsetof(OBJ, OBJ_FIELD),	      \
+				       si->imm);			      \
 		*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,		      \
 				      offsetof(struct bpf_sock_ops_kern,      \
 					       temp));			      \
@@ -10205,8 +10216,7 @@  static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		off -= offsetof(struct bpf_sock_ops, replylong[0]);
 		off += offsetof(struct bpf_sock_ops_kern, replylong[0]);
 		if (type == BPF_WRITE)
-			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
-					      off);
+			*insn++ = BPF_COPY_STORE(BPF_W, si, off);
 		else
 			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
 					      off);
@@ -10563,8 +10573,7 @@  static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
 		off += offsetof(struct sk_buff, cb);
 		off += offsetof(struct sk_skb_cb, data);
 		if (type == BPF_WRITE)
-			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
-					      si->src_reg, off);
+			*insn++ = BPF_COPY_STORE(BPF_SIZE(si->code), si, off);
 		else
 			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
 					      si->src_reg, off);
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
index c8eaf0536c24..2fd31612c0b8 100644
--- a/tools/testing/selftests/bpf/verifier/ctx.c
+++ b/tools/testing/selftests/bpf/verifier/ctx.c
@@ -1,14 +1,3 @@ 
-{
-	"context stores via ST",
-	.insns = {
-	BPF_MOV64_IMM(BPF_REG_0, 0),
-	BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
-	BPF_EXIT_INSN(),
-	},
-	.errstr = "BPF_ST stores into R1 ctx is not allowed",
-	.result = REJECT,
-	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-},
 {
 	"context stores via BPF_ATOMIC",
 	.insns = {