[bpf-next,v4,06/11] bpf: Add BPF_FETCH field / create atomic_fetch_add instruction

Message ID 20201207160734.2345502-7-jackmanb@google.com (mailing list archive)
State Changes Requested
Delegated to: BPF
Series Atomics for eBPF

Checks

Context Check Description
netdev/cover_letter success Link
netdev/fixes_present success Link
netdev/patch_count success Link
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success Link
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Link
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 15663 this patch: 15663
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success Link
netdev/checkpatch warning CHECK: Lines should not end with a '('; CHECK: No space is necessary after a cast; WARNING: line length of 109 exceeds 80 columns; WARNING: line length of 83 exceeds 80 columns; WARNING: line length of 87 exceeds 80 columns
netdev/build_allmodconfig_warn success Errors and warnings before: 15328 this patch: 15328
netdev/header_inline success Link
netdev/stable success Stable not CCed

Commit Message

Brendan Jackman Dec. 7, 2020, 4:07 p.m. UTC
The BPF_FETCH field can be set in bpf_insn.imm, for BPF_ATOMIC
instructions, in order to have the previous value of the
atomically-modified memory location loaded into the src register
after an atomic op is carried out.

Suggested-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Brendan Jackman <jackmanb@google.com>
---
 arch/x86/net/bpf_jit_comp.c    |  4 ++++
 include/linux/filter.h         |  1 +
 include/uapi/linux/bpf.h       |  3 +++
 kernel/bpf/core.c              | 13 +++++++++++++
 kernel/bpf/disasm.c            |  7 +++++++
 kernel/bpf/verifier.c          | 33 ++++++++++++++++++++++++---------
 tools/include/linux/filter.h   | 11 +++++++++++
 tools/include/uapi/linux/bpf.h |  3 +++
 8 files changed, 66 insertions(+), 9 deletions(-)
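
For illustration only (not part of the patch): a minimal sketch of how a program could use the BPF_ATOMIC_FETCH_ADD macro this patch adds to tools/include/linux/filter.h. The register choices and stack slot are arbitrary; the point is that the old value of the memory location ends up in the source register.

	struct bpf_insn insns[] = {
		/* *(u64 *)(r10 - 8) = 0 */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		/* r1 = 5 */
		BPF_MOV64_IMM(BPF_REG_1, 5),
		/* r1 = atomic_fetch_add((u64 *)(r10 - 8), r1): memory becomes 5, r1 becomes 0 */
		BPF_ATOMIC_FETCH_ADD(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
		/* return the fetched old value */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_EXIT_INSN(),
	};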

Comments

Yonghong Song Dec. 8, 2020, 1:41 a.m. UTC | #1
On 12/7/20 8:07 AM, Brendan Jackman wrote:
> The BPF_FETCH field can be set in bpf_insn.imm, for BPF_ATOMIC
> instructions, in order to have the previous value of the
> atomically-modified memory location loaded into the src register
> after an atomic op is carried out.
> 
> Suggested-by: Yonghong Song <yhs@fb.com>
> Signed-off-by: Brendan Jackman <jackmanb@google.com>
> ---
>   arch/x86/net/bpf_jit_comp.c    |  4 ++++
>   include/linux/filter.h         |  1 +
>   include/uapi/linux/bpf.h       |  3 +++
>   kernel/bpf/core.c              | 13 +++++++++++++
>   kernel/bpf/disasm.c            |  7 +++++++
>   kernel/bpf/verifier.c          | 33 ++++++++++++++++++++++++---------
>   tools/include/linux/filter.h   | 11 +++++++++++
>   tools/include/uapi/linux/bpf.h |  3 +++
>   8 files changed, 66 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
[...]

> index f345f12c1ff8..4e0100ba52c2 100644
> --- a/tools/include/linux/filter.h
> +++ b/tools/include/linux/filter.h
> @@ -173,6 +173,7 @@
>    * Atomic operations:
>    *
>    *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
> + *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
>    */
>   
>   #define BPF_ATOMIC64(OP, DST, SRC, OFF)				\
> @@ -201,6 +202,16 @@
>   		.off   = OFF,					\
>   		.imm   = BPF_ADD })
>   
> +/* Atomic memory add with fetch, src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
> +
> +#define BPF_ATOMIC_FETCH_ADD(SIZE, DST, SRC, OFF)		\
> +	((struct bpf_insn) {					\
> +		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
> +		.dst_reg = DST,					\
> +		.src_reg = SRC,					\
> +		.off   = OFF,					\
> +		.imm   = BPF_ADD | BPF_FETCH })

Not sure whether it is a good idea to fold this into a BPF_ATOMIC 
macro. At the least, you could define a BPF_ATOMIC macro and
#define BPF_ATOMIC_FETCH_ADD(SIZE, DST, SRC, OFF)		\
     BPF_ATOMIC(SIZE, DST, SRC, OFF, BPF_ADD | BPF_FETCH)

to avoid so much code duplication?

> +
>   /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
>   
>   #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> index 98161e2d389f..d5389119291e 100644
> --- a/tools/include/uapi/linux/bpf.h
> +++ b/tools/include/uapi/linux/bpf.h
> @@ -44,6 +44,9 @@
>   #define BPF_CALL	0x80	/* function call */
>   #define BPF_EXIT	0x90	/* function return */
>   
> +/* atomic op type fields (stored in immediate) */
> +#define BPF_FETCH	0x01	/* fetch previous value into src reg */
> +
>   /* Register numbers */
>   enum {
>   	BPF_REG_0 = 0,
>
John Fastabend Dec. 8, 2020, 5:31 a.m. UTC | #2
Brendan Jackman wrote:
> The BPF_FETCH field can be set in bpf_insn.imm, for BPF_ATOMIC
> instructions, in order to have the previous value of the
> atomically-modified memory location loaded into the src register
> after an atomic op is carried out.
> 
> Suggested-by: Yonghong Song <yhs@fb.com>
> Signed-off-by: Brendan Jackman <jackmanb@google.com>
> ---

I like Yonghong's suggestion:

 #define BPF_ATOMIC_FETCH_ADD(SIZE, DST, SRC, OFF)               \
     BPF_ATOMIC(SIZE, DST, SRC, OFF, BPF_ADD | BPF_FETCH)

otherwise LGTM. One observation to consider below.

Acked-by: John Fastabend <john.fastabend@gmail.com>

>  arch/x86/net/bpf_jit_comp.c    |  4 ++++
>  include/linux/filter.h         |  1 +
>  include/uapi/linux/bpf.h       |  3 +++
>  kernel/bpf/core.c              | 13 +++++++++++++
>  kernel/bpf/disasm.c            |  7 +++++++
>  kernel/bpf/verifier.c          | 33 ++++++++++++++++++++++++---------
>  tools/include/linux/filter.h   | 11 +++++++++++
>  tools/include/uapi/linux/bpf.h |  3 +++
>  8 files changed, 66 insertions(+), 9 deletions(-)

[...]

> @@ -3652,8 +3656,20 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
>  		return err;
>  
>  	/* check whether we can write into the same memory */
> -	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
> -				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
> +	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
> +			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
> +	if (err)
> +		return err;
> +
> +	if (!(insn->imm & BPF_FETCH))
> +		return 0;
> +
> +	/* check and record load of old value into src reg  */
> +	err = check_reg_arg(env, insn->src_reg, DST_OP);

This will mark the reg unknown. I think this is fine here. Might be nice
to carry bounds through though if possible

> +	if (err)
> +		return err;
> +
> +	return 0;
>  }
>
Brendan Jackman Dec. 8, 2020, 9:31 a.m. UTC | #3
On Mon, Dec 07, 2020 at 05:41:05PM -0800, Yonghong Song wrote:
> 
> 
> On 12/7/20 8:07 AM, Brendan Jackman wrote:
> > The BPF_FETCH field can be set in bpf_insn.imm, for BPF_ATOMIC
> > instructions, in order to have the previous value of the
> > atomically-modified memory location loaded into the src register
> > after an atomic op is carried out.
> > 
> > Suggested-by: Yonghong Song <yhs@fb.com>
> > Signed-off-by: Brendan Jackman <jackmanb@google.com>
> > ---
> >   arch/x86/net/bpf_jit_comp.c    |  4 ++++
> >   include/linux/filter.h         |  1 +
> >   include/uapi/linux/bpf.h       |  3 +++
> >   kernel/bpf/core.c              | 13 +++++++++++++
> >   kernel/bpf/disasm.c            |  7 +++++++
> >   kernel/bpf/verifier.c          | 33 ++++++++++++++++++++++++---------
> >   tools/include/linux/filter.h   | 11 +++++++++++
> >   tools/include/uapi/linux/bpf.h |  3 +++
> >   8 files changed, 66 insertions(+), 9 deletions(-)
> > 
> > diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> [...]
> 
> > index f345f12c1ff8..4e0100ba52c2 100644
> > --- a/tools/include/linux/filter.h
> > +++ b/tools/include/linux/filter.h
> > @@ -173,6 +173,7 @@
> >    * Atomic operations:
> >    *
> >    *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
> > + *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
> >    */
> >   #define BPF_ATOMIC64(OP, DST, SRC, OFF)				\
> > @@ -201,6 +202,16 @@
> >   		.off   = OFF,					\
> >   		.imm   = BPF_ADD })
> > +/* Atomic memory add with fetch, src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
> > +
> > +#define BPF_ATOMIC_FETCH_ADD(SIZE, DST, SRC, OFF)		\
> > +	((struct bpf_insn) {					\
> > +		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
> > +		.dst_reg = DST,					\
> > +		.src_reg = SRC,					\
> > +		.off   = OFF,					\
> > +		.imm   = BPF_ADD | BPF_FETCH })
> 
> Not sure whether it is a good idea to fold this into a BPF_ATOMIC
> macro. At the least, you could define a BPF_ATOMIC macro and
> #define BPF_ATOMIC_FETCH_ADD(SIZE, DST, SRC, OFF)		\
>     BPF_ATOMIC(SIZE, DST, SRC, OFF, BPF_ADD | BPF_FETCH)
> 
> to avoid so much code duplication?

Oops.. I intended to get rid of these entirely and fold them into
BPF_ATOMIC{64,32}! OK, let's combine all of them into a single macro.
It will have to be called something slightly awkward like
BPF_ATOMIC_INSN, because the name BPF_ATOMIC is already taken by the
instruction mode.
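
For concreteness, a sketch of what such a combined macro could look like (the BPF_ATOMIC_INSN name and argument order are only the proposal above, nothing in this patch); it simply passes the op, including an optional BPF_FETCH flag, through the immediate:

#define BPF_ATOMIC_INSN(SIZE, OP, DST, SRC, OFF)		\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = OP })

/* lock *(u64 *)(r1 + 0) += r2 */
BPF_ATOMIC_INSN(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, 0)
/* r2 = atomic_fetch_add((u64 *)(r1 + 0), r2) */
BPF_ATOMIC_INSN(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_1, BPF_REG_2, 0)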

> 
> > +
> >   /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
> >   #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
> > diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> > index 98161e2d389f..d5389119291e 100644
> > --- a/tools/include/uapi/linux/bpf.h
> > +++ b/tools/include/uapi/linux/bpf.h
> > @@ -44,6 +44,9 @@
> >   #define BPF_CALL	0x80	/* function call */
> >   #define BPF_EXIT	0x90	/* function return */
> > +/* atomic op type fields (stored in immediate) */
> > +#define BPF_FETCH	0x01	/* fetch previous value into src reg */
> > +
> >   /* Register numbers */
> >   enum {
> >   	BPF_REG_0 = 0,
> >
Brendan Jackman Dec. 8, 2020, 9:59 a.m. UTC | #4
On Mon, Dec 07, 2020 at 09:31:40PM -0800, John Fastabend wrote:
> Brendan Jackman wrote:
> > The BPF_FETCH field can be set in bpf_insn.imm, for BPF_ATOMIC
> > instructions, in order to have the previous value of the
> > atomically-modified memory location loaded into the src register
> > after an atomic op is carried out.
> > 
> > Suggested-by: Yonghong Song <yhs@fb.com>
> > Signed-off-by: Brendan Jackman <jackmanb@google.com>
> > ---
> 
> I like Yonghong's suggestion:
> 
>  #define BPF_ATOMIC_FETCH_ADD(SIZE, DST, SRC, OFF)               \
>      BPF_ATOMIC(SIZE, DST, SRC, OFF, BPF_ADD | BPF_FETCH)
> 
> otherwise LGTM. One observation to consider below.
> 
> Acked-by: John Fastabend <john.fastabend@gmail.com>
> 
> >  arch/x86/net/bpf_jit_comp.c    |  4 ++++
> >  include/linux/filter.h         |  1 +
> >  include/uapi/linux/bpf.h       |  3 +++
> >  kernel/bpf/core.c              | 13 +++++++++++++
> >  kernel/bpf/disasm.c            |  7 +++++++
> >  kernel/bpf/verifier.c          | 33 ++++++++++++++++++++++++---------
> >  tools/include/linux/filter.h   | 11 +++++++++++
> >  tools/include/uapi/linux/bpf.h |  3 +++
> >  8 files changed, 66 insertions(+), 9 deletions(-)
> 
> [...]
> 
> > @@ -3652,8 +3656,20 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
> >  		return err;
> >  
> >  	/* check whether we can write into the same memory */
> > -	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
> > -				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
> > +	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
> > +			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
> > +	if (err)
> > +		return err;
> > +
> > +	if (!(insn->imm & BPF_FETCH))
> > +		return 0;
> > +
> > +	/* check and record load of old value into src reg  */
> > +	err = check_reg_arg(env, insn->src_reg, DST_OP);
> 
> This will mark the reg unknown. I think this is fine here. Might be nice
> to carry bounds through though if possible

Ah, I hadn't thought of this. I think if I move this check_reg_arg
before the first check_mem_access, and (when BPF_FETCH is set) pass
load_reg as the val_regno arg, then the bounds from memory would get
propagated back into the register:

if (insn->imm & BPF_FETCH) {
	if (insn->imm == BPF_CMPXCHG)
		load_reg = BPF_REG_0;
	else
		load_reg = insn->src_reg;
	err = check_reg_arg(env, load_reg, DST_OP);
	if (err)
		return err;
} else {
	load_reg = -1;
}
/* check whether we can read the memory */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
		       BPF_SIZE(insn->code), BPF_READ,
		       load_reg, // <--
		       true);

Is that the kind of thing you had in mind?

> > +	if (err)
> > +		return err;
> > +
> > +	return 0;
> >  }
> >

Patch

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index b1829a534da1..eea7d8b0bb12 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -811,6 +811,10 @@  static int emit_atomic(u8 **pprog, u8 atomic_op,
 		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
 		EMIT1(simple_alu_opcodes[atomic_op]);
 		break;
+	case BPF_ADD | BPF_FETCH:
+		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
+		EMIT2(0x0F, 0xC1);
+		break;
 	default:
 		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
 		return -EFAULT;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 45be19408f68..b5258bca10d2 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -264,6 +264,7 @@  static inline bool insn_is_zext(const struct bpf_insn *insn)
  * Atomic operations:
  *
  *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
+ *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
  */
 
 #define BPF_ATOMIC64(OP, DST, SRC, OFF)				\
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 98161e2d389f..d5389119291e 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -44,6 +44,9 @@ 
 #define BPF_CALL	0x80	/* function call */
 #define BPF_EXIT	0x90	/* function return */
 
+/* atomic op type fields (stored in immediate) */
+#define BPF_FETCH	0x01	/* fetch previous value into src reg */
+
 /* Register numbers */
 enum {
 	BPF_REG_0 = 0,
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 3abc6b250b18..61e93eb7d363 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1624,16 +1624,29 @@  static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 			/* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
 			atomic_add((u32) SRC, (atomic_t *)(unsigned long)
 				   (DST + insn->off));
+			break;
+		case BPF_ADD | BPF_FETCH:
+			SRC = (u32) atomic_fetch_add(
+				(u32) SRC,
+				(atomic_t *)(unsigned long) (DST + insn->off));
+			break;
 		default:
 			goto default_label;
 		}
 		CONT;
+
 	STX_ATOMIC_DW:
 		switch (IMM) {
 		case BPF_ADD:
 			/* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
 			atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
 				     (DST + insn->off));
+			break;
+		case BPF_ADD | BPF_FETCH:
+			SRC = (u64) atomic64_fetch_add(
+				(u64) SRC,
+				(atomic64_t *)(s64) (DST + insn->off));
+			break;
 		default:
 			goto default_label;
 		}
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index 37c8d6e9b4cc..d2e20f6d0516 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -160,6 +160,13 @@  void print_bpf_insn(const struct bpf_insn_cbs *cbs,
 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
 				insn->dst_reg, insn->off,
 				insn->src_reg);
+		} else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+			   insn->imm == (BPF_ADD | BPF_FETCH)) {
+			verbose(cbs->private_data, "(%02x) r%d = atomic%s_fetch_add((%s *)(r%d %+d), r%d)\n",
+				insn->code, insn->src_reg,
+				BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
+				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+				insn->dst_reg, insn->off, insn->src_reg);
 		} else {
 			verbose(cbs->private_data, "BUG_%02x\n", insn->code);
 		}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 745c53df0485..f8c4e809297d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3610,7 +3610,11 @@  static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 {
 	int err;
 
-	if (insn->imm != BPF_ADD) {
+	switch (insn->imm) {
+	case BPF_ADD:
+	case BPF_ADD | BPF_FETCH:
+		break;
+	default:
 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
 		return -EINVAL;
 	}
@@ -3652,8 +3656,20 @@  static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 		return err;
 
 	/* check whether we can write into the same memory */
-	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
+	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
+	if (err)
+		return err;
+
+	if (!(insn->imm & BPF_FETCH))
+		return 0;
+
+	/* check and record load of old value into src reg  */
+	err = check_reg_arg(env, insn->src_reg, DST_OP);
+	if (err)
+		return err;
+
+	return 0;
 }
 
 static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
@@ -9527,12 +9543,6 @@  static int do_check(struct bpf_verifier_env *env)
 		} else if (class == BPF_STX) {
 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
 
-			if (((BPF_MODE(insn->code) != BPF_MEM &&
-			      BPF_MODE(insn->code) != BPF_ATOMIC) || insn->imm != 0)) {
-				verbose(env, "BPF_STX uses reserved fields\n");
-				return -EINVAL;
-			}
-
 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
 				err = check_atomic(env, env->insn_idx, insn);
 				if (err)
@@ -9541,6 +9551,11 @@  static int do_check(struct bpf_verifier_env *env)
 				continue;
 			}
 
+			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
+				verbose(env, "BPF_STX uses reserved fields\n");
+				return -EINVAL;
+			}
+
 			/* check src1 operand */
 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
 			if (err)
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index f345f12c1ff8..4e0100ba52c2 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -173,6 +173,7 @@ 
  * Atomic operations:
  *
  *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
+ *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
  */
 
 #define BPF_ATOMIC64(OP, DST, SRC, OFF)				\
@@ -201,6 +202,16 @@ 
 		.off   = OFF,					\
 		.imm   = BPF_ADD })
 
+/* Atomic memory add with fetch, src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
+
+#define BPF_ATOMIC_FETCH_ADD(SIZE, DST, SRC, OFF)		\
+	((struct bpf_insn) {					\
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = OFF,					\
+		.imm   = BPF_ADD | BPF_FETCH })
+
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 98161e2d389f..d5389119291e 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -44,6 +44,9 @@ 
 #define BPF_CALL	0x80	/* function call */
 #define BPF_EXIT	0x90	/* function return */
 
+/* atomic op type fields (stored in immediate) */
+#define BPF_FETCH	0x01	/* fetch previous value into src reg */
+
 /* Register numbers */
 enum {
 	BPF_REG_0 = 0,