
[bpf-next,v2,2/2] bpf, arm64: calculate offset as byte-offset for bpf line info

Message ID: 20220125105707.292449-3-houtao1@huawei.com (mailing list archive)
State: New, archived
Series: bpf, arm64: fix bpf line info

Commit Message

Hou Tao Jan. 25, 2022, 10:57 a.m. UTC
The insn_to_jit_off array passed to bpf_prog_fill_jited_linfo() is
calculated at instruction granularity rather than byte granularity,
but bpf line info requires byte offsets, so fix it by calculating
the offset as a byte offset.

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 arch/arm64/net/bpf_jit_comp.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
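
An illustrative, standalone C sketch (not kernel code; the instruction-index
mapping below is made up) of why an instruction index has to be scaled by
AARCH64_INSN_SIZE before it can serve as the byte offset that bpf line info
expects:

#include <stdio.h>

#define AARCH64_INSN_SIZE 4	/* every arm64 instruction is 4 bytes */

int main(void)
{
	/* Made-up mapping: BPF insn i starts at arm64 insn index idx[i]. */
	const unsigned int idx[] = { 0, 1, 3, 4, 7 };
	unsigned int i;

	for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++) {
		unsigned int insn_off = idx[i];                     /* stored before the fix */
		unsigned int byte_off = idx[i] * AARCH64_INSN_SIZE; /* what line info needs */

		printf("bpf insn %u: insn offset %u -> byte offset %u\n",
		       i, insn_off, byte_off);
	}
	return 0;
}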

Comments

Martin KaFai Lau Feb. 2, 2022, 6:45 p.m. UTC | #1
On Tue, Jan 25, 2022 at 06:57:07PM +0800, Hou Tao wrote:
> The insn_to_jit_off array passed to bpf_prog_fill_jited_linfo() is
> calculated at instruction granularity rather than byte granularity,
> but bpf line info requires byte offsets, so fix it by calculating
> the offset as a byte offset.
> 
> Signed-off-by: Hou Tao <houtao1@huawei.com>
> ---
>  arch/arm64/net/bpf_jit_comp.c | 13 ++++++++-----
>  1 file changed, 8 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
> index 6a83f3070985..7b94e0c5e134 100644
> --- a/arch/arm64/net/bpf_jit_comp.c
> +++ b/arch/arm64/net/bpf_jit_comp.c
> @@ -152,10 +152,12 @@ static inline int bpf2a64_offset(int bpf_insn, int off,
>  	bpf_insn++;
>  	/*
>  	 * Whereas arm64 branch instructions encode the offset
> -	 * from the branch itself, so we must subtract 1 from the
> +	 * from the branch itself, so we must subtract 4 from the
>  	 * instruction offset.
>  	 */
> -	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
> +	return (ctx->offset[bpf_insn + off] -
> +		(ctx->offset[bpf_insn] - AARCH64_INSN_SIZE)) /
> +		AARCH64_INSN_SIZE;
Is it another bug fix? It does not seem to be related
to the change described in the commit message.

>  }
>  
>  static void jit_fill_hole(void *area, unsigned int size)
> @@ -946,13 +948,14 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
>  		const struct bpf_insn *insn = &prog->insnsi[i];
>  		int ret;
>  
> +		/* BPF line info needs byte-offset instead of insn-offset */
>  		if (ctx->image == NULL)
> -			ctx->offset[i] = ctx->idx;
> +			ctx->offset[i] = ctx->idx * AARCH64_INSN_SIZE;
>  		ret = build_insn(insn, ctx, extra_pass);
>  		if (ret > 0) {
>  			i++;
>  			if (ctx->image == NULL)
> -				ctx->offset[i] = ctx->idx;
> +				ctx->offset[i] = ctx->idx * AARCH64_INSN_SIZE;
>  			continue;
>  		}
>  		if (ret)
> @@ -964,7 +967,7 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
>  	 * instruction (end of program)
>  	 */
>  	if (ctx->image == NULL)
> -		ctx->offset[i] = ctx->idx;
> +		ctx->offset[i] = ctx->idx * AARCH64_INSN_SIZE;
The changes in this function make sense.
Hou Tao Feb. 3, 2022, 4:54 a.m. UTC | #2
Hi,

> 
> On Tue, Jan 25, 2022 at 06:57:07PM +0800, Hou Tao wrote:
> > The insn_to_jit_off array passed to bpf_prog_fill_jited_linfo() is
> > calculated at instruction granularity rather than byte granularity,
> > but bpf line info requires byte offsets, so fix it by calculating
> > the offset as a byte offset.
> > 
> > Signed-off-by: Hou Tao <houtao1@huawei.com>
> > ---
> >  arch/arm64/net/bpf_jit_comp.c | 13 ++++++++-----
> >  1 file changed, 8 insertions(+), 5 deletions(-)
> > 
> > diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
> > index 6a83f3070985..7b94e0c5e134 100644
> > --- a/arch/arm64/net/bpf_jit_comp.c
> > +++ b/arch/arm64/net/bpf_jit_comp.c
> > @@ -152,10 +152,12 @@ static inline int bpf2a64_offset(int bpf_insn, int off,
> >  	bpf_insn++;
> >  	/*
> >  	 * Whereas arm64 branch instructions encode the offset
> > -	 * from the branch itself, so we must subtract 1 from the
> > +	 * from the branch itself, so we must subtract 4 from the
> >  	 * instruction offset.
> >  	 */
> > -	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
> > +	return (ctx->offset[bpf_insn + off] -
> > +		(ctx->offset[bpf_insn] - AARCH64_INSN_SIZE)) /
> > +		AARCH64_INSN_SIZE;
> Is it another bug fix? It does not seem to be related
> to the change described in the commit message.
>
No. Since ctx->offset now holds byte offsets while bpf2a64_offset()
still needs to return an offset in instructions rather than in bytes,
the calculation has to be updated as part of this change. However, I
should not have changed the comment, because it is still correct as
written; I will post v3 to fix that.
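
To make the arithmetic concrete, here is a minimal standalone C sketch (the
instruction indexes are assumed purely for illustration; this is not kernel
code) showing that the old formula over instruction indexes and the new
formula over byte offsets return the same branch offset in instructions:

#include <assert.h>
#include <stdio.h>

#define AARCH64_INSN_SIZE 4

int main(void)
{
	/* Assumed arm64 instruction indexes, for illustration only. */
	int branch_idx = 5;	/* index of the insn following the branch */
	int target_idx = 12;	/* index of the branch target */

	/* Old scheme: ctx->offset[] held instruction indexes. */
	int off_insns_old = target_idx - (branch_idx - 1);

	/* New scheme: ctx->offset[] holds byte offsets, so scale back down. */
	int branch_bytes = branch_idx * AARCH64_INSN_SIZE;
	int target_bytes = target_idx * AARCH64_INSN_SIZE;
	int off_insns_new = (target_bytes - (branch_bytes - AARCH64_INSN_SIZE)) /
			    AARCH64_INSN_SIZE;

	/* Both formulas give the branch offset in instructions. */
	assert(off_insns_old == off_insns_new);
	printf("branch offset: %d instructions\n", off_insns_new);
	return 0;
}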

> >  }
> >  
> >  static void jit_fill_hole(void *area, unsigned int size)
> > @@ -946,13 +948,14 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
> >  		const struct bpf_insn *insn = &prog->insnsi[i];
> >  		int ret;
> >  
> > +		/* BPF line info needs byte-offset instead of insn-offset */
> >  		if (ctx->image == NULL)
> > -			ctx->offset[i] = ctx->idx;
> > +			ctx->offset[i] = ctx->idx * AARCH64_INSN_SIZE;
> >  		ret = build_insn(insn, ctx, extra_pass);
> >  		if (ret > 0) {
> >  			i++;
> >  			if (ctx->image == NULL)
> > -				ctx->offset[i] = ctx->idx;
> > +				ctx->offset[i] = ctx->idx * AARCH64_INSN_SIZE;
> >  			continue;
> >  		}
> >  		if (ret)
> > @@ -964,7 +967,7 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
> >  	 * instruction (end of program)
> >  	 */
> >  	if (ctx->image == NULL)
> > -		ctx->offset[i] = ctx->idx;
> > +		ctx->offset[i] = ctx->idx * AARCH64_INSN_SIZE;
> The changes in this function make sense.
>

Patch

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 6a83f3070985..7b94e0c5e134 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -152,10 +152,12 @@  static inline int bpf2a64_offset(int bpf_insn, int off,
 	bpf_insn++;
 	/*
 	 * Whereas arm64 branch instructions encode the offset
-	 * from the branch itself, so we must subtract 1 from the
+	 * from the branch itself, so we must subtract 4 from the
 	 * instruction offset.
 	 */
-	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
+	return (ctx->offset[bpf_insn + off] -
+		(ctx->offset[bpf_insn] - AARCH64_INSN_SIZE)) /
+		AARCH64_INSN_SIZE;
 }
 
 static void jit_fill_hole(void *area, unsigned int size)
@@ -946,13 +948,14 @@  static int build_body(struct jit_ctx *ctx, bool extra_pass)
 		const struct bpf_insn *insn = &prog->insnsi[i];
 		int ret;
 
+		/* BPF line info needs byte-offset instead of insn-offset */
 		if (ctx->image == NULL)
-			ctx->offset[i] = ctx->idx;
+			ctx->offset[i] = ctx->idx * AARCH64_INSN_SIZE;
 		ret = build_insn(insn, ctx, extra_pass);
 		if (ret > 0) {
 			i++;
 			if (ctx->image == NULL)
-				ctx->offset[i] = ctx->idx;
+				ctx->offset[i] = ctx->idx * AARCH64_INSN_SIZE;
 			continue;
 		}
 		if (ret)
@@ -964,7 +967,7 @@  static int build_body(struct jit_ctx *ctx, bool extra_pass)
 	 * instruction (end of program)
 	 */
 	if (ctx->image == NULL)
-		ctx->offset[i] = ctx->idx;
+		ctx->offset[i] = ctx->idx * AARCH64_INSN_SIZE;
 
 	return 0;
 }