diff mbox series

[bpf] bpf, arm64: calculate offset as byte-offset for bpf line info

Message ID 20220104014236.1512639-1-houtao1@huawei.com (mailing list archive)
State Changes Requested
Delegated to: BPF
Headers show
Series [bpf] bpf, arm64: calculate offset as byte-offset for bpf line info | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf
netdev/fixes_present success Fixes tag present in non-next series
netdev/subject_prefix success Link
netdev/cover_letter success Single patches do not need cover letters
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 3 maintainers not CCed: kpsingh@kernel.org john.fastabend@gmail.com songliubraving@fb.com
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 68 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-PR fail PR summary
bpf/vmtest-bpf fail VM_Test

Commit Message

Hou Tao Jan. 4, 2022, 1:42 a.m. UTC
The bpf line info for arm64 is broken due to two reasons:
(1) insn_to_jit_off passed to bpf_prog_fill_jited_linfo() is
    calculated at instruction granularity instead of byte
    granularity.
(2) insn_to_jit_off only considers the body itself and ignores
    prologue before the body.

So fix it by calculating the offset as a byte-offset and by doing
build_prologue() first in the first JIT pass.

Fixes: 37ab566c178d ("bpf: arm64: Enable arm64 jit to provide bpf_line_info")
Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 arch/arm64/net/bpf_jit_comp.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

Comments

Daniel Borkmann Jan. 6, 2022, 10 p.m. UTC | #1
On 1/4/22 2:42 AM, Hou Tao wrote:
> The bpf line info for arm64 is broken due to two reasons:
> (1) insn_to_jit_off passed to bpf_prog_fill_jited_linfo() is
>      calculated in instruction granularity instead of bytes
>      granularity.
> (2) insn_to_jit_off only considers the body itself and ignores
>      prologue before the body.
> 
> So fix it by calculating offset as byte-offset and do build_prologue()
> first in the first JIT pass.
> 
> Fixes: 37ab566c178d ("bpf: arm64: Enable arm64 jit to provide bpf_line_info")
> Signed-off-by: Hou Tao <houtao1@huawei.com>
> ---
>   arch/arm64/net/bpf_jit_comp.c | 27 +++++++++++++++++----------
>   1 file changed, 17 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
> index 148ca51325bb..d7a6d4b523c9 100644
> --- a/arch/arm64/net/bpf_jit_comp.c
> +++ b/arch/arm64/net/bpf_jit_comp.c
> @@ -24,6 +24,8 @@
>   
>   #include "bpf_jit.h"
>   
> +#define INSN_SZ (sizeof(u32))
> +
>   #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
>   #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
>   #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
> @@ -154,10 +156,11 @@ static inline int bpf2a64_offset(int bpf_insn, int off,
>   	bpf_insn++;
>   	/*
>   	 * Whereas arm64 branch instructions encode the offset
> -	 * from the branch itself, so we must subtract 1 from the
> +	 * from the branch itself, so we must subtract 4 from the
>   	 * instruction offset.
>   	 */
> -	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
> +	return (ctx->offset[bpf_insn + off] -
> +		(ctx->offset[bpf_insn] - INSN_SZ)) / INSN_SZ;
>   }
>   
>   static void jit_fill_hole(void *area, unsigned int size)
> @@ -955,13 +958,14 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
>   		const struct bpf_insn *insn = &prog->insnsi[i];
>   		int ret;
>   
> +		/* BPF line info needs byte-offset instead of insn-offset */
>   		if (ctx->image == NULL)
> -			ctx->offset[i] = ctx->idx;
> +			ctx->offset[i] = ctx->idx * INSN_SZ;
>   		ret = build_insn(insn, ctx, extra_pass);
>   		if (ret > 0) {
>   			i++;
>   			if (ctx->image == NULL)
> -				ctx->offset[i] = ctx->idx;
> +				ctx->offset[i] = ctx->idx * INSN_SZ;
>   			continue;
>   		}
>   		if (ret)
> @@ -973,7 +977,7 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
>   	 * instruction (end of program)
>   	 */
>   	if (ctx->image == NULL)
> -		ctx->offset[i] = ctx->idx;
> +		ctx->offset[i] = ctx->idx * INSN_SZ;
>   
>   	return 0;
>   }
> @@ -1058,15 +1062,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
>   		goto out_off;
>   	}
>   
> -	/* 1. Initial fake pass to compute ctx->idx. */
> -
> -	/* Fake pass to fill in ctx->offset. */
> -	if (build_body(&ctx, extra_pass)) {
> +	/*
> +	 * 1. Initial fake pass to compute ctx->idx and ctx->offset.
> +	 *
> +	 * BPF line info needs ctx->offset[i] to be the byte offset
> +	 * of instruction[i] in jited image, so build prologue first.
> +	 */
> +	if (build_prologue(&ctx, was_classic)) {
>   		prog = orig_prog;
>   		goto out_off;
>   	}
>   
> -	if (build_prologue(&ctx, was_classic)) {
> +	if (build_body(&ctx, extra_pass)) {
>   		prog = orig_prog;
>   		goto out_off;

Could you split this into two logical patches? Both 1/2 seem independent
of each other and should have been rather 2 patches instead of 1.

Did you check if also other JITs could be affected?

Thanks,
Daniel
Hou Tao Jan. 19, 2022, 2:45 p.m. UTC | #2
Hi,

On 1/7/2022 6:00 AM, Daniel Borkmann wrote:
> On 1/4/22 2:42 AM, Hou Tao wrote:
>> The bpf line info for arm64 is broken due to two reasons:
>> (1) insn_to_jit_off passed to bpf_prog_fill_jited_linfo() is
>>      calculated in instruction granularity instead of bytes
>>      granularity.
>> (2) insn_to_jit_off only considers the body itself and ignores
>>      prologue before the body.
>>
>> So fix it by calculating offset as byte-offset and do build_prologue()
>> first in the first JIT pass.
>>
[snip]
>> -    /* Fake pass to fill in ctx->offset. */
>> -    if (build_body(&ctx, extra_pass)) {
>> +    /*
>> +     * 1. Initial fake pass to compute ctx->idx and ctx->offset.
>> +     *
>> +     * BPF line info needs ctx->offset[i] to be the byte offset
>> +     * of instruction[i] in jited image, so build prologue first.
>> +     */
>> +    if (build_prologue(&ctx, was_classic)) {
>>           prog = orig_prog;
>>           goto out_off;
>>       }
>>   -    if (build_prologue(&ctx, was_classic)) {
>> +    if (build_body(&ctx, extra_pass)) {
>>           prog = orig_prog;
>>           goto out_off;
>
> Could you split this into two logical patches? Both 1/2 seem independent
> of each other and should have been rather 2 patches instead of 1.
>
Sorry for the late reply. Splitting this into two patches makes sense to me.
Will do it in v2.
> Did you check if also other JITs could be affected?
It seems sparc also doesn't represent the offset in bytes. I can check the
other arches as well, but unfortunately I don't have test environments for
those arches.

> Thanks,
> Daniel
> .
diff mbox series

Patch

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 148ca51325bb..d7a6d4b523c9 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -24,6 +24,8 @@ 
 
 #include "bpf_jit.h"
 
+#define INSN_SZ (sizeof(u32))
+
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
@@ -154,10 +156,11 @@  static inline int bpf2a64_offset(int bpf_insn, int off,
 	bpf_insn++;
 	/*
 	 * Whereas arm64 branch instructions encode the offset
-	 * from the branch itself, so we must subtract 1 from the
+	 * from the branch itself, so we must subtract 4 from the
 	 * instruction offset.
 	 */
-	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
+	return (ctx->offset[bpf_insn + off] -
+		(ctx->offset[bpf_insn] - INSN_SZ)) / INSN_SZ;
 }
 
 static void jit_fill_hole(void *area, unsigned int size)
@@ -955,13 +958,14 @@  static int build_body(struct jit_ctx *ctx, bool extra_pass)
 		const struct bpf_insn *insn = &prog->insnsi[i];
 		int ret;
 
+		/* BPF line info needs byte-offset instead of insn-offset */
 		if (ctx->image == NULL)
-			ctx->offset[i] = ctx->idx;
+			ctx->offset[i] = ctx->idx * INSN_SZ;
 		ret = build_insn(insn, ctx, extra_pass);
 		if (ret > 0) {
 			i++;
 			if (ctx->image == NULL)
-				ctx->offset[i] = ctx->idx;
+				ctx->offset[i] = ctx->idx * INSN_SZ;
 			continue;
 		}
 		if (ret)
@@ -973,7 +977,7 @@  static int build_body(struct jit_ctx *ctx, bool extra_pass)
 	 * instruction (end of program)
 	 */
 	if (ctx->image == NULL)
-		ctx->offset[i] = ctx->idx;
+		ctx->offset[i] = ctx->idx * INSN_SZ;
 
 	return 0;
 }
@@ -1058,15 +1062,18 @@  struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		goto out_off;
 	}
 
-	/* 1. Initial fake pass to compute ctx->idx. */
-
-	/* Fake pass to fill in ctx->offset. */
-	if (build_body(&ctx, extra_pass)) {
+	/*
+	 * 1. Initial fake pass to compute ctx->idx and ctx->offset.
+	 *
+	 * BPF line info needs ctx->offset[i] to be the byte offset
+	 * of instruction[i] in jited image, so build prologue first.
+	 */
+	if (build_prologue(&ctx, was_classic)) {
 		prog = orig_prog;
 		goto out_off;
 	}
 
-	if (build_prologue(&ctx, was_classic)) {
+	if (build_body(&ctx, extra_pass)) {
 		prog = orig_prog;
 		goto out_off;
 	}