Message ID | 20220716125108.1011206-2-pulehui@huawei.com (mailing list archive)
---|---
State | Superseded
Delegated to: | BPF
Series | cleanup for data casting
On 7/16/22 5:51 AM, Pu Lehui wrote:
> Memory addresses are conceptually unsigned, so (unsigned long) casting
> makes more sense. Let's make the change for conceptual uniformity;
> there is no functional change.
>
> Signed-off-by: Pu Lehui <pulehui@huawei.com>

Only a few in the bpf system; agree that we can do the change so that in
the future we can recommend 'unsigned long' vs. 'long' casting based on
the existing code base.

Acked-by: Yonghong Song <yhs@fb.com>

> ---
>  kernel/bpf/core.c     | 2 +-
>  kernel/bpf/helpers.c  | 6 +++---
>  kernel/bpf/syscall.c  | 2 +-
>  kernel/bpf/verifier.c | 6 +++---
>  4 files changed, 8 insertions(+), 8 deletions(-)
> [...]
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index cfb8a50a9f12..e14b399dd408 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1954,7 +1954,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 		CONT;							\
 	LDX_PROBE_MEM_##SIZEOP:						\
 		bpf_probe_read_kernel(&DST, sizeof(SIZE),		\
-				      (const void *)(long) (SRC + insn->off));	\
+				      (const void *)(unsigned long) (SRC + insn->off));	\
 		DST = *((SIZE *)&DST);					\
 		CONT;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index a1c84d256f83..92c01dd007a6 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -903,7 +903,7 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
 			err = snprintf(tmp_buf,
 				       (tmp_buf_end - tmp_buf),
 				       "%pB",
-				       (void *)(long)raw_args[num_spec]);
+				       (void *)(unsigned long)raw_args[num_spec]);
 			tmp_buf += (err + 1);
 		}

@@ -929,7 +929,7 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
 				goto out;
 			}

-			unsafe_ptr = (char *)(long)raw_args[num_spec];
+			unsafe_ptr = (char *)(unsigned long)raw_args[num_spec];
 			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
 						       sizeof_cur_ip);
 			if (err < 0)
@@ -966,7 +966,7 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
 				goto out;
 			}

-			unsafe_ptr = (char *)(long)raw_args[num_spec];
+			unsafe_ptr = (char *)(unsigned long)raw_args[num_spec];
 			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
 						    fmt_ptype,
 						    tmp_buf_end - tmp_buf);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 83c7136c5788..d1380473e620 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -5108,7 +5108,7 @@ BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
 			bpf_prog_put(prog);
 			return -EBUSY;
 		}
-		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
+		attr->test.retval = bpf_prog_run(prog, (void *) (unsigned long) attr->test.ctx_in);
 		__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */, &run_ctx);
 		bpf_prog_put(prog);
 		return 0;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c59c3df0fea6..d91f17598833 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4445,7 +4445,7 @@ static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
 	err = map->ops->map_direct_value_addr(map, &addr, off);
 	if (err)
 		return err;
-	ptr = (void *)(long)addr + off;
+	ptr = (void *)(unsigned long)addr + off;

 	switch (size) {
 	case sizeof(u8):
@@ -6113,7 +6113,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 			return err;
 		}

-		str_ptr = (char *)(long)(map_addr);
+		str_ptr = (char *)(unsigned long)(map_addr);
 		if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
 			verbose(env, "string is not zero-terminated\n");
 			return -EINVAL;
@@ -7099,7 +7099,7 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
 		verbose(env, "verifier bug\n");
 		return -EFAULT;
 	}
-	fmt = (char *)(long)fmt_addr + fmt_map_off;
+	fmt = (char *)(unsigned long)fmt_addr + fmt_map_off;

 	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
 	 * can focus on validating the format specifiers.
Memory addresses are conceptually unsigned, so (unsigned long) casting
makes more sense. Let's make the change for conceptual uniformity; there
is no functional change.

Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 kernel/bpf/core.c     | 2 +-
 kernel/bpf/helpers.c  | 6 +++---
 kernel/bpf/syscall.c  | 2 +-
 kernel/bpf/verifier.c | 6 +++---
 4 files changed, 8 insertions(+), 8 deletions(-)
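For readers outside the thread, here is a minimal standalone sketch of the pattern the patch touches (not kernel code and not part of the patch; the address value and the program are made up for illustration). It shows why the change is purely conceptual: on an LP64 target, converting a 64-bit address to a pointer through (long) or (unsigned long) yields the same bit pattern, so only the signedness conveyed to the reader changes.

/*
 * Illustrative only -- not kernel code and not part of this patch.
 * The address value below is made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0xffffffffc0001000ULL;	/* made-up kernel-style address */

	void *via_long  = (void *)(long)addr;		/* old style */
	void *via_ulong = (void *)(unsigned long)addr;	/* new style */

	/* Same pointer bits on LP64, hence "no functional change". */
	printf("%p %p equal=%d\n", via_long, via_ulong, via_long == via_ulong);
	return 0;
}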