@@ -23,12 +23,12 @@ struct ctl_table;
struct ctl_table_header;
struct task_struct;
-unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
- const struct bpf_insn *insn);
-unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
- const struct bpf_insn *insn);
-unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
- const struct bpf_insn *insn);
+u64 __cgroup_bpf_run_lsm_sock(const void *ctx,
+ const struct bpf_insn *insn);
+u64 __cgroup_bpf_run_lsm_socket(const void *ctx,
+ const struct bpf_insn *insn);
+u64 __cgroup_bpf_run_lsm_current(const void *ctx,
+ const struct bpf_insn *insn);
#ifdef CONFIG_CGROUP_BPF
@@ -59,8 +59,8 @@ typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
-typedef unsigned int (*bpf_func_t)(const void *,
- const struct bpf_insn *);
+typedef u64 (*bpf_func_t)(const void *,
+ const struct bpf_insn *);
struct bpf_iter_seq_info {
const struct seq_operations *seq_ops;
bpf_iter_init_seq_priv_t init_seq_private;
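The widening of bpf_func_t above is the crux of the change: a BPF program's R0 is a 64-bit register, but a callback declared as returning unsigned int drops the upper 32 bits at the C level before the caller ever sees them. A minimal userspace sketch of that effect (illustrative only, not kernel code; all names here are made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for bpf_func_t before and after the change. */
	typedef unsigned int (*run_fn_u32)(const void *ctx);
	typedef uint64_t     (*run_fn_u64)(const void *ctx);

	/* What the program body computed: a full 64-bit value. */
	static uint64_t bpf_result(void) { return 0xdeadbeefcafe0001ULL; }

	/* Declared return type decides what the caller sees. */
	static unsigned int run_u32(const void *ctx) { (void)ctx; return bpf_result(); } /* upper 32 bits dropped */
	static uint64_t     run_u64(const void *ctx) { (void)ctx; return bpf_result(); } /* full value preserved */

	int main(void)
	{
		run_fn_u32 f32 = run_u32;
		run_fn_u64 f64 = run_u64;

		printf("u32 callback: %x\n", f32(NULL));                       /* cafe0001 */
		printf("u64 callback: %llx\n", (unsigned long long)f64(NULL)); /* deadbeefcafe0001 */
		return 0;
	}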
@@ -1004,7 +1004,7 @@ struct bpf_dispatcher {
struct bpf_ksym ksym;
};
-static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
+static __always_inline __nocfi u64 bpf_dispatcher_nop_func(
const void *ctx,
const struct bpf_insn *insnsi,
bpf_func_t bpf_func)
@@ -1049,7 +1049,7 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
#define DEFINE_BPF_DISPATCHER(name) \
notrace BPF_DISPATCHER_ATTRIBUTES \
- noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
+ noinline __nocfi u64 bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
bpf_func_t bpf_func) \
@@ -1062,7 +1062,7 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
BPF_DISPATCHER_INIT_CALL(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name) \
- unsigned int bpf_dispatcher_##name##_func( \
+ u64 bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
bpf_func_t bpf_func); \
@@ -1265,7 +1265,7 @@ struct bpf_prog {
u8 tag[BPF_TAG_SIZE];
struct bpf_prog_stats __percpu *stats;
int __percpu *active;
- unsigned int (*bpf_func)(const void *ctx,
+ u64 (*bpf_func)(const void *ctx,
const struct bpf_insn *insn);
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
@@ -1619,9 +1619,9 @@ static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN (1 << 0)
-typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
+typedef u64 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
-static __always_inline u32
+static __always_inline u64
bpf_prog_run_array(const struct bpf_prog_array *array,
const void *ctx, bpf_prog_run_fn run_prog)
{
@@ -1629,7 +1629,7 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
const struct bpf_prog *prog;
struct bpf_run_ctx *old_run_ctx;
struct bpf_trace_run_ctx run_ctx;
- u32 ret = 1;
+ u64 ret = 1;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
@@ -1659,7 +1659,7 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
* section and disable preemption for that program alone, so it can access
* rcu-protected dynamically sized maps.
*/
-static __always_inline u32
+static __always_inline u64
bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
const void *ctx, bpf_prog_run_fn run_prog)
{
@@ -1668,7 +1668,7 @@ bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_trace_run_ctx run_ctx;
- u32 ret = 1;
+ u64 ret = 1;
might_fault();
@@ -573,16 +573,16 @@ extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log, const struct
enum bpf_access_type atype, u32 *next_btf_id,
enum bpf_type_flag *flag);
-typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
- const struct bpf_insn *insnsi,
- unsigned int (*bpf_func)(const void *,
- const struct bpf_insn *));
+typedef u64 (*bpf_dispatcher_fn)(const void *ctx,
+ const struct bpf_insn *insnsi,
+ u64 (*bpf_func)(const void *,
+ const struct bpf_insn *));
-static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
+static __always_inline u64 __bpf_prog_run(const struct bpf_prog *prog,
const void *ctx,
bpf_dispatcher_fn dfunc)
{
- u32 ret;
+ u64 ret;
cant_migrate();
if (static_branch_unlikely(&bpf_stats_enabled_key)) {
@@ -602,7 +602,7 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
return ret;
}
-static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
+static __always_inline u64 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
{
return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
}
@@ -615,10 +615,10 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void
* invocation of a BPF program does not require reentrancy protection
* against a BPF program which is invoked from a preempting task.
*/
-static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
+static inline u64 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
const void *ctx)
{
- u32 ret;
+ u64 ret;
migrate_disable();
ret = bpf_prog_run(prog, ctx);
@@ -714,13 +714,13 @@ static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
}
/* Must be invoked with migration disabled */
-static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
+static inline u64 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
const void *ctx)
{
const struct sk_buff *skb = ctx;
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];
- u32 res;
+ u64 res;
if (unlikely(prog->cb_access)) {
memcpy(cb_saved, cb_data, sizeof(cb_saved));
@@ -735,10 +735,10 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
return res;
}
-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+static inline u64 bpf_prog_run_save_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
- u32 res;
+ u64 res;
migrate_disable();
res = __bpf_prog_run_save_cb(prog, skb);
@@ -746,11 +746,11 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
return res;
}
-static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
+static inline u64 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
u8 *cb_data = bpf_skb_cb(skb);
- u32 res;
+ u64 res;
if (unlikely(prog->cb_access))
memset(cb_data, 0, BPF_SKB_CB_LEN);
@@ -63,8 +63,8 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
return run_ctx.retval;
}
-unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
- const struct bpf_insn *insn)
+u64 __cgroup_bpf_run_lsm_sock(const void *ctx,
+ const struct bpf_insn *insn)
{
const struct bpf_prog *shim_prog;
struct sock *sk;
@@ -85,8 +85,8 @@ unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
return ret;
}
-unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
- const struct bpf_insn *insn)
+u64 __cgroup_bpf_run_lsm_socket(const void *ctx,
+ const struct bpf_insn *insn)
{
const struct bpf_prog *shim_prog;
struct socket *sock;
@@ -107,8 +107,8 @@ unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
return ret;
}
-unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
- const struct bpf_insn *insn)
+u64 __cgroup_bpf_run_lsm_current(const void *ctx,
+ const struct bpf_insn *insn)
{
const struct bpf_prog *shim_prog;
struct cgroup *cgrp;
@@ -2004,7 +2004,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
-static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
+static u64 PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
u64 stack[stack_size / sizeof(u64)]; \
u64 regs[MAX_BPF_EXT_REG] = {}; \
@@ -2048,8 +2048,8 @@ EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
-static unsigned int (*interpreters[])(const void *ctx,
- const struct bpf_insn *insn) = {
+static u64 (*interpreters[])(const void *ctx,
+ const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
@@ -2074,8 +2074,8 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
}
#else
-static unsigned int __bpf_prog_ret0_warn(const void *ctx,
- const struct bpf_insn *insn)
+static u64 __bpf_prog_ret0_warn(const void *ctx,
+ const struct bpf_insn *insn)
{
/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
* is not working properly, so warn about it!
@@ -2210,8 +2210,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
-static unsigned int __bpf_prog_ret1(const void *ctx,
- const struct bpf_insn *insn)
+static u64 __bpf_prog_ret1(const void *ctx,
+ const struct bpf_insn *insn)
{
return 1;
}
@@ -246,8 +246,8 @@ static int bpf_prog_offload_translate(struct bpf_prog *prog)
return ret;
}
-static unsigned int bpf_prog_warn_on_exec(const void *ctx,
- const struct bpf_insn *insn)
+static u64 bpf_prog_warn_on_exec(const void *ctx,
+ const struct bpf_insn *insn)
{
WARN(1, "attempt to execute device eBPF program on the host!");
return 0;
@@ -1444,8 +1444,11 @@ static unsigned int fanout_demux_bpf(struct packet_fanout *f,
rcu_read_lock();
prog = rcu_dereference(f->bpf_prog);
- if (prog)
- ret = bpf_prog_run_clear_cb(prog, skb) % num;
+ if (prog) {
+ ret = bpf_prog_run_clear_cb(prog, skb);
+ /* Do the modulus in 32-bit width to match the old u32 behaviour and avoid a 64-bit division on 32-bit architectures */
+ ret %= num;
+ }
rcu_read_unlock();
return ret;
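
For the fanout hook above, a small standalone sketch (again illustrative, not the kernel function) shows why the modulus is taken on the truncated 32-bit value rather than on the raw u64 return: applying the divisor to the full 64-bit value could select a different socket index, whereas truncating first reproduces the previous u32 arithmetic exactly.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t prog_ret = 0x100000007ULL; /* hypothetical 64-bit program return */
		unsigned int num = 3;               /* number of fanout sockets */

		unsigned int ret = prog_ret;        /* truncate to 32 bits: 7 */
		ret %= num;                         /* 7 % 3 == 1, same as the old u32 code */

		uint64_t wide = prog_ret % num;     /* 4294967303 % 3 == 2: a different socket */

		printf("32-bit: %u, 64-bit: %llu\n", ret, (unsigned long long)wide);
		return 0;
	}

Keeping the arithmetic in 32 bits also means no 64-bit division helper is needed on 32-bit architectures.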