Message ID | 20250121012901.87763-4-kerneljasonxing@gmail.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Netdev Maintainers |
Series | net-timestamp: bpf extension to equip applications transparently |
On 1/20/25 5:28 PM, Jason Xing wrote:
> Applying the new member allow_tcp_access in the existing callbacks
> where is_fullsock is set to 1 can help us stop UDP socket accessing
> struct tcp_sock, or else it could be catastrophe leading to panic.
>
> For now, those existing callbacks are used only for TCP. I believe
> in the short run, we will have timestamping UDP callbacks support.

The commit message needs adjustment. UDP is not supported yet, so this change
feels like it's unnecessary based on the commit message. However, even without
UDP support, the new timestamping callbacks cannot directly write some fields
because the sk lock is not held, so this change is needed for TCP timestamping
support.

To keep it simple, instead of distinguishing between read and write access, we
disallow all read/write access to the tcp_sock through the older bpf_sock_ops
ctx. The new timestamping callbacks can use newer helpers to read everything
from a sk (e.g. bpf_core_cast), so nothing is lost.

The "allow_tcp_access" flag is added to indicate that the callback site has a
tcp_sock locked. Yes, it will make future UDP support easier because a udp_sock
is not a tcp_sock to begin with.

>
> Signed-off-by: Jason Xing <kerneljasonxing@gmail.com>
> ---
>  include/linux/filter.h | 1 +
>  include/net/tcp.h      | 1 +
>  net/core/filter.c      | 8 ++++----
>  net/ipv4/tcp_input.c   | 2 ++
>  net/ipv4/tcp_output.c  | 2 ++
>  5 files changed, 10 insertions(+), 4 deletions(-)
>
> diff --git a/include/linux/filter.h b/include/linux/filter.h
> index a3ea46281595..1b1333a90b4a 100644
> --- a/include/linux/filter.h
> +++ b/include/linux/filter.h
> @@ -1508,6 +1508,7 @@ struct bpf_sock_ops_kern {
>  	void	*skb_data_end;
>  	u8	op;
>  	u8	is_fullsock;
> +	u8	allow_tcp_access;

It is useful to add a comment here to explain the sockops callback has the
tcp_sock locked when it is set.

>  	u8	remaining_opt_len;
>  	u64	temp;			/* temp and everything after is not
>  					 * initialized to 0 before calling
> diff --git a/include/net/tcp.h b/include/net/tcp.h
> index 5b2b04835688..293047694710 100644
> --- a/include/net/tcp.h
> +++ b/include/net/tcp.h
> @@ -2649,6 +2649,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
>  	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
>  	if (sk_fullsock(sk)) {
>  		sock_ops.is_fullsock = 1;
> +		sock_ops.allow_tcp_access = 1;
>  		sock_owned_by_me(sk);
>  	}
>
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 8e2715b7ac8a..fdd305b4cfbb 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -10381,10 +10381,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
>  		}                                                             \
>  		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
>  						struct bpf_sock_ops_kern,     \
> -						is_fullsock),                 \
> +						allow_tcp_access),            \
>  				      fullsock_reg, si->src_reg,              \
>  				      offsetof(struct bpf_sock_ops_kern,      \
> -					       is_fullsock));                 \
> +					       allow_tcp_access));            \
>  		*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
>  		if (si->dst_reg == si->src_reg)                               \
>  			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
> @@ -10469,10 +10469,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
>  					       temp));                        \
>  		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
>  						struct bpf_sock_ops_kern,     \
> -						is_fullsock),                 \
> +						allow_tcp_access),            \
>  				      reg, si->dst_reg,                       \
>  				      offsetof(struct bpf_sock_ops_kern,      \
> -					       is_fullsock));                 \
> +					       allow_tcp_access));            \
>  		*insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
>  		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
>  						struct bpf_sock_ops_kern, sk),\
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> index eb82e01da911..77185479ed5e 100644
> --- a/net/ipv4/tcp_input.c
> +++ b/net/ipv4/tcp_input.c
> @@ -169,6 +169,7 @@ static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
>  		memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
>  		sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
>  		sock_ops.is_fullsock = 1;
> +		sock_ops.allow_tcp_access = 1;
>  		sock_ops.sk = sk;
>  		bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
>
> @@ -185,6 +186,7 @@ static void bpf_skops_established(struct sock *sk, int bpf_op,
>  	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
>  	sock_ops.op = bpf_op;
>  	sock_ops.is_fullsock = 1;
> +	sock_ops.allow_tcp_access = 1;
>  	sock_ops.sk = sk;
>  	/* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
>  	if (skb)
> diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
> index 0e5b9a654254..695749807c09 100644
> --- a/net/ipv4/tcp_output.c
> +++ b/net/ipv4/tcp_output.c
> @@ -522,6 +522,7 @@ static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
>  		sock_owned_by_me(sk);
>
>  		sock_ops.is_fullsock = 1;
> +		sock_ops.allow_tcp_access = 1;
>  		sock_ops.sk = sk;
>  	}
>
> @@ -567,6 +568,7 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
>  		sock_owned_by_me(sk);
>
>  		sock_ops.is_fullsock = 1;
> +		sock_ops.allow_tcp_access = 1;
>  		sock_ops.sk = sk;
>  	}
>
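A minimal sketch of the approach Martin describes above: a sockops program that reads tcp_sock state straight from the socket pointer with bpf_core_cast() instead of going through the tcp_sock-backed ctx fields. The fields read, the trace message and the return value are assumptions for illustration only, not part of this series; read access done this way does not depend on the socket lock.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch: read tcp_sock state via bpf_core_cast() instead of
 * the bpf_sock_ops ctx fields that this patch gates on allow_tcp_access.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

SEC("sockops")
int tstamp_sockops(struct bpf_sock_ops *skops)
{
	struct bpf_sock *sk = skops->sk;
	struct tcp_sock *tp;

	if (!sk)
		return 1;

	/* Read-only cast of the socket; works for reads even at callback
	 * sites that do not hold the socket lock.
	 */
	tp = bpf_core_cast(sk, struct tcp_sock);
	bpf_printk("op=%u srtt_us=%u", skops->op, tp->srtt_us);

	return 1;
}

char _license[] SEC("license") = "GPL";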
On Sat, Jan 25, 2025 at 7:41 AM Martin KaFai Lau <martin.lau@linux.dev> wrote:
>
> On 1/20/25 5:28 PM, Jason Xing wrote:
> > Applying the new member allow_tcp_access in the existing callbacks
> > where is_fullsock is set to 1 can help us stop UDP socket accessing
> > struct tcp_sock, or else it could be catastrophe leading to panic.
> >
> > For now, those existing callbacks are used only for TCP. I believe
> > in the short run, we will have timestamping UDP callbacks support.
>
> The commit message needs adjustment. UDP is not supported yet, so this change
> feels like it's unnecessary based on the commit message. However, even without
> UDP support, the new timestamping callbacks cannot directly write some fields
> because the sk lock is not held, so this change is needed for TCP timestamping

Thanks and I will revise them. But I still want to say that the
timestamping callbacks after this series are all under the protection
of socket lock.

> support.
>
> To keep it simple, instead of distinguishing between read and write access, we
> disallow all read/write access to the tcp_sock through the older bpf_sock_ops
> ctx. The new timestamping callbacks can use newer helpers to read everything
> from a sk (e.g. bpf_core_cast), so nothing is lost.
>
> The "allow_tcp_access" flag is added to indicate that the callback site has a
> tcp_sock locked. Yes, it will make future UDP support easier because a udp_sock
> is not a tcp_sock to begin with.

I will add them :)

> >
> > Signed-off-by: Jason Xing <kerneljasonxing@gmail.com>
> > ---
> >  include/linux/filter.h | 1 +
> >  include/net/tcp.h      | 1 +
> >  net/core/filter.c      | 8 ++++----
> >  net/ipv4/tcp_input.c   | 2 ++
> >  net/ipv4/tcp_output.c  | 2 ++
> >  5 files changed, 10 insertions(+), 4 deletions(-)
> >
> > diff --git a/include/linux/filter.h b/include/linux/filter.h
> > index a3ea46281595..1b1333a90b4a 100644
> > --- a/include/linux/filter.h
> > +++ b/include/linux/filter.h
> > @@ -1508,6 +1508,7 @@ struct bpf_sock_ops_kern {
> >  	void	*skb_data_end;
> >  	u8	op;
> >  	u8	is_fullsock;
> > +	u8	allow_tcp_access;
>
> It is useful to add a comment here to explain the sockops callback has the
> tcp_sock locked when it is set.

Got it!

Thanks,
Jason
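A possible shape for the comment being requested, shown on a trimmed-down stand-in of the struct (both the wording and the stand-in type are assumptions, not the version that eventually landed):

/* Stand-in for illustration; the real struct lives in include/linux/filter.h. */
struct bpf_sock_ops_kern_example {
	void *sk;
	unsigned char op;
	unsigned char is_fullsock;
	unsigned char allow_tcp_access;	/* Set only at callback sites that own
					 * the tcp_sock's socket lock, so the
					 * sockops program may safely access
					 * tcp_sock fields through the ctx.
					 */
	unsigned char remaining_opt_len;
};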
On Sat, Jan 25, 2025 at 8:28 AM Jason Xing <kerneljasonxing@gmail.com> wrote:
>
> On Sat, Jan 25, 2025 at 7:41 AM Martin KaFai Lau <martin.lau@linux.dev> wrote:
> >
> > On 1/20/25 5:28 PM, Jason Xing wrote:
> > > Applying the new member allow_tcp_access in the existing callbacks
> > > where is_fullsock is set to 1 can help us stop UDP socket accessing
> > > struct tcp_sock, or else it could be catastrophe leading to panic.
> > >
> > > For now, those existing callbacks are used only for TCP. I believe
> > > in the short run, we will have timestamping UDP callbacks support.
> >
> > The commit message needs adjustment. UDP is not supported yet, so this change
> > feels like it's unnecessary based on the commit message. However, even without
> > UDP support, the new timestamping callbacks cannot directly write some fields
> > because the sk lock is not held, so this change is needed for TCP timestamping
>
> Thanks and I will revise them. But I still want to say that the
> timestamping callbacks after this series are all under the protection
> of socket lock.

For the record: my statement about the socket lock above was wrong, because
there remain cases where a path such as
i40e_intr()->i40e_ptp_tx_hwtstamp()->skb_tstamp_tx()->__skb_tstamp_tx()
is not protected by the socket lock. With that said, directly accessing
tcp_sock is not safe even if the socket type is TCP.

Thanks,
Jason
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a3ea46281595..1b1333a90b4a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1508,6 +1508,7 @@ struct bpf_sock_ops_kern {
 	void	*skb_data_end;
 	u8	op;
 	u8	is_fullsock;
+	u8	allow_tcp_access;
 	u8	remaining_opt_len;
 	u64	temp;			/* temp and everything after is not
 					 * initialized to 0 before calling
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5b2b04835688..293047694710 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2649,6 +2649,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 	if (sk_fullsock(sk)) {
 		sock_ops.is_fullsock = 1;
+		sock_ops.allow_tcp_access = 1;
 		sock_owned_by_me(sk);
 	}

diff --git a/net/core/filter.c b/net/core/filter.c
index 8e2715b7ac8a..fdd305b4cfbb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -10381,10 +10381,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		}                                                             \
 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
 						struct bpf_sock_ops_kern,     \
-						is_fullsock),                 \
+						allow_tcp_access),            \
 				      fullsock_reg, si->src_reg,              \
 				      offsetof(struct bpf_sock_ops_kern,      \
-					       is_fullsock));                 \
+					       allow_tcp_access));            \
 		*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
 		if (si->dst_reg == si->src_reg)                               \
 			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
@@ -10469,10 +10469,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 					       temp));                        \
 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
 						struct bpf_sock_ops_kern,     \
-						is_fullsock),                 \
+						allow_tcp_access),            \
 				      reg, si->dst_reg,                       \
 				      offsetof(struct bpf_sock_ops_kern,      \
-					       is_fullsock));                 \
+					       allow_tcp_access));            \
 		*insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
 						struct bpf_sock_ops_kern, sk),\
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb82e01da911..77185479ed5e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -169,6 +169,7 @@ static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
 		memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 		sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
 		sock_ops.is_fullsock = 1;
+		sock_ops.allow_tcp_access = 1;
 		sock_ops.sk = sk;
 		bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));

@@ -185,6 +186,7 @@ static void bpf_skops_established(struct sock *sk, int bpf_op,
 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 	sock_ops.op = bpf_op;
 	sock_ops.is_fullsock = 1;
+	sock_ops.allow_tcp_access = 1;
 	sock_ops.sk = sk;
 	/* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
 	if (skb)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0e5b9a654254..695749807c09 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -522,6 +522,7 @@ static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
 		sock_owned_by_me(sk);

 		sock_ops.is_fullsock = 1;
+		sock_ops.allow_tcp_access = 1;
 		sock_ops.sk = sk;
 	}

@@ -567,6 +568,7 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
 		sock_owned_by_me(sk);

 		sock_ops.is_fullsock = 1;
+		sock_ops.allow_tcp_access = 1;
 		sock_ops.sk = sk;
 	}
Applying the new member allow_tcp_access in the existing callbacks
where is_fullsock is set to 1 can help us stop UDP socket accessing
struct tcp_sock, or else it could be catastrophe leading to panic.

For now, those existing callbacks are used only for TCP. I believe
in the short run, we will have timestamping UDP callbacks support.

Signed-off-by: Jason Xing <kerneljasonxing@gmail.com>
---
 include/linux/filter.h | 1 +
 include/net/tcp.h      | 1 +
 net/core/filter.c      | 8 ++++----
 net/ipv4/tcp_input.c   | 2 ++
 net/ipv4/tcp_output.c  | 2 ++
 5 files changed, 10 insertions(+), 4 deletions(-)
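To make the net/core/filter.c change concrete: after this patch, the instructions emitted by sock_ops_convert_ctx_access() for a tcp_sock-backed ctx field test allow_tcp_access instead of is_fullsock before touching the socket, and the field reads back as 0 when the flag is clear. Below is a rough, self-contained C rendering of that guard; the stub types and the read_srtt_us() helper are illustrative assumptions, not kernel code. This is also why a callback site that cannot guarantee the socket lock, such as the hardware timestamping path mentioned in the thread, must leave the flag unset.

#include <stdio.h>

/* Stand-in types for illustration only; the real ones are kernel-internal. */
struct tcp_sock_stub { unsigned int srtt_us; };

struct bpf_sock_ops_kern_stub {
	struct tcp_sock_stub *sk;
	unsigned char is_fullsock;
	unsigned char allow_tcp_access;
};

/* Roughly what the rewritten BPF instructions do for a read of a
 * tcp_sock-backed ctx field after this patch: bail out with 0 unless the
 * callback site marked the tcp_sock as locked.
 */
static unsigned int read_srtt_us(const struct bpf_sock_ops_kern_stub *ops)
{
	if (!ops->allow_tcp_access)	/* was: if (!ops->is_fullsock) */
		return 0;
	return ops->sk->srtt_us;
}

int main(void)
{
	struct tcp_sock_stub tp = { .srtt_us = 123456 };
	struct bpf_sock_ops_kern_stub ops = { .sk = &tp, .is_fullsock = 1 };

	printf("%u\n", read_srtt_us(&ops));	/* 0: lock not marked as held */
	ops.allow_tcp_access = 1;
	printf("%u\n", read_srtt_us(&ops));	/* 123456 */
	return 0;
}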