@@ -31,6 +31,8 @@
FN(TCP_AOFAILURE) \
FN(SOCKET_BACKLOG) \
FN(TCP_FLAGS) \
+ FN(TCP_CONNREQNOTACCEPTABLE) \
+ FN(TCP_ABORTONDATA) \
FN(TCP_ZEROWINDOW) \
FN(TCP_OLD_DATA) \
FN(TCP_OVERWINDOW) \
@@ -203,6 +205,10 @@ enum skb_drop_reason {
SKB_DROP_REASON_SOCKET_BACKLOG,
/** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */
SKB_DROP_REASON_TCP_FLAGS,
+	/** @SKB_DROP_REASON_TCP_CONNREQNOTACCEPTABLE: connection request is not acceptable */
+ SKB_DROP_REASON_TCP_CONNREQNOTACCEPTABLE,
+	/** @SKB_DROP_REASON_TCP_ABORTONDATA: abort on data, see LINUX_MIB_TCPABORTONDATA */
+ SKB_DROP_REASON_TCP_ABORTONDATA,
/**
* @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero,
* see LINUX_MIB_TCPZEROWINDOWDROP
@@ -348,7 +348,8 @@ void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *drop_reason);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -397,7 +398,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req, bool fastopen,
bool *lost_race);
int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb);
+ struct sk_buff *skb, enum skb_drop_reason *reason);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
@@ -6616,7 +6616,8 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
* address independent.
*/
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *drop_reason)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -6632,8 +6633,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
goto discard;
case TCP_LISTEN:
- if (th->ack)
+ if (th->ack) {
+ SKB_DR_SET(*drop_reason, TCP_FLAGS);
return 1;
+ }
if (th->rst) {
SKB_DR_SET(reason, TCP_RESET);
@@ -6653,8 +6656,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
local_bh_enable();
rcu_read_unlock();
- if (!acceptable)
+ if (!acceptable) {
+ SKB_DR_SET(*drop_reason, TCP_CONNREQNOTACCEPTABLE);
return 1;
+ }
consume_skb(skb);
return 0;
}
@@ -6704,8 +6709,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
FLAG_NO_CHALLENGE_ACK);
if ((int)reason <= 0) {
- if (sk->sk_state == TCP_SYN_RECV)
+ if (sk->sk_state == TCP_SYN_RECV) {
+ if ((int)reason < 0)
+ *drop_reason = -reason;
return 1; /* send one RST */
+ }
/* accept old ack during closing */
if ((int)reason < 0) {
tcp_send_challenge_ack(sk);
@@ -6781,6 +6789,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (READ_ONCE(tp->linger2) < 0) {
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+ SKB_DR_SET(*drop_reason, TCP_ABORTONDATA);
return 1;
}
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
@@ -6790,6 +6799,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_fastopen_active_disable(sk);
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+ SKB_DR_SET(*drop_reason, TCP_ABORTONDATA);
return 1;
}
@@ -6855,6 +6865,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
tcp_reset(sk, skb);
+ SKB_DR_SET(*drop_reason, TCP_ABORTONDATA);
return 1;
}
}
@@ -1918,7 +1918,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (!nsk)
goto discard;
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb)) {
+ if (tcp_child_process(sk, nsk, skb, &reason)) {
rsk = nsk;
goto reset;
}
@@ -1927,7 +1927,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb)) {
+ if (tcp_rcv_state_process(sk, skb, &reason)) {
rsk = sk;
goto reset;
}
@@ -2276,7 +2276,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (nsk == sk) {
reqsk_put(req);
tcp_v4_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
+ } else if (tcp_child_process(sk, nsk, skb, &drop_reason)) {
tcp_v4_send_reset(nsk, skb);
goto discard_and_relse;
} else {
@@ -912,7 +912,7 @@ EXPORT_SYMBOL(tcp_check_req);
*/
int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb)
+ struct sk_buff *skb, enum skb_drop_reason *reason)
__releases(&((child)->sk_lock.slock))
{
int ret = 0;
@@ -923,7 +923,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
tcp_segs_in(tcp_sk(child), skb);
if (!sock_owned_by_user(child)) {
- ret = tcp_rcv_state_process(child, skb);
+ ret = tcp_rcv_state_process(child, skb, reason);
/* Wakeup parent, send SIGIO */
if (state == TCP_SYN_RECV && child->sk_state != state)
parent->sk_data_ready(parent);
@@ -1657,7 +1657,7 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
goto discard;
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb))
+ if (tcp_child_process(sk, nsk, skb, &reason))
goto reset;
if (opt_skb)
__kfree_skb(opt_skb);
@@ -1666,7 +1666,7 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb))
+ if (tcp_rcv_state_process(sk, skb, &reason))
goto reset;
if (opt_skb)
goto ipv6_pktoptions;
@@ -1856,7 +1856,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
if (nsk == sk) {
reqsk_put(req);
tcp_v6_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
+ } else if (tcp_child_process(sk, nsk, skb, &drop_reason)) {
tcp_v6_send_reset(nsk, skb);
goto discard_and_relse;
} else {