--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1139,12 +1139,18 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);
-void sk_stream_kill_queues(struct sock *sk);
+void sk_stream_kill_queues_reason(struct sock *sk,
+ enum skb_drop_reason reason);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);

void __sk_flush_backlog(struct sock *sk);
+static inline void sk_stream_kill_queues(struct sock *sk)
+{
+ sk_stream_kill_queues_reason(sk, SKB_DROP_REASON_NOT_SPECIFIED);
+}
+
static inline bool sk_flush_backlog(struct sock *sk)
{
if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
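Note the shape of the header change: the old sk_stream_kill_queues() entry point survives as a static inline wrapper that forwards to the new _reason variant with SKB_DROP_REASON_NOT_SPECIFIED, so existing callers build and behave exactly as before. Callers that know why they are purging can pass a precise reason instead. A minimal sketch of such a caller; my_proto_destroy() is hypothetical, and picking the existing SKB_DROP_REASON_TCP_CLOSE reason is an illustrative assumption, not something this patch does:

/* Hypothetical caller, illustration only: attribute the purged
 * receive queue to the close path rather than the generic
 * SKB_DROP_REASON_NOT_SPECIFIED supplied by the wrapper.
 */
static void my_proto_destroy(struct sock *sk)
{
	sk_stream_kill_queues_reason(sk, SKB_DROP_REASON_TCP_CLOSE);
}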
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -190,10 +190,11 @@ int sk_stream_error(struct sock *sk, int flags, int err)
}
EXPORT_SYMBOL(sk_stream_error);

-void sk_stream_kill_queues(struct sock *sk)
+void sk_stream_kill_queues_reason(struct sock *sk,
+ enum skb_drop_reason reason)
{
/* First the read buffer. */
- __skb_queue_purge(&sk->sk_receive_queue);
+ __skb_queue_purge_reason(&sk->sk_receive_queue, reason);

/* Next, the write queue. */
WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
@@ -209,4 +210,4 @@ void sk_stream_kill_queues(struct sock *sk)
* have gone away, only the net layer knows can touch it.
*/
}
-EXPORT_SYMBOL(sk_stream_kill_queues);
+EXPORT_SYMBOL(sk_stream_kill_queues_reason);
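The point of threading a reason through the purge is drop accounting: __skb_queue_purge_reason() is expected to free each queued skb via kfree_skb_reason(), so every packet discarded during teardown reaches the kfree_skb tracepoint tagged with the caller's reason instead of an unspecified drop. A sketch of that helper under this assumption, following the upstream __skb_queue_purge()/kfree_skb_reason() pattern rather than quoting this series:

static inline void __skb_queue_purge_reason(struct sk_buff_head *list,
					    enum skb_drop_reason reason)
{
	struct sk_buff *skb;

	/* Dequeue every skb and free it with the supplied drop
	 * reason, making each free visible to the kfree_skb
	 * tracepoint and to drop-monitoring tools built on it.
	 */
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb_reason(skb, reason);
}

Since the inline wrapper pins the legacy default to SKB_DROP_REASON_NOT_SPECIFIED, call sites can be converted to the _reason variant incrementally, one teardown path at a time.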