
[bpf,v2,2/2] xsk: change the tx writeable condition

Message ID: 4fd58d473f4548dc6e9e24ea9876c802d5d584b4.1606285978.git.xuanzhuo@linux.alibaba.com (mailing list archive)
State: Changes Requested
Delegated to: BPF
Series: xsk: fix for xsk_poll writeable

Checks

Context Check Description
netdev/apply fail Patch does not apply to bpf
netdev/tree_selection success Clearly marked for bpf

Commit Message

Xuan Zhuo Nov. 25, 2020, 6:48 a.m. UTC
Change the tx writeable condition from "the tx queue is not full" to
"the number of entries present in the tx queue is no more than half
of the queue size". The tx queue stays non-full for only a very short
time, so the old condition generated a large number of EPOLLOUT
events and, in turn, a large number of process wakeups.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 net/xdp/xsk.c       | 16 +++++++++++++---
 net/xdp/xsk_queue.h |  6 ++++++
 2 files changed, 19 insertions(+), 3 deletions(-)
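
For context, here is a minimal user-space sketch of the behavior this
patch changes (an illustration only, not part of the patch; xsk_fd is
assumed to be an already-bound AF_XDP socket). A process waiting for
POLLOUT is woken whenever the tx ring becomes writeable; with this
change, "writeable" means the ring is at most half full rather than
merely not full, so each wakeup leaves room to enqueue a meaningful
batch of descriptors instead of firing on every brief not-full window.

#include <poll.h>

/* Illustrative sketch: block until the AF_XDP tx ring is writeable.
 * With this patch, POLLOUT is reported only while the tx ring is at
 * most half full, so under load the process is woken far less often.
 */
static void wait_tx_writeable(int xsk_fd)
{
	struct pollfd pfd = {
		.fd = xsk_fd,
		.events = POLLOUT,
	};

	/* Blocks until at least half of the tx ring is free. */
	poll(&pfd, 1, -1);
}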

Comments

Magnus Karlsson Nov. 27, 2020, 8:22 a.m. UTC | #1
On Wed, Nov 25, 2020 at 7:49 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> Change the tx writeable condition from "the tx queue is not full" to
> "the number of entries present in the tx queue is no more than half
> of the queue size". The tx queue stays non-full for only a very short
> time, so the old condition generated a large number of EPOLLOUT
> events and, in turn, a large number of process wakeups.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

Thank you Xuan!

Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>

> ---
>  net/xdp/xsk.c       | 16 +++++++++++++---
>  net/xdp/xsk_queue.h |  6 ++++++
>  2 files changed, 19 insertions(+), 3 deletions(-)
>
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index 0df8651..22e35e9 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -211,6 +211,14 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
>         return 0;
>  }
>
> +static bool xsk_tx_writeable(struct xdp_sock *xs)
> +{
> +       if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
> +               return false;
> +
> +       return true;
> +}
> +
>  static bool xsk_is_bound(struct xdp_sock *xs)
>  {
>         if (READ_ONCE(xs->state) == XSK_BOUND) {
> @@ -296,7 +304,8 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
>         rcu_read_lock();
>         list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
>                 __xskq_cons_release(xs->tx);
> -               xs->sk.sk_write_space(&xs->sk);
> +               if (xsk_tx_writeable(xs))
> +                       xs->sk.sk_write_space(&xs->sk);
>         }
>         rcu_read_unlock();
>  }
> @@ -499,7 +508,8 @@ static int xsk_generic_xmit(struct sock *sk)
>
>  out:
>         if (sent_frame)
> -               sk->sk_write_space(sk);
> +               if (xsk_tx_writeable(xs))
> +                       sk->sk_write_space(sk);
>
>         mutex_unlock(&xs->mutex);
>         return err;
> @@ -556,7 +566,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
>
>         if (xs->rx && !xskq_prod_is_empty(xs->rx))
>                 mask |= EPOLLIN | EPOLLRDNORM;
> -       if (xs->tx && !xskq_cons_is_full(xs->tx))
> +       if (xs->tx && xsk_tx_writeable(xs))
>                 mask |= EPOLLOUT | EPOLLWRNORM;
>
>         return mask;
> diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
> index b936c46..b655004 100644
> --- a/net/xdp/xsk_queue.h
> +++ b/net/xdp/xsk_queue.h
> @@ -307,6 +307,12 @@ static inline bool xskq_cons_is_full(struct xsk_queue *q)
>                 q->nentries;
>  }
>
> +static inline __u64 xskq_cons_present_entries(struct xsk_queue *q)
> +{
> +       /* No barriers needed since data is not accessed */
> +       return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
> +}
> +
>  /* Functions for producers */
>
>  static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
> --
> 1.8.3.1
>
Daniel Borkmann Nov. 27, 2020, 9:29 p.m. UTC | #2
On 11/25/20 7:48 AM, Xuan Zhuo wrote:
> Change the tx writeable condition from "the tx queue is not full" to
> "the number of entries present in the tx queue is no more than half
> of the queue size". The tx queue stays non-full for only a very short
> time, so the old condition generated a large number of EPOLLOUT
> events and, in turn, a large number of process wakeups.
> 
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

This one doesn't apply cleanly against the bpf tree, please rebase. A small
comment inline while looking over the patch:

> ---
>   net/xdp/xsk.c       | 16 +++++++++++++---
>   net/xdp/xsk_queue.h |  6 ++++++
>   2 files changed, 19 insertions(+), 3 deletions(-)
> 
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index 0df8651..22e35e9 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -211,6 +211,14 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
>   	return 0;
>   }
>   
> +static bool xsk_tx_writeable(struct xdp_sock *xs)
> +{
> +	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
> +		return false;
> +
> +	return true;
> +}
> +
>   static bool xsk_is_bound(struct xdp_sock *xs)
>   {
>   	if (READ_ONCE(xs->state) == XSK_BOUND) {
> @@ -296,7 +304,8 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
>   	rcu_read_lock();
>   	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
>   		__xskq_cons_release(xs->tx);
> -		xs->sk.sk_write_space(&xs->sk);
> +		if (xsk_tx_writeable(xs))
> +			xs->sk.sk_write_space(&xs->sk);
>   	}
>   	rcu_read_unlock();
>   }
> @@ -499,7 +508,8 @@ static int xsk_generic_xmit(struct sock *sk)
>   
>   out:
>   	if (sent_frame)
> -		sk->sk_write_space(sk);
> +		if (xsk_tx_writeable(xs))
> +			sk->sk_write_space(sk);
>   
>   	mutex_unlock(&xs->mutex);
>   	return err;
> @@ -556,7 +566,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
>   
>   	if (xs->rx && !xskq_prod_is_empty(xs->rx))
>   		mask |= EPOLLIN | EPOLLRDNORM;
> -	if (xs->tx && !xskq_cons_is_full(xs->tx))
> +	if (xs->tx && xsk_tx_writeable(xs))
>   		mask |= EPOLLOUT | EPOLLWRNORM;
>   
>   	return mask;
> diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
> index b936c46..b655004 100644
> --- a/net/xdp/xsk_queue.h
> +++ b/net/xdp/xsk_queue.h
> @@ -307,6 +307,12 @@ static inline bool xskq_cons_is_full(struct xsk_queue *q)
>   		q->nentries;
>   }
>   
> +static inline __u64 xskq_cons_present_entries(struct xsk_queue *q)

Types prefixed with __ are mainly for user-space facing things like uapi headers,
so in-kernel code should use u64. Is there a reason this is not done as u32 (and
thus the same type as producer and consumer)?

> +{
> +	/* No barriers needed since data is not accessed */
> +	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
> +}
> +
>   /* Functions for producers */
>   
>   static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
>
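
For reference, a sketch of the helper with the in-kernel type Daniel
suggests (an assumption about what a respin could look like, not the
patch as posted): producer and consumer are both u32, so the unsigned
subtraction wraps correctly and the result always fits in u32.

/* Sketch only: the same helper with a u32 return type, per the
 * review comment above. The 32-bit unsigned subtraction handles
 * index wraparound and can never exceed q->nentries.
 */
static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}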

Patch

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 0df8651..22e35e9 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -211,6 +211,14 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
 	return 0;
 }
 
+static bool xsk_tx_writeable(struct xdp_sock *xs)
+{
+	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
+		return false;
+
+	return true;
+}
+
 static bool xsk_is_bound(struct xdp_sock *xs)
 {
 	if (READ_ONCE(xs->state) == XSK_BOUND) {
@@ -296,7 +304,8 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
 	rcu_read_lock();
 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
 		__xskq_cons_release(xs->tx);
-		xs->sk.sk_write_space(&xs->sk);
+		if (xsk_tx_writeable(xs))
+			xs->sk.sk_write_space(&xs->sk);
 	}
 	rcu_read_unlock();
 }
@@ -499,7 +508,8 @@ static int xsk_generic_xmit(struct sock *sk)
 
 out:
 	if (sent_frame)
-		sk->sk_write_space(sk);
+		if (xsk_tx_writeable(xs))
+			sk->sk_write_space(sk);
 
 	mutex_unlock(&xs->mutex);
 	return err;
@@ -556,7 +566,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 
 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
 		mask |= EPOLLIN | EPOLLRDNORM;
-	if (xs->tx && !xskq_cons_is_full(xs->tx))
+	if (xs->tx && xsk_tx_writeable(xs))
 		mask |= EPOLLOUT | EPOLLWRNORM;
 
 	return mask;
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index b936c46..b655004 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -307,6 +307,12 @@ static inline bool xskq_cons_is_full(struct xsk_queue *q)
 		q->nentries;
 }
 
+static inline __u64 xskq_cons_present_entries(struct xsk_queue *q)
+{
+	/* No barriers needed since data is not accessed */
+	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
+}
+
 /* Functions for producers */
 
 static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
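
As an editorial aside on the "no barriers needed" helper above, the
producer/consumer subtraction is wraparound-safe because both indices
are 32-bit and monotonically increasing. A small stand-alone check
(illustration only, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Consumer is near the top of the 32-bit range; producer has
	 * advanced five entries past it and wrapped around to 3.
	 */
	uint32_t consumer = UINT32_MAX - 1;	/* 0xfffffffe */
	uint32_t producer = consumer + 5;	/* wraps to 3 */

	/* Unsigned subtraction still yields the correct count of
	 * entries produced but not yet consumed.
	 */
	assert(producer - consumer == 5);
	return 0;
}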