Message ID:    20230918093304.367826-7-tushar.vyavahare@intel.com
State:         Superseded
Delegated to:  BPF
Series:        Add a test for SHARED_UMEM feature
On Mon, 18 Sept 2023 at 11:15, Tushar Vyavahare <tushar.vyavahare@intel.com> wrote:
>
> Update send_pkts() to handle multiple sockets for sending packets.
> Multiple TX sockets are utilized alternately based on the batch size for
> improve packet transmission.

I do not know if it is "improved" ;-), but it is good to test sending
from multiple sockets. Please make that clearer.

> Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
> ---
>  tools/testing/selftests/bpf/xskxceiver.c | 64 +++++++++++++++++-------
>  1 file changed, 45 insertions(+), 19 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
> index e67032f04a74..0ef0575c095c 100644
> --- a/tools/testing/selftests/bpf/xskxceiver.c
> +++ b/tools/testing/selftests/bpf/xskxceiver.c
> @@ -1204,13 +1204,13 @@ static int receive_pkts(struct test_spec *test)
>         return TEST_PASS;
>  }
>
> -static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout)
> +static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
>  {
>         u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
> -       struct pkt_stream *pkt_stream = ifobject->xsk->pkt_stream;
> -       struct xsk_socket_info *xsk = ifobject->xsk;
> +       struct pkt_stream *pkt_stream = xsk->pkt_stream;
>         struct xsk_umem_info *umem = ifobject->umem;
>         bool use_poll = ifobject->use_poll;
> +       struct pollfd fds = { };
>         int ret;
>
>         buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
> @@ -1222,9 +1222,12 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
>                 return TEST_CONTINUE;
>         }
>
> +       fds.fd = xsk_socket__fd(xsk->xsk);
> +       fds.events = POLLOUT;
> +
>         while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
>                 if (use_poll) {
> -                       ret = poll(fds, 1, POLL_TMOUT);
> +                       ret = poll(&fds, 1, POLL_TMOUT);
>                         if (timeout) {
>                                 if (ret < 0) {
>                                         ksft_print_msg("ERROR: [%s] Poll error %d\n",
> @@ -1303,7 +1306,7 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
>         xsk->outstanding_tx += valid_frags;
>
>         if (use_poll) {
> -               ret = poll(fds, 1, POLL_TMOUT);
> +               ret = poll(&fds, 1, POLL_TMOUT);
>                 if (ret <= 0) {
>                         if (ret == 0 && timeout)
>                                 return TEST_PASS;
> @@ -1349,27 +1352,50 @@ static int wait_for_tx_completion(struct xsk_socket_info *xsk)
>         return TEST_PASS;
>  }
>
> +bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
> +{
> +       if (test_bit(test->nb_sockets, bitmap))
> +               return true;

This does not seem to be correct. You are testing one bit here, but are
you not supposed to test that all bits have been set?

> +
> +       return false;
> +}
> +
>  static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
>  {
> -       struct pkt_stream *pkt_stream = ifobject->xsk->pkt_stream;
>         bool timeout = !is_umem_valid(test->ifobj_rx);
> -       struct pollfd fds = { };
> -       u32 ret;
> +       u32 i, ret;
>
> -       fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
> -       fds.events = POLLOUT;
> +       DECLARE_BITMAP(bitmap, MAX_SOCKETS);

Should be with the declarations in RCT order.

>
> -       while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
> -               ret = __send_pkts(ifobject, &fds, timeout);
> -               if (ret == TEST_CONTINUE && !test->fail)
> -                       continue;
> -               if ((ret || test->fail) && !timeout)
> -                       return TEST_FAILURE;
> -               if (ret == TEST_PASS && timeout)
> -                       return ret;
> +       while (!(all_packets_sent(test, bitmap))) {
> +               for (i = 0; i < test->nb_sockets; i++) {
> +                       struct pkt_stream *pkt_stream;
> +
> +                       pkt_stream = ifobject->xsk_arr[i].pkt_stream;
> +                       if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {

Can pkt_stream be NULL?

> +                               __test_and_set_bit((1 << i), bitmap);

test_and_set? You are not testing anything here so it is enough to just
set it.

> +                               continue;
> +                       }
> +                       ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
> +                       if (ret == TEST_CONTINUE && !test->fail)
> +                               continue;
> +
> +                       if ((ret || test->fail) && !timeout)
> +                               return TEST_FAILURE;
> +
> +                       if (ret == TEST_PASS && timeout)
> +                               return ret;
> +
> +                       ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
> +                       if ((ret || test->fail) && !timeout)
> +                               return TEST_FAILURE;
> +
> +                       if (ret == TEST_PASS && timeout)
> +                               return ret;

Why testing the same things before and after wait_for_tx_completion?
Should it not be fine to just do it in one place?

> +               }
>         }
>
> -       return wait_for_tx_completion(ifobject->xsk);
> +       return TEST_PASS;
>  }
>
>  static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
> --
> 2.34.1
>
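For reference, the check the reviewer appears to be asking for could look roughly like the sketch below: walk every socket index and require that each per-socket bit has been set. This is only an illustration against the names already used in the patch (struct test_spec, nb_sockets, test_bit()), not the submitted code; a helper such as bitmap_full() would serve the same purpose if it is available in the selftests' bitmap header.

static bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
{
        u32 i;

        /* Done only when every socket has marked itself as finished. */
        for (i = 0; i < test->nb_sockets; i++)
                if (!test_bit(i, bitmap))
                        return false;

        return true;
}
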
> -----Original Message-----
> From: Magnus Karlsson <magnus.karlsson@gmail.com>
> Sent: Thursday, September 21, 2023 12:31 PM
> To: Vyavahare, Tushar <tushar.vyavahare@intel.com>
> Cc: bpf@vger.kernel.org; netdev@vger.kernel.org; bjorn@kernel.org;
> Karlsson, Magnus <magnus.karlsson@intel.com>; Fijalkowski, Maciej
> <maciej.fijalkowski@intel.com>; jonathan.lemon@gmail.com;
> davem@davemloft.net; kuba@kernel.org; pabeni@redhat.com;
> ast@kernel.org; daniel@iogearbox.net; Sarkar, Tirthendu
> <tirthendu.sarkar@intel.com>
> Subject: Re: [PATCH bpf-next 6/8] selftests/xsk: iterate over all the sockets in the send pkts function
>
> On Mon, 18 Sept 2023 at 11:15, Tushar Vyavahare
> <tushar.vyavahare@intel.com> wrote:
> >
> > Update send_pkts() to handle multiple sockets for sending packets.
> > Multiple TX sockets are utilized alternately based on the batch size
> > for improve packet transmission.
>
> I do not know if it is "improved" ;-), but it is good to test sending from
> multiple sockets. Please make that clearer.
>
> > Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
> > ---
> >  tools/testing/selftests/bpf/xskxceiver.c | 64 +++++++++++++++++-------
> >  1 file changed, 45 insertions(+), 19 deletions(-)
> >
> > diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
> > index e67032f04a74..0ef0575c095c 100644
> > --- a/tools/testing/selftests/bpf/xskxceiver.c
> > +++ b/tools/testing/selftests/bpf/xskxceiver.c
> > @@ -1204,13 +1204,13 @@ static int receive_pkts(struct test_spec *test)
> >         return TEST_PASS;
> >  }
> >
> > -static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout)
> > +static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
> >  {
> >         u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
> > -       struct pkt_stream *pkt_stream = ifobject->xsk->pkt_stream;
> > -       struct xsk_socket_info *xsk = ifobject->xsk;
> > +       struct pkt_stream *pkt_stream = xsk->pkt_stream;
> >         struct xsk_umem_info *umem = ifobject->umem;
> >         bool use_poll = ifobject->use_poll;
> > +       struct pollfd fds = { };
> >         int ret;
> >
> >         buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
> > @@ -1222,9 +1222,12 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
> >                 return TEST_CONTINUE;
> >         }
> >
> > +       fds.fd = xsk_socket__fd(xsk->xsk);
> > +       fds.events = POLLOUT;
> > +
> >         while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
> >                 if (use_poll) {
> > -                       ret = poll(fds, 1, POLL_TMOUT);
> > +                       ret = poll(&fds, 1, POLL_TMOUT);
> >                         if (timeout) {
> >                                 if (ret < 0) {
> >                                         ksft_print_msg("ERROR: [%s] Poll error %d\n",
> > @@ -1303,7 +1306,7 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
> >         xsk->outstanding_tx += valid_frags;
> >
> >         if (use_poll) {
> > -               ret = poll(fds, 1, POLL_TMOUT);
> > +               ret = poll(&fds, 1, POLL_TMOUT);
> >                 if (ret <= 0) {
> >                         if (ret == 0 && timeout)
> >                                 return TEST_PASS;
> > @@ -1349,27 +1352,50 @@ static int wait_for_tx_completion(struct xsk_socket_info *xsk)
> >         return TEST_PASS;
> >  }
> >
> > +bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
> > +{
> > +       if (test_bit(test->nb_sockets, bitmap))
> > +               return true;
>
> This does not seem to be correct. You are testing one bit here, but are you
> not supposed to test that all bits have been set?
>

Yes, I will fix that.

> > +
> > +       return false;
> > +}
> > +
> >  static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
> >  {
> > -       struct pkt_stream *pkt_stream = ifobject->xsk->pkt_stream;
> >         bool timeout = !is_umem_valid(test->ifobj_rx);
> > -       struct pollfd fds = { };
> > -       u32 ret;
> > +       u32 i, ret;
> >
> > -       fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
> > -       fds.events = POLLOUT;
> > +       DECLARE_BITMAP(bitmap, MAX_SOCKETS);
>
> Should be with the declarations in RCT order.
>

Yes, I will do.

> >
> > -       while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
> > -               ret = __send_pkts(ifobject, &fds, timeout);
> > -               if (ret == TEST_CONTINUE && !test->fail)
> > -                       continue;
> > -               if ((ret || test->fail) && !timeout)
> > -                       return TEST_FAILURE;
> > -               if (ret == TEST_PASS && timeout)
> > -                       return ret;
> > +       while (!(all_packets_sent(test, bitmap))) {
> > +               for (i = 0; i < test->nb_sockets; i++) {
> > +                       struct pkt_stream *pkt_stream;
> > +
> > +                       pkt_stream = ifobject->xsk_arr[i].pkt_stream;
> > +                       if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
>
> Can pkt_stream be NULL?
>

Yes, in the swap_xsk_resources() function, we are setting 'pkt_stream' to
NULL. [patch 4 change]

> > +                               __test_and_set_bit((1 << i), bitmap);
>
> test_and_set? You are not testing anything here so it is enough to just set it.
>
> > +                               continue;
> > +                       }
> > +                       ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
> > +                       if (ret == TEST_CONTINUE && !test->fail)
> > +                               continue;
> > +
> > +                       if ((ret || test->fail) && !timeout)
> > +                               return TEST_FAILURE;
> > +
> > +                       if (ret == TEST_PASS && timeout)
> > +                               return ret;
> > +
> > +                       ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
> > +                       if ((ret || test->fail) && !timeout)
> > +                               return TEST_FAILURE;
> > +
> > +                       if (ret == TEST_PASS && timeout)
> > +                               return ret;
>
> Why testing the same things before and after wait_for_tx_completion?
> Should it not be fine to just do it in one place?
>

I will change it.

> > +               }
> >         }
> >
> > -       return wait_for_tx_completion(ifobject->xsk);
> > +       return TEST_PASS;
> >  }
> >
> >  static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
> > --
> > 2.34.1
> >
> >
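Taken together, the changes agreed to above could land in roughly the following shape of send_pkts(). This is an illustrative sketch only, not the next revision of the patch: the bitmap is cleared before use, each finished socket sets its own bit with a plain __set_bit() using the socket index as the bit number, and the failure/timeout handling after wait_for_tx_completion() is reduced to a single check instead of repeating the three tests that already follow __send_pkts(). It assumes the same bitmap helpers the posted patch already relies on. The patch as originally posted follows below, unchanged.

static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
        bool timeout = !is_umem_valid(test->ifobj_rx);
        DECLARE_BITMAP(bitmap, MAX_SOCKETS);
        u32 i, ret;

        /* Local bitmaps are not zero-initialised; clear it before the loop. */
        bitmap_zero(bitmap, MAX_SOCKETS);

        while (!all_packets_sent(test, bitmap)) {
                for (i = 0; i < test->nb_sockets; i++) {
                        struct pkt_stream *pkt_stream = ifobject->xsk_arr[i].pkt_stream;

                        /* pkt_stream may be NULL after swap_xsk_resources(). */
                        if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
                                __set_bit(i, bitmap);
                                continue;
                        }

                        ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
                        if (ret == TEST_CONTINUE && !test->fail)
                                continue;
                        if ((ret || test->fail) && !timeout)
                                return TEST_FAILURE;
                        if (ret == TEST_PASS && timeout)
                                return ret;

                        /* Single completion check instead of repeating the tests above. */
                        if (wait_for_tx_completion(&ifobject->xsk_arr[i]))
                                return TEST_FAILURE;
                }
        }

        return TEST_PASS;
}
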
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index e67032f04a74..0ef0575c095c 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -1204,13 +1204,13 @@ static int receive_pkts(struct test_spec *test)
 	return TEST_PASS;
 }
 
-static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout)
+static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
 {
 	u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
-	struct pkt_stream *pkt_stream = ifobject->xsk->pkt_stream;
-	struct xsk_socket_info *xsk = ifobject->xsk;
+	struct pkt_stream *pkt_stream = xsk->pkt_stream;
 	struct xsk_umem_info *umem = ifobject->umem;
 	bool use_poll = ifobject->use_poll;
+	struct pollfd fds = { };
 	int ret;
 
 	buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
@@ -1222,9 +1222,12 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
 		return TEST_CONTINUE;
 	}
 
+	fds.fd = xsk_socket__fd(xsk->xsk);
+	fds.events = POLLOUT;
+
 	while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
 		if (use_poll) {
-			ret = poll(fds, 1, POLL_TMOUT);
+			ret = poll(&fds, 1, POLL_TMOUT);
 			if (timeout) {
 				if (ret < 0) {
 					ksft_print_msg("ERROR: [%s] Poll error %d\n",
@@ -1303,7 +1306,7 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
 	xsk->outstanding_tx += valid_frags;
 
 	if (use_poll) {
-		ret = poll(fds, 1, POLL_TMOUT);
+		ret = poll(&fds, 1, POLL_TMOUT);
 		if (ret <= 0) {
 			if (ret == 0 && timeout)
 				return TEST_PASS;
@@ -1349,27 +1352,50 @@ static int wait_for_tx_completion(struct xsk_socket_info *xsk)
 	return TEST_PASS;
 }
 
+bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
+{
+	if (test_bit(test->nb_sockets, bitmap))
+		return true;
+
+	return false;
+}
+
 static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
 {
-	struct pkt_stream *pkt_stream = ifobject->xsk->pkt_stream;
 	bool timeout = !is_umem_valid(test->ifobj_rx);
-	struct pollfd fds = { };
-	u32 ret;
+	u32 i, ret;
 
-	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
-	fds.events = POLLOUT;
+	DECLARE_BITMAP(bitmap, MAX_SOCKETS);
 
-	while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
-		ret = __send_pkts(ifobject, &fds, timeout);
-		if (ret == TEST_CONTINUE && !test->fail)
-			continue;
-		if ((ret || test->fail) && !timeout)
-			return TEST_FAILURE;
-		if (ret == TEST_PASS && timeout)
-			return ret;
+	while (!(all_packets_sent(test, bitmap))) {
+		for (i = 0; i < test->nb_sockets; i++) {
+			struct pkt_stream *pkt_stream;
+
+			pkt_stream = ifobject->xsk_arr[i].pkt_stream;
+			if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
+				__test_and_set_bit((1 << i), bitmap);
+				continue;
+			}
+			ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
+			if (ret == TEST_CONTINUE && !test->fail)
+				continue;
+
+			if ((ret || test->fail) && !timeout)
+				return TEST_FAILURE;
+
+			if (ret == TEST_PASS && timeout)
+				return ret;
+
+			ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
+			if ((ret || test->fail) && !timeout)
+				return TEST_FAILURE;
+
+			if (ret == TEST_PASS && timeout)
+				return ret;
+		}
 	}
 
-	return wait_for_tx_completion(ifobject->xsk);
+	return TEST_PASS;
 }
 
 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
Update send_pkts() to handle multiple sockets for sending packets.
Multiple TX sockets are utilized alternately based on the batch size for
improve packet transmission.

Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
 tools/testing/selftests/bpf/xskxceiver.c | 64 +++++++++++++++++-------
 1 file changed, 45 insertions(+), 19 deletions(-)