@@ -126,7 +126,6 @@ static void pthread_init_mutex(void)
pthread_mutex_init(&sync_mutex, NULL);
pthread_mutex_init(&sync_mutex_tx, NULL);
pthread_cond_init(&signal_rx_condition, NULL);
- pthread_cond_init(&signal_tx_condition, NULL);
}
static void pthread_destroy_mutex(void)
@@ -134,7 +133,6 @@ static void pthread_destroy_mutex(void)
pthread_mutex_destroy(&sync_mutex);
pthread_mutex_destroy(&sync_mutex_tx);
pthread_cond_destroy(&signal_rx_condition);
- pthread_cond_destroy(&signal_tx_condition);
}
static void *memset32_htonl(void *dest, u32 val, u32 size)
@@ -755,8 +753,7 @@ static void worker_pkt_validate(void)
}
}
-static void thread_common_ops(struct ifobject *ifobject, void *bufs, pthread_mutex_t *mutexptr,
- atomic_int *spinningptr)
+static void thread_common_ops(struct ifobject *ifobject, void *bufs, pthread_mutex_t *mutexptr)
{
int ctr = 0;
int ret;
@@ -781,13 +778,15 @@ static void thread_common_ops(struct ifobject *ifobject, void *bufs, pthread_mut
*/
pthread_mutex_lock(mutexptr);
while (ret && ctr < SOCK_RECONF_CTR) {
- atomic_store(spinningptr, 1);
+ if (ifobject->fv.vector == rx)
+ atomic_store(&spinning_rx, 1);
xsk_configure_umem(ifobject, bufs, num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE);
ret = xsk_configure_socket(ifobject);
usleep(USLEEP_MAX);
ctr++;
}
- atomic_store(spinningptr, 0);
+ if (ifobject->fv.vector == rx)
+ atomic_store(&spinning_rx, 0);
pthread_mutex_unlock(mutexptr);
if (ctr >= SOCK_RECONF_CTR)
@@ -809,7 +808,7 @@ static void *worker_testapp_validate_tx(void *arg)
void *bufs = NULL;
if (!bidi_pass)
- thread_common_ops(ifobject, bufs, &sync_mutex_tx, &spinning_tx);
+ thread_common_ops(ifobject, bufs, &sync_mutex_tx);
while (atomic_load(&spinning_rx) && spinningrxctr < SOCK_RECONF_CTR) {
spinningrxctr++;
@@ -847,7 +846,7 @@ static void *worker_testapp_validate_rx(void *arg)
void *bufs = NULL;
if (!bidi_pass)
- thread_common_ops(ifobject, bufs, &sync_mutex_tx, &spinning_rx);
+ thread_common_ops(ifobject, bufs, &sync_mutex_tx);
if (stat_test_type != STAT_TEST_RX_FILL_EMPTY)
xsk_populate_fill_ring(ifobject->umem);
diff --git a/tools/testing/selftests/bpf/xdpxceiver.h b/tools/testing/selftests/bpf/xdpxceiver.h
--- a/tools/testing/selftests/bpf/xdpxceiver.h
+++ b/tools/testing/selftests/bpf/xdpxceiver.h
@@ -144,12 +144,10 @@ struct ifobject {
static struct ifobject *ifdict[MAX_INTERFACES];
/*threads*/
-atomic_int spinning_tx;
atomic_int spinning_rx;
pthread_mutex_t sync_mutex;
pthread_mutex_t sync_mutex_tx;
pthread_cond_t signal_rx_condition;
-pthread_cond_t signal_tx_condition;
pthread_t t0, t1;
pthread_attr_t attr;
The Tx thread needs to be started after the Rx side is fully initialized so
that packets are not xmitted until the xsk Rx socket is ready to be used.

It can be observed that the atomic variable spinning_tx is not checked from
the Rx side in any way, so thread_common_ops can be modified to only handle
spinning_rx. This means that spinning_tx can be removed altogether.

signal_tx_condition is never utilized, so simply remove it.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
---
 tools/testing/selftests/bpf/xdpxceiver.c | 15 +++++++--------
 tools/testing/selftests/bpf/xdpxceiver.h |  2 --
 2 files changed, 7 insertions(+), 10 deletions(-)
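
Note (not part of the patch): below is a minimal, standalone sketch of the
handshake that remains once spinning_tx is gone, in case the synchronization
scheme is not obvious from the hunks above. The Rx worker keeps spinning_rx
raised while it performs its setup, and the Tx worker polls the flag (with a
bounded number of retries) before it starts sending. RECONF_CTR, USLEEP_STEP
and the worker functions are illustrative stand-ins rather than the
selftest's actual symbols, and the sync_mutex_tx serialization done by
thread_common_ops is omitted for brevity.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define RECONF_CTR 10
#define USLEEP_STEP 10000

static atomic_int spinning_rx;

/* Rx worker: the flag is raised by main() before the threads start, so Tx
 * cannot observe a "ready" state before Rx setup has even begun. */
static void *rx_worker(void *arg)
{
	(void)arg;
	usleep(USLEEP_STEP);		/* stand-in for umem/socket setup */
	atomic_store(&spinning_rx, 0);	/* Rx side is now ready */
	return NULL;
}

/* Tx worker: wait (bounded) until the Rx side has finished its setup. */
static void *tx_worker(void *arg)
{
	int ctr = 0;

	(void)arg;
	while (atomic_load(&spinning_rx) && ctr < RECONF_CTR) {
		ctr++;
		usleep(USLEEP_STEP);
	}
	printf("tx: starting after %d wait iterations\n", ctr);
	return NULL;
}

int main(void)
{
	pthread_t rx, tx;

	atomic_store(&spinning_rx, 1);	/* Rx setup pending */
	pthread_create(&rx, NULL, rx_worker, NULL);
	pthread_create(&tx, NULL, tx_worker, NULL);
	pthread_join(rx, NULL);
	pthread_join(tx, NULL);
	return 0;
}

Compile with -pthread. The point of the sketch is simply that a single
Rx-owned flag is enough for this ordering, which is why the Tx-side flag and
condition variable can be dropped.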