@@ -708,7 +708,7 @@ static void test_rr(void)
if (!ASSERT_OK_PTR(skel, "open_and_load: rr"))
return;
- test_bpf_sched(skel->obj, "rr", WITH_DATA, WITH_DATA);
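+ /* Pass the scheduler's struct_ops map to the test helper. */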
+ test_bpf_sched(skel->maps.rr, "rr", WITH_DATA, WITH_DATA);
mptcp_bpf_rr__destroy(skel);
}
@@ -34,42 +34,37 @@ SEC("struct_ops")
int BPF_PROG(bpf_rr_get_subflow, struct mptcp_sock *msk,
struct mptcp_sched_data *data)
{
- struct mptcp_subflow_context *subflow;
+ struct mptcp_subflow_context *subflow, *next;
struct mptcp_rr_storage *ptr;
- struct sock *last_snd = NULL;
- int nr = 0;
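+ /* Per-connection storage remembering the last subflow used. */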
ptr = bpf_sk_storage_get(&mptcp_rr_map, msk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!ptr)
return -1;
- last_snd = ptr->last_snd;
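+ /* Default to the first subflow; use it directly if nothing has been scheduled yet. */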
+ next = bpf_mptcp_subflow_ctx(msk->first);
+ if (!ptr->last_snd)
+ goto out;
- for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
- subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
- if (!last_snd || !subflow)
- break;
-
- if (mptcp_subflow_tcp_sock(subflow) == last_snd) {
- if (i + 1 == MPTCP_SUBFLOWS_MAX ||
- !bpf_mptcp_subflow_ctx_by_pos(data, i + 1))
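+ /* Pick the subflow right after the last one used, falling back
+  * to the first subflow when no successor is found.
+  */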
+ bpf_for_each(mptcp_subflow, subflow, msk) {
+ if (mptcp_subflow_tcp_sock(subflow) == ptr->last_snd) {
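+ /* ___it is the iterator declared by bpf_for_each(); advance it
+  * once to reach the subflow following last_snd.
+  */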
+ subflow = bpf_iter_mptcp_subflow_next(&___it);
+ if (!subflow)
break;
- nr = i + 1;
+ next = subflow;
break;
}
}
- subflow = bpf_mptcp_subflow_ctx_by_pos(data, nr);
- if (!subflow)
- return -1;
- mptcp_subflow_set_scheduled(subflow, true);
- ptr->last_snd = mptcp_subflow_tcp_sock(subflow);
+out:
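+ /* bpf_core_cast() keeps 'next' typed for the verifier before the
+  * kfunc calls below; schedule it and remember it for the next pick.
+  */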
+ next = bpf_core_cast(next, struct mptcp_subflow_context);
+ mptcp_subflow_set_scheduled(next, true);
+ ptr->last_snd = mptcp_subflow_tcp_sock(next);
return 0;
}
-SEC(".struct_ops")
+SEC(".struct_ops.link")
struct mptcp_sched_ops rr = {
.init = (void *)mptcp_sched_rr_init,
.release = (void *)mptcp_sched_rr_release,