@@ -750,7 +750,7 @@ static void test_burst(void)
if (!ASSERT_OK_PTR(skel, "open_and_load: burst"))
return;
- test_bpf_sched(skel->obj, "burst", WITH_DATA, WITH_DATA);
+ test_bpf_sched(skel->maps.burst, "burst", WITH_DATA, WITH_DATA);
mptcp_bpf_burst__destroy(skel);
}
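
test_bpf_sched() is now handed the struct_ops map (skel->maps.burst) rather than the whole bpf_object. That matches the SEC(".struct_ops.link") change at the bottom of this patch: a link-based struct_ops is registered by attaching its map through libbpf. A minimal userspace sketch, assuming a hypothetical attach_sched_map() helper (the real test_bpf_sched() body is not shown in this patch):

	#include <bpf/libbpf.h>

	/* Hypothetical helper, not part of the patch: attach a link-based
	 * struct_ops map such as skel->maps.burst and hand the link back so
	 * the caller can bpf_link__destroy() it to unregister the scheduler. */
	static struct bpf_link *attach_sched_map(const struct bpf_map *map)
	{
		struct bpf_link *link = bpf_map__attach_struct_ops(map);

		return link;	/* NULL on failure, errno set by libbpf */
	}

The remaining hunks below apply to the BPF scheduler program itself.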
@@ -9,10 +9,14 @@ char _license[] SEC("license") = "GPL";
#define MPTCP_SEND_BURST_SIZE 65428
+#define SSK_MODE_ACTIVE 0
+#define SSK_MODE_BACKUP 1
+#define SSK_MODE_MAX 2
+
#define min(a, b) ((a) < (b) ? (a) : (b))
struct bpf_subflow_send_info {
- __u8 subflow_id;
+ struct mptcp_subflow_context *subflow;
__u64 linger_time;
};
@@ -23,10 +27,6 @@ extern bool tcp_stream_memory_free(const struct sock *sk, int wake) __ksym;
extern bool bpf_mptcp_subflow_queues_empty(struct sock *sk) __ksym;
extern void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) __ksym;
-#define SSK_MODE_ACTIVE 0
-#define SSK_MODE_BACKUP 1
-#define SSK_MODE_MAX 2
-
static __always_inline __u64 div_u64(__u64 dividend, __u32 divisor)
{
return dividend / divisor;
@@ -67,8 +67,7 @@ void BPF_PROG(mptcp_sched_burst_release, struct mptcp_sock *msk)
{
}
-static int bpf_burst_get_send(struct mptcp_sock *msk,
- struct mptcp_sched_data *data)
+static int bpf_burst_get_send(struct mptcp_sock *msk)
{
struct bpf_subflow_send_info send_info[SSK_MODE_MAX];
struct mptcp_subflow_context *subflow;
@@ -80,21 +79,16 @@ static int bpf_burst_get_send(struct mptcp_sock *msk,
/* pick the subflow with the lower wmem/wspace ratio */
for (i = 0; i < SSK_MODE_MAX; ++i) {
- send_info[i].subflow_id = MPTCP_SUBFLOWS_MAX;
+ send_info[i].subflow = NULL;
send_info[i].linger_time = -1;
}
- for (i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
- bool backup;
-
- subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
- if (!subflow)
- break;
-
- backup = subflow->backup || subflow->request_bkup;
+ bpf_for_each(mptcp_subflow, subflow, msk) {
+ bool backup = subflow->backup || subflow->request_bkup;
ssk = mptcp_subflow_tcp_sock(subflow);
- if (!mptcp_subflow_active(subflow))
+ if (!mptcp_subflow_active(subflow) ||
+ !sk_stream_memory_free(ssk))
continue;
nr_active += !backup;
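
The per-index walk over mptcp_sched_data is replaced by bpf_for_each(mptcp_subflow, subflow, msk), so the loop no longer needs MPTCP_SUBFLOWS_MAX, bpf_mptcp_subflow_ctx_by_pos() or the manual NULL/bounds checks. bpf_for_each() is the open-coded iterator convenience macro (as defined in the selftests' bpf_experimental.h); roughly, and assuming the mptcp_subflow iterator follows the usual bpf_iter_<type>_{new,next,destroy} kfunc naming (the exact kfunc names are not shown in this patch), the loop expands to something like:

	struct bpf_iter_mptcp_subflow it;
	struct mptcp_subflow_context *subflow;

	bpf_iter_mptcp_subflow_new(&it, msk);
	while ((subflow = bpf_iter_mptcp_subflow_next(&it))) {
		/* loop body: subflow is only guaranteed valid inside the loop */
	}
	bpf_iter_mptcp_subflow_destroy(&it);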
@@ -109,7 +103,7 @@ static int bpf_burst_get_send(struct mptcp_sock *msk,
linger_time = div_u64((__u64)ssk->sk_wmem_queued << 32, pace);
if (linger_time < send_info[backup].linger_time) {
- send_info[backup].subflow_id = i;
+ send_info[backup].subflow = subflow;
send_info[backup].linger_time = linger_time;
}
}
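
linger_time is the estimated time needed to drain the subflow's send queue at its pacing rate, kept in 32.32 fixed point so the comparison stays in integer math: sk_wmem_queued is in bytes and the divisor pace is effectively bytes per second, so (wmem_queued << 32) / pace is seconds scaled by 2^32. An illustrative calculation with made-up numbers:

	/* Example values only: 64 KiB queued, ~1 MB/s pacing rate.
	 * (65536 << 32) / 1000000 ~= 0.0655 * 2^32, i.e. about 65 ms of
	 * backlog; the subflow with the smaller value wins for its mode
	 * (active or backup). */
	__u64 t = div_u64((__u64)65536 << 32, 1000000);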
@@ -117,13 +111,14 @@ static int bpf_burst_get_send(struct mptcp_sock *msk,
/* pick the best backup if no other subflow is active */
if (!nr_active)
- send_info[SSK_MODE_ACTIVE].subflow_id = send_info[SSK_MODE_BACKUP].subflow_id;
+ send_info[SSK_MODE_ACTIVE].subflow = send_info[SSK_MODE_BACKUP].subflow;
- subflow = bpf_mptcp_subflow_ctx_by_pos(data, send_info[SSK_MODE_ACTIVE].subflow_id);
+ subflow = bpf_core_cast(send_info[SSK_MODE_ACTIVE].subflow,
+ struct mptcp_subflow_context);
if (!subflow)
return -1;
ssk = mptcp_subflow_tcp_sock(subflow);
- if (!ssk || !sk_stream_memory_free(ssk))
+ if (!ssk)
return -1;
burst = min(MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
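
send_info[] now stores raw struct pointers collected inside the iterator loop; once the loop ends the verifier no longer treats them as trusted, so bpf_core_cast() (the bpf_rdonly_cast() wrapper from bpf_core_read.h) is used to turn the saved value back into a typed, read-only pointer before it is dereferenced. The cast itself does not prove the pointer is non-NULL, hence the explicit check kept above. A minimal standalone sketch of the same pattern, assuming vmlinux.h provides struct mptcp_subflow_context (the helper below is purely illustrative):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_core_read.h>

	/* Hypothetical helper: re-type a previously saved address and read
	 * fields that also appear in this patch (backup/request_bkup). */
	static __always_inline bool saved_subflow_is_backup(void *addr)
	{
		struct mptcp_subflow_context *sf;

		sf = bpf_core_cast(addr, struct mptcp_subflow_context);
		if (!sf)
			return false;
		return sf->backup || sf->request_bkup;
	}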
@@ -141,23 +136,18 @@ static int bpf_burst_get_send(struct mptcp_sock *msk,
return 0;
}
-static int bpf_burst_get_retrans(struct mptcp_sock *msk,
- struct mptcp_sched_data *data)
+static int bpf_burst_get_retrans(struct mptcp_sock *msk)
{
- int backup = MPTCP_SUBFLOWS_MAX, pick = MPTCP_SUBFLOWS_MAX, subflow_id;
+ struct sock *backup = NULL, *pick = NULL;
struct mptcp_subflow_context *subflow;
int min_stale_count = INT_MAX;
- struct sock *ssk;
- for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
- subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
- if (!subflow)
- break;
+ bpf_for_each(mptcp_subflow, subflow, msk) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
if (!mptcp_subflow_active(subflow))
continue;
- ssk = mptcp_subflow_tcp_sock(subflow);
/* still data outstanding at TCP level? skip this */
if (!tcp_rtx_and_write_queues_empty(ssk)) {
mptcp_pm_subflow_chk_stale(msk, ssk);
@@ -166,23 +156,23 @@ static int bpf_burst_get_retrans(struct mptcp_sock *msk,
}
if (subflow->backup || subflow->request_bkup) {
- if (backup == MPTCP_SUBFLOWS_MAX)
- backup = i;
+ if (!backup)
+ backup = ssk;
continue;
}
- if (pick == MPTCP_SUBFLOWS_MAX)
- pick = i;
+ if (!pick)
+ pick = ssk;
}
- if (pick < MPTCP_SUBFLOWS_MAX) {
- subflow_id = pick;
+ if (pick)
goto out;
- }
- subflow_id = min_stale_count > 1 ? backup : MPTCP_SUBFLOWS_MAX;
+ pick = min_stale_count > 1 ? backup : NULL;
out:
- subflow = bpf_mptcp_subflow_ctx_by_pos(data, subflow_id);
+ if (!pick)
+ return -1;
+ subflow = bpf_mptcp_subflow_ctx(pick);
if (!subflow)
return -1;
mptcp_subflow_set_scheduled(subflow, true);
@@ -194,11 +184,11 @@ int BPF_PROG(bpf_burst_get_subflow, struct mptcp_sock *msk,
struct mptcp_sched_data *data)
{
if (data->reinject)
- return bpf_burst_get_retrans(msk, data);
- return bpf_burst_get_send(msk, data);
+ return bpf_burst_get_retrans(msk);
+ return bpf_burst_get_send(msk);
}
-SEC(".struct_ops")
+SEC(".struct_ops.link")
struct mptcp_sched_ops burst = {
.init = (void *)mptcp_sched_burst_init,
.release = (void *)mptcp_sched_burst_release,