diff mbox series

[mptcp-next,RFC,4/4] selftests/bpf: mptcp RR: send 1 MSS on each subflow

Message ID 20240527-sched_per_packet-v1-4-09a41d405f7c@gmail.com (mailing list archive)
State Needs ACK
Headers show
Series mptcp: update scheduler API | expand

Checks

Context Check Description
matttbe/build success Build and static analysis OK
matttbe/checkpatch warning total: 0 errors, 0 warnings, 1 checks, 35 lines checked
matttbe/shellcheck success MPTCP selftests files have not been modified
matttbe/KVM_Validation__normal success Success! ✅
matttbe/KVM_Validation__debug success Success! ✅
matttbe/KVM_Validation__btf__only_bpftest_all_ success Success! ✅

Commit Message

Gregory Detal May 27, 2024, 1:17 p.m. UTC
This uses the helper and APIs to alternately schedule one MSS on
each subflow.

This now really acts as a round-robin scheduler: packets are equally
balanced across each path.

Signed-off-by: Gregory Detal <gregory.detal@gmail.com>
---
 tools/testing/selftests/bpf/progs/mptcp_bpf.h    |  1 +
 tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+)
diff mbox series

Patch

diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf.h b/tools/testing/selftests/bpf/progs/mptcp_bpf.h
index 782f36ed027e..a289746666dd 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_bpf.h
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf.h
@@ -6,6 +6,7 @@ 
 #include <bpf/bpf_core_read.h>
 
 #define MPTCP_SUBFLOWS_MAX 8
+#define MPTCP_SCHED_FLAG_RESCHEDULE (1 << 0)
 
 extern void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
 					bool scheduled) __ksym;
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
index 638ea6aa63b7..42c11fa483b1 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
@@ -69,10 +69,28 @@  int BPF_PROG(bpf_rr_get_subflow, struct mptcp_sock *msk,
 	return 0;
 }
 
+SEC("struct_ops")
+void BPF_PROG(bpf_rr_push, struct mptcp_sock *msk,
+	      struct mptcp_subflow_context *subflow,
+	      struct mptcp_sched_chunk *chunk)
+{
+	struct tcp_sock *tp = bpf_skc_to_tcp_sock(mptcp_subflow_tcp_sock(subflow));
+
+	if (!tp) {
+		/* Should not happen, in that case let default behavior. */
+		return;
+	}
+
+	/* Make sure to reschedule for each MSS. */
+	chunk->limit = tp->mss_cache;
+	chunk->flags |= MPTCP_SCHED_FLAG_RESCHEDULE;
+}
+
 SEC(".struct_ops")
 struct mptcp_sched_ops rr = {
 	.init		= (void *)mptcp_sched_rr_init,
 	.release	= (void *)mptcp_sched_rr_release,
 	.get_subflow	= (void *)bpf_rr_get_subflow,
+	.push		= (void *)bpf_rr_push,
 	.name		= "bpf_rr",
 };