@@ -1232,6 +1232,16 @@ enum {
#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST _BITUL(0)
#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD _BITUL(1)
+enum {
+ TCA_TAPRIO_TC_ENTRY_UNSPEC,
+ TCA_TAPRIO_TC_ENTRY_INDEX, /* u32 */
+ TCA_TAPRIO_TC_ENTRY_MAX_SDU, /* u32 */
+
+ /* add new constants above here */
+ __TCA_TAPRIO_TC_ENTRY_CNT,
+ TCA_TAPRIO_TC_ENTRY_MAX = (__TCA_TAPRIO_TC_ENTRY_CNT - 1)
+};
+
enum {
TCA_TAPRIO_ATTR_UNSPEC,
TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
@@ -1245,6 +1255,7 @@ enum {
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
TCA_TAPRIO_ATTR_FLAGS, /* u32 */
TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
+ TCA_TAPRIO_ATTR_TC_ENTRY, /* nest */
__TCA_TAPRIO_ATTR_MAX,
};
@@ -150,6 +150,15 @@ value should always be greater than the delta specified in the
.BR etf(8)
qdisc.
+.TP
+max-sdu
+.br
+Specifies an array containing at most 16 elements, one per traffic class, which
+corresponds to the queueMaxSDU table from IEEE 802.1Q-2018. Each array element
+represents the maximum L2 payload size that can egress that traffic class.
+Elements that are not filled in default to 0. The value 0 means that the
+traffic class can send packets up to the port's maximum MTU in size.
+
.SH EXAMPLES
The following example shows how an traffic schedule with three traffic
@@ -151,13 +151,36 @@ static struct sched_entry *create_entry(uint32_t gatemask, uint32_t interval, ui
 	return e;
 }
 
+/* Emit one TCA_TAPRIO_ATTR_TC_ENTRY nest per traffic class, each carrying
+ * the tc index and its queueMaxSDU limit. A value of 0 means the traffic
+ * class may send packets up to the port's MTU (no extra limit).
+ */
+static void add_tc_entries(struct nlmsghdr *n,
+			   __u32 max_sdu[TC_QOPT_MAX_QUEUE])
+{
+	struct rtattr *l;
+	__u32 tc;
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
+		l = addattr_nest(n, 1024, TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED);
+
+		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_INDEX, &tc, sizeof(tc));
+		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
+			  &max_sdu[tc], sizeof(max_sdu[tc]));
+
+		addattr_nest_end(n, l);
+	}
+}
+
 static int taprio_parse_opt(struct qdisc_util *qu, int argc,
 			    char **argv, struct nlmsghdr *n, const char *dev)
 {
+	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = { };
 	__s32 clockid = CLOCKID_INVALID;
 	struct tc_mqprio_qopt opt = { };
 	__s64 cycle_time_extension = 0;
 	struct list_head sched_entries;
+	bool have_tc_entries = false;
 	struct rtattr *tail, *l;
 	__u32 taprio_flags = 0;
 	__u32 txtime_delay = 0;
@@ -211,6 +230,18 @@ static int taprio_parse_opt(struct qdisc_util *qu, int argc,
free(tmp);
idx++;
}
+ } else if (strcmp(*argv, "max-sdu") == 0) {
+ while (idx < TC_QOPT_MAX_QUEUE && NEXT_ARG_OK()) {
+ NEXT_ARG();
+ if (get_u32(&max_sdu[idx], *argv, 10)) {
+ PREV_ARG();
+ break;
+ }
+ idx++;
+ }
+ for ( ; idx < TC_QOPT_MAX_QUEUE; idx++)
+ max_sdu[idx] = 0;
+ have_tc_entries = true;
} else if (strcmp(*argv, "sched-entry") == 0) {
uint32_t mask, interval;
struct sched_entry *e;
@@ -341,6 +372,9 @@ static int taprio_parse_opt(struct qdisc_util *qu, int argc,
addattr_l(n, 1024, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
&cycle_time_extension, sizeof(cycle_time_extension));
+ if (have_tc_entries)
+ add_tc_entries(n, max_sdu);
+
l = addattr_nest(n, 1024, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST | NLA_F_NESTED);
err = add_sched_list(&sched_entries, n);
@@ -430,6 +464,62 @@ static int print_schedule(FILE *f, struct rtattr **tb)
 	return 0;
 }
 
+/* Decode one TCA_TAPRIO_ATTR_TC_ENTRY nest reported by the kernel and
+ * record its max-sdu value at the reported tc index.
+ */
+static void dump_tc_entry(__u32 max_sdu[TC_QOPT_MAX_QUEUE],
+			  struct rtattr *item, bool *have_tc_entries)
+{
+	struct rtattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1];
+	__u32 tc, val = 0;
+
+	parse_rtattr_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, item);
+
+	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
+		fprintf(stderr, "Missing tc entry index\n");
+		return;
+	}
+
+	tc = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
+
+	/* Don't trust the kernel-provided index to fit in max_sdu[];
+	 * a newer kernel could report more traffic classes than we know.
+	 */
+	if (tc >= TC_QOPT_MAX_QUEUE) {
+		fprintf(stderr, "Invalid tc entry index %u\n", tc);
+		return;
+	}
+
+	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
+		val = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
+
+	max_sdu[tc] = val;
+
+	*have_tc_entries = true;
+}
+
+/* Walk all TCA_TAPRIO_ATTR_TC_ENTRY nests in the taprio options and print
+ * the per-tc max-sdu table, if the kernel reported one.
+ */
+static void dump_tc_entries(FILE *f, struct rtattr *opt)
+{
+	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = {};
+	bool have_tc_entries = false;
+	struct rtattr *i;
+	int tc, rem;
+
+	rem = RTA_PAYLOAD(opt);
+
+	for (i = RTA_DATA(opt); RTA_OK(i, rem); i = RTA_NEXT(i, rem)) {
+		if (i->rta_type != (TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED))
+			continue;
+
+		dump_tc_entry(max_sdu, i, &have_tc_entries);
+	}
+
+	if (!have_tc_entries)
+		return;
+
+	open_json_array(PRINT_ANY, "max-sdu");
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+		print_uint(PRINT_ANY, NULL, " %u", max_sdu[tc]);
+	close_json_array(PRINT_ANY, "");
+
+	print_nl();
+}
+
 static int taprio_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 {
 	struct rtattr *tb[TCA_TAPRIO_ATTR_MAX + 1];
@@ -501,6 +588,8 @@ static int taprio_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
close_json_object();
}
+ dump_tc_entries(f, opt);
+
return 0;
}
The 802.1Q queueMaxSDU table is technically implemented in Linux as the TCA_TAPRIO_TC_ENTRY_MAX_SDU attribute of the TCA_TAPRIO_ATTR_TC_ENTRY nest. Multiple TCA_TAPRIO_ATTR_TC_ENTRY nests may appear in the netlink message, one per traffic class. Other per-traffic-class configuration items are also supposed to go there. This is done for future extensibility of the netlink interface (I have the feeling that the struct tc_mqprio_qopt passed through TCA_TAPRIO_ATTR_PRIOMAP is not exactly extensible, which kind of defeats the purpose of using netlink). Otherwise, the max-sdu value is parsed from the user, and printed, just like any other fixed-size 16-element array. Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com> --- This is the user space counterpart of: https://patchwork.kernel.org/project/netdevbpf/cover/20220914153303.1792444-1-vladimir.oltean@nxp.com/ include/uapi/linux/pkt_sched.h | 11 +++++ man/man8/tc-taprio.8 | 9 ++++ tc/q_taprio.c | 89 ++++++++++++++++++++++++++++++++++ 3 files changed, 109 insertions(+)