
[v2,iproute2-next,2/2] taprio: support dumping and setting per-tc max SDU

Message ID 20221004120028.679586-2-vladimir.oltean@nxp.com (mailing list archive)
State Changes Requested
Delegated to: David Ahern
Series [v2,iproute2-next,1/2] taprio: don't print the clockid if invalid

Checks

netdev/tree_selection: success (Not a local patch)

Commit Message

Vladimir Oltean Oct. 4, 2022, noon UTC
The 802.1Q queueMaxSDU table is technically implemented in Linux as
the TCA_TAPRIO_TC_ENTRY_MAX_SDU attribute of the TCA_TAPRIO_ATTR_TC_ENTRY
nest. Multiple TCA_TAPRIO_ATTR_TC_ENTRY nests may appear in the netlink
message, one per traffic class. Other per-traffic-class configuration
items are also supposed to go into this nest.
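
As a rough sketch, the layout of the new attributes looks like this (names
as in the patch below; both values are u32, matching the addattr_l() calls):

  TCA_TAPRIO_ATTR_TC_ENTRY (nested, one per traffic class)
      TCA_TAPRIO_TC_ENTRY_INDEX    (u32): traffic class number
      TCA_TAPRIO_TC_ENTRY_MAX_SDU  (u32): queueMaxSDU for that traffic class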

This is done for future extensibility of the netlink interface (I have
the feeling that the struct tc_mqprio_qopt passed through
TCA_TAPRIO_ATTR_PRIOMAP is not exactly extensible, which kind of defeats
the purpose of using netlink). But otherwise, the max-sdu is parsed from
the user and printed just like any other fixed-size 16-element array.

I've modified the example for a fully offloaded configuration (flags 0x2)
to also show a max-sdu use case. The gate masks were 0x80 (for TC 7),
0xa0 (for TCs 7 and 5) and 0xdf (for TCs 7, 6, 4, 3, 2, 1, 0).
I modified the last gate to exclude TC 7 (df -> 5f), so that TC 7 now
shares an open gate window only with TC 5.
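
For reference, the full offload command from the updated man page example,
quoted from the patch below (where it is shown for eth0):

# tc qdisc add dev eth0 parent root taprio \
              num_tc 8 \
              map 0 1 2 3 4 5 6 7 \
              queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
              max-sdu 0 0 0 0 0 200 0 0 \
              base-time 200 \
              sched-entry S 80 20000 \
              sched-entry S a0 20000 \
              sched-entry S 5f 60000 \
              flags 0x2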

Output after running the full offload command from the man page example
(the new attribute is "max-sdu"):

$ tc qdisc show dev swp0 root
qdisc taprio 8002: root tc 8 map 0 1 2 3 4 5 6 7 0 0 0 0 0 0 0 0
queues offset 0 count 1 offset 1 count 1 offset 2 count 1 offset 3 count 1 offset 4 count 1 offset 5 count 1 offset 6 count 1 offset 7 count 1
 flags 0x2      base-time 200 cycle-time 100000 cycle-time-extension 0
        index 0 cmd S gatemask 0x80 interval 20000
        index 1 cmd S gatemask 0xa0 interval 20000
        index 2 cmd S gatemask 0x5f interval 60000
max-sdu 0 0 0 0 0 200 0 0 0 0 0 0 0 0 0 0

$ tc -j -p qdisc show dev eno0 root
[ {
        "kind": "taprio",
        "handle": "8002:",
        "root": true,
        "options": {
            "tc": 8,
            "map": [ 0,1,2,3,4,5,6,7,0,0,0,0,0,0,0,0 ],
            "queues": [ {
                    "offset": 0,
                    "count": 1
                },{
                    "offset": 1,
                    "count": 1
                },{
                    "offset": 2,
                    "count": 1
                },{
                    "offset": 3,
                    "count": 1
                },{
                    "offset": 4,
                    "count": 1
                },{
                    "offset": 5,
                    "count": 1
                },{
                    "offset": 6,
                    "count": 1
                },{
                    "offset": 7,
                    "count": 1
                } ],
            "flags": "0x2",
            "base_time": 200,
            "cycle_time": 100000,
            "cycle_time_extension": 0,
            "schedule": [ {
                    "index": 0,
                    "cmd": "S",
                    "gatemask": "0x80",
                    "interval": 20000
                },{
                    "index": 1,
                    "cmd": "S",
                    "gatemask": "0xa0",
                    "interval": 20000
                },{
                    "index": 2,
                    "cmd": "S",
                    "gatemask": "0x5f",
                    "interval": 60000
                } ],
            "max-sdu": [ 0,0,0,0,0,200,0,0,0,0,0,0,0,0,0,0 ]
        }
    } ]

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
---
v1->v2:
- add info to man page
- expand documentation

v1 at:
https://patchwork.kernel.org/project/netdevbpf/patch/20220914200706.1961613-1-vladimir.oltean@nxp.com/

 man/man8/tc-taprio.8 | 18 ++++++++-
 tc/q_taprio.c        | 89 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 105 insertions(+), 2 deletions(-)

Comments

David Ahern Oct. 9, 2022, 8:07 p.m. UTC | #1
On 10/4/22 6:00 AM, Vladimir Oltean wrote:
> diff --git a/tc/q_taprio.c b/tc/q_taprio.c
> index e3af3f3fa047..45f82be1f50a 100644
> --- a/tc/q_taprio.c
> +++ b/tc/q_taprio.c
> @@ -151,13 +151,32 @@ static struct sched_entry *create_entry(uint32_t gatemask, uint32_t interval, ui
>  	return e;
>  }
>  
> +static void add_tc_entries(struct nlmsghdr *n,
> +			   __u32 max_sdu[TC_QOPT_MAX_QUEUE])
> +{
> +	struct rtattr *l;
> +	__u32 tc;
> +
> +	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
> +		l = addattr_nest(n, 1024, TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED);
> +
> +		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_INDEX, &tc, sizeof(tc));
> +		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
> +			  &max_sdu[tc], sizeof(max_sdu[tc]));
> +
> +		addattr_nest_end(n, l);


Why the full TC_QOPT_MAX_QUEUE? the parse_opt knows the index of the
last entry used.

> +	}
> +}
> +
>  static int taprio_parse_opt(struct qdisc_util *qu, int argc,
>  			    char **argv, struct nlmsghdr *n, const char *dev)
>  {
> +	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = { };
>  	__s32 clockid = CLOCKID_INVALID;
>  	struct tc_mqprio_qopt opt = { };
>  	__s64 cycle_time_extension = 0;
>  	struct list_head sched_entries;
> +	bool have_tc_entries = false;
>  	struct rtattr *tail, *l;
>  	__u32 taprio_flags = 0;
>  	__u32 txtime_delay = 0;
> @@ -211,6 +230,18 @@ static int taprio_parse_opt(struct qdisc_util *qu, int argc,
>  				free(tmp);
>  				idx++;
>  			}
> +		} else if (strcmp(*argv, "max-sdu") == 0) {
> +			while (idx < TC_QOPT_MAX_QUEUE && NEXT_ARG_OK()) {
> +				NEXT_ARG();
> +				if (get_u32(&max_sdu[idx], *argv, 10)) {
> +					PREV_ARG();
> +					break;
> +				}
> +				idx++;
> +			}
> +			for ( ; idx < TC_QOPT_MAX_QUEUE; idx++)
> +				max_sdu[idx] = 0;

max_sdu is initialized to 0 and you have "have_tc_entries" to detect
multiple options on the command line.

> +			have_tc_entries = true;
>  		} else if (strcmp(*argv, "sched-entry") == 0) {
>  			uint32_t mask, interval;
>  			struct sched_entry *e;
> @@ -341,6 +372,9 @@ static int taprio_parse_opt(struct qdisc_util *qu, int argc,
>  		addattr_l(n, 1024, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
>  			  &cycle_time_extension, sizeof(cycle_time_extension));
>  
> +	if (have_tc_entries)
> +		add_tc_entries(n, max_sdu);
> +
>  	l = addattr_nest(n, 1024, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST | NLA_F_NESTED);
>  
>  	err = add_sched_list(&sched_entries, n);
> @@ -430,6 +464,59 @@ static int print_schedule(FILE *f, struct rtattr **tb)
>  	return 0;
>  }
>  
> +static void dump_tc_entry(__u32 max_sdu[TC_QOPT_MAX_QUEUE],
> +			  struct rtattr *item, bool *have_tc_entries)
> +{
> +	struct rtattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1];
> +	__u32 tc, val = 0;
> +
> +	parse_rtattr_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, item);
> +
> +	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
> +		fprintf(stderr, "Missing tc entry index\n");
> +		return;
> +	}
> +
> +	tc = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
> +
> +	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
> +		val = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
> +
> +	max_sdu[tc] = val;
> +
> +	*have_tc_entries = true;
> +}
> +
> +static void dump_tc_entries(FILE *f, struct rtattr *opt)
> +{
> +	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = {};
> +	bool have_tc_entries = false;
> +	struct rtattr *i;
> +	int tc, rem;
> +
> +	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
> +		max_sdu[tc] = 0;

max_sdu is initialized to 0 above when it is declared.

> +
> +	rem = RTA_PAYLOAD(opt);
> +
> +	for (i = RTA_DATA(opt); RTA_OK(i, rem); i = RTA_NEXT(i, rem)) {
> +		if (i->rta_type != (TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED))
> +			continue;
> +
> +		dump_tc_entry(max_sdu, i, &have_tc_entries);
> +	}
> +
> +	if (!have_tc_entries)
> +		return;
> +
> +	open_json_array(PRINT_ANY, "max-sdu");
> +	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)

you can know the max index so why not use it here?
Vladimir Oltean Oct. 12, 2022, 9:53 a.m. UTC | #2
On Sun, Oct 09, 2022 at 02:07:37PM -0600, David Ahern wrote:
> On 10/4/22 6:00 AM, Vladimir Oltean wrote:
> > diff --git a/tc/q_taprio.c b/tc/q_taprio.c
> > index e3af3f3fa047..45f82be1f50a 100644
> > --- a/tc/q_taprio.c
> > +++ b/tc/q_taprio.c
> > @@ -151,13 +151,32 @@ static struct sched_entry *create_entry(uint32_t gatemask, uint32_t interval, ui
> >  	return e;
> >  }
> >  
> > +static void add_tc_entries(struct nlmsghdr *n,
> > +			   __u32 max_sdu[TC_QOPT_MAX_QUEUE])
> > +{
> > +	struct rtattr *l;
> > +	__u32 tc;
> > +
> > +	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
> > +		l = addattr_nest(n, 1024, TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED);
> > +
> > +		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_INDEX, &tc, sizeof(tc));
> > +		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
> > +			  &max_sdu[tc], sizeof(max_sdu[tc]));
> > +
> > +		addattr_nest_end(n, l);
> 
> Why the full TC_QOPT_MAX_QUEUE? the parse_opt knows the index of the
> last entry used.

It doesn't make a difference if I send netlink attributes with zeroes or not.
I was thinking the code is more future-proof for other per-tc attributes.
If I then add support for other per-tc stuff, I'll have num_max_sdu_entries,
num_other_stuff_entries, etc, and I'll have to iterate up to the max()
of those (or just up to TC_QOPT_MAX_QUEUE), and add the nla only if i <=
the num_*_entries of the corresponding attribute.

Anyway I don't have plans for other per-tc entries so I'll make this change.
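
For the record, a minimal sketch of what that could look like (not the
posted code; the "num_tc_entries" argument is hypothetical and would be
filled in by taprio_parse_opt() with the number of max-sdu values it
parsed):

static void add_tc_entries(struct nlmsghdr *n, __u32 num_tc_entries,
			   __u32 max_sdu[TC_QOPT_MAX_QUEUE])
{
	struct rtattr *l;
	__u32 tc;

	/* Emit one nest per traffic class the user actually configured,
	 * instead of all TC_QOPT_MAX_QUEUE of them.
	 */
	for (tc = 0; tc < num_tc_entries; tc++) {
		l = addattr_nest(n, 1024,
				 TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED);
		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_INDEX, &tc, sizeof(tc));
		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
			  &max_sdu[tc], sizeof(max_sdu[tc]));
		addattr_nest_end(n, l);
	}
}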

> > +	}
> > +}
> > +
> >  static int taprio_parse_opt(struct qdisc_util *qu, int argc,
> >  			    char **argv, struct nlmsghdr *n, const char *dev)
> >  {
> > +	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = { };
> >  	__s32 clockid = CLOCKID_INVALID;
> >  	struct tc_mqprio_qopt opt = { };
> >  	__s64 cycle_time_extension = 0;
> >  	struct list_head sched_entries;
> > +	bool have_tc_entries = false;
> >  	struct rtattr *tail, *l;
> >  	__u32 taprio_flags = 0;
> >  	__u32 txtime_delay = 0;
> > @@ -211,6 +230,18 @@ static int taprio_parse_opt(struct qdisc_util *qu, int argc,
> >  				free(tmp);
> >  				idx++;
> >  			}
> > +		} else if (strcmp(*argv, "max-sdu") == 0) {
> > +			while (idx < TC_QOPT_MAX_QUEUE && NEXT_ARG_OK()) {
> > +				NEXT_ARG();
> > +				if (get_u32(&max_sdu[idx], *argv, 10)) {
> > +					PREV_ARG();
> > +					break;
> > +				}
> > +				idx++;
> > +			}
> > +			for ( ; idx < TC_QOPT_MAX_QUEUE; idx++)
> > +				max_sdu[idx] = 0;
> 
> max_sdu is initialized to 0 and you have "have_tc_entries" to detect
> multiple options on the command line.

Can remove.

> > +			have_tc_entries = true;
> >  		} else if (strcmp(*argv, "sched-entry") == 0) {
> >  			uint32_t mask, interval;
> >  			struct sched_entry *e;
> > @@ -341,6 +372,9 @@ static int taprio_parse_opt(struct qdisc_util *qu, int argc,
> >  		addattr_l(n, 1024, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
> >  			  &cycle_time_extension, sizeof(cycle_time_extension));
> >  
> > +	if (have_tc_entries)
> > +		add_tc_entries(n, max_sdu);
> > +
> >  	l = addattr_nest(n, 1024, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST | NLA_F_NESTED);
> >  
> >  	err = add_sched_list(&sched_entries, n);
> > @@ -430,6 +464,59 @@ static int print_schedule(FILE *f, struct rtattr **tb)
> >  	return 0;
> >  }
> >  
> > +static void dump_tc_entry(__u32 max_sdu[TC_QOPT_MAX_QUEUE],
> > +			  struct rtattr *item, bool *have_tc_entries)
> > +{
> > +	struct rtattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1];
> > +	__u32 tc, val = 0;
> > +
> > +	parse_rtattr_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, item);
> > +
> > +	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
> > +		fprintf(stderr, "Missing tc entry index\n");
> > +		return;
> > +	}
> > +
> > +	tc = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
> > +
> > +	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
> > +		val = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
> > +
> > +	max_sdu[tc] = val;

The array dereference here can potentially go out of bounds with future
kernels which might provide a TCA_TAPRIO_TC_ENTRY_INDEX past TC_QOPT_MAX_QUEUE.
I will add a check for this, and ignore it (or error out ?!).
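
Something like this, as a sketch (the bounds check is not in the posted
patch; the rest is the function as posted):

static void dump_tc_entry(__u32 max_sdu[TC_QOPT_MAX_QUEUE],
			  struct rtattr *item, bool *have_tc_entries)
{
	struct rtattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1];
	__u32 tc, val = 0;

	parse_rtattr_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, item);

	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
		fprintf(stderr, "Missing tc entry index\n");
		return;
	}

	tc = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);

	/* Ignore indices that a newer kernel might report beyond what this
	 * iproute2 build knows about, instead of writing past max_sdu[].
	 */
	if (tc >= TC_QOPT_MAX_QUEUE) {
		fprintf(stderr, "Unsupported tc entry index %u\n", tc);
		return;
	}

	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
		val = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);

	max_sdu[tc] = val;

	*have_tc_entries = true;
}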

> > +
> > +	*have_tc_entries = true;
> > +}
> > +
> > +static void dump_tc_entries(FILE *f, struct rtattr *opt)
> > +{
> > +	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = {};
> > +	bool have_tc_entries = false;
> > +	struct rtattr *i;
> > +	int tc, rem;
> > +
> > +	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
> > +		max_sdu[tc] = 0;
> 
> max_sdu is initialized to 0 above when it is declared.

True.

> > +
> > +	rem = RTA_PAYLOAD(opt);
> > +
> > +	for (i = RTA_DATA(opt); RTA_OK(i, rem); i = RTA_NEXT(i, rem)) {
> > +		if (i->rta_type != (TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED))
> > +			continue;
> > +
> > +		dump_tc_entry(max_sdu, i, &have_tc_entries);
> > +	}
> > +
> > +	if (!have_tc_entries)
> > +		return;
> > +
> > +	open_json_array(PRINT_ANY, "max-sdu");
> > +	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
> 
> you can know the max index so why not use it here?

Can do. The kernel will always report TC_QOPT_MAX_QUEUE entries anyway,
so it doesn't really have a practical effect right now.

I'd like to avoid using an array as temporary storage in the first
place (see the comment about the kernel potentially causing out-of-bounds
access), but I only want to call open_json_array() if there is at least
one actual per-tc entry present. I'll think about this some more.
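
One possibility, sketched here rather than committed to (the changed
dump_tc_entry() return value is hypothetical): keep the array, have the
first loop remember the highest tc index seen, and bound the print loop
by that instead of by TC_QOPT_MAX_QUEUE:

static void dump_tc_entries(FILE *f, struct rtattr *opt)
{
	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = {};
	int num_tc_entries = 0;
	struct rtattr *i;
	int tc, rem;

	rem = RTA_PAYLOAD(opt);

	for (i = RTA_DATA(opt); RTA_OK(i, rem); i = RTA_NEXT(i, rem)) {
		if (i->rta_type != (TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED))
			continue;

		/* dump_tc_entry() changed to return the tc index it filled
		 * in, or -1 for nests it could not use.
		 */
		tc = dump_tc_entry(max_sdu, i);
		if (tc >= num_tc_entries)
			num_tc_entries = tc + 1;
	}

	if (!num_tc_entries)
		return;

	open_json_array(PRINT_ANY, "max-sdu");
	for (tc = 0; tc < num_tc_entries; tc++)
		print_uint(PRINT_ANY, NULL, " %u", max_sdu[tc]);
	close_json_array(PRINT_ANY, "");

	print_nl();
}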

Patch

diff --git a/man/man8/tc-taprio.8 b/man/man8/tc-taprio.8
index d13c86f779b7..e1f32e73bab0 100644
--- a/man/man8/tc-taprio.8
+++ b/man/man8/tc-taprio.8
@@ -150,6 +150,15 @@  value should always be greater than the delta specified in the
 .BR etf(8)
 qdisc.
 
+.TP
+max-sdu
+.br
+Specifies an array containing at most 16 elements, one per traffic class, which
+corresponds to the queueMaxSDU table from IEEE 802.1Q-2018. Each array element
+represents the maximum L2 payload size that can egress that traffic class.
+Elements that are not filled in default to 0. The value 0 means that the
+traffic class can send packets up to the port's maximum MTU in size.
+
 .SH EXAMPLES
 
 The following example shows how an traffic schedule with three traffic
@@ -205,17 +214,22 @@  is implicitly calculated as the sum of all
 durations (i.e. 20 us + 20 us + 60 us = 100 us). Although the base-time is in
 the past, the hardware will start executing the schedule at a PTP time equal to
 the smallest integer multiple of 100 us, plus 200 ns, that is larger than the
-NIC's current PTP time.
+NIC's current PTP time. In addition, the MTU for traffic class 5 is limited to
+200 octets, so that the interference this creates upon traffic class 7 during
+the time window when their gates are both open is bounded. The interference is
+determined by the transmit time of the max SDU, plus the L2 header length, plus
+the L1 overhead.
 
 .EX
 # tc qdisc add dev eth0 parent root taprio \\
               num_tc 8 \\
               map 0 1 2 3 4 5 6 7 \\
               queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \\
+              max-sdu 0 0 0 0 0 200 0 0 \\
               base-time 200 \\
               sched-entry S 80 20000 \\
               sched-entry S a0 20000 \\
-              sched-entry S df 60000 \\
+              sched-entry S 5f 60000 \\
               flags 0x2
 .EE
 
diff --git a/tc/q_taprio.c b/tc/q_taprio.c
index e3af3f3fa047..45f82be1f50a 100644
--- a/tc/q_taprio.c
+++ b/tc/q_taprio.c
@@ -151,13 +151,32 @@  static struct sched_entry *create_entry(uint32_t gatemask, uint32_t interval, ui
 	return e;
 }
 
+static void add_tc_entries(struct nlmsghdr *n,
+			   __u32 max_sdu[TC_QOPT_MAX_QUEUE])
+{
+	struct rtattr *l;
+	__u32 tc;
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
+		l = addattr_nest(n, 1024, TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED);
+
+		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_INDEX, &tc, sizeof(tc));
+		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
+			  &max_sdu[tc], sizeof(max_sdu[tc]));
+
+		addattr_nest_end(n, l);
+	}
+}
+
 static int taprio_parse_opt(struct qdisc_util *qu, int argc,
 			    char **argv, struct nlmsghdr *n, const char *dev)
 {
+	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = { };
 	__s32 clockid = CLOCKID_INVALID;
 	struct tc_mqprio_qopt opt = { };
 	__s64 cycle_time_extension = 0;
 	struct list_head sched_entries;
+	bool have_tc_entries = false;
 	struct rtattr *tail, *l;
 	__u32 taprio_flags = 0;
 	__u32 txtime_delay = 0;
@@ -211,6 +230,18 @@  static int taprio_parse_opt(struct qdisc_util *qu, int argc,
 				free(tmp);
 				idx++;
 			}
+		} else if (strcmp(*argv, "max-sdu") == 0) {
+			while (idx < TC_QOPT_MAX_QUEUE && NEXT_ARG_OK()) {
+				NEXT_ARG();
+				if (get_u32(&max_sdu[idx], *argv, 10)) {
+					PREV_ARG();
+					break;
+				}
+				idx++;
+			}
+			for ( ; idx < TC_QOPT_MAX_QUEUE; idx++)
+				max_sdu[idx] = 0;
+			have_tc_entries = true;
 		} else if (strcmp(*argv, "sched-entry") == 0) {
 			uint32_t mask, interval;
 			struct sched_entry *e;
@@ -341,6 +372,9 @@  static int taprio_parse_opt(struct qdisc_util *qu, int argc,
 		addattr_l(n, 1024, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
 			  &cycle_time_extension, sizeof(cycle_time_extension));
 
+	if (have_tc_entries)
+		add_tc_entries(n, max_sdu);
+
 	l = addattr_nest(n, 1024, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST | NLA_F_NESTED);
 
 	err = add_sched_list(&sched_entries, n);
@@ -430,6 +464,59 @@  static int print_schedule(FILE *f, struct rtattr **tb)
 	return 0;
 }
 
+static void dump_tc_entry(__u32 max_sdu[TC_QOPT_MAX_QUEUE],
+			  struct rtattr *item, bool *have_tc_entries)
+{
+	struct rtattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1];
+	__u32 tc, val = 0;
+
+	parse_rtattr_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, item);
+
+	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
+		fprintf(stderr, "Missing tc entry index\n");
+		return;
+	}
+
+	tc = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
+
+	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
+		val = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
+
+	max_sdu[tc] = val;
+
+	*have_tc_entries = true;
+}
+
+static void dump_tc_entries(FILE *f, struct rtattr *opt)
+{
+	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = {};
+	bool have_tc_entries = false;
+	struct rtattr *i;
+	int tc, rem;
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+		max_sdu[tc] = 0;
+
+	rem = RTA_PAYLOAD(opt);
+
+	for (i = RTA_DATA(opt); RTA_OK(i, rem); i = RTA_NEXT(i, rem)) {
+		if (i->rta_type != (TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED))
+			continue;
+
+		dump_tc_entry(max_sdu, i, &have_tc_entries);
+	}
+
+	if (!have_tc_entries)
+		return;
+
+	open_json_array(PRINT_ANY, "max-sdu");
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+		print_uint(PRINT_ANY, NULL, " %u", max_sdu[tc]);
+	close_json_array(PRINT_ANY, "");
+
+	print_nl();
+}
+
 static int taprio_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 {
 	struct rtattr *tb[TCA_TAPRIO_ATTR_MAX + 1];
@@ -503,6 +590,8 @@  static int taprio_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 		close_json_object();
 	}
 
+	dump_tc_entries(f, opt);
+
 	return 0;
 }