@@ -134,4 +134,7 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				   int (*vlan_fill)(struct sk_buff *skb,
 						    struct net_device *dev,
 						    u32 filter_mask));
+
+extern void rtnl_offload_xstats_notify(struct net_device *dev);
+
 #endif /* __LINUX_RTNETLINK_H */
@@ -1164,6 +1164,7 @@ enum {
 				 * bitfield32 with a filter mask for the
 				 * corresponding stat group.
 				 */
+	IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */
 	__IFLA_STATS_GETSET_MAX,
 };
@@ -758,6 +758,8 @@ enum rtnetlink_groups {
 #define RTNLGRP_BRVLAN		RTNLGRP_BRVLAN
 	RTNLGRP_MCTP_IFADDR,
 #define RTNLGRP_MCTP_IFADDR	RTNLGRP_MCTP_IFADDR
+	RTNLGRP_STATS,
+#define RTNLGRP_STATS		RTNLGRP_STATS
 	__RTNLGRP_MAX
 };
 #define RTNLGRP_MAX	(__RTNLGRP_MAX - 1)
@@ -5568,6 +5568,7 @@ rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
 static const struct nla_policy
 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
 	[IFLA_STATS_GETSET_UNSPEC] = { .strict_start_type = 1 },
+	[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
 };
 
 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
@@ -5775,16 +5776,51 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 }
 
+void rtnl_offload_xstats_notify(struct net_device *dev)
+{
+	struct rtnl_stats_dump_filters response_filters = {};
+	struct net *net = dev_net(dev);
+	int idxattr = 0, prividx = 0;
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	ASSERT_RTNL();
+
+	response_filters.mask[0] |=
+		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
+	response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
+		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
+
+	skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
+			GFP_KERNEL);
+	if (!skb)
+		goto errout;
+
+	err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
+				  &response_filters, &idxattr, &prividx, NULL);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto errout;
+	}
+
+	rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
+	return;
+
+errout:
+	rtnl_set_sk_err(net, RTNLGRP_STATS, err);
+}
+EXPORT_SYMBOL(rtnl_offload_xstats_notify);
+
 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
 			  struct netlink_ext_ack *extack)
 {
+	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
 	struct rtnl_stats_dump_filters response_filters = {};
 	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
 	struct net *net = sock_net(skb->sk);
 	struct net_device *dev = NULL;
-	int idxattr = 0, prividx = 0;
 	struct if_stats_msg *ifsm;
-	struct sk_buff *nskb;
+	bool notify = false;
 	int err;
 
 	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
@@ -5816,24 +5852,29 @@ static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (err < 0)
 		return err;
 
-	nskb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
-			 GFP_KERNEL);
-	if (!nskb)
-		return -ENOBUFS;
+	if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
+		u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
 
-	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
-				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
-				  0, &response_filters, &idxattr, &prividx,
-				  extack);
-	if (err < 0) {
-		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
-		WARN_ON(err == -EMSGSIZE);
-		kfree_skb(nskb);
-	} else {
-		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
+		if (req)
+			err = netdev_offload_xstats_enable(dev, t_l3, extack);
+		else
+			err = netdev_offload_xstats_disable(dev, t_l3);
+
+		if (!err)
+			notify = true;
+		else if (err != -EALREADY)
+			return err;
+
+		response_filters.mask[0] |=
+			IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
+		response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
+			IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
 	}
 
-	return err;
+	if (notify)
+		rtnl_offload_xstats_notify(dev);
+
+	return 0;
 }
 
 /* Process one rtnetlink message. */
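
For reference, below is a minimal userspace sketch of how the new toggle and notification group might be exercised. It is not part of the patch: the program and the stats_set_req layout are illustrative only, it assumes the RTM_SETSTATS message type introduced elsewhere in this series, and it needs CAP_NET_ADMIN. The program joins RTNLGRP_STATS, sends IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS as a u8 in an RTM_SETSTATS request, and then waits for the RTM_NEWSTATS message that rtnl_offload_xstats_notify() multicasts. Note that if the toggle is already in the requested state the kernel swallows -EALREADY and sends no notification, so the final wait would block.

/*
 * Hypothetical example, not part of the patch.
 * usage: ./l3stats IFNAME 0|1   (requires CAP_NET_ADMIN)
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/* Illustrative request layout: header, if_stats_msg, one u8 attribute. */
struct stats_set_req {
	struct nlmsghdr nlh;
	struct if_stats_msg ifsm;
	struct nlattr attr;
	__u8 enable;
	__u8 pad[3];
};

int main(int argc, char **argv)
{
	unsigned int group = RTNLGRP_STATS;
	struct stats_set_req req;
	char buf[8192];
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s IFNAME 0|1\n", argv[0]);
		return 1;
	}

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return 1;

	/* Join RTNLGRP_STATS so the RTM_NEWSTATS message multicast by
	 * rtnl_offload_xstats_notify() is delivered to this socket. */
	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		   &group, sizeof(group));

	/* RTM_SETSTATS request carrying IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS
	 * as a u8 set to 0 (disable) or 1 (enable). */
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifsm)) +
			    NLA_ALIGN(NLA_HDRLEN + sizeof(req.enable));
	req.nlh.nlmsg_type = RTM_SETSTATS;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nlh.nlmsg_seq = 1;
	req.ifsm.family = AF_UNSPEC;
	req.ifsm.ifindex = if_nametoindex(argv[1]);
	req.attr.nla_type = IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS;
	req.attr.nla_len = NLA_HDRLEN + sizeof(req.enable);
	req.enable = atoi(argv[2]) ? 1 : 0;

	if (!req.ifsm.ifindex) {
		fprintf(stderr, "unknown interface %s\n", argv[1]);
		return 1;
	}

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
		return 1;

	/* Read back the ACK for the request, then the notification that the
	 * kernel sends on RTNLGRP_STATS once the toggle actually changed. */
	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh;

		if (len < 0)
			return 1;
		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_ERROR) {
				struct nlmsgerr *e = NLMSG_DATA(nlh);

				if (e->error) {
					fprintf(stderr, "RTM_SETSTATS: %s\n",
						strerror(-e->error));
					return 1;
				}
			} else if (nlh->nlmsg_type == RTM_NEWSTATS) {
				printf("got RTM_NEWSTATS notification (%u bytes)\n",
				       nlh->nlmsg_len);
				return 0;
			}
		}
	}
}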