[v5,net-next,04/12] net-shapers: implement NL group operation

Message ID f67b0502e7e9e9e8452760c4d3ad7cdac648ecda.1724944117.git.pabeni@redhat.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series: net: introduce TX H/W shaping API

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; GEN HAS DIFF 2 files changed, 1310 insertions(+);
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 7 this patch: 7
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 4 of 4 maintainers
netdev/build_clang success Errors and warnings before: 7 this patch: 7
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 7 this patch: 7
netdev/checkpatch warning WARNING: line length of 123 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 89 exceeds 80 columns WARNING: line length of 94 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Paolo Abeni Aug. 29, 2024, 3:16 p.m. UTC
Allow grouping multiple leaf shapers under a given root. The root
and the leaf shapers are created if needed; otherwise the existing
shapers are re-linked as requested.

Try hard to pre-allocate the needed resources, to avoid non-trivial
H/W configuration rollbacks in case of any failure.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
v4 -> v5:
 - replace net_device* with binding* in most helpers
 - factor out net_shaper_fill_binding() helper for re-use in later patch
 - move most sanity checks to parse time and use NL_SET_BAD_ATTR
 - reuse net_shaper_fill_handle() in net_shaper_group_send_reply()
   instead of open-coding it.

v3 -> v4:
 - clean up left-over node scope shapers after re-link, as needed
 - add locking
 - separate arguments for shaper handle

RFC v2 -> RFC v3:
 - dev_put() -> netdev_put()
---
 net/shaper/shaper.c | 348 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 346 insertions(+), 2 deletions(-)

Comments

Jakub Kicinski Aug. 30, 2024, 2:04 a.m. UTC | #1
On Thu, 29 Aug 2024 17:16:57 +0200 Paolo Abeni wrote:
> Allow grouping multiple leaf shapers under a given root. The root
> and the leaf shapers are created if needed; otherwise the existing
> shapers are re-linked as requested.
> 
> Try hard to pre-allocate the needed resources, to avoid non-trivial
> H/W configuration rollbacks in case of any failure.

Need to s/root/parent/ the commit message?

> +static int __net_shaper_group(struct net_shaper_binding *binding,
> +			      int leaves_count,
> +			      const struct net_shaper_handle *leaves_handles,
> +			      struct net_shaper_info *leaves,
> +			      struct net_shaper_handle *node_handle,
> +			      struct net_shaper_info *node,
> +			      struct netlink_ext_ack *extack)
> +{
> +	const struct net_shaper_ops *ops = net_shaper_binding_ops(binding);
> +	struct net_shaper_info *parent = NULL;
> +	struct net_shaper_handle leaf_handle;
> +	int i, ret;
> +
> +	if (node_handle->scope == NET_SHAPER_SCOPE_NODE) {
> +		if (node_handle->id != NET_SHAPER_ID_UNSPEC &&
> +		    !net_shaper_cache_lookup(binding, node_handle)) {
> +			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists",
> +					   node_handle->scope, node_handle->id);

BAD_ATTR would do?
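
For illustration, that would amount to something like the following
(hypothetical; it assumes the offending attribute is still at hand in
this path):

	/* Sketch: flag the bad attribute instead of formatting a message. */
	NL_SET_BAD_ATTR(extack, info->attrs[NET_SHAPER_A_NODE]);
	return -ENOENT;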

> +			return -ENOENT;
> +		}
> +
> +		/* When unspecified, the node parent scope is inherited from
> +		 * the leaves.
> +		 */
> +		if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
> +			for (i = 1; i < leaves_count; ++i) {
> +				if (leaves[i].parent.scope !=
> +				    leaves[0].parent.scope ||
> +				    leaves[i].parent.id !=
> +				    leaves[0].parent.id) {

memcmp() ? put a BUILD_BUG_ON(sizeof() != 8) to make sure we double
check it if the struct grows?
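
A minimal sketch of that suggestion, assuming struct net_shaper_handle
is two u32 fields with no padding:

	/* Hypothetical rework of the loop above: compare the parent
	 * handles as a whole and catch struct growth at build time.
	 */
	BUILD_BUG_ON(sizeof(struct net_shaper_handle) != 8);
	for (i = 1; i < leaves_count; ++i) {
		if (memcmp(&leaves[i].parent, &leaves[0].parent,
			   sizeof(leaves[0].parent))) {
			NL_SET_ERR_MSG(extack, "All the leaves shapers must have the same old parent");
			return -EINVAL;
		}
	}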

> +					NL_SET_ERR_MSG_FMT(extack, "All the leaves shapers must have the same old parent");
> +					return -EINVAL;

5 indents is too many indents :( maybe make the for loop a helper?

> +				}
> +			}
> +
> +			if (leaves_count > 0)

how can we get here and not have leaves? :o

> +				node->parent = leaves[0].parent;
> +		}
> +
> +	} else {
> +		net_shaper_default_parent(node_handle, &node->parent);
> +	}

> +static int net_shaper_group_send_reply(struct genl_info *info,
> +				       struct net_shaper_handle *handle)
> +{
> +	struct net_shaper_binding *binding = info->user_ptr[0];
> +	struct sk_buff *msg;
> +	int ret = -EMSGSIZE;
> +	void *hdr;
> +
> +	/* Prepare the msg reply in advance, to avoid device operation
> +	 * rollback.
> +	 */
> +	msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL);
> +	if (!msg)
> +		return ret;

return -ENOMEM;

> +
> +	hdr = genlmsg_iput(msg, info);
> +	if (!hdr)
> +		goto free_msg;
> +
> +	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX))
> +		goto free_msg;
> +
> +	if (net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))

you can combine the two fill ifs into one with ||
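
That is, roughly:

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))
		goto free_msg;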

> +		goto free_msg;
> +
> +	genlmsg_end(msg, hdr);
> +
> +	ret = genlmsg_reply(msg, info);
> +	if (ret)
> +		goto free_msg;

reply always eats the skb, just:

	return genlmsg_reply(msg, info);

> +
> +	return ret;
> +
> +free_msg:
> +	nlmsg_free(msg);
> +	return ret;

return -EMSGSIZE;

> +}
> +
> +int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
> +{
> +	struct net_shaper_handle *leaves_handles, node_handle;
> +	struct net_shaper_info *leaves, node;
> +	struct net_shaper_binding *binding;
> +	int i, ret, rem, leaves_count;
> +	struct nlattr *attr;
> +
> +	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES) ||
> +	    GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_NODE))
> +		return -EINVAL;
> +
> +	binding = net_shaper_binding_from_ctx(info->user_ptr[0]);
> +	leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES);
> +	leaves = kcalloc(leaves_count, sizeof(struct net_shaper_info) +
> +			 sizeof(struct net_shaper_handle), GFP_KERNEL);
> +	if (!leaves) {
> +		GENL_SET_ERR_MSG_FMT(info, "Can't allocate memory for %d leaves shapers",
> +				     leaves_count);
> +		return -ENOMEM;
> +	}
> +	leaves_handles = (struct net_shaper_handle *)&leaves[leaves_count];
> +
> +	ret = net_shaper_parse_node(binding, info->attrs[NET_SHAPER_A_NODE],
> +				    info, &node_handle, &node);
> +	if (ret)
> +		goto free_shapers;
> +
> +	i = 0;
> +	nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
> +			       genlmsg_data(info->genlhdr),
> +			       genlmsg_len(info->genlhdr), rem) {
> +		if (WARN_ON_ONCE(i >= leaves_count))
> +			goto free_shapers;
> +
> +		ret = net_shaper_parse_info_nest(binding, attr, info,
> +						 NET_SHAPER_SCOPE_QUEUE,
> +						 &leaves_handles[i],

Wouldn't it be convenient to store the handle in the "info" object?
AFAIU the handle is forever for an info, so no risk of it being out 
of sync...
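
A hypothetical shape for that, with the fields not visible in this
patch elided:

	/* Sketch: embed the (immutable) handle in the info object itself,
	 * so callers pass one array instead of parallel handle/info arrays.
	 */
	struct net_shaper_info {
		struct net_shaper_handle handle; /* fixed at creation time */
		struct net_shaper_handle parent;
		int leaves;
		/* rate/metric fields elided */
	};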

> +						 &leaves[i]);
> +		if (ret)
> +			goto free_shapers;
> +		i++;
> +	}
> +
> +	ret = net_shaper_group(binding, leaves_count, leaves_handles, leaves,
> +			       &node_handle, &node, info->extack);

...and it'd be nice if group had 5 rather than 7 params
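
With the handle folded into the info object as sketched above, the
signature could hypothetically shrink to:

	static int net_shaper_group(struct net_shaper_binding *binding,
				    int leaves_count,
				    struct net_shaper_info *leaves,
				    struct net_shaper_info *node,
				    struct netlink_ext_ack *extack);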

> +	if (ret < 0)
> +		goto free_shapers;
> +
> +	ret = net_shaper_group_send_reply(info, &node_handle);
> +	if (ret) {
> +		/* Error on reply is not fatal to avoid rollback a successful
> +		 * configuration.

Slight issues with the grammar here, but I think it should be fatal.
The sender will most likely block until they get a response.
Not to mention that the caller will not know what the handle 
we allocated is.

> +		 */
> +		GENL_SET_ERR_MSG_FMT(info, "Can't send reply %d", ret);
> +		ret = 0;
> +	}
> +
> +free_shapers:
> +	kfree(leaves);
> +	return ret;
> +}
Paolo Abeni Aug. 30, 2024, 4:48 p.m. UTC | #2
On 8/30/24 04:04, Jakub Kicinski wrote:
>> +static int __net_shaper_group(struct net_shaper_binding *binding,
>> +			      int leaves_count,
>> +			      const struct net_shaper_handle *leaves_handles,
>> +			      struct net_shaper_info *leaves,
>> +			      struct net_shaper_handle *node_handle,
>> +			      struct net_shaper_info *node,
>> +			      struct netlink_ext_ack *extack)
>> +{
>> +	const struct net_shaper_ops *ops = net_shaper_binding_ops(binding);
>> +	struct net_shaper_info *parent = NULL;
>> +	struct net_shaper_handle leaf_handle;
>> +	int i, ret;
>> +
>> +	if (node_handle->scope == NET_SHAPER_SCOPE_NODE) {
>> +		if (node_handle->id != NET_SHAPER_ID_UNSPEC &&
>> +		    !net_shaper_cache_lookup(binding, node_handle)) {
>> +			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists",
>> +					   node_handle->scope, node_handle->id);
> 
> BAD_ATTR would do?

We can reach here from the delete() op (next patch); there will be no 
paired attribute in such a case. Even for the group() operation, it would 
require pushing additional context through several callers to identify 
the attribute, which would be quite ugly. Can we keep ERR_MSG_FMT here?

>> +	if (ret < 0)
>> +		goto free_shapers;
>> +
>> +	ret = net_shaper_group_send_reply(info, &node_handle);
>> +	if (ret) {
>> +		/* Error on reply is not fatal to avoid rollback a successful
>> +		 * configuration.
> 
> Slight issues with the grammar here, but I think it should be fatal.
> The sender will most likely block until they get a response.
> Not to mention that the caller will not know what the handle
> we allocated is.

You mean we should return a negative error code, and _not_ that we 
should additionally attempt a rollback, right? The rollback would be very 
difficult at best: at this point, destructive actions have taken place.

Thanks,

Paolo
Jakub Kicinski Aug. 30, 2024, 6:48 p.m. UTC | #3
On Fri, 30 Aug 2024 18:48:41 +0200 Paolo Abeni wrote:
> >> +	const struct net_shaper_ops *ops = net_shaper_binding_ops(binding);
> >> +	struct net_shaper_info *parent = NULL;
> >> +	struct net_shaper_handle leaf_handle;
> >> +	int i, ret;
> >> +
> >> +	if (node_handle->scope == NET_SHAPER_SCOPE_NODE) {
> >> +		if (node_handle->id != NET_SHAPER_ID_UNSPEC &&
> >> +		    !net_shaper_cache_lookup(binding, node_handle)) {
> >> +			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists",
> >> +					   node_handle->scope, node_handle->id);  
> > 
> > BAD_ATTR would do?  
> 
> We can reach here from the delete() op (next patch), there will be no 
> paired attribute is such case. Even for the group() operation it will 
> need to push here towards several callers additional context to identify 
> the attribute, it should be quite ugly, can we keep with ERR_MSG_FMT here?

Alright. But TBH I haven't grasped the semantics of how you use UNSPEC.
So I reserve the right to complain again in v6 if I think of a better
way ;)

> >> +	if (ret < 0)
> >> +		goto free_shapers;
> >> +
> >> +	ret = net_shaper_group_send_reply(info, &node_handle);
> >> +	if (ret) {
> >> +		/* Error on reply is not fatal to avoid rollback a successful
> >> +		 * configuration.  
> > 
> > Slight issues with the grammar here, but I think it should be fatal.
> > The sender will most likely block until they get a response.
> > Not to mention that the caller will not know what the handle
> > we allocated is.  
> 
> You mean we should return a negative error code, and _not_ that we 
> should additionally attempt a rollback, right? The rollback will be very 
> difficult at best: at this point destructive action have taken place.

net_shaper_group_send_reply() does a bit too much, TBH.
Given the rollback complexity, propagating the failure just
from genlmsg_reply() is fine. I think the only case where it fails
is if the socket is congested, which is in the sender's control.
But genlmsg_new() can be done before we start. And we should
size the skb so that the nla_puts never fail (just pop a WARN_ON()
on their error path, to make sure we catch it if they grow;
I don't think they can fail with your current code).
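
Put together, a sketch along those lines (hypothetical, reusing this
patch's helpers; the allocation is sized so the puts cannot fail):

	static int net_shaper_group_send_reply(struct genl_info *info,
					       struct net_shaper_handle *handle)
	{
		struct net_shaper_binding *binding = info->user_ptr[0];
		struct sk_buff *msg;
		void *hdr;

		/* Size for the handle nest plus the u32 ifindex attribute;
		 * in the suggested flow this happens before any device op.
		 */
		msg = genlmsg_new(net_shaper_handle_size() +
				  nla_total_size(sizeof(u32)), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		hdr = genlmsg_iput(msg, info);

		/* The skb is sized for these attributes; a failure here
		 * would be a bug, so warn instead of handling it gracefully.
		 */
		if (WARN_ON(!hdr ||
			    net_shaper_fill_binding(msg, binding,
						    NET_SHAPER_A_IFINDEX) ||
			    net_shaper_fill_handle(msg, handle,
						   NET_SHAPER_A_HANDLE))) {
			nlmsg_free(msg);
			return -EMSGSIZE;
		}

		genlmsg_end(msg, hdr);

		/* Propagate reply errors: the sender blocks on the response
		 * and needs the freshly allocated node handle.
		 */
		return genlmsg_reply(msg, info);
	}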

Patch

diff --git a/net/shaper/shaper.c b/net/shaper/shaper.c
index a58bdd2ec013..f5e8464b8408 100644
--- a/net/shaper/shaper.c
+++ b/net/shaper/shaper.c
@@ -72,6 +72,24 @@  net_shaper_binding_ops(struct net_shaper_binding *binding)
 	return NULL;
 }
 
+/* Count the number of [multi] attributes of the given type. */
+static int net_shaper_list_len(struct genl_info *info, int type)
+{
+	struct nlattr *attr;
+	int rem, cnt = 0;
+
+	nla_for_each_attr_type(attr, type, genlmsg_data(info->genlhdr),
+			       genlmsg_len(info->genlhdr), rem)
+		cnt++;
+	return cnt;
+}
+
+static int net_shaper_handle_size(void)
+{
+	return nla_total_size(nla_total_size(sizeof(u32)) +
+			      nla_total_size(sizeof(u32)));
+}
+
 static int net_shaper_fill_binding(struct sk_buff *msg,
 				   const struct net_shaper_binding *binding,
 				   u32 type)
@@ -359,6 +377,29 @@  static void net_shaper_cache_commit(struct net_shaper_binding *binding,
 	xa_unlock(&data->shapers);
 }
 
+/* Rollback all the tentative inserts from the shaper cache. */
+static void net_shaper_cache_rollback(struct net_shaper_binding *binding)
+{
+	struct net_shaper_data *data = net_shaper_binding_data(binding);
+	struct net_shaper_handle handle;
+	struct net_shaper_info *cur;
+	unsigned long index;
+
+	if (!data)
+		return;
+
+	xa_lock(&data->shapers);
+	xa_for_each_marked(&data->shapers, index, cur,
+			   NET_SHAPER_CACHE_NOT_VALID) {
+		net_shaper_index_to_handle(index, &handle);
+		if (handle.scope == NET_SHAPER_SCOPE_NODE)
+			idr_remove(&data->node_ids, handle.id);
+		__xa_erase(&data->shapers, index);
+		kfree(cur);
+	}
+	xa_unlock(&data->shapers);
+}
+
 static int net_shaper_parse_handle(const struct nlattr *attr,
 				   const struct genl_info *info,
 				   struct net_shaper_handle *handle)
@@ -455,6 +496,7 @@  static int net_shaper_parse_info(struct net_shaper_binding *binding,
 static int net_shaper_parse_info_nest(struct net_shaper_binding *binding,
 				      const struct nlattr *attr,
 				      const struct genl_info *info,
+				      enum net_shaper_scope expected_scope,
 				      struct net_shaper_handle *handle,
 				      struct net_shaper_info *shaper)
 {
@@ -471,11 +513,62 @@  static int net_shaper_parse_info_nest(struct net_shaper_binding *binding,
 	if (ret < 0)
 		return ret;
 
+	if (expected_scope != NET_SHAPER_SCOPE_UNSPEC &&
+	    handle->scope != expected_scope) {
+		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
+		return -EINVAL;
+	}
+
 	if (!cached)
 		net_shaper_default_parent(handle, &shaper->parent);
 	return 0;
 }
 
+/* Alike net_parse_shaper_info(), but additionally allow the user specifying
+ * the shaper's parent handle.
+ */
+static int net_shaper_parse_node(struct net_shaper_binding *binding,
+				 const struct nlattr *attr,
+				 const struct genl_info *info,
+				 struct net_shaper_handle *handle,
+				 struct net_shaper_info *shaper)
+{
+	struct nlattr *tb[NET_SHAPER_A_PARENT + 1];
+	bool cached;
+	int ret;
+
+	ret = nla_parse_nested(tb, NET_SHAPER_A_PARENT, attr,
+			       net_shaper_node_info_nl_policy,
+			       info->extack);
+	if (ret < 0)
+		return ret;
+
+	ret = net_shaper_parse_info(binding, tb, info, handle, shaper,
+				    &cached);
+	if (ret)
+		return ret;
+
+	if (handle->scope != NET_SHAPER_SCOPE_NODE &&
+	    handle->scope != NET_SHAPER_SCOPE_NETDEV) {
+		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
+		return -EINVAL;
+	}
+
+	if (tb[NET_SHAPER_A_PARENT]) {
+		ret = net_shaper_parse_handle(tb[NET_SHAPER_A_PARENT], info,
+					      &shaper->parent);
+		if (ret)
+			return ret;
+
+		if (shaper->parent.scope != NET_SHAPER_SCOPE_NODE &&
+		    shaper->parent.scope != NET_SHAPER_SCOPE_NETDEV) {
+			NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_PARENT]);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
 static int net_shaper_generic_pre(struct genl_info *info, int type)
 {
 	struct net_shaper_nl_ctx *ctx;
@@ -657,8 +750,9 @@  int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info)
 
 	binding = net_shaper_binding_from_ctx(info->user_ptr[0]);
 	attr = info->attrs[NET_SHAPER_A_SHAPER];
-	ret = net_shaper_parse_info_nest(binding, attr, info, &handle,
-					 &shaper);
+	ret = net_shaper_parse_info_nest(binding, attr, info,
+					 NET_SHAPER_SCOPE_UNSPEC,
+					 &handle, &shaper);
 	if (ret)
 		return ret;
 
@@ -704,6 +798,100 @@  static int __net_shaper_delete(struct net_shaper_binding *binding,
 	return 0;
 }
 
+static int __net_shaper_group(struct net_shaper_binding *binding,
+			      int leaves_count,
+			      const struct net_shaper_handle *leaves_handles,
+			      struct net_shaper_info *leaves,
+			      struct net_shaper_handle *node_handle,
+			      struct net_shaper_info *node,
+			      struct netlink_ext_ack *extack)
+{
+	const struct net_shaper_ops *ops = net_shaper_binding_ops(binding);
+	struct net_shaper_info *parent = NULL;
+	struct net_shaper_handle leaf_handle;
+	int i, ret;
+
+	if (node_handle->scope == NET_SHAPER_SCOPE_NODE) {
+		if (node_handle->id != NET_SHAPER_ID_UNSPEC &&
+		    !net_shaper_cache_lookup(binding, node_handle)) {
+			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists",
+					   node_handle->scope, node_handle->id);
+			return -ENOENT;
+		}
+
+		/* When unspecified, the node parent scope is inherited from
+		 * the leaves.
+		 */
+		if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
+			for (i = 1; i < leaves_count; ++i) {
+				if (leaves[i].parent.scope !=
+				    leaves[0].parent.scope ||
+				    leaves[i].parent.id !=
+				    leaves[0].parent.id) {
+					NL_SET_ERR_MSG_FMT(extack, "All the leaves shapers must have the same old parent");
+					return -EINVAL;
+				}
+			}
+
+			if (leaves_count > 0)
+				node->parent = leaves[0].parent;
+		}
+
+	} else {
+		net_shaper_default_parent(node_handle, &node->parent);
+	}
+
+	if (node->parent.scope == NET_SHAPER_SCOPE_NODE) {
+		parent = net_shaper_cache_lookup(binding, &node->parent);
+		if (!parent) {
+			NL_SET_ERR_MSG_FMT(extack, "Node parent shaper %d:%d does not exists",
+					   node->parent.scope, node->parent.id);
+			return -ENOENT;
+		}
+	}
+
+	/* For newly created node scope shaper, the following will update
+	 * the handle, due to id allocation.
+	 */
+	ret = net_shaper_cache_pre_insert(binding, node_handle, extack);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < leaves_count; ++i) {
+		leaf_handle = leaves_handles[i];
+
+		ret = net_shaper_cache_pre_insert(binding, &leaf_handle,
+						  extack);
+		if (ret)
+			goto rollback;
+
+		if (leaves[i].parent.scope == node_handle->scope &&
+		    leaves[i].parent.id == node_handle->id)
+			continue;
+
+		/* The leaves shapers will be nested to the node, update the
+		 * linking accordingly.
+		 */
+		leaves[i].parent = *node_handle;
+		node->leaves++;
+	}
+
+	ret = ops->group(binding, leaves_count, leaves_handles, leaves,
+			 node_handle, node, extack);
+	if (ret < 0)
+		goto rollback;
+
+	if (parent)
+		parent->leaves++;
+	net_shaper_cache_commit(binding, 1, node_handle, node);
+	net_shaper_cache_commit(binding, leaves_count, leaves_handles, leaves);
+	return 0;
+
+rollback:
+	net_shaper_cache_rollback(binding);
+	return ret;
+}
+
 static int net_shaper_delete(struct net_shaper_binding *binding,
 			     const struct net_shaper_handle *handle,
 			     struct netlink_ext_ack *extack)
@@ -756,6 +944,162 @@  int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info)
 	return net_shaper_delete(binding, &handle, info->extack);
 }
 
+/* Update the H/W and on success update the local cache, too */
+static int net_shaper_group(struct net_shaper_binding *binding,
+			    int leaves_count,
+			    const struct net_shaper_handle *leaves_handles,
+			    struct net_shaper_info *leaves,
+			    struct net_shaper_handle *node_handle,
+			    struct net_shaper_info *node,
+			    struct netlink_ext_ack *extack)
+{
+	struct net_shaper_data *data = net_shaper_cache_init(binding, extack);
+	struct net_shaper_handle *old_nodes;
+	int i, ret, old_nodes_count = 0;
+
+	if (!data)
+		return -ENOMEM;
+
+	old_nodes = kcalloc(leaves_count, sizeof(struct net_shaper_handle),
+			    GFP_KERNEL);
+	if (!old_nodes)
+		return -ENOMEM;
+
+	for (i = 0; i < leaves_count; i++)
+		if (leaves[i].parent.scope == NET_SHAPER_SCOPE_NODE &&
+		    (leaves[i].parent.scope != node_handle->scope ||
+		     leaves[i].parent.id != node_handle->id))
+			old_nodes[old_nodes_count++] = leaves[i].parent;
+
+	mutex_lock(&data->lock);
+	ret = __net_shaper_group(binding, leaves_count, leaves_handles,
+				 leaves, node_handle, node, extack);
+
+	/* Check if we need to delete any NODE left alone by the new leaves
+	 * linkage.
+	 */
+	for (i = 0; i < old_nodes_count; ++i) {
+		node = net_shaper_cache_lookup(binding, &old_nodes[i]);
+		if (!node)
+			continue;
+
+		if (--node->leaves > 0)
+			continue;
+
+		/* Errors here are not fatal: the grouping operation is
+		 * completed, and user-space can still explicitly clean-up
+		 * left-over nodes.
+		 */
+		__net_shaper_delete(binding, &old_nodes[i], node, extack);
+	}
+
+	mutex_unlock(&data->lock);
+
+	kfree(old_nodes);
+	return ret;
+}
+
+static int net_shaper_group_send_reply(struct genl_info *info,
+				       struct net_shaper_handle *handle)
+{
+	struct net_shaper_binding *binding = info->user_ptr[0];
+	struct sk_buff *msg;
+	int ret = -EMSGSIZE;
+	void *hdr;
+
+	/* Prepare the msg reply in advance, to avoid device operation
+	 * rollback.
+	 */
+	msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL);
+	if (!msg)
+		return ret;
+
+	hdr = genlmsg_iput(msg, info);
+	if (!hdr)
+		goto free_msg;
+
+	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX))
+		goto free_msg;
+
+	if (net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))
+		goto free_msg;
+
+	genlmsg_end(msg, hdr);
+
+	ret = genlmsg_reply(msg, info);
+	if (ret)
+		goto free_msg;
+
+	return ret;
+
+free_msg:
+	nlmsg_free(msg);
+	return ret;
+}
+
+int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_shaper_handle *leaves_handles, node_handle;
+	struct net_shaper_info *leaves, node;
+	struct net_shaper_binding *binding;
+	int i, ret, rem, leaves_count;
+	struct nlattr *attr;
+
+	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES) ||
+	    GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_NODE))
+		return -EINVAL;
+
+	binding = net_shaper_binding_from_ctx(info->user_ptr[0]);
+	leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES);
+	leaves = kcalloc(leaves_count, sizeof(struct net_shaper_info) +
+			 sizeof(struct net_shaper_handle), GFP_KERNEL);
+	if (!leaves) {
+		GENL_SET_ERR_MSG_FMT(info, "Can't allocate memory for %d leaves shapers",
+				     leaves_count);
+		return -ENOMEM;
+	}
+	leaves_handles = (struct net_shaper_handle *)&leaves[leaves_count];
+
+	ret = net_shaper_parse_node(binding, info->attrs[NET_SHAPER_A_NODE],
+				    info, &node_handle, &node);
+	if (ret)
+		goto free_shapers;
+
+	i = 0;
+	nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
+			       genlmsg_data(info->genlhdr),
+			       genlmsg_len(info->genlhdr), rem) {
+		if (WARN_ON_ONCE(i >= leaves_count))
+			goto free_shapers;
+
+		ret = net_shaper_parse_info_nest(binding, attr, info,
+						 NET_SHAPER_SCOPE_QUEUE,
+						 &leaves_handles[i],
+						 &leaves[i]);
+		if (ret)
+			goto free_shapers;
+		i++;
+	}
+
+	ret = net_shaper_group(binding, leaves_count, leaves_handles, leaves,
+			       &node_handle, &node, info->extack);
+	if (ret < 0)
+		goto free_shapers;
+
+	ret = net_shaper_group_send_reply(info, &node_handle);
+	if (ret) {
+		/* Error on reply is not fatal to avoid rollback a successful
+		 * configuration.
+		 */
+		GENL_SET_ERR_MSG_FMT(info, "Can't send reply %d", ret);
+		ret = 0;
+	}
+
+free_shapers:
+	kfree(leaves);
+	return ret;
+}
+
 static void net_shaper_flush(struct net_shaper_binding *binding)
 {
 	struct net_shaper_data *data = net_shaper_binding_data(binding);