diff mbox series

[net-next,v2] macsec: Add support for IFLA_MACSEC_OFFLOAD in the netlink layer

Message ID 20221206085757.5816-1-ehakim@nvidia.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Headers show
Series [net-next,v2] macsec: Add support for IFLA_MACSEC_OFFLOAD in the netlink layer | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Single patches do not need cover letters
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 4 this patch: 4
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 4 this patch: 4
netdev/checkpatch warning WARNING: line length of 86 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 89 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Emeel Hakim Dec. 6, 2022, 8:57 a.m. UTC
From: Emeel Hakim <ehakim@nvidia.com>

This adds support for configuring Macsec offload through the
netlink layer by:
- Considering IFLA_MACSEC_OFFLOAD in macsec_fill_info.
- Handling IFLA_MACSEC_OFFLOAD in macsec_changelink.
- Adding IFLA_MACSEC_OFFLOAD to the netlink policy.
- Adjusting macsec_get_size.

The handling in macsec_changlink is similar to
macsec_upd_offload.
Update macsec_upd_offload to use a common helper function
to avoid duplication.

Example for setting offload for a macsec device
    ip link set macsec0 type macsec offload mac

Reviewed-by: Raed Salem <raeds@nvidia.com>
Signed-off-by: Emeel Hakim <ehakim@nvidia.com>
---
V1 -> V2: Add common helper to avoid duplicating code
 drivers/net/macsec.c | 114 ++++++++++++++++++++++++++++---------------
 1 file changed, 74 insertions(+), 40 deletions(-)

Comments

Jiri Pirko Dec. 6, 2022, 9:16 a.m. UTC | #1
Tue, Dec 06, 2022 at 09:57:57AM CET, ehakim@nvidia.com wrote:
>From: Emeel Hakim <ehakim@nvidia.com>
>
>This adds support for configuring Macsec offload through the

Tell the codebase what to do. Be imperative in your patch descriptions
so it is clear what the intentions of the patch are.


>netlink layer by:
>- Considering IFLA_MACSEC_OFFLOAD in macsec_fill_info.
>- Handling IFLA_MACSEC_OFFLOAD in macsec_changelink.
>- Adding IFLA_MACSEC_OFFLOAD to the netlink policy.
>- Adjusting macsec_get_size.

4 patches then?

I mean really, not a macsec person, but I should be able to follow what
your intentions are by looking at the description & code right away.


>
>The handling in macsec_changlink is similar to

s/macsec_changlink/macsec_changelink/

>macsec_upd_offload.
>Update macsec_upd_offload to use a common helper function
>to avoid duplication.
>
>Example for setting offload for a macsec device
>    ip link set macsec0 type macsec offload mac
>
>Reviewed-by: Raed Salem <raeds@nvidia.com>
>Signed-off-by: Emeel Hakim <ehakim@nvidia.com>
>---
>V1 -> V2: Add common helper to avoid duplicating code
> drivers/net/macsec.c | 114 ++++++++++++++++++++++++++++---------------
> 1 file changed, 74 insertions(+), 40 deletions(-)
>
>diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
>index d73b9d535b7a..afd6ff47ba56 100644
>--- a/drivers/net/macsec.c
>+++ b/drivers/net/macsec.c
>@@ -2583,16 +2583,45 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
> 	return false;
> }
> 
>+static int macsec_update_offload(struct macsec_dev *macsec, enum macsec_offload offload)
>+{
>+	enum macsec_offload prev_offload;
>+	const struct macsec_ops *ops;
>+	struct macsec_context ctx;
>+	int ret = 0;
>+
>+	prev_offload = macsec->offload;
>+
>+	/* Check if the device already has rules configured: we do not support
>+	 * rules migration.
>+	 */
>+	if (macsec_is_configured(macsec))
>+		return -EBUSY;
>+
>+	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
>+			       macsec, &ctx);
>+	if (!ops)
>+		return -EOPNOTSUPP;
>+
>+	macsec->offload = offload;
>+
>+	ctx.secy = &macsec->secy;
>+	ret = (offload == MACSEC_OFFLOAD_OFF) ? macsec_offload(ops->mdo_del_secy, &ctx) :
>+		      macsec_offload(ops->mdo_add_secy, &ctx);
>+
>+	if (ret)
>+		macsec->offload = prev_offload;
>+
>+	return ret;
>+}
>+
> static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
> {
> 	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
>-	enum macsec_offload offload, prev_offload;
>-	int (*func)(struct macsec_context *ctx);
> 	struct nlattr **attrs = info->attrs;
>-	struct net_device *dev;
>-	const struct macsec_ops *ops;
>-	struct macsec_context ctx;
>+	enum macsec_offload offload;
> 	struct macsec_dev *macsec;
>+	struct net_device *dev;
> 	int ret;
> 
> 	if (!attrs[MACSEC_ATTR_IFINDEX])
>@@ -2629,39 +2658,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
> 
> 	rtnl_lock();
> 
>-	prev_offload = macsec->offload;
>-	macsec->offload = offload;
>-
>-	/* Check if the device already has rules configured: we do not support
>-	 * rules migration.
>-	 */
>-	if (macsec_is_configured(macsec)) {
>-		ret = -EBUSY;
>-		goto rollback;
>-	}
>-
>-	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
>-			       macsec, &ctx);
>-	if (!ops) {
>-		ret = -EOPNOTSUPP;
>-		goto rollback;
>-	}
>-
>-	if (prev_offload == MACSEC_OFFLOAD_OFF)
>-		func = ops->mdo_add_secy;
>-	else
>-		func = ops->mdo_del_secy;
>-
>-	ctx.secy = &macsec->secy;
>-	ret = macsec_offload(func, &ctx);
>-	if (ret)
>-		goto rollback;
>-
>-	rtnl_unlock();
>-	return 0;
>-
>-rollback:
>-	macsec->offload = prev_offload;
>+	ret = macsec_update_offload(macsec, offload);
> 
> 	rtnl_unlock();
> 	return ret;
>@@ -3698,6 +3695,7 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
> 	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
> 	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
> 	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
>+	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
> };
> 
> static void macsec_free_netdev(struct net_device *dev)
>@@ -3803,6 +3801,29 @@ static int macsec_changelink_common(struct net_device *dev,
> 	return 0;
> }
> 
>+static int macsec_changelink_upd_offload(struct net_device *dev, struct nlattr *data[])
>+{
>+	enum macsec_offload offload;
>+	struct macsec_dev *macsec;
>+
>+	macsec = macsec_priv(dev);
>+	offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
>+
>+	if (macsec->offload == offload)
>+		return 0;
>+
>+	/* Check if the offloading mode is supported by the underlying layers */
>+	if (offload != MACSEC_OFFLOAD_OFF &&
>+	    !macsec_check_offload(offload, macsec))
>+		return -EOPNOTSUPP;
>+
>+	/* Check if the net device is busy. */
>+	if (netif_running(dev))
>+		return -EBUSY;
>+
>+	return macsec_update_offload(macsec, offload);
>+}
>+
> static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
> 			     struct nlattr *data[],
> 			     struct netlink_ext_ack *extack)
>@@ -3831,6 +3852,12 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
> 	if (ret)
> 		goto cleanup;
> 
>+	if (data[IFLA_MACSEC_OFFLOAD]) {
>+		ret = macsec_changelink_upd_offload(dev, data);
>+		if (ret)
>+			goto cleanup;
>+	}
>+
> 	/* If h/w offloading is available, propagate to the device */
> 	if (macsec_is_offloaded(macsec)) {
> 		const struct macsec_ops *ops;
>@@ -4231,16 +4258,22 @@ static size_t macsec_get_size(const struct net_device *dev)
> 		nla_total_size(1) + /* IFLA_MACSEC_SCB */
> 		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
> 		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
>+		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
> 		0;
> }
> 
> static int macsec_fill_info(struct sk_buff *skb,
> 			    const struct net_device *dev)
> {
>-	struct macsec_secy *secy = &macsec_priv(dev)->secy;
>-	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
>+	struct macsec_tx_sc *tx_sc;
>+	struct macsec_dev *macsec;
>+	struct macsec_secy *secy;
> 	u64 csid;
> 
>+	macsec = macsec_priv(dev);
>+	secy = &macsec->secy;
>+	tx_sc = &secy->tx_sc;
>+
> 	switch (secy->key_len) {
> 	case MACSEC_GCM_AES_128_SAK_LEN:
> 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
>@@ -4265,6 +4298,7 @@ static int macsec_fill_info(struct sk_buff *skb,
> 	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
> 	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
> 	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
>+	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
> 	    0)
> 		goto nla_put_failure;
> 
>-- 
>2.21.3
>
Emeel Hakim Dec. 6, 2022, 12:31 p.m. UTC | #2
> -----Original Message-----
> From: Jiri Pirko <jiri@resnulli.us>
> Sent: Tuesday, 6 December 2022 11:16
> To: Emeel Hakim <ehakim@nvidia.com>
> Cc: linux-kernel@vger.kernel.org; Raed Salem <raeds@nvidia.com>;
> davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; netdev@vger.kernel.org; sd@queasysnail.net
> Subject: Re: [PATCH net-next v2] macsec: Add support for IFLA_MACSEC_OFFLOAD
> in the netlink layer
> 
> External email: Use caution opening links or attachments
> 
> 
> Tue, Dec 06, 2022 at 09:57:57AM CET, ehakim@nvidia.com wrote:
> >From: Emeel Hakim <ehakim@nvidia.com>
> >
> >This adds support for configuring Macsec offload through the
> 
> Tell the codebase what to do. Be imperative in your patch descriptions so it is clear
> what are the intensions of the patch.

Ack

> 
> 
> >netlink layer by:
> >- Considering IFLA_MACSEC_OFFLOAD in macsec_fill_info.
> >- Handling IFLA_MACSEC_OFFLOAD in macsec_changelink.
> >- Adding IFLA_MACSEC_OFFLOAD to the netlink policy.
> >- Adjusting macsec_get_size.
> 
> 4 patches then?

Ack, I will change the commit message to be imperative and will replace the list with a good description.
I still believe it should be one patch since splitting this could break a bisect process.

> I mean really, not a macsec person, but I should be able to follow what are your
> intensions looking and description&code right away.
> 
> >
> >The handling in macsec_changlink is similar to
> 
> s/macsec_changlink/macsec_changelink/

Ack

> >macsec_upd_offload.
> >Update macsec_upd_offload to use a common helper function to avoid
> >duplication.
> >
> >Example for setting offload for a macsec device
> >    ip link set macsec0 type macsec offload mac
> >
> >Reviewed-by: Raed Salem <raeds@nvidia.com>
> >Signed-off-by: Emeel Hakim <ehakim@nvidia.com>
> >---
> >V1 -> V2: Add common helper to avoid duplicating code
> >drivers/net/macsec.c | 114 ++++++++++++++++++++++++++++---------------
> > 1 file changed, 74 insertions(+), 40 deletions(-)
> >
> >diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index
> >d73b9d535b7a..afd6ff47ba56 100644
> >--- a/drivers/net/macsec.c
> >+++ b/drivers/net/macsec.c
> >@@ -2583,16 +2583,45 @@ static bool macsec_is_configured(struct macsec_dev
> *macsec)
> >       return false;
> > }
> >
> >+static int macsec_update_offload(struct macsec_dev *macsec, enum
> >+macsec_offload offload) {
> >+      enum macsec_offload prev_offload;
> >+      const struct macsec_ops *ops;
> >+      struct macsec_context ctx;
> >+      int ret = 0;
> >+
> >+      prev_offload = macsec->offload;
> >+
> >+      /* Check if the device already has rules configured: we do not support
> >+       * rules migration.
> >+       */
> >+      if (macsec_is_configured(macsec))
> >+              return -EBUSY;
> >+
> >+      ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload :
> offload,
> >+                             macsec, &ctx);
> >+      if (!ops)
> >+              return -EOPNOTSUPP;
> >+
> >+      macsec->offload = offload;
> >+
> >+      ctx.secy = &macsec->secy;
> >+      ret = (offload == MACSEC_OFFLOAD_OFF) ? macsec_offload(ops-
> >mdo_del_secy, &ctx) :
> >+                    macsec_offload(ops->mdo_add_secy, &ctx);
> >+
> >+      if (ret)
> >+              macsec->offload = prev_offload;
> >+
> >+      return ret;
> >+}
> >+
> > static int macsec_upd_offload(struct sk_buff *skb, struct genl_info
> >*info)  {
> >       struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
> >-      enum macsec_offload offload, prev_offload;
> >-      int (*func)(struct macsec_context *ctx);
> >       struct nlattr **attrs = info->attrs;
> >-      struct net_device *dev;
> >-      const struct macsec_ops *ops;
> >-      struct macsec_context ctx;
> >+      enum macsec_offload offload;
> >       struct macsec_dev *macsec;
> >+      struct net_device *dev;
> >       int ret;
> >
> >       if (!attrs[MACSEC_ATTR_IFINDEX]) @@ -2629,39 +2658,7 @@ static
> >int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
> >
> >       rtnl_lock();
> >
> >-      prev_offload = macsec->offload;
> >-      macsec->offload = offload;
> >-
> >-      /* Check if the device already has rules configured: we do not support
> >-       * rules migration.
> >-       */
> >-      if (macsec_is_configured(macsec)) {
> >-              ret = -EBUSY;
> >-              goto rollback;
> >-      }
> >-
> >-      ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload :
> offload,
> >-                             macsec, &ctx);
> >-      if (!ops) {
> >-              ret = -EOPNOTSUPP;
> >-              goto rollback;
> >-      }
> >-
> >-      if (prev_offload == MACSEC_OFFLOAD_OFF)
> >-              func = ops->mdo_add_secy;
> >-      else
> >-              func = ops->mdo_del_secy;
> >-
> >-      ctx.secy = &macsec->secy;
> >-      ret = macsec_offload(func, &ctx);
> >-      if (ret)
> >-              goto rollback;
> >-
> >-      rtnl_unlock();
> >-      return 0;
> >-
> >-rollback:
> >-      macsec->offload = prev_offload;
> >+      ret = macsec_update_offload(macsec, offload);
> >
> >       rtnl_unlock();
> >       return ret;
> >@@ -3698,6 +3695,7 @@ static const struct nla_policy
> macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
> >       [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
> >       [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
> >       [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
> >+      [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
> > };
> >
> > static void macsec_free_netdev(struct net_device *dev) @@ -3803,6
> >+3801,29 @@ static int macsec_changelink_common(struct net_device *dev,
> >       return 0;
> > }
> >
> >+static int macsec_changelink_upd_offload(struct net_device *dev,
> >+struct nlattr *data[]) {
> >+      enum macsec_offload offload;
> >+      struct macsec_dev *macsec;
> >+
> >+      macsec = macsec_priv(dev);
> >+      offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
> >+
> >+      if (macsec->offload == offload)
> >+              return 0;
> >+
> >+      /* Check if the offloading mode is supported by the underlying layers */
> >+      if (offload != MACSEC_OFFLOAD_OFF &&
> >+          !macsec_check_offload(offload, macsec))
> >+              return -EOPNOTSUPP;
> >+
> >+      /* Check if the net device is busy. */
> >+      if (netif_running(dev))
> >+              return -EBUSY;
> >+
> >+      return macsec_update_offload(macsec, offload); }
> >+
> > static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
> >                            struct nlattr *data[],
> >                            struct netlink_ext_ack *extack) @@ -3831,6
> >+3852,12 @@ static int macsec_changelink(struct net_device *dev, struct nlattr
> *tb[],
> >       if (ret)
> >               goto cleanup;
> >
> >+      if (data[IFLA_MACSEC_OFFLOAD]) {
> >+              ret = macsec_changelink_upd_offload(dev, data);
> >+              if (ret)
> >+                      goto cleanup;
> >+      }
> >+
> >       /* If h/w offloading is available, propagate to the device */
> >       if (macsec_is_offloaded(macsec)) {
> >               const struct macsec_ops *ops; @@ -4231,16 +4258,22 @@
> >static size_t macsec_get_size(const struct net_device *dev)
> >               nla_total_size(1) + /* IFLA_MACSEC_SCB */
> >               nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
> >               nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
> >+              nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
> >               0;
> > }
> >
> > static int macsec_fill_info(struct sk_buff *skb,
> >                           const struct net_device *dev)  {
> >-      struct macsec_secy *secy = &macsec_priv(dev)->secy;
> >-      struct macsec_tx_sc *tx_sc = &secy->tx_sc;
> >+      struct macsec_tx_sc *tx_sc;
> >+      struct macsec_dev *macsec;
> >+      struct macsec_secy *secy;
> >       u64 csid;
> >
> >+      macsec = macsec_priv(dev);
> >+      secy = &macsec->secy;
> >+      tx_sc = &secy->tx_sc;
> >+
> >       switch (secy->key_len) {
> >       case MACSEC_GCM_AES_128_SAK_LEN:
> >               csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 :
> >MACSEC_DEFAULT_CIPHER_ID; @@ -4265,6 +4298,7 @@ static int
> macsec_fill_info(struct sk_buff *skb,
> >           nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
> >           nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
> >           nla_put_u8(skb, IFLA_MACSEC_VALIDATION,
> >secy->validate_frames) ||
> >+          nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
> >           0)
> >               goto nla_put_failure;
> >
> >--
> >2.21.3
> >
Jiri Pirko Dec. 6, 2022, 1:35 p.m. UTC | #3
Tue, Dec 06, 2022 at 01:31:54PM CET, ehakim@nvidia.com wrote:
>
>
>> -----Original Message-----
>> From: Jiri Pirko <jiri@resnulli.us>
>> Sent: Tuesday, 6 December 2022 11:16
>> To: Emeel Hakim <ehakim@nvidia.com>
>> Cc: linux-kernel@vger.kernel.org; Raed Salem <raeds@nvidia.com>;
>> davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
>> pabeni@redhat.com; netdev@vger.kernel.org; sd@queasysnail.net
>> Subject: Re: [PATCH net-next v2] macsec: Add support for IFLA_MACSEC_OFFLOAD
>> in the netlink layer
>> 
>> External email: Use caution opening links or attachments
>> 
>> 
>> Tue, Dec 06, 2022 at 09:57:57AM CET, ehakim@nvidia.com wrote:
>> >From: Emeel Hakim <ehakim@nvidia.com>
>> >
>> >This adds support for configuring Macsec offload through the
>> 
>> Tell the codebase what to do. Be imperative in your patch descriptions so it is clear
>> what are the intensions of the patch.
>
>Ack
>
>> 
>> 
>> >netlink layer by:
>> >- Considering IFLA_MACSEC_OFFLOAD in macsec_fill_info.
>> >- Handling IFLA_MACSEC_OFFLOAD in macsec_changelink.
>> >- Adding IFLA_MACSEC_OFFLOAD to the netlink policy.
>> >- Adjusting macsec_get_size.
>> 
>> 4 patches then?
>
>Ack, I will change the commit message to be imperative and will replace the list with a good description.
>I still believe it should be a one patch since splitting this could break a bisect process.

Well, when you split, you have to make sure you don't break bisection,
always. Please try to figure that out.


>
>> I mean really, not a macsec person, but I should be able to follow what are your
>> intensions looking and description&code right away.
>> 
>> >
>> >The handling in macsec_changlink is similar to
>> 
>> s/macsec_changlink/macsec_changelink/
>
>Ack
>
>> >macsec_upd_offload.
>> >Update macsec_upd_offload to use a common helper function to avoid
>> >duplication.
>> >
>> >Example for setting offload for a macsec device
>> >    ip link set macsec0 type macsec offload mac
>> >
>> >Reviewed-by: Raed Salem <raeds@nvidia.com>
>> >Signed-off-by: Emeel Hakim <ehakim@nvidia.com>
>> >---
>> >V1 -> V2: Add common helper to avoid duplicating code
>> >drivers/net/macsec.c | 114 ++++++++++++++++++++++++++++---------------
>> > 1 file changed, 74 insertions(+), 40 deletions(-)
>> >
>> >diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index
>> >d73b9d535b7a..afd6ff47ba56 100644
>> >--- a/drivers/net/macsec.c
>> >+++ b/drivers/net/macsec.c
>> >@@ -2583,16 +2583,45 @@ static bool macsec_is_configured(struct macsec_dev
>> *macsec)
>> >       return false;
>> > }
>> >
>> >+static int macsec_update_offload(struct macsec_dev *macsec, enum
>> >+macsec_offload offload) {
>> >+      enum macsec_offload prev_offload;
>> >+      const struct macsec_ops *ops;
>> >+      struct macsec_context ctx;
>> >+      int ret = 0;
>> >+
>> >+      prev_offload = macsec->offload;
>> >+
>> >+      /* Check if the device already has rules configured: we do not support
>> >+       * rules migration.
>> >+       */
>> >+      if (macsec_is_configured(macsec))
>> >+              return -EBUSY;
>> >+
>> >+      ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload :
>> offload,
>> >+                             macsec, &ctx);
>> >+      if (!ops)
>> >+              return -EOPNOTSUPP;
>> >+
>> >+      macsec->offload = offload;
>> >+
>> >+      ctx.secy = &macsec->secy;
>> >+      ret = (offload == MACSEC_OFFLOAD_OFF) ? macsec_offload(ops-
>> >mdo_del_secy, &ctx) :
>> >+                    macsec_offload(ops->mdo_add_secy, &ctx);
>> >+
>> >+      if (ret)
>> >+              macsec->offload = prev_offload;
>> >+
>> >+      return ret;
>> >+}
>> >+
>> > static int macsec_upd_offload(struct sk_buff *skb, struct genl_info
>> >*info)  {
>> >       struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
>> >-      enum macsec_offload offload, prev_offload;
>> >-      int (*func)(struct macsec_context *ctx);
>> >       struct nlattr **attrs = info->attrs;
>> >-      struct net_device *dev;
>> >-      const struct macsec_ops *ops;
>> >-      struct macsec_context ctx;
>> >+      enum macsec_offload offload;
>> >       struct macsec_dev *macsec;
>> >+      struct net_device *dev;
>> >       int ret;
>> >
>> >       if (!attrs[MACSEC_ATTR_IFINDEX]) @@ -2629,39 +2658,7 @@ static
>> >int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
>> >
>> >       rtnl_lock();
>> >
>> >-      prev_offload = macsec->offload;
>> >-      macsec->offload = offload;
>> >-
>> >-      /* Check if the device already has rules configured: we do not support
>> >-       * rules migration.
>> >-       */
>> >-      if (macsec_is_configured(macsec)) {
>> >-              ret = -EBUSY;
>> >-              goto rollback;
>> >-      }
>> >-
>> >-      ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload :
>> offload,
>> >-                             macsec, &ctx);
>> >-      if (!ops) {
>> >-              ret = -EOPNOTSUPP;
>> >-              goto rollback;
>> >-      }
>> >-
>> >-      if (prev_offload == MACSEC_OFFLOAD_OFF)
>> >-              func = ops->mdo_add_secy;
>> >-      else
>> >-              func = ops->mdo_del_secy;
>> >-
>> >-      ctx.secy = &macsec->secy;
>> >-      ret = macsec_offload(func, &ctx);
>> >-      if (ret)
>> >-              goto rollback;
>> >-
>> >-      rtnl_unlock();
>> >-      return 0;
>> >-
>> >-rollback:
>> >-      macsec->offload = prev_offload;
>> >+      ret = macsec_update_offload(macsec, offload);
>> >
>> >       rtnl_unlock();
>> >       return ret;
>> >@@ -3698,6 +3695,7 @@ static const struct nla_policy
>> macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
>> >       [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
>> >       [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
>> >       [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
>> >+      [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
>> > };
>> >
>> > static void macsec_free_netdev(struct net_device *dev) @@ -3803,6
>> >+3801,29 @@ static int macsec_changelink_common(struct net_device *dev,
>> >       return 0;
>> > }
>> >
>> >+static int macsec_changelink_upd_offload(struct net_device *dev,
>> >+struct nlattr *data[]) {
>> >+      enum macsec_offload offload;
>> >+      struct macsec_dev *macsec;
>> >+
>> >+      macsec = macsec_priv(dev);
>> >+      offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
>> >+
>> >+      if (macsec->offload == offload)
>> >+              return 0;
>> >+
>> >+      /* Check if the offloading mode is supported by the underlying layers */
>> >+      if (offload != MACSEC_OFFLOAD_OFF &&
>> >+          !macsec_check_offload(offload, macsec))
>> >+              return -EOPNOTSUPP;
>> >+
>> >+      /* Check if the net device is busy. */
>> >+      if (netif_running(dev))
>> >+              return -EBUSY;
>> >+
>> >+      return macsec_update_offload(macsec, offload); }
>> >+
>> > static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
>> >                            struct nlattr *data[],
>> >                            struct netlink_ext_ack *extack) @@ -3831,6
>> >+3852,12 @@ static int macsec_changelink(struct net_device *dev, struct nlattr
>> *tb[],
>> >       if (ret)
>> >               goto cleanup;
>> >
>> >+      if (data[IFLA_MACSEC_OFFLOAD]) {
>> >+              ret = macsec_changelink_upd_offload(dev, data);
>> >+              if (ret)
>> >+                      goto cleanup;
>> >+      }
>> >+
>> >       /* If h/w offloading is available, propagate to the device */
>> >       if (macsec_is_offloaded(macsec)) {
>> >               const struct macsec_ops *ops; @@ -4231,16 +4258,22 @@
>> >static size_t macsec_get_size(const struct net_device *dev)
>> >               nla_total_size(1) + /* IFLA_MACSEC_SCB */
>> >               nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
>> >               nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
>> >+              nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
>> >               0;
>> > }
>> >
>> > static int macsec_fill_info(struct sk_buff *skb,
>> >                           const struct net_device *dev)  {
>> >-      struct macsec_secy *secy = &macsec_priv(dev)->secy;
>> >-      struct macsec_tx_sc *tx_sc = &secy->tx_sc;
>> >+      struct macsec_tx_sc *tx_sc;
>> >+      struct macsec_dev *macsec;
>> >+      struct macsec_secy *secy;
>> >       u64 csid;
>> >
>> >+      macsec = macsec_priv(dev);
>> >+      secy = &macsec->secy;
>> >+      tx_sc = &secy->tx_sc;
>> >+
>> >       switch (secy->key_len) {
>> >       case MACSEC_GCM_AES_128_SAK_LEN:
>> >               csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 :
>> >MACSEC_DEFAULT_CIPHER_ID; @@ -4265,6 +4298,7 @@ static int
>> macsec_fill_info(struct sk_buff *skb,
>> >           nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
>> >           nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
>> >           nla_put_u8(skb, IFLA_MACSEC_VALIDATION,
>> >secy->validate_frames) ||
>> >+          nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
>> >           0)
>> >               goto nla_put_failure;
>> >
>> >--
>> >2.21.3
>> >
Sabrina Dubroca Dec. 6, 2022, 4:10 p.m. UTC | #4
2022-12-06, 14:35:23 +0100, Jiri Pirko wrote:
> Tue, Dec 06, 2022 at 01:31:54PM CET, ehakim@nvidia.com wrote:
> >> Tue, Dec 06, 2022 at 09:57:57AM CET, ehakim@nvidia.com wrote:
> >> >From: Emeel Hakim <ehakim@nvidia.com>
> >> >
> >> >This adds support for configuring Macsec offload through the
> >> 
> >> Tell the codebase what to do. Be imperative in your patch descriptions so it is clear
> >> what are the intensions of the patch.
> >
> >Ack
> >
> >> 
> >> 
> >> >netlink layer by:
> >> >- Considering IFLA_MACSEC_OFFLOAD in macsec_fill_info.
> >> >- Handling IFLA_MACSEC_OFFLOAD in macsec_changelink.
> >> >- Adding IFLA_MACSEC_OFFLOAD to the netlink policy.
> >> >- Adjusting macsec_get_size.
> >> 
> >> 4 patches then?
> >
> >Ack, I will change the commit message to be imperative and will replace the list with a good description.
> >I still believe it should be a one patch since splitting this could break a bisect process.
> 
> Well, when you split, you have to make sure you don't break bisection,
> always. Please try to figure that out.

I think this can be split pretty nicely into 3 patches:
 - add IFLA_MACSEC_OFFLOAD to macsec_rtnl_policy (probably for net
   with a Fixes tag on the commit that introduced IFLA_MACSEC_OFFLOAD)
 - add offload to macsec_fill_info/macsec_get_size
 - add IFLA_MACSEC_OFFLOAD support to changelink

The subject of the last patch should also make it clear that it's only
adding IFLA_MACSEC_OFFLOAD to changelink. As it's written, someone
could assume there's no support at all via rtnl ops and wonder why
this patch isn't doing anything to newlink, and whether/why this
IFLA_MACSEC_OFFLOAD already exists.
Emeel Hakim Dec. 6, 2022, 9:33 p.m. UTC | #5
> -----Original Message-----
> From: Sabrina Dubroca <sd@queasysnail.net>
> Sent: Tuesday, 6 December 2022 18:11
> To: Jiri Pirko <jiri@resnulli.us>
> Cc: Emeel Hakim <ehakim@nvidia.com>; linux-kernel@vger.kernel.org; Raed
> Salem <raeds@nvidia.com>; davem@davemloft.net; edumazet@google.com;
> kuba@kernel.org; pabeni@redhat.com; netdev@vger.kernel.org
> Subject: Re: [PATCH net-next v2] macsec: Add support for IFLA_MACSEC_OFFLOAD
> in the netlink layer
> 
> External email: Use caution opening links or attachments
> 
> 
> 2022-12-06, 14:35:23 +0100, Jiri Pirko wrote:
> > Tue, Dec 06, 2022 at 01:31:54PM CET, ehakim@nvidia.com wrote:
> > >> Tue, Dec 06, 2022 at 09:57:57AM CET, ehakim@nvidia.com wrote:
> > >> >From: Emeel Hakim <ehakim@nvidia.com>
> > >> >
> > >> >This adds support for configuring Macsec offload through the
> > >>
> > >> Tell the codebase what to do. Be imperative in your patch
> > >> descriptions so it is clear what are the intensions of the patch.
> > >
> > >Ack
> > >
> > >>
> > >>
> > >> >netlink layer by:
> > >> >- Considering IFLA_MACSEC_OFFLOAD in macsec_fill_info.
> > >> >- Handling IFLA_MACSEC_OFFLOAD in macsec_changelink.
> > >> >- Adding IFLA_MACSEC_OFFLOAD to the netlink policy.
> > >> >- Adjusting macsec_get_size.
> > >>
> > >> 4 patches then?
> > >
> > >Ack, I will change the commit message to be imperative and will replace the list
> with a good description.
> > >I still believe it should be a one patch since splitting this could break a bisect
> process.
> >
> > Well, when you split, you have to make sure you don't break bisection,
> > always. Please try to figure that out.
> 
> I think this can be split pretty nicely into 3 patches:
>  - add IFLA_MACSEC_OFFLOAD to macsec_rtnl_policy (probably for net
>    with a Fixes tag on the commit that introduced IFLA_MACSEC_OFFLOAD)
>  - add offload to macsec_fill_info/macsec_get_size
>  - add IFLA_MACSEC_OFFLOAD support to changelink
> 
> The subject of the last patch should also make it clear that it's only adding
> IFLA_MACSEC_OFFLOAD to changelink. As it's written, someone could assume
> there's no support at all via rtnl ops and wonder why this patch isn't doing anything
> to newlink, and whether/why this IFLA_MACSEC_OFFLOAD already exists.

Ack, I will split the patch and send the new patches.

> --
> Sabrina
diff mbox series

Patch

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index d73b9d535b7a..afd6ff47ba56 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2583,16 +2583,45 @@  static bool macsec_is_configured(struct macsec_dev *macsec)
 	return false;
 }
 
+static int macsec_update_offload(struct macsec_dev *macsec, enum macsec_offload offload)
+{
+	enum macsec_offload prev_offload;
+	const struct macsec_ops *ops;
+	struct macsec_context ctx;
+	int ret = 0;
+
+	prev_offload = macsec->offload;
+
+	/* Check if the device already has rules configured: we do not support
+	 * rules migration.
+	 */
+	if (macsec_is_configured(macsec))
+		return -EBUSY;
+
+	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
+			       macsec, &ctx);
+	if (!ops)
+		return -EOPNOTSUPP;
+
+	macsec->offload = offload;
+
+	ctx.secy = &macsec->secy;
+	ret = (offload == MACSEC_OFFLOAD_OFF) ? macsec_offload(ops->mdo_del_secy, &ctx) :
+		      macsec_offload(ops->mdo_add_secy, &ctx);
+
+	if (ret)
+		macsec->offload = prev_offload;
+
+	return ret;
+}
+
 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
-	enum macsec_offload offload, prev_offload;
-	int (*func)(struct macsec_context *ctx);
 	struct nlattr **attrs = info->attrs;
-	struct net_device *dev;
-	const struct macsec_ops *ops;
-	struct macsec_context ctx;
+	enum macsec_offload offload;
 	struct macsec_dev *macsec;
+	struct net_device *dev;
 	int ret;
 
 	if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -2629,39 +2658,7 @@  static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
 
 	rtnl_lock();
 
-	prev_offload = macsec->offload;
-	macsec->offload = offload;
-
-	/* Check if the device already has rules configured: we do not support
-	 * rules migration.
-	 */
-	if (macsec_is_configured(macsec)) {
-		ret = -EBUSY;
-		goto rollback;
-	}
-
-	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
-			       macsec, &ctx);
-	if (!ops) {
-		ret = -EOPNOTSUPP;
-		goto rollback;
-	}
-
-	if (prev_offload == MACSEC_OFFLOAD_OFF)
-		func = ops->mdo_add_secy;
-	else
-		func = ops->mdo_del_secy;
-
-	ctx.secy = &macsec->secy;
-	ret = macsec_offload(func, &ctx);
-	if (ret)
-		goto rollback;
-
-	rtnl_unlock();
-	return 0;
-
-rollback:
-	macsec->offload = prev_offload;
+	ret = macsec_update_offload(macsec, offload);
 
 	rtnl_unlock();
 	return ret;
@@ -3698,6 +3695,7 @@  static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
 	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
 	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
 	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
+	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
 };
 
 static void macsec_free_netdev(struct net_device *dev)
@@ -3803,6 +3801,29 @@  static int macsec_changelink_common(struct net_device *dev,
 	return 0;
 }
 
+static int macsec_changelink_upd_offload(struct net_device *dev, struct nlattr *data[])
+{
+	enum macsec_offload offload;
+	struct macsec_dev *macsec;
+
+	macsec = macsec_priv(dev);
+	offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
+
+	if (macsec->offload == offload)
+		return 0;
+
+	/* Check if the offloading mode is supported by the underlying layers */
+	if (offload != MACSEC_OFFLOAD_OFF &&
+	    !macsec_check_offload(offload, macsec))
+		return -EOPNOTSUPP;
+
+	/* Check if the net device is busy. */
+	if (netif_running(dev))
+		return -EBUSY;
+
+	return macsec_update_offload(macsec, offload);
+}
+
 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 			     struct nlattr *data[],
 			     struct netlink_ext_ack *extack)
@@ -3831,6 +3852,12 @@  static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 	if (ret)
 		goto cleanup;
 
+	if (data[IFLA_MACSEC_OFFLOAD]) {
+		ret = macsec_changelink_upd_offload(dev, data);
+		if (ret)
+			goto cleanup;
+	}
+
 	/* If h/w offloading is available, propagate to the device */
 	if (macsec_is_offloaded(macsec)) {
 		const struct macsec_ops *ops;
@@ -4231,16 +4258,22 @@  static size_t macsec_get_size(const struct net_device *dev)
 		nla_total_size(1) + /* IFLA_MACSEC_SCB */
 		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
 		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
+		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
 		0;
 }
 
 static int macsec_fill_info(struct sk_buff *skb,
 			    const struct net_device *dev)
 {
-	struct macsec_secy *secy = &macsec_priv(dev)->secy;
-	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+	struct macsec_tx_sc *tx_sc;
+	struct macsec_dev *macsec;
+	struct macsec_secy *secy;
 	u64 csid;
 
+	macsec = macsec_priv(dev);
+	secy = &macsec->secy;
+	tx_sc = &secy->tx_sc;
+
 	switch (secy->key_len) {
 	case MACSEC_GCM_AES_128_SAK_LEN:
 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
@@ -4265,6 +4298,7 @@  static int macsec_fill_info(struct sk_buff *skb,
 	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
 	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
 	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
+	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
 	    0)
 		goto nla_put_failure;