diff mbox series

[net-next,3/5] gve: Add flow steering device option

Message ID 20240507225945.1408516-4-ziweixiao@google.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Headers show
Series gve: Add flow steering support | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 927 this patch: 927
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 11 of 11 maintainers
netdev/build_clang success Errors and warnings before: 942 this patch: 942
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 938 this patch: 938
netdev/checkpatch warning WARNING: line length of 82 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns WARNING: line length of 89 exceeds 80 columns WARNING: line length of 91 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Ziwei Xiao May 7, 2024, 10:59 p.m. UTC
From: Jeroen de Borst <jeroendb@google.com>

Add a new device option to signal to the driver that the device supports
flow steering. This device option also carries the maximum number of
flow steering rules that the device can store.

Signed-off-by: Jeroen de Borst <jeroendb@google.com>
Co-developed-by: Ziwei Xiao <ziweixiao@google.com>
Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
---
 drivers/net/ethernet/google/gve/gve.h        |  2 +
 drivers/net/ethernet/google/gve/gve_adminq.c | 42 ++++++++++++++++++--
 drivers/net/ethernet/google/gve/gve_adminq.h | 11 +++++
 3 files changed, 51 insertions(+), 4 deletions(-)

Comments

David Wei May 8, 2024, 5:32 a.m. UTC | #1
On 2024-05-07 15:59, Ziwei Xiao wrote:
> From: Jeroen de Borst <jeroendb@google.com>
> 
> Add a new device option to signal to the driver that the device supports
> flow steering. This device option also carries the maximum number of
> flow steering rules that the device can store.
> 
> Signed-off-by: Jeroen de Borst <jeroendb@google.com>
> Co-developed-by: Ziwei Xiao <ziweixiao@google.com>
> Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
> Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
> Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
> Reviewed-by: Willem de Bruijn <willemb@google.com>
> ---
>  drivers/net/ethernet/google/gve/gve.h        |  2 +
>  drivers/net/ethernet/google/gve/gve_adminq.c | 42 ++++++++++++++++++--
>  drivers/net/ethernet/google/gve/gve_adminq.h | 11 +++++
>  3 files changed, 51 insertions(+), 4 deletions(-)

Think something went wrong here. The title is different but patch is
same as 2/5.

> 
> diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
> index ca7fce17f2c0..58213c15e084 100644
> --- a/drivers/net/ethernet/google/gve/gve.h
> +++ b/drivers/net/ethernet/google/gve/gve.h
> @@ -786,6 +786,8 @@ struct gve_priv {
>  
>  	u16 header_buf_size; /* device configured, header-split supported if non-zero */
>  	bool header_split_enabled; /* True if the header split is enabled by the user */
> +
> +	u32 max_flow_rules;
>  };
>  
>  enum gve_service_task_flags_bit {
> diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
> index 514641b3ccc7..85d0d742ad21 100644
> --- a/drivers/net/ethernet/google/gve/gve_adminq.c
> +++ b/drivers/net/ethernet/google/gve/gve_adminq.c
> @@ -44,6 +44,7 @@ void gve_parse_device_option(struct gve_priv *priv,
>  			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
>  			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
>  			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
> +			     struct gve_device_option_flow_steering **dev_op_flow_steering,
>  			     struct gve_device_option_modify_ring **dev_op_modify_ring)
>  {
>  	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
> @@ -189,6 +190,23 @@ void gve_parse_device_option(struct gve_priv *priv,
>  		if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
>  			priv->default_min_ring_size = true;
>  		break;
> +	case GVE_DEV_OPT_ID_FLOW_STEERING:
> +		if (option_length < sizeof(**dev_op_flow_steering) ||
> +		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) {
> +			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
> +				 "Flow Steering",
> +				 (int)sizeof(**dev_op_flow_steering),
> +				 GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING,
> +				 option_length, req_feat_mask);
> +			break;
> +		}
> +
> +		if (option_length > sizeof(**dev_op_flow_steering))
> +			dev_warn(&priv->pdev->dev,
> +				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
> +				 "Flow Steering");
> +		*dev_op_flow_steering = (void *)(option + 1);
> +		break;
>  	default:
>  		/* If we don't recognize the option just continue
>  		 * without doing anything.
> @@ -208,6 +226,7 @@ gve_process_device_options(struct gve_priv *priv,
>  			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
>  			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
>  			   struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
> +			   struct gve_device_option_flow_steering **dev_op_flow_steering,
>  			   struct gve_device_option_modify_ring **dev_op_modify_ring)
>  {
>  	const int num_options = be16_to_cpu(descriptor->num_device_options);
> @@ -230,7 +249,7 @@ gve_process_device_options(struct gve_priv *priv,
>  					dev_op_gqi_rda, dev_op_gqi_qpl,
>  					dev_op_dqo_rda, dev_op_jumbo_frames,
>  					dev_op_dqo_qpl, dev_op_buffer_sizes,
> -					dev_op_modify_ring);
> +					dev_op_flow_steering, dev_op_modify_ring);
>  		dev_opt = next_opt;
>  	}
>  
> @@ -838,6 +857,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
>  					  *dev_op_dqo_qpl,
>  					  const struct gve_device_option_buffer_sizes
>  					  *dev_op_buffer_sizes,
> +					  const struct gve_device_option_flow_steering
> +					  *dev_op_flow_steering,
>  					  const struct gve_device_option_modify_ring
>  					  *dev_op_modify_ring)
>  {
> @@ -890,10 +911,22 @@ static void gve_enable_supported_features(struct gve_priv *priv,
>  			priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
>  		}
>  	}
> +
> +	if (dev_op_flow_steering &&
> +	    (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
> +		if (dev_op_flow_steering->max_flow_rules) {
> +			priv->max_flow_rules =
> +				be32_to_cpu(dev_op_flow_steering->max_flow_rules);
> +			dev_info(&priv->pdev->dev,
> +				 "FLOW STEERING device option enabled with max rule limit of %u.\n",
> +				 priv->max_flow_rules);
> +		}
> +	}
>  }
>  
>  int gve_adminq_describe_device(struct gve_priv *priv)
>  {
> +	struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
>  	struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
>  	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
>  	struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
> @@ -930,6 +963,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
>  					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
>  					 &dev_op_jumbo_frames, &dev_op_dqo_qpl,
>  					 &dev_op_buffer_sizes,
> +					 &dev_op_flow_steering,
>  					 &dev_op_modify_ring);
>  	if (err)
>  		goto free_device_descriptor;
> @@ -969,9 +1003,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
>  	/* set default descriptor counts */
>  	gve_set_default_desc_cnt(priv, descriptor);
>  
> -	/* DQO supports LRO. */
>  	if (!gve_is_gqi(priv))
> -		priv->dev->hw_features |= NETIF_F_LRO;
> +		priv->dev->hw_features |= NETIF_F_LRO | NETIF_F_NTUPLE;
>  
>  	priv->max_registered_pages =
>  				be64_to_cpu(descriptor->max_registered_pages);
> @@ -991,7 +1024,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
>  
>  	gve_enable_supported_features(priv, supported_features_mask,
>  				      dev_op_jumbo_frames, dev_op_dqo_qpl,
> -				      dev_op_buffer_sizes, dev_op_modify_ring);
> +				      dev_op_buffer_sizes, dev_op_flow_steering,
> +				      dev_op_modify_ring);
>  
>  free_device_descriptor:
>  	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
> diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
> index e0370ace8397..e64a0e72e781 100644
> --- a/drivers/net/ethernet/google/gve/gve_adminq.h
> +++ b/drivers/net/ethernet/google/gve/gve_adminq.h
> @@ -146,6 +146,14 @@ struct gve_device_option_modify_ring {
>  
>  static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
>  
> +struct gve_device_option_flow_steering {
> +	__be32 supported_features_mask;
> +	__be32 reserved;
> +	__be32 max_flow_rules;
> +};
> +
> +static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
> +
>  /* Terminology:
>   *
>   * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
> @@ -163,6 +171,7 @@ enum gve_dev_opt_id {
>  	GVE_DEV_OPT_ID_DQO_QPL			= 0x7,
>  	GVE_DEV_OPT_ID_JUMBO_FRAMES		= 0x8,
>  	GVE_DEV_OPT_ID_BUFFER_SIZES		= 0xa,
> +	GVE_DEV_OPT_ID_FLOW_STEERING		= 0xb,
>  };
>  
>  enum gve_dev_opt_req_feat_mask {
> @@ -174,12 +183,14 @@ enum gve_dev_opt_req_feat_mask {
>  	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL		= 0x0,
>  	GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES		= 0x0,
>  	GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING		= 0x0,
> +	GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING		= 0x0,
>  };
>  
>  enum gve_sup_feature_mask {
>  	GVE_SUP_MODIFY_RING_MASK	= 1 << 0,
>  	GVE_SUP_JUMBO_FRAMES_MASK	= 1 << 2,
>  	GVE_SUP_BUFFER_SIZES_MASK	= 1 << 4,
> +	GVE_SUP_FLOW_STEERING_MASK	= 1 << 5,
>  };
>  
>  #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
David Wei May 8, 2024, 5:34 a.m. UTC | #2
On 2024-05-07 15:59, Ziwei Xiao wrote:
> From: Jeroen de Borst <jeroendb@google.com>
> 
> Add a new device option to signal to the driver that the device supports
> flow steering. This device option also carries the maximum number of
> flow steering rules that the device can store.

Other than superficial style choices, looks good.

> 
> Signed-off-by: Jeroen de Borst <jeroendb@google.com>
> Co-developed-by: Ziwei Xiao <ziweixiao@google.com>
> Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
> Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
> Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
> Reviewed-by: Willem de Bruijn <willemb@google.com>
> ---
>  drivers/net/ethernet/google/gve/gve.h        |  2 +
>  drivers/net/ethernet/google/gve/gve_adminq.c | 42 ++++++++++++++++++--
>  drivers/net/ethernet/google/gve/gve_adminq.h | 11 +++++
>  3 files changed, 51 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
> index ca7fce17f2c0..58213c15e084 100644
> --- a/drivers/net/ethernet/google/gve/gve.h
> +++ b/drivers/net/ethernet/google/gve/gve.h
> @@ -786,6 +786,8 @@ struct gve_priv {
>  
>  	u16 header_buf_size; /* device configured, header-split supported if non-zero */
>  	bool header_split_enabled; /* True if the header split is enabled by the user */
> +
> +	u32 max_flow_rules;

nit: this struct is lovingly documented, could we continue by adding a
one liner here maybe about how it's device configured?

>  };
>  
>  enum gve_service_task_flags_bit {
> diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
> index 514641b3ccc7..85d0d742ad21 100644
> --- a/drivers/net/ethernet/google/gve/gve_adminq.c
> +++ b/drivers/net/ethernet/google/gve/gve_adminq.c
> @@ -44,6 +44,7 @@ void gve_parse_device_option(struct gve_priv *priv,
>  			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
>  			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
>  			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
> +			     struct gve_device_option_flow_steering **dev_op_flow_steering,

nit: getting unwieldy here, is it time to pack into a struct?
Ziwei Xiao May 10, 2024, 12:17 a.m. UTC | #3
On Tue, May 7, 2024 at 10:34 PM David Wei <dw@davidwei.uk> wrote:
>
> On 2024-05-07 15:59, Ziwei Xiao wrote:
> > From: Jeroen de Borst <jeroendb@google.com>
> >
> > Add a new device option to signal to the driver that the device supports
> > flow steering. This device option also carries the maximum number of
> > flow steering rules that the device can store.
>
> Other than superficial style choices, looks good.
>
> >
> > Signed-off-by: Jeroen de Borst <jeroendb@google.com>
> > Co-developed-by: Ziwei Xiao <ziweixiao@google.com>
> > Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
> > Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
> > Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
> > Reviewed-by: Willem de Bruijn <willemb@google.com>
> > ---
> >  drivers/net/ethernet/google/gve/gve.h        |  2 +
> >  drivers/net/ethernet/google/gve/gve_adminq.c | 42 ++++++++++++++++++--
> >  drivers/net/ethernet/google/gve/gve_adminq.h | 11 +++++
> >  3 files changed, 51 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
> > index ca7fce17f2c0..58213c15e084 100644
> > --- a/drivers/net/ethernet/google/gve/gve.h
> > +++ b/drivers/net/ethernet/google/gve/gve.h
> > @@ -786,6 +786,8 @@ struct gve_priv {
> >
> >       u16 header_buf_size; /* device configured, header-split supported if non-zero */
> >       bool header_split_enabled; /* True if the header split is enabled by the user */
> > +
> > +     u32 max_flow_rules;
>
> nit: this struct is lovingly documented, could we continue by adding a
> one liner here maybe about how it's device configured?
>
Will add.

> >  };
> >
> >  enum gve_service_task_flags_bit {
> > diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
> > index 514641b3ccc7..85d0d742ad21 100644
> > --- a/drivers/net/ethernet/google/gve/gve_adminq.c
> > +++ b/drivers/net/ethernet/google/gve/gve_adminq.c
> > @@ -44,6 +44,7 @@ void gve_parse_device_option(struct gve_priv *priv,
> >                            struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
> >                            struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
> >                            struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
> > +                          struct gve_device_option_flow_steering **dev_op_flow_steering,
>
> nit: getting unwieldy here, is it time to pack into a struct?
Thank you for pointing this out! We have plans to improve this device
option handling, but the change may not be included in this patch series.
Ziwei Xiao May 10, 2024, 12:18 a.m. UTC | #4
On Tue, May 7, 2024 at 10:33 PM David Wei <dw@davidwei.uk> wrote:
>
> On 2024-05-07 15:59, Ziwei Xiao wrote:
> > From: Jeroen de Borst <jeroendb@google.com>
> >
> > Add a new device option to signal to the driver that the device supports
> > flow steering. This device option also carries the maximum number of
> > flow steering rules that the device can store.
> >
> > Signed-off-by: Jeroen de Borst <jeroendb@google.com>
> > Co-developed-by: Ziwei Xiao <ziweixiao@google.com>
> > Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
> > Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
> > Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
> > Reviewed-by: Willem de Bruijn <willemb@google.com>
> > ---
> >  drivers/net/ethernet/google/gve/gve.h        |  2 +
> >  drivers/net/ethernet/google/gve/gve_adminq.c | 42 ++++++++++++++++++--
> >  drivers/net/ethernet/google/gve/gve_adminq.h | 11 +++++
> >  3 files changed, 51 insertions(+), 4 deletions(-)
>
> Think something went wrong here. The title is different but patch is
> same as 2/5.
This is the patch for adding the device option (3/5), while the
previous patch you commented on is actually for adding the extended
adminq (2/5). I don't see anything wrong with these two patches. Maybe
the reply went to the wrong thread?

>
> >
> > diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
> > index ca7fce17f2c0..58213c15e084 100644
> > --- a/drivers/net/ethernet/google/gve/gve.h
> > +++ b/drivers/net/ethernet/google/gve/gve.h
> > @@ -786,6 +786,8 @@ struct gve_priv {
> >
> >       u16 header_buf_size; /* device configured, header-split supported if non-zero */
> >       bool header_split_enabled; /* True if the header split is enabled by the user */
> > +
> > +     u32 max_flow_rules;
> >  };
> >
> >  enum gve_service_task_flags_bit {
> > diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
> > index 514641b3ccc7..85d0d742ad21 100644
> > --- a/drivers/net/ethernet/google/gve/gve_adminq.c
> > +++ b/drivers/net/ethernet/google/gve/gve_adminq.c
> > @@ -44,6 +44,7 @@ void gve_parse_device_option(struct gve_priv *priv,
> >                            struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
> >                            struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
> >                            struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
> > +                          struct gve_device_option_flow_steering **dev_op_flow_steering,
> >                            struct gve_device_option_modify_ring **dev_op_modify_ring)
> >  {
> >       u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
> > @@ -189,6 +190,23 @@ void gve_parse_device_option(struct gve_priv *priv,
> >               if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
> >                       priv->default_min_ring_size = true;
> >               break;
> > +     case GVE_DEV_OPT_ID_FLOW_STEERING:
> > +             if (option_length < sizeof(**dev_op_flow_steering) ||
> > +                 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) {
> > +                     dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
> > +                              "Flow Steering",
> > +                              (int)sizeof(**dev_op_flow_steering),
> > +                              GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING,
> > +                              option_length, req_feat_mask);
> > +                     break;
> > +             }
> > +
> > +             if (option_length > sizeof(**dev_op_flow_steering))
> > +                     dev_warn(&priv->pdev->dev,
> > +                              GVE_DEVICE_OPTION_TOO_BIG_FMT,
> > +                              "Flow Steering");
> > +             *dev_op_flow_steering = (void *)(option + 1);
> > +             break;
> >       default:
> >               /* If we don't recognize the option just continue
> >                * without doing anything.
> > @@ -208,6 +226,7 @@ gve_process_device_options(struct gve_priv *priv,
> >                          struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
> >                          struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
> >                          struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
> > +                        struct gve_device_option_flow_steering **dev_op_flow_steering,
> >                          struct gve_device_option_modify_ring **dev_op_modify_ring)
> >  {
> >       const int num_options = be16_to_cpu(descriptor->num_device_options);
> > @@ -230,7 +249,7 @@ gve_process_device_options(struct gve_priv *priv,
> >                                       dev_op_gqi_rda, dev_op_gqi_qpl,
> >                                       dev_op_dqo_rda, dev_op_jumbo_frames,
> >                                       dev_op_dqo_qpl, dev_op_buffer_sizes,
> > -                                     dev_op_modify_ring);
> > +                                     dev_op_flow_steering, dev_op_modify_ring);
> >               dev_opt = next_opt;
> >       }
> >
> > @@ -838,6 +857,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
> >                                         *dev_op_dqo_qpl,
> >                                         const struct gve_device_option_buffer_sizes
> >                                         *dev_op_buffer_sizes,
> > +                                       const struct gve_device_option_flow_steering
> > +                                       *dev_op_flow_steering,
> >                                         const struct gve_device_option_modify_ring
> >                                         *dev_op_modify_ring)
> >  {
> > @@ -890,10 +911,22 @@ static void gve_enable_supported_features(struct gve_priv *priv,
> >                       priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
> >               }
> >       }
> > +
> > +     if (dev_op_flow_steering &&
> > +         (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
> > +             if (dev_op_flow_steering->max_flow_rules) {
> > +                     priv->max_flow_rules =
> > +                             be32_to_cpu(dev_op_flow_steering->max_flow_rules);
> > +                     dev_info(&priv->pdev->dev,
> > +                              "FLOW STEERING device option enabled with max rule limit of %u.\n",
> > +                              priv->max_flow_rules);
> > +             }
> > +     }
> >  }
> >
> >  int gve_adminq_describe_device(struct gve_priv *priv)
> >  {
> > +     struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
> >       struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
> >       struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
> >       struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
> > @@ -930,6 +963,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
> >                                        &dev_op_gqi_qpl, &dev_op_dqo_rda,
> >                                        &dev_op_jumbo_frames, &dev_op_dqo_qpl,
> >                                        &dev_op_buffer_sizes,
> > +                                      &dev_op_flow_steering,
> >                                        &dev_op_modify_ring);
> >       if (err)
> >               goto free_device_descriptor;
> > @@ -969,9 +1003,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
> >       /* set default descriptor counts */
> >       gve_set_default_desc_cnt(priv, descriptor);
> >
> > -     /* DQO supports LRO. */
> >       if (!gve_is_gqi(priv))
> > -             priv->dev->hw_features |= NETIF_F_LRO;
> > +             priv->dev->hw_features |= NETIF_F_LRO | NETIF_F_NTUPLE;
> >
> >       priv->max_registered_pages =
> >                               be64_to_cpu(descriptor->max_registered_pages);
> > @@ -991,7 +1024,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
> >
> >       gve_enable_supported_features(priv, supported_features_mask,
> >                                     dev_op_jumbo_frames, dev_op_dqo_qpl,
> > -                                   dev_op_buffer_sizes, dev_op_modify_ring);
> > +                                   dev_op_buffer_sizes, dev_op_flow_steering,
> > +                                   dev_op_modify_ring);
> >
> >  free_device_descriptor:
> >       dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
> > diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
> > index e0370ace8397..e64a0e72e781 100644
> > --- a/drivers/net/ethernet/google/gve/gve_adminq.h
> > +++ b/drivers/net/ethernet/google/gve/gve_adminq.h
> > @@ -146,6 +146,14 @@ struct gve_device_option_modify_ring {
> >
> >  static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
> >
> > +struct gve_device_option_flow_steering {
> > +     __be32 supported_features_mask;
> > +     __be32 reserved;
> > +     __be32 max_flow_rules;
> > +};
> > +
> > +static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
> > +
> >  /* Terminology:
> >   *
> >   * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
> > @@ -163,6 +171,7 @@ enum gve_dev_opt_id {
> >       GVE_DEV_OPT_ID_DQO_QPL                  = 0x7,
> >       GVE_DEV_OPT_ID_JUMBO_FRAMES             = 0x8,
> >       GVE_DEV_OPT_ID_BUFFER_SIZES             = 0xa,
> > +     GVE_DEV_OPT_ID_FLOW_STEERING            = 0xb,
> >  };
> >
> >  enum gve_dev_opt_req_feat_mask {
> > @@ -174,12 +183,14 @@ enum gve_dev_opt_req_feat_mask {
> >       GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL               = 0x0,
> >       GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES          = 0x0,
> >       GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING           = 0x0,
> > +     GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING         = 0x0,
> >  };
> >
> >  enum gve_sup_feature_mask {
> >       GVE_SUP_MODIFY_RING_MASK        = 1 << 0,
> >       GVE_SUP_JUMBO_FRAMES_MASK       = 1 << 2,
> >       GVE_SUP_BUFFER_SIZES_MASK       = 1 << 4,
> > +     GVE_SUP_FLOW_STEERING_MASK      = 1 << 5,
> >  };
> >
> >  #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
diff mbox series

Patch

diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index ca7fce17f2c0..58213c15e084 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -786,6 +786,8 @@  struct gve_priv {
 
 	u16 header_buf_size; /* device configured, header-split supported if non-zero */
 	bool header_split_enabled; /* True if the header split is enabled by the user */
+
+	u32 max_flow_rules;
 };
 
 enum gve_service_task_flags_bit {
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 514641b3ccc7..85d0d742ad21 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -44,6 +44,7 @@  void gve_parse_device_option(struct gve_priv *priv,
 			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
 			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
 			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+			     struct gve_device_option_flow_steering **dev_op_flow_steering,
 			     struct gve_device_option_modify_ring **dev_op_modify_ring)
 {
 	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -189,6 +190,23 @@  void gve_parse_device_option(struct gve_priv *priv,
 		if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
 			priv->default_min_ring_size = true;
 		break;
+	case GVE_DEV_OPT_ID_FLOW_STEERING:
+		if (option_length < sizeof(**dev_op_flow_steering) ||
+		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) {
+			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+				 "Flow Steering",
+				 (int)sizeof(**dev_op_flow_steering),
+				 GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING,
+				 option_length, req_feat_mask);
+			break;
+		}
+
+		if (option_length > sizeof(**dev_op_flow_steering))
+			dev_warn(&priv->pdev->dev,
+				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
+				 "Flow Steering");
+		*dev_op_flow_steering = (void *)(option + 1);
+		break;
 	default:
 		/* If we don't recognize the option just continue
 		 * without doing anything.
@@ -208,6 +226,7 @@  gve_process_device_options(struct gve_priv *priv,
 			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
 			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
 			   struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+			   struct gve_device_option_flow_steering **dev_op_flow_steering,
 			   struct gve_device_option_modify_ring **dev_op_modify_ring)
 {
 	const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -230,7 +249,7 @@  gve_process_device_options(struct gve_priv *priv,
 					dev_op_gqi_rda, dev_op_gqi_qpl,
 					dev_op_dqo_rda, dev_op_jumbo_frames,
 					dev_op_dqo_qpl, dev_op_buffer_sizes,
-					dev_op_modify_ring);
+					dev_op_flow_steering, dev_op_modify_ring);
 		dev_opt = next_opt;
 	}
 
@@ -838,6 +857,8 @@  static void gve_enable_supported_features(struct gve_priv *priv,
 					  *dev_op_dqo_qpl,
 					  const struct gve_device_option_buffer_sizes
 					  *dev_op_buffer_sizes,
+					  const struct gve_device_option_flow_steering
+					  *dev_op_flow_steering,
 					  const struct gve_device_option_modify_ring
 					  *dev_op_modify_ring)
 {
@@ -890,10 +911,22 @@  static void gve_enable_supported_features(struct gve_priv *priv,
 			priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
 		}
 	}
+
+	if (dev_op_flow_steering &&
+	    (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
+		if (dev_op_flow_steering->max_flow_rules) {
+			priv->max_flow_rules =
+				be32_to_cpu(dev_op_flow_steering->max_flow_rules);
+			dev_info(&priv->pdev->dev,
+				 "FLOW STEERING device option enabled with max rule limit of %u.\n",
+				 priv->max_flow_rules);
+		}
+	}
 }
 
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
+	struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
 	struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
 	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
 	struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
@@ -930,6 +963,7 @@  int gve_adminq_describe_device(struct gve_priv *priv)
 					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
 					 &dev_op_jumbo_frames, &dev_op_dqo_qpl,
 					 &dev_op_buffer_sizes,
+					 &dev_op_flow_steering,
 					 &dev_op_modify_ring);
 	if (err)
 		goto free_device_descriptor;
@@ -969,9 +1003,8 @@  int gve_adminq_describe_device(struct gve_priv *priv)
 	/* set default descriptor counts */
 	gve_set_default_desc_cnt(priv, descriptor);
 
-	/* DQO supports LRO. */
 	if (!gve_is_gqi(priv))
-		priv->dev->hw_features |= NETIF_F_LRO;
+		priv->dev->hw_features |= NETIF_F_LRO | NETIF_F_NTUPLE;
 
 	priv->max_registered_pages =
 				be64_to_cpu(descriptor->max_registered_pages);
@@ -991,7 +1024,8 @@  int gve_adminq_describe_device(struct gve_priv *priv)
 
 	gve_enable_supported_features(priv, supported_features_mask,
 				      dev_op_jumbo_frames, dev_op_dqo_qpl,
-				      dev_op_buffer_sizes, dev_op_modify_ring);
+				      dev_op_buffer_sizes, dev_op_flow_steering,
+				      dev_op_modify_ring);
 
 free_device_descriptor:
 	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index e0370ace8397..e64a0e72e781 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -146,6 +146,14 @@  struct gve_device_option_modify_ring {
 
 static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
 
+struct gve_device_option_flow_steering {
+	__be32 supported_features_mask;
+	__be32 reserved;
+	__be32 max_flow_rules;
+};
+
+static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
+
 /* Terminology:
  *
  * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -163,6 +171,7 @@  enum gve_dev_opt_id {
 	GVE_DEV_OPT_ID_DQO_QPL			= 0x7,
 	GVE_DEV_OPT_ID_JUMBO_FRAMES		= 0x8,
 	GVE_DEV_OPT_ID_BUFFER_SIZES		= 0xa,
+	GVE_DEV_OPT_ID_FLOW_STEERING		= 0xb,
 };
 
 enum gve_dev_opt_req_feat_mask {
@@ -174,12 +183,14 @@  enum gve_dev_opt_req_feat_mask {
 	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL		= 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES		= 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING		= 0x0,
+	GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING		= 0x0,
 };
 
 enum gve_sup_feature_mask {
 	GVE_SUP_MODIFY_RING_MASK	= 1 << 0,
 	GVE_SUP_JUMBO_FRAMES_MASK	= 1 << 2,
 	GVE_SUP_BUFFER_SIZES_MASK	= 1 << 4,
+	GVE_SUP_FLOW_STEERING_MASK	= 1 << 5,
 };
 
 #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0