[iwl-next,v1,1/4] ixgbe: add MDD support

Message ID 20250207104343.2791001-2-michal.swiatkowski@linux.intel.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series: ixgbe: support MDD events

Checks

Context                          Check    Description
netdev/series_format             warning  Target tree name not specified in the subject
netdev/tree_selection            success  Guessed tree name to be net-next, async
netdev/ynl                       success  Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present             success  Fixes tag not required for -next series
netdev/header_inline             success  No static functions without inline keyword in header files
netdev/build_32bit               success  Errors and warnings before: 0 this patch: 0
netdev/build_tools               success  No tools touched, skip
netdev/cc_maintainers            warning  4 maintainers not CCed: kuba@kernel.org andrew+netdev@lunn.ch pabeni@redhat.com edumazet@google.com
netdev/build_clang               success  Errors and warnings before: 24 this patch: 24
netdev/verify_signedoff          success  Signed-off-by tag matches author and committer
netdev/deprecated_api            success  None detected
netdev/check_selftest            success  No net selftest shell script
netdev/verify_fixes              success  No Fixes tag
netdev/build_allmodconfig_warn   success  Errors and warnings before: 0 this patch: 0
netdev/checkpatch                success  total: 0 errors, 0 warnings, 0 checks, 190 lines checked
netdev/build_clang_rust          success  No Rust files in patch. Skipping build
netdev/kdoc                      success  Errors and warnings before: 47 this patch: 47
netdev/source_inline             success  Was 0 now: 0

Commit Message

Michal Swiatkowski Feb. 7, 2025, 10:43 a.m. UTC
From: Paul Greenwalt <paul.greenwalt@intel.com>

Add malicious driver detection. Support enabling MDD, disabling MDD,
handling an MDD event, and restoring an MDD VF.

Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
Reviewed-by: Marcin Szycik <marcin.szycik@linux.intel.com>
Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
---
 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h |  28 +++++
 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h |   5 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c |   4 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 119 ++++++++++++++++++
 4 files changed, 156 insertions(+)
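
For context, a minimal sketch (not part of this patch) of how later
patches in the series might consume the new mac_ops callbacks. The
ixgbe_service_handle_mdd() name is a hypothetical helper for
illustration; the 64-bit bitmap matches the largest VF pool count the
device supports:

	static void ixgbe_service_handle_mdd(struct ixgbe_hw *hw)
	{
		DECLARE_BITMAP(vf_bitmap, 64);
		u32 vf;

		if (!hw->mac.ops.handle_mdd || !hw->mac.ops.restore_mdd_vf)
			return;

		bitmap_zero(vf_bitmap, 64);

		/* collect VFs flagged as malicious by the hardware */
		hw->mac.ops.handle_mdd(hw, vf_bitmap);

		/* re-enable the queues of each affected VF */
		for_each_set_bit(vf, vf_bitmap, 64)
			hw->mac.ops.restore_mdd_vf(hw, vf);
	}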

Comments

Simon Horman Feb. 7, 2025, 3:07 p.m. UTC | #1
On Fri, Feb 07, 2025 at 11:43:40AM +0100, Michal Swiatkowski wrote:
> From: Paul Greenwalt <paul.greenwalt@intel.com>
> 
> Add malicious driver detection. Support enabling MDD, disabling MDD,
> handling an MDD event, and restoring an MDD VF.
> 
> Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
> Reviewed-by: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
> Reviewed-by: Marcin Szycik <marcin.szycik@linux.intel.com>
> Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
> Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>

...

> diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c

...

> +/**
> + * ixgbe_handle_mdd_x550 - handle malicious driver detection event
> + * @hw: pointer to hardware structure
> + * @vf_bitmap: output vf bitmap of malicious vfs
> + */
> +void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap)
> +{
> +	u32 i, j, reg, q, div, vf, wqbr;
> +
> +	/* figure out pool size for mapping to vf's */
> +	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
> +	switch (reg & IXGBE_MRQC_MRQE_MASK) {
> +	case IXGBE_MRQC_VMDQRT8TCEN:
> +		div = IXGBE_16VFS_QUEUES;
> +		break;
> +	case IXGBE_MRQC_VMDQRSS32EN:
> +	case IXGBE_MRQC_VMDQRT4TCEN:
> +		div = IXGBE_32VFS_QUEUES;
> +		break;
> +	default:
> +		div = IXGBE_64VFS_QUEUES;
> +		break;
> +	}
> +
> +	/* Read WQBR_TX and WQBR_RX and check for malicious queues */
> +	for (i = 0; i < IXGBE_QUEUES_REG_AMOUNT; i++) {
> +		wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)) |
> +		       IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
> +		if (!wqbr)
> +			continue;
> +
> +		/* Get malicious queue */
> +		for_each_set_bit(j, (unsigned long *)&wqbr,
> +				 IXGBE_QUEUES_PER_REG) {

The type of wqbr is u32, that is, it is 32 bits wide.
Above, its address is cast to unsigned long *.
But unsigned long may be 64 bits wide, e.g. on x86_64.

GCC 14.2.0 EXTRA_CFLAGS=-Warray-bounds builds report this as:

In file included from ./include/linux/bitmap.h:11,
                 from ./include/linux/cpumask.h:12,
                 from ./arch/x86/include/asm/paravirt.h:21,
                 from ./arch/x86/include/asm/cpuid.h:71,
                 from ./arch/x86/include/asm/processor.h:19,
                 from ./arch/x86/include/asm/cpufeature.h:5,
                 from ./arch/x86/include/asm/thread_info.h:59,
                 from ./include/linux/thread_info.h:60,
                 from ./include/linux/uio.h:9,
                 from ./include/linux/socket.h:8,
                 from ./include/uapi/linux/if.h:25,
                 from ./include/linux/mii.h:12,
                 from ./include/uapi/linux/mdio.h:15,
                 from ./include/linux/mdio.h:9,
                 from drivers/net/ethernet/intel/ixgbe/ixgbe_type.h:8,
                 from drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h:7,
                 from drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c:4:
In function ‘find_next_bit’,
    inlined from ‘ixgbe_handle_mdd_x550’ at drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c:3907:3:
./include/linux/find.h:65:23: error: array subscript ‘long unsigned int[0]’ is partly outside array bounds of ‘u32[1]’ {aka ‘unsigned int[1]’} [-Werror=array-bounds=]
   65 |                 val = *addr & GENMASK(size - 1, offset);
      |                       ^~~~~

I think this can be addressed by changing the type of wqbr to unsigned long.
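
For concreteness, a sketch of the loop with just that one change, the
rest of ixgbe_handle_mdd_x550() being unchanged:

	unsigned long wqbr;

	for (i = 0; i < IXGBE_QUEUES_REG_AMOUNT; i++) {
		wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)) |
		       IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
		if (!wqbr)
			continue;

		/* no cast: &wqbr is now a genuine unsigned long bitmap */
		for_each_set_bit(j, &wqbr, IXGBE_QUEUES_PER_REG) {
			q = j + (i * IXGBE_QUEUES_PER_REG);
			vf = q / div;
			set_bit(vf, vf_bitmap);
		}
	}

The queue-to-VF arithmetic is untouched: e.g. with 32 VF pools
(div = IXGBE_32VFS_QUEUES = 4), bit 6 of register 2 is queue
6 + 2 * 32 = 70, which maps to VF 70 / 4 = 17.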

> +			/* Get queue from bitmask */
> +			q = j + (i * IXGBE_QUEUES_PER_REG);
> +			/* Map queue to vf */
> +			vf = q / div;
> +			set_bit(vf, vf_bitmap);
> +		}
> +	}
> +}
> +
>  #define X550_COMMON_MAC \
>  	.init_hw			= &ixgbe_init_hw_generic, \
>  	.start_hw			= &ixgbe_start_hw_X540, \

...
Michal Swiatkowski Feb. 10, 2025, 5:51 a.m. UTC | #2
On Fri, Feb 07, 2025 at 03:07:49PM +0000, Simon Horman wrote:
> On Fri, Feb 07, 2025 at 11:43:40AM +0100, Michal Swiatkowski wrote:

...

> > +		/* Get malicious queue */
> > +		for_each_set_bit(j, (unsigned long *)&wqbr,
> > +				 IXGBE_QUEUES_PER_REG) {
> 
> The type of wqbr is u32, that is, it is 32 bits wide.
> Above, its address is cast to unsigned long *.
> But unsigned long may be 64 bits wide, e.g. on x86_64.

...

> I think this can be addressed by changing the type of wqbr to unsigned long.

Thanks for catching that, I will fix.

Patch

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 5fdf32d79d82..d446c375335a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2746,6 +2746,28 @@  enum ixgbe_fdir_pballoc_type {
 #define FW_PHY_INFO_ID_HI_MASK		0xFFFF0000u
 #define FW_PHY_INFO_ID_LO_MASK		0x0000FFFFu
 
+/* There are only 3 options for VFs creation on this device:
+ * 16 VFs pool with 8 queues each
+ * 32 VFs pool with 4 queues each
+ * 64 VFs pool with 2 queues each
+ *
+ * That means reading some VF registers that map VF to queue depending on
+ * chosen option. Define values that help dealing with each scenario.
+ */
+/* Number of queues based on VFs pool */
+#define IXGBE_16VFS_QUEUES		8
+#define IXGBE_32VFS_QUEUES		4
+#define IXGBE_64VFS_QUEUES		2
+/* Mask for getting queues bits based on VFs pool */
+#define IXGBE_16VFS_BITMASK		GENMASK(IXGBE_16VFS_QUEUES - 1, 0)
+#define IXGBE_32VFS_BITMASK		GENMASK(IXGBE_32VFS_QUEUES - 1, 0)
+#define IXGBE_64VFS_BITMASK		GENMASK(IXGBE_64VFS_QUEUES - 1, 0)
+/* Convert queue index to register number.
+ * We have 4 registers with 32 queues in each.
+ */
+#define IXGBE_QUEUES_PER_REG		32
+#define IXGBE_QUEUES_REG_AMOUNT		4
+
 /* Host Interface Command Structures */
 struct ixgbe_hic_hdr {
 	u8 cmd;
@@ -3534,6 +3556,12 @@  struct ixgbe_mac_operations {
 	int (*dmac_config_tcs)(struct ixgbe_hw *hw);
 	int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
 	int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+
+	/* MDD events */
+	void (*enable_mdd)(struct ixgbe_hw *hw);
+	void (*disable_mdd)(struct ixgbe_hw *hw);
+	void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
+	void (*handle_mdd)(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
 };
 
 struct ixgbe_phy_operations {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
index 3e4092f8da3e..2a11147fb1bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
@@ -17,4 +17,9 @@  void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
 void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
 					    bool enable, int vf);
 
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
+
 #endif /* _IXGBE_X550_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index 683c668672d6..e67d105fd99a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -2630,6 +2630,10 @@  static const struct ixgbe_mac_operations mac_ops_e610 = {
 	.prot_autoc_write		= prot_autoc_write_generic,
 	.setup_fc			= ixgbe_setup_fc_e610,
 	.fc_autoneg			= ixgbe_fc_autoneg_e610,
+	.enable_mdd			= ixgbe_enable_mdd_x550,
+	.disable_mdd			= ixgbe_disable_mdd_x550,
+	.restore_mdd_vf			= ixgbe_restore_mdd_vf_x550,
+	.handle_mdd			= ixgbe_handle_mdd_x550,
 };
 
 static const struct ixgbe_phy_operations phy_ops_e610 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 277ceaf8a793..f148d3f29378 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -3800,6 +3800,121 @@  static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
 	return status;
 }
 
+static void ixgbe_set_mdd_x550(struct ixgbe_hw *hw, bool ena)
+{
+	u32 reg_dma, reg_rdr;
+
+	reg_dma = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+	reg_rdr = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+
+	if (ena) {
+		reg_dma |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+		reg_rdr |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+	} else {
+		reg_dma &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+		reg_rdr &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_dma);
+	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_rdr);
+}
+
+/**
+ * ixgbe_enable_mdd_x550 - enable malicious driver detection
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw)
+{
+	ixgbe_set_mdd_x550(hw, true);
+}
+
+/**
+ * ixgbe_disable_mdd_x550 - disable malicious driver detection
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw)
+{
+	ixgbe_set_mdd_x550(hw, false);
+}
+
+/**
+ * ixgbe_restore_mdd_vf_x550 - restore VF that was disabled during MDD event
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ */
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf)
+{
+	u32 idx, reg, val, num_qs, start_q, bitmask;
+
+	/* Map VF to queues */
+	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	switch (reg & IXGBE_MRQC_MRQE_MASK) {
+	case IXGBE_MRQC_VMDQRT8TCEN:
+		num_qs = IXGBE_16VFS_QUEUES;
+		bitmask = IXGBE_16VFS_BITMASK;
+		break;
+	case IXGBE_MRQC_VMDQRSS32EN:
+	case IXGBE_MRQC_VMDQRT4TCEN:
+		num_qs = IXGBE_32VFS_QUEUES;
+		bitmask = IXGBE_32VFS_BITMASK;
+		break;
+	default:
+		num_qs = IXGBE_64VFS_QUEUES;
+		bitmask = IXGBE_64VFS_BITMASK;
+		break;
+	}
+	start_q = vf * num_qs;
+
+	/* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
+	idx = start_q / IXGBE_QUEUES_PER_REG;
+	val = bitmask << (start_q % IXGBE_QUEUES_PER_REG);
+	IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), val);
+	IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), val);
+}
+
+/**
+ * ixgbe_handle_mdd_x550 - handle malicious driver detection event
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: output vf bitmap of malicious vfs
+ */
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap)
+{
+	u32 i, j, reg, q, div, vf, wqbr;
+
+	/* figure out pool size for mapping to vf's */
+	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	switch (reg & IXGBE_MRQC_MRQE_MASK) {
+	case IXGBE_MRQC_VMDQRT8TCEN:
+		div = IXGBE_16VFS_QUEUES;
+		break;
+	case IXGBE_MRQC_VMDQRSS32EN:
+	case IXGBE_MRQC_VMDQRT4TCEN:
+		div = IXGBE_32VFS_QUEUES;
+		break;
+	default:
+		div = IXGBE_64VFS_QUEUES;
+		break;
+	}
+
+	/* Read WQBR_TX and WQBR_RX and check for malicious queues */
+	for (i = 0; i < IXGBE_QUEUES_REG_AMOUNT; i++) {
+		wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)) |
+		       IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
+		if (!wqbr)
+			continue;
+
+		/* Get malicious queue */
+		for_each_set_bit(j, (unsigned long *)&wqbr,
+				 IXGBE_QUEUES_PER_REG) {
+			/* Get queue from bitmask */
+			q = j + (i * IXGBE_QUEUES_PER_REG);
+			/* Map queue to vf */
+			vf = q / div;
+			set_bit(vf, vf_bitmap);
+		}
+	}
+}
+
 #define X550_COMMON_MAC \
 	.init_hw			= &ixgbe_init_hw_generic, \
 	.start_hw			= &ixgbe_start_hw_X540, \
@@ -3863,6 +3978,10 @@  static const struct ixgbe_mac_operations mac_ops_X550 = {
 	.prot_autoc_write	= prot_autoc_write_generic,
 	.setup_fc		= ixgbe_setup_fc_generic,
 	.fc_autoneg		= ixgbe_fc_autoneg,
+	.enable_mdd		= ixgbe_enable_mdd_x550,
+	.disable_mdd		= ixgbe_disable_mdd_x550,
+	.restore_mdd_vf		= ixgbe_restore_mdd_vf_x550,
+	.handle_mdd		= ixgbe_handle_mdd_x550,
 };
 
 static const struct ixgbe_mac_operations mac_ops_X550EM_x = {