[net-next] octeontx2-af: map management port always to first PF

Message ID 20240327160348.3023-1-hkelam@marvell.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series: [net-next] octeontx2-af: map management port always to first PF

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 8 this patch: 8
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 10 of 10 maintainers
netdev/build_clang success Errors and warnings before: 955 this patch: 955
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 975 this patch: 975
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 95 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-03-28--03-00 (tests: 949)

Commit Message

Hariprasad Kelam March 27, 2024, 4:03 p.m. UTC
The user can enable or disable any MAC block, or only some ports of a
block. If the management port is not mapped to the first PF, its
interface name varies with the user's setup: disabling an earlier MAC
block or port shifts the management port to a different PF index, and
the netdev name changes with it.

To keep the name stable, always map the management port to the first
PF.

Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
---
 .../net/ethernet/marvell/octeontx2/af/mbox.h  |  5 +-
 .../ethernet/marvell/octeontx2/af/rvu_cgx.c   | 60 +++++++++++++++----
 2 files changed, 53 insertions(+), 12 deletions(-)
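
Editor's note on the mbox.h hunk: it carves three new fields (lmac_type,
portm_idx and the mgmt_port bit) out of the reserved tail of struct
cgx_lmac_fwdata_s. Two u32s plus a u64 bitfield occupy two u64 words,
which is why LMAC_FWDATA_RESERVED_MEM shrinks from 1021 to 1019,
presumably keeping the overall structure size (and the layout shared
with firmware) unchanged. A hypothetical compile-time check of that
arithmetic, not part of the patch, could be:

	/* Sketch only: the new fields must consume exactly the two u64
	 * words removed from the reserved area, so the struct size
	 * stays stable.
	 */
	static_assert(2 * sizeof(u32) + sizeof(u64) == 2 * sizeof(u64),
		      "fwdata fields must replace exactly two reserved words");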

Comments

Simon Horman March 28, 2024, 2:58 p.m. UTC | #1
On Wed, Mar 27, 2024 at 09:33:48PM +0530, Hariprasad Kelam wrote:
> The user can enable or disable any MAC block or a few ports of the
> block. The management port's interface name varies depending on the
> setup of the user if its not mapped to the first pf.
> 
> The management port mapping is now configured to always connect to the
> first PF. This patch implements this change.
> 
> Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
> Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>

Hi Hariprasad and Sunil,

some feedback from my side.

> ---
>  .../net/ethernet/marvell/octeontx2/af/mbox.h  |  5 +-
>  .../ethernet/marvell/octeontx2/af/rvu_cgx.c   | 60 +++++++++++++++----
>  2 files changed, 53 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> index eb2a20b5a0d0..105d2e8f25df 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> @@ -638,7 +638,10 @@ struct cgx_lmac_fwdata_s {
>  	/* Only applicable if SFP/QSFP slot is present */
>  	struct sfp_eeprom_s sfp_eeprom;
>  	struct phy_s phy;
> -#define LMAC_FWDATA_RESERVED_MEM 1021
> +	u32 lmac_type;
> +	u32 portm_idx;
> +	u64 mgmt_port:1;
> +#define LMAC_FWDATA_RESERVED_MEM 1019
>  	u64 reserved[LMAC_FWDATA_RESERVED_MEM];
>  };
>  
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> index 72e060cf6b61..446344801576 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> @@ -118,15 +118,40 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
>  		pfvf->nix_blkaddr = BLKADDR_NIX1;
>  }
>  
> -static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> +static bool rvu_cgx_is_mgmt_port(struct rvu *rvu, int cgx_id, int lmac_id)
> +{
> +	struct cgx_lmac_fwdata_s *fwdata;
> +
> +	fwdata =  &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
> +	if (fwdata->mgmt_port)
> +		return true;
> +
> +	return false;

nit: I think this could be more succinctly expressed as:

	return !!fwdata->mgmt_port;
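
For illustration, the whole helper with that nit applied would read as
follows (a sketch of the suggestion, not code from the posted patch):

	static bool rvu_cgx_is_mgmt_port(struct rvu *rvu, int cgx_id, int lmac_id)
	{
		struct cgx_lmac_fwdata_s *fwdata;

		fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];

		/* mgmt_port is a one-bit bitfield; !! normalises it to bool */
		return !!fwdata->mgmt_port;
	}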

> +}
> +
> +static void __rvu_map_cgx_lmac_pf(struct rvu *rvu, int pf, int cgx, int lmac)
>  {
>  	struct npc_pkind *pkind = &rvu->hw->pkind;
> +	int numvfs, hwvfs;
> +	int free_pkind;
> +
> +	rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
> +	rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;

This isn't strictly related to this patch, but here
it seems implied that pf is not negative and <= 63, as
rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] is only 64 bits wide.

So firstly I wonder if pf should be unsigned

> +	free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
> +	pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;

Here pf is masked off so it is not more than 63.
But that seems to conflict with the assumption above that it is <= 63.

If there is a concern about it being larger, it should be
capped in the for loop that calls __rvu_map_cgx_lmac_pf() ?

> +	rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
> +	rvu->cgx_mapped_pfs++;
> +	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
> +	rvu->cgx_mapped_vfs += numvfs;
> +}
> +
> +static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> +{
>  	int cgx_cnt_max = rvu->cgx_cnt_max;
>  	int pf = PF_CGXMAP_BASE;
>  	unsigned long lmac_bmap;
> -	int size, free_pkind;
>  	int cgx, lmac, iter;
> -	int numvfs, hwvfs;
> +	int size;
>  
>  	if (!cgx_cnt_max)
>  		return 0;
> @@ -155,6 +180,24 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
>  		return -ENOMEM;
>  
>  	rvu->cgx_mapped_pfs = 0;
> +
> +	/* Map mgmt port always to first PF */
> +	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
> +		if (!rvu_cgx_pdata(cgx, rvu))
> +			continue;
> +		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
> +		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
> +			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
> +			if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac)) {
> +				__rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
> +				pf++;
> +				goto non_mgmtport_mapping;
> +			}
> +		}
> +	}
> +
> +non_mgmtport_mapping:
> +
>  	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
>  		if (!rvu_cgx_pdata(cgx, rvu))
>  			continue;
> @@ -162,14 +205,9 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
>  		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
>  			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
>  					      iter);
> -			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
> -			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
> -			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
> -			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
> -			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
> -			rvu->cgx_mapped_pfs++;
> -			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
> -			rvu->cgx_mapped_vfs += numvfs;
> +			if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac))
> +				continue;
> +			__rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
>  			pf++;
>  		}
>  	}

There seems to be a fair amount of code duplication above.
If we can assume that there is always a management port,
then perhaps the following is simpler (compile tested only!).

And if not, I'd suggest moving the outermost for loop and everything
within it into a helper with a parameter such that it can handle
the (first?) management port on one invocation, and non-management
ports on the next invocation.

 static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
 	int cgx_cnt_max = rvu->cgx_cnt_max;
-	int pf = PF_CGXMAP_BASE;
+	int next_pf = PF_CGXMAP_BASE + 1;
 	unsigned long lmac_bmap;
 	int size, free_pkind;
 	int cgx, lmac, iter;
@@ -158,10 +167,20 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
 		if (!rvu_cgx_pdata(cgx, rvu))
 			continue;
+
 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+			int pf;
+
 			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
 					      iter);
+
+			/* Always use first PF for management port */
+			if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac))
+				pf = PF_CGXMAP_BASE;
+			else
+				pf = next_pf++;
+
 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
 			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
 			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);

Hariprasad Kelam April 1, 2024, 8:38 a.m. UTC | #2
Hi Simon,

Thanks for the review, see inline.

> On Wed, Mar 27, 2024 at 09:33:48PM +0530, Hariprasad Kelam wrote:
> > The user can enable or disable any MAC block or a few ports of the
> > block. The management port's interface name varies depending on the
> > setup of the user if its not mapped to the first pf.
> >
> > The management port mapping is now configured to always connect to the
> > first PF. This patch implements this change.
> >
> > Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
> > Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
> 
> Hi Hariprasad and Sunil,
> 
> some feedback from my side.
> 
> > ---
> >  .../net/ethernet/marvell/octeontx2/af/mbox.h  |  5 +-
> >  .../ethernet/marvell/octeontx2/af/rvu_cgx.c   | 60 +++++++++++++++----
> >  2 files changed, 53 insertions(+), 12 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> > index eb2a20b5a0d0..105d2e8f25df 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> > +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> > @@ -638,7 +638,10 @@ struct cgx_lmac_fwdata_s {
> >  	/* Only applicable if SFP/QSFP slot is present */
> >  	struct sfp_eeprom_s sfp_eeprom;
> >  	struct phy_s phy;
> > -#define LMAC_FWDATA_RESERVED_MEM 1021
> > +	u32 lmac_type;
> > +	u32 portm_idx;
> > +	u64 mgmt_port:1;
> > +#define LMAC_FWDATA_RESERVED_MEM 1019
> >  	u64 reserved[LMAC_FWDATA_RESERVED_MEM];
> >  };
> >
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> > index 72e060cf6b61..446344801576 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> > +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> > @@ -118,15 +118,40 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
> >  		pfvf->nix_blkaddr = BLKADDR_NIX1;
> >  }
> >
> > -static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> > +static bool rvu_cgx_is_mgmt_port(struct rvu *rvu, int cgx_id, int lmac_id)
> > +{
> > +	struct cgx_lmac_fwdata_s *fwdata;
> > +
> > +	fwdata =  &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
> > +	if (fwdata->mgmt_port)
> > +		return true;
> > +
> > +	return false;
> 
> nit: I think this could be more succinctly expressed as:
> 
> 	return !!fwdata->mgmt_port;

ACK, will fix in V2.

> > +}
> > +
> > +static void __rvu_map_cgx_lmac_pf(struct rvu *rvu, int pf, int cgx, int lmac)
> >  {
> >  	struct npc_pkind *pkind = &rvu->hw->pkind;
> > +	int numvfs, hwvfs;
> > +	int free_pkind;
> > +
> > +	rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
> > +	rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
> 
> This isn't strictly related to this patch, but here it seems implied that pf is not
> negative and <= 63, as
> rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] is only 64 bits wide.
> 
> So firstly I wonder if pf should be unsigned
> 
> > +	free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
> > +	pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
> 
> Here pf is masked off so it is not more than 63.
> But that seems to conflict with the assumption above that it is <= 63.
> 
> If there is a concern about it being larger, it should be capped in the for loop
> that calls __rvu_map_cgx_lmac_pf() ?
> 
     Max PF value ranges from 0 to 63; we will address the proposed change in V2.
> > +	rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
> > +	rvu->cgx_mapped_pfs++;
> > +	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
> > +	rvu->cgx_mapped_vfs += numvfs;
> > +}
> > +
> > +static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> > +{
> >  	int cgx_cnt_max = rvu->cgx_cnt_max;
> >  	int pf = PF_CGXMAP_BASE;
> >  	unsigned long lmac_bmap;
> > -	int size, free_pkind;
> >  	int cgx, lmac, iter;
> > -	int numvfs, hwvfs;
> > +	int size;
> >
> >  	if (!cgx_cnt_max)
> >  		return 0;
> > @@ -155,6 +180,24 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> >  		return -ENOMEM;
> >
> >  	rvu->cgx_mapped_pfs = 0;
> > +
> > +	/* Map mgmt port always to first PF */
> > +	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
> > +		if (!rvu_cgx_pdata(cgx, rvu))
> > +			continue;
> > +		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
> > +		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
> > +			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
> > +			if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac)) {
> > +				__rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
> > +				pf++;
> > +				goto non_mgmtport_mapping;
> > +			}
> > +		}
> > +	}
> > +
> > +non_mgmtport_mapping:
> > +
> >  	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
> >  		if (!rvu_cgx_pdata(cgx, rvu))
> >  			continue;
> > @@ -162,14 +205,9 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> >  		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
> >  			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
> >  					      iter);
> > -			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
> > -			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
> > -			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
> > -			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
> > -			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
> > -			rvu->cgx_mapped_pfs++;
> > -			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
> > -			rvu->cgx_mapped_vfs += numvfs;
> > +			if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac))
> > +				continue;
> > +			__rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
> >  			pf++;
> >  		}
> >  	}
> 
> There seems to be a fair amount of code duplication above.
> If we can assume that there is always a management port, then perhaps the
> following is simpler (compile tested only!).
> 
> And if not, I'd suggest moving the outermost for loop and everything within it
> into a helper with a parameter such that it can handle the (first?) management
> port on one invocation, and non management ports on the next invocation.
> 

Management ports might not be available on all devices, so we will go with option 2; a rough sketch of that approach is below.
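
For reference, option 2 could look roughly like the following two-pass
helper (a sketch only; the helper name and the mgmt_pass flag are
illustrative, not taken from the eventual v2):

	/* Illustrative: walk all CGX/LMAC pairs once, mapping only
	 * management ports when mgmt_pass is true and only the
	 * remaining ports otherwise.  Returns the next free PF index.
	 */
	static int rvu_map_cgx_lmac_pf_pass(struct rvu *rvu, int pf, bool mgmt_pass)
	{
		unsigned long lmac_bmap;
		int cgx, lmac, iter;

		for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
			if (!rvu_cgx_pdata(cgx, rvu))
				continue;
			lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
			for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
				lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
				if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac) != mgmt_pass)
					continue;
				__rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
				pf++;
			}
		}

		return pf;
	}

Called twice from rvu_map_cgx_lmac_pf(), management port(s) first:

	pf = rvu_map_cgx_lmac_pf_pass(rvu, pf, true);
	pf = rvu_map_cgx_lmac_pf_pass(rvu, pf, false);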

    Thanks,
    Hariprasad k

>  static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
>  {
>  	struct npc_pkind *pkind = &rvu->hw->pkind;
>  	int cgx_cnt_max = rvu->cgx_cnt_max;
> -	int pf = PF_CGXMAP_BASE;
> +	int next_pf = PF_CGXMAP_BASE + 1;
>  	unsigned long lmac_bmap;
>  	int size, free_pkind;
>  	int cgx, lmac, iter;
> @@ -158,10 +167,20 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
>  	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
>  		if (!rvu_cgx_pdata(cgx, rvu))
>  			continue;
> +
>  		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
>  		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
> +			int pf;
> +
>  			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
>  					      iter);
> +
> +			/* Always use first PF for management port */
> +			if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac))
> +				pf = PF_CGXMAP_BASE;
> +			else
> +				pf = next_pf++;
> +
>  			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
>  			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
>  			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);

Patch

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index eb2a20b5a0d0..105d2e8f25df 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -638,7 +638,10 @@  struct cgx_lmac_fwdata_s {
 	/* Only applicable if SFP/QSFP slot is present */
 	struct sfp_eeprom_s sfp_eeprom;
 	struct phy_s phy;
-#define LMAC_FWDATA_RESERVED_MEM 1021
+	u32 lmac_type;
+	u32 portm_idx;
+	u64 mgmt_port:1;
+#define LMAC_FWDATA_RESERVED_MEM 1019
 	u64 reserved[LMAC_FWDATA_RESERVED_MEM];
 };
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 72e060cf6b61..446344801576 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -118,15 +118,40 @@  static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
 		pfvf->nix_blkaddr = BLKADDR_NIX1;
 }
 
-static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+static bool rvu_cgx_is_mgmt_port(struct rvu *rvu, int cgx_id, int lmac_id)
+{
+	struct cgx_lmac_fwdata_s *fwdata;
+
+	fwdata =  &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
+	if (fwdata->mgmt_port)
+		return true;
+
+	return false;
+}
+
+static void __rvu_map_cgx_lmac_pf(struct rvu *rvu, int pf, int cgx, int lmac)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
+	int numvfs, hwvfs;
+	int free_pkind;
+
+	rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+	rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+	free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
+	pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+	rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
+	rvu->cgx_mapped_pfs++;
+	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+	rvu->cgx_mapped_vfs += numvfs;
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
 	int cgx_cnt_max = rvu->cgx_cnt_max;
 	int pf = PF_CGXMAP_BASE;
 	unsigned long lmac_bmap;
-	int size, free_pkind;
 	int cgx, lmac, iter;
-	int numvfs, hwvfs;
+	int size;
 
 	if (!cgx_cnt_max)
 		return 0;
@@ -155,6 +180,24 @@  static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 		return -ENOMEM;
 
 	rvu->cgx_mapped_pfs = 0;
+
+	/* Map mgmt port always to first PF */
+	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+		if (!rvu_cgx_pdata(cgx, rvu))
+			continue;
+		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
+			if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac)) {
+				__rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
+				pf++;
+				goto non_mgmtport_mapping;
+			}
+		}
+	}
+
+non_mgmtport_mapping:
+
 	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
 		if (!rvu_cgx_pdata(cgx, rvu))
 			continue;
@@ -162,14 +205,9 @@  static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
 			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
 					      iter);
-			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
-			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
-			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
-			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
-			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
-			rvu->cgx_mapped_pfs++;
-			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
-			rvu->cgx_mapped_vfs += numvfs;
+			if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac))
+				continue;
+			__rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
 			pf++;
 		}
 	}