
[V3] PCI: Extend ACS configurability

Message ID 20240523063528.199908-1-vidyas@nvidia.com (mailing list archive)
State Changes Requested
Delegated to: Bjorn Helgaas
Series [V3] PCI: Extend ACS configurability

Commit Message

Vidya Sagar May 23, 2024, 6:35 a.m. UTC
For iommu_groups to form correctly, the ACS settings in the PCIe fabric
need to be set up early in the boot process, either via the BIOS or via
the kernel disable_acs_redir parameter.

disable_acs_redir allows clearing the RR|CR|EC ACS flags, but the PCIe
spec Rev3.0 already defines 7 different ACS-related flags, with many more
useful combinations depending on the fabric design.

For backward compatibility, leave 'disable_acs_redir' as is and add a
new parameter 'config_acs' so that the user can directly specify the ACS
flags to set on a per-device basis. Use a syntax similar to the existing
'resource_alignment' parameter by using the @ character, and have the user
specify the ACS flags using a bit encoding. If both 'disable_acs_redir' and
'config_acs' are specified for a particular device, the configuration
specified through 'config_acs' takes precedence.
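
For example (hypothetical BDF, shown only to illustrate the encoding;
the right-most flag character is bit 0), booting with

  pci=config_acs=1x111x1@0000:01:00.0

would force-enable Direct Translated P2P (bit 6), Upstream Forwarding
(bit 4), P2P Completion Redirect (bit 3), P2P Request Redirect (bit 2)
and Source Validation (bit 0) on 0000:01:00.0, while leaving P2P Egress
Control (bit 5) and Translation Blocking (bit 1) at their firmware
values.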

Signed-off-by: Vidya Sagar <vidyas@nvidia.com>
---
v3:
* Fixed a documentation issue reported by kernel test bot

v2:
* Refactored the code as per Jason's suggestion

 .../admin-guide/kernel-parameters.txt         |  22 +++
 drivers/pci/pci.c                             | 148 +++++++++++-------
 2 files changed, 112 insertions(+), 58 deletions(-)

Comments

Bjorn Helgaas May 23, 2024, 2:59 p.m. UTC | #1
[+cc iommu folks]

On Thu, May 23, 2024 at 12:05:28PM +0530, Vidya Sagar wrote:
> For iommu_groups to form correctly, the ACS settings in the PCIe fabric
> need to be set up early in the boot process, either via the BIOS or via
> the kernel disable_acs_redir parameter.

Can you point to the iommu code that is involved here?  It sounds like
the iommu_groups are built at boot time and are immutable after that?

If we need per-device ACS config that depends on the workload, it
seems kind of problematic to only be able to specify this at boot
time.  I guess we would need to reboot if we want to run a workload
that needs a different config?

Is this the iommu usage model we want in the long term?

> disable_acs_redir allows clearing the RR|CR|EC ACS flags, but the PCIe
> spec Rev3.0 already defines 7 different ACS-related flags, with many more
> useful combinations depending on the fabric design.

If we need a spec citation, I'd rather use r6.x since r3.0 is from
2010.

> For backward compatibility, leave 'disable_acs_redir' as is and add a
> new parameter 'config_acs' so that the user can directly specify the ACS
> flags to set on a per-device basis. Use a syntax similar to the existing
> 'resource_alignment' parameter by using the @ character, and have the user
> specify the ACS flags using a bit encoding. If both 'disable_acs_redir' and
> 'config_acs' are specified for a particular device, the configuration
> specified through 'config_acs' takes precedence.
> 
> Signed-off-by: Vidya Sagar <vidyas@nvidia.com>
> ---
> v3:
> * Fixed a documentation issue reported by kernel test bot
> 
> v2:
> * Refactored the code as per Jason's suggestion
> 
>  .../admin-guide/kernel-parameters.txt         |  22 +++
>  drivers/pci/pci.c                             | 148 +++++++++++-------
>  2 files changed, 112 insertions(+), 58 deletions(-)
> 
> diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
> index 41644336e..b4a8207eb 100644
> --- a/Documentation/admin-guide/kernel-parameters.txt
> +++ b/Documentation/admin-guide/kernel-parameters.txt
> @@ -4456,6 +4456,28 @@
>  				bridges without forcing it upstream. Note:
>  				this removes isolation between devices and
>  				may put more devices in an IOMMU group.
> +		config_acs=
> +				Format:
> +				=<ACS flags>@<pci_dev>[; ...]
> +				Specify one or more PCI devices (in the format
> +				specified above) optionally prepended with flags
> +				and separated by semicolons. The respective
> +				capabilities will be enabled, disabled or unchanged
> +				based on what is specified in flags.
> +				ACS flags are defined as follows:
> +				bit-0 : ACS Source Validation
> +				bit-1 : ACS Translation Blocking
> +				bit-2 : ACS P2P Request Redirect
> +				bit-3 : ACS P2P Completion Redirect
> +				bit-4 : ACS Upstream Forwarding
> +				bit-5 : ACS P2P Egress Control
> +				bit-6 : ACS Direct Translated P2P
> +				Each bit can be marked as:
> +				'0' - force disabled
> +				'1' - force enabled
> +				'x' - unchanged.
> +				Note: this may remove isolation between devices
> +				and may put more devices in an IOMMU group.
>  		force_floating	[S390] Force usage of floating interrupts.
>  		nomio		[S390] Do not use MIO instructions.
>  		norid		[S390] ignore the RID field and force use of
> diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
> index a607f277c..a46264f83 100644
> --- a/drivers/pci/pci.c
> +++ b/drivers/pci/pci.c
> @@ -887,30 +887,67 @@ void pci_request_acs(void)
>  }
>  
>  static const char *disable_acs_redir_param;
> +static const char *config_acs_param;
>  
> -/**
> - * pci_disable_acs_redir - disable ACS redirect capabilities
> - * @dev: the PCI device
> - *
> - * For only devices specified in the disable_acs_redir parameter.
> - */
> -static void pci_disable_acs_redir(struct pci_dev *dev)
> +struct pci_acs {
> +	u16 cap;
> +	u16 ctrl;
> +	u16 fw_ctrl;
> +};
> +
> +static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
> +			     const char *p, u16 mask, u16 flags)
>  {
> +	char *delimit;
>  	int ret = 0;
> -	const char *p;
> -	int pos;
> -	u16 ctrl;
>  
> -	if (!disable_acs_redir_param)
> +	if (!p)
>  		return;
>  
> -	p = disable_acs_redir_param;
>  	while (*p) {
> +		if (!mask) {
> +			/* Check for ACS flags */
> +			delimit = strstr(p, "@");
> +			if (delimit) {
> +				int end;
> +				u32 shift = 0;
> +
> +				end = delimit - p - 1;
> +
> +				while (end > -1) {
> +					if (*(p + end) == '0') {
> +						mask |= 1 << shift;
> +						shift++;
> +						end--;
> +					} else if (*(p + end) == '1') {
> +						mask |= 1 << shift;
> +						flags |= 1 << shift;
> +						shift++;
> +						end--;
> +					} else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
> +						shift++;
> +						end--;
> +					} else {
> +						pci_err(dev, "Invalid ACS flags... Ignoring\n");
> +						return;
> +					}
> +				}
> +				p = delimit + 1;
> +			} else {
> +				pci_err(dev, "ACS Flags missing\n");
> +				return;
> +			}
> +		}
> +
> +		if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
> +			    PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
> +			pci_err(dev, "Invalid ACS flags specified\n");
> +			return;
> +		}
> +
>  		ret = pci_dev_str_match(dev, p, &p);
>  		if (ret < 0) {
> -			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
> -				     disable_acs_redir_param);
> -
> +			pr_info_once("PCI: Can't parse acs command line parameter\n");
>  			break;
>  		} else if (ret == 1) {
>  			/* Found a match */
> @@ -930,56 +967,38 @@ static void pci_disable_acs_redir(struct pci_dev *dev)
>  	if (!pci_dev_specific_disable_acs_redir(dev))
>  		return;
>  
> -	pos = dev->acs_cap;
> -	if (!pos) {
> -		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
> -		return;
> -	}
> -
> -	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
> +	pci_dbg(dev, "ACS mask  = 0x%X\n", mask);
> +	pci_dbg(dev, "ACS flags = 0x%X\n", flags);
>  
> -	/* P2P Request & Completion Redirect */
> -	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
> +	/* If mask is 0 then we copy the bit from the firmware setting. */
> +	caps->ctrl = (caps->ctrl & ~mask) | (caps->fw_ctrl & mask);
> +	caps->ctrl |= flags;
>  
> -	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
> -
> -	pci_info(dev, "disabled ACS redirect\n");
> +	pci_info(dev, "Configured ACS to 0x%x\n", caps->ctrl);
>  }
>  
>  /**
>   * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
>   * @dev: the PCI device
> + * @caps: default ACS controls
>   */
> -static void pci_std_enable_acs(struct pci_dev *dev)
> +static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
>  {
> -	int pos;
> -	u16 cap;
> -	u16 ctrl;
> -
> -	pos = dev->acs_cap;
> -	if (!pos)
> -		return;
> -
> -	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
> -	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
> -
>  	/* Source Validation */
> -	ctrl |= (cap & PCI_ACS_SV);
> +	caps->ctrl |= (caps->cap & PCI_ACS_SV);
>  
>  	/* P2P Request Redirect */
> -	ctrl |= (cap & PCI_ACS_RR);
> +	caps->ctrl |= (caps->cap & PCI_ACS_RR);
>  
>  	/* P2P Completion Redirect */
> -	ctrl |= (cap & PCI_ACS_CR);
> +	caps->ctrl |= (caps->cap & PCI_ACS_CR);
>  
>  	/* Upstream Forwarding */
> -	ctrl |= (cap & PCI_ACS_UF);
> +	caps->ctrl |= (caps->cap & PCI_ACS_UF);
>  
>  	/* Enable Translation Blocking for external devices and noats */
>  	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
> -		ctrl |= (cap & PCI_ACS_TB);
> -
> -	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
> +		caps->ctrl |= (caps->cap & PCI_ACS_TB);
>  }
>  
>  /**
> @@ -988,23 +1007,33 @@ static void pci_std_enable_acs(struct pci_dev *dev)
>   */
>  static void pci_enable_acs(struct pci_dev *dev)
>  {
> -	if (!pci_acs_enable)
> -		goto disable_acs_redir;
> +	struct pci_acs caps;
> +	int pos;
> +
> +	pos = dev->acs_cap;
> +	if (!pos)
> +		return;
>  
> -	if (!pci_dev_specific_enable_acs(dev))
> -		goto disable_acs_redir;
> +	pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
> +	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
> +	caps.fw_ctrl = caps.ctrl;
>  
> -	pci_std_enable_acs(dev);
> +	/* If an iommu is present we start with kernel default caps */
> +	if (pci_acs_enable) {
> +		if (pci_dev_specific_enable_acs(dev))
> +			pci_std_enable_acs(dev, &caps);
> +	}
>  
> -disable_acs_redir:
>  	/*
> -	 * Note: pci_disable_acs_redir() must be called even if ACS was not
> -	 * enabled by the kernel because it may have been enabled by
> -	 * platform firmware.  So if we are told to disable it, we should
> -	 * always disable it after setting the kernel's default
> -	 * preferences.
> +	 * Always apply caps from the command line, even if there is no iommu.
> +	 * Trust that the admin has a reason to change the ACS settings.
>  	 */
> -	pci_disable_acs_redir(dev);
> +	__pci_config_acs(dev, &caps, disable_acs_redir_param,
> +			 PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
> +			 ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
> +	__pci_config_acs(dev, &caps, config_acs_param, 0, 0);
> +
> +	pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
>  }
>  
>  /**
> @@ -7023,6 +7052,8 @@ static int __init pci_setup(char *str)
>  				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
>  			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
>  				disable_acs_redir_param = str + 18;
> +			} else if (!strncmp(str, "config_acs=", 11)) {
> +				config_acs_param = str + 11;
>  			} else {
>  				pr_err("PCI: Unknown option `%s'\n", str);
>  			}
> @@ -7047,6 +7078,7 @@ static int __init pci_realloc_setup_params(void)
>  	resource_alignment_param = kstrdup(resource_alignment_param,
>  					   GFP_KERNEL);
>  	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
> +	config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
>  
>  	return 0;
>  }
> -- 
> 2.25.1
>
Jason Gunthorpe May 23, 2024, 3:16 p.m. UTC | #2
On Thu, May 23, 2024 at 09:59:36AM -0500, Bjorn Helgaas wrote:
> [+cc iommu folks]
> 
> On Thu, May 23, 2024 at 12:05:28PM +0530, Vidya Sagar wrote:
> > For iommu_groups to form correctly, the ACS settings in the PCIe fabric
> > need to be set up early in the boot process, either via the BIOS or via
> > the kernel disable_acs_redir parameter.
> 
> Can you point to the iommu code that is involved here?  It sounds like
> the iommu_groups are built at boot time and are immutable after that?

They are created when the struct device is plugged
in. pci_device_group() does the logic.

Notably groups can't/don't change if details like ACS change after the
groups are set up.
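
For reference, a minimal sketch of the isolation walk that group
formation relies on (my paraphrase of pci_acs_path_enabled() and the
REQ_ACS_FLAGS mask used by pci_device_group(); the in-tree code differs
in detail, and path_isolated() is a made-up name):

	/* ACS flags a bridge must enforce for the devices below it to
	 * be considered isolated from their peers. */
	#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

	/* A device can get its own iommu_group only if every bridge on
	 * the upstream path enforces the isolation bits. */
	static bool path_isolated(struct pci_dev *pdev)
	{
		struct pci_dev *parent = pci_upstream_bridge(pdev);

		for (; parent; parent = pci_upstream_bridge(parent))
			if (!pci_acs_enabled(parent, REQ_ACS_FLAGS))
				return false;
		return true;
	}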

There are a lot of instructions out there telling people to boot their
servers and then manually change the ACS flags with setpci or
something, and these are not good instructions since they defeat the
VFIO group-based security mechanisms.

> If we need per-device ACS config that depends on the workload, it
> seems kind of problematic to only be able to specify this at boot
> time.  I guess we would need to reboot if we want to run a workload
> that needs a different config?

Basically. The main difference I'd see is whether the server is a VM host
or running bare metal apps. You can get more efficiency if you change
things for the bare metal case, and often bare metal will want to turn
the iommu off while a VM host often wants more of it turned on.

> Is this the iommu usage model we want in the long term?

There is some path to more dynamic behavior here, but it would require
separating groups into two components - devices that are together
because they are physically sharing translation (aliases and things)
from devices that are together because they share a security boundary
(ACS).

It is more believable we could dynamically change security group
assignments for VFIO than translation group assignment. I don't know
anyone interested in this right now - Alex and I have only talked
about it as a possibility a while back.

FWIW I don't view this patch as excluding more dynamism in the future,
but it is the best way to work with the current state of affairs, and
definitely better than setpci instructions.

Thanks,
Jason
Vidya Sagar June 3, 2024, 7:50 a.m. UTC | #3
Hi Bjorn,
Could you let me know if Jason's reply answers your question?
Please let me know if you are looking for any more information.

Thanks,
Vidya Sagar

> -----Original Message-----
> From: Jason Gunthorpe <jgg@nvidia.com>
> Sent: Thursday, May 23, 2024 8:46 PM
> To: Bjorn Helgaas <helgaas@kernel.org>
> Cc: Vidya Sagar <vidyas@nvidia.com>; corbet@lwn.net; bhelgaas@google.com; Gal
> Shalom <galshalom@nvidia.com>; Leon Romanovsky <leonro@nvidia.com>; Thierry
> Reding <treding@nvidia.com>; Jon Hunter <jonathanh@nvidia.com>; Masoud
> Moshref Javadi <mmoshrefjava@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>;
> Vikram Sethi <vsethi@nvidia.com>; Shanker Donthineni <sdonthineni@nvidia.com>;
> Jiandi An <jan@nvidia.com>; Tushar Dave <tdave@nvidia.com>; linux-
> doc@vger.kernel.org; linux-pci@vger.kernel.org; linux-kernel@vger.kernel.org;
> Krishna Thota <kthota@nvidia.com>; Manikanta Maddireddy
> <mmaddireddy@nvidia.com>; sagar.tv@gmail.com; Joerg Roedel <joro@8bytes.org>;
> Will Deacon <will@kernel.org>; Robin Murphy <robin.murphy@arm.com>;
> iommu@lists.linux.dev
> Subject: Re: [PATCH V3] PCI: Extend ACS configurability
> 
> On Thu, May 23, 2024 at 09:59:36AM -0500, Bjorn Helgaas wrote:
> > [+cc iommu folks]
> >
> > On Thu, May 23, 2024 at 12:05:28PM +0530, Vidya Sagar wrote:
> > > For iommu_groups to form correctly, the ACS settings in the PCIe
> > > fabric need to be set up early in the boot process, either via the
> > > BIOS or via the kernel disable_acs_redir parameter.
> >
> > Can you point to the iommu code that is involved here?  It sounds like
> > the iommu_groups are built at boot time and are immutable after that?
> 
> They are created when the struct device is plugged in. pci_device_group() does the
> logic.
> 
> Notably groups can't/don't change if details like ACS change after the groups are
> set up.
> 
> There are a lot of instructions out there telling people to boot their servers and then
> manually change the ACS flags with setpci or something, and these are not good
> instructions since they defeat the VFIO group-based security mechanisms.
> 
> > If we need per-device ACS config that depends on the workload, it
> > seems kind of problematic to only be able to specify this at boot
> > time.  I guess we would need to reboot if we want to run a workload
> > that needs a different config?
> 
> Basically. The main difference I'd see is whether the server is a VM host or running bare
> metal apps. You can get more efficiency if you change things for the bare metal case,
> and often bare metal will want to turn the iommu off while a VM host often wants
> more of it turned on.
> 
> > Is this the iommu usage model we want in the long term?
> 
> There is some path to more dynamic behavior here, but it would require separating
> groups into two components - devices that are together because they are physically
> sharing translation (aliases and things) from devices that are together because they
> share a security boundary (ACS).
> 
> It is more believable we could dynamically change security group assignments for VFIO
> than translation group assignment. I don't know anyone interested in this right now -
> Alex and I have only talked about it as a possibility a while back.
> 
> FWIW I don't view this patch as excluding more dynamism in the future, but it is the best
> way to work with the current state of affairs, and definitely better than setpci
> instructions.
> 
> Thanks,
> Jason
Bjorn Helgaas June 7, 2024, 7:30 p.m. UTC | #4
On Mon, Jun 03, 2024 at 07:50:59AM +0000, Vidya Sagar wrote:
> Hi Bjorn,
> Could you let me know if Jason's reply answers your question?
> Please let me know if you are looking for any more information.

I think we should add some of that content to the commit log.  It
needs:

  - Subject line that advertises some good thing.

  - A description of why users want this.  I have no idea what the
    actual benefit is, but I'm looking for something at the level of
    "The default ACS settings put A and B in different IOMMU groups,
    preventing P2PDMA between them.  If we disable ACS X, A and B will
    be put in the same group and P2PDMA will work".

  - A primer on how users can affect IOMMU groups by enabling/
    disabling ACS settings so they can use this without just blind
    trial and error.  A note that this is immutable except at boot
    time.

  - A pointer to the code that determines IOMMU groups based on the
    ACS settings.  Similar to the above, but more useful for
    developers.

If we assert "for iommu_groups to form correctly ...", a hint about
why/where this is so would be helpful.

"Correctly" is not quite the right word here; it's just a fact that
the ACS settings determined at boot time result in certain IOMMU
groups.  If the user desires different groups, it's not that something
is "incorrect"; it's just that the user may have to accept less
isolation to get the desired IOMMU groups.

> > -----Original Message-----
> > From: Jason Gunthorpe <jgg@nvidia.com>
> > ...
> > 
> > On Thu, May 23, 2024 at 09:59:36AM -0500, Bjorn Helgaas wrote:
> > > [+cc iommu folks]
> > >
> > > On Thu, May 23, 2024 at 12:05:28PM +0530, Vidya Sagar wrote:
> > > > For iommu_groups to form correctly, the ACS settings in the PCIe
> > > > fabric need to be set up early in the boot process, either via the
> > > > BIOS or via the kernel disable_acs_redir parameter.
> > >
> > > Can you point to the iommu code that is involved here?  It sounds like
> > > the iommu_groups are built at boot time and are immutable after that?
> > 
> > They are created when the struct device is plugged in. pci_device_group() does the
> > logic.
> > 
> > Notably groups can't/don't change if details like ACS change after the groups are
> > set up.
> > 
> > There are a lot of instructions out there telling people to boot their servers and then
> > manually change the ACS flags with setpci or something, and these are not good
> > instructions since they defeat the VFIO group-based security mechanisms.
> > 
> > > If we need per-device ACS config that depends on the workload, it
> > > seems kind of problematic to only be able to specify this at boot
> > > time.  I guess we would need to reboot if we want to run a workload
> > > that needs a different config?
> > 
> > Basically. The main difference I'd see is whether the server is a VM host or running bare
> > metal apps. You can get more efficiency if you change things for the bare metal case,
> > and often bare metal will want to turn the iommu off while a VM host often wants
> > more of it turned on.
> > 
> > > Is this the iommu usage model we want in the long term?
> > 
> > There is some path to more dynamic behavior here, but it would require separating
> > groups into two components - devices that are together because they are physically
> > sharing translation (aliases and things) from devices that are together because they
> > share a security boundary (ACS).
> > 
> > It is more believable we could dynamically change security group assignments for VFIO
> > than translation group assignment. I don't know anyone interested in this right now -
> > Alex and I have only talked about it as a possibility a while back.
> > 
> > FWIW I don't view this patch as excluding more dynamism in the future, but it is the best
> > way to work with the current state of affairs, and definitely better than setpci
> > instructions.
> > 
> > Thanks,
> > Jason
Jason Gunthorpe June 10, 2024, 11:38 a.m. UTC | #5
On Fri, Jun 07, 2024 at 02:30:55PM -0500, Bjorn Helgaas wrote:
> "Correctly" is not quite the right word here; it's just a fact that
> the ACS settings determined at boot time result in certain IOMMU
> groups.  If the user desires different groups, it's not that something
> is "incorrect"; it's just that the user may have to accept less
> isolation to get the desired IOMMU groups.

That is not quite accurate. There are HW configurations where ACS
needs to be a certain way for the HW to work with P2P at all. It isn't
just an optimization or something the user accepts; if they want P2P
at all they must get an ACS configuration appropriate for their system.

Jason
Jason Gunthorpe June 12, 2024, 12:19 p.m. UTC | #6
On Thu, May 23, 2024 at 12:05:28PM +0530, Vidya Sagar wrote:
> For iommu_groups to form correctly, the ACS settings in the PCIe fabric
> need to be set up early in the boot process, either via the BIOS or via
> the kernel disable_acs_redir parameter.
> 
> disable_acs_redir allows clearing the RR|CR|EC ACS flags, but the PCIe
> spec Rev3.0 already defines 7 different ACS-related flags, with many more
> useful combinations depending on the fabric design.
> 
> For backward compatibility, leave 'disable_acs_redir' as is and add a
> new parameter 'config_acs' so that the user can directly specify the ACS
> flags to set on a per-device basis. Use a syntax similar to the existing
> 'resource_alignment' parameter by using the @ character, and have the user
> specify the ACS flags using a bit encoding. If both 'disable_acs_redir' and
> 'config_acs' are specified for a particular device, the configuration
> specified through 'config_acs' takes precedence.
> 
> Signed-off-by: Vidya Sagar <vidyas@nvidia.com>
> ---
> v3:
> * Fixed a documentation issue reported by kernel test bot
> 
> v2:
> * Refactored the code as per Jason's suggestion

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>

Jason
Bjorn Helgaas June 12, 2024, 9:29 p.m. UTC | #7
[+cc Alex since VFIO entered the conversation; thread at
https://lore.kernel.org/r/20240523063528.199908-1-vidyas@nvidia.com]

On Mon, Jun 10, 2024 at 08:38:49AM -0300, Jason Gunthorpe wrote:
> On Fri, Jun 07, 2024 at 02:30:55PM -0500, Bjorn Helgaas wrote:
> > "Correctly" is not quite the right word here; it's just a fact that
> > the ACS settings determined at boot time result in certain IOMMU
> > groups.  If the user desires different groups, it's not that something
> > is "incorrect"; it's just that the user may have to accept less
> > isolation to get the desired IOMMU groups.
> 
> That is not quite accurate. There are HW configurations where ACS
> needs to be a certain way for the HW to work with P2P at all. It isn't
> just an optimization or something the user accepts; if they want P2P
> at all they must get an ACS configuration appropriate for their system.

The current wording of "For iommu_groups to form correctly, the ACS
settings in the PCIe fabric need to be set up early" suggests that the
way we currently configure ACS is incorrect in general, regardless of
P2PDMA.

But my impression is that there's a trade-off between isolation and
the ability to do P2PDMA, and users have different requirements, and
the preference for less isolation/more P2PDMA is no more "correct"
than a preference for more isolation/less P2PDMA.

The kernel-parameters doc mentions the reduced isolation idea, but I
think we need a little more guidance for users.  It's probably too
much detail for kernel-parameters, but the commit log would be a good
place.

Maybe something like this:

  PCIe ACS settings determine how devices are put into iommu_groups.
  The iommu_groups in turn determine which devices can be passed
  through to VMs and whether P2PDMA between them is possible.  The
  iommu_groups are built at enumeration-time and are currently static.

  Add a kernel command-line option to change ACS settings for specific
  devices, which allows more devices to be put in the same
  iommu_group, at the cost of reduced isolation between them.

  ACS applies to PCIe Downstream Ports and multi-function devices.
  The default ACS settings are XXX and cause devices below an
  ACS-capable port to be put in an iommu_group isolated from P2PDMA
  from outside the group.

  Disabling ACS XXX at a port allows ... downstream devices to be
  included in the same iommu_group as ...

  [I don't know exactly how this works, so please make it make sense].
Jason Gunthorpe June 12, 2024, 11:23 p.m. UTC | #8
On Wed, Jun 12, 2024 at 04:29:03PM -0500, Bjorn Helgaas wrote:
> [+cc Alex since VFIO entered the conversation; thread at
> https://lore.kernel.org/r/20240523063528.199908-1-vidyas@nvidia.com]
> 
> On Mon, Jun 10, 2024 at 08:38:49AM -0300, Jason Gunthorpe wrote:
> > On Fri, Jun 07, 2024 at 02:30:55PM -0500, Bjorn Helgaas wrote:
> > > "Correctly" is not quite the right word here; it's just a fact that
> > > the ACS settings determined at boot time result in certain IOMMU
> > > groups.  If the user desires different groups, it's not that something
> > > is "incorrect"; it's just that the user may have to accept less
> > > isolation to get the desired IOMMU groups.
> > 
> > That is not quite accurate. There are HW configurations where ACS
> > needs to be a certain way for the HW to work with P2P at all. It isn't
> > just an optimization or something the user accepts; if they want P2P
> > at all they must get an ACS configuration appropriate for their system.
> 
> The current wording of "For iommu_groups to form correctly, the ACS
> settings in the PCIe fabric need to be set up early" suggests that the
> way we currently configure ACS is incorrect in general, regardless of
> P2PDMA.

Yes, I'd agree with this. We don't have enough information to
configure it properly in the kernel in an automatic way. We don't
know if pairs of devices even have SW enablement to do P2P in the
kernel and we don't accurately know what issues the root complex
has. All of this information goes into choosing the right ACS bits.

> But my impression is that there's a trade-off between isolation and
> the ability to do P2PDMA, and users have different requirements, and
> the preference for less isolation/more P2PDMA is no more "correct"
> than a preference for more isolation/less P2PDMA.

Sure, that makes sense
 
> Maybe something like this:
> 
>   PCIe ACS settings determine how devices are put into iommu_groups.
>   The iommu_groups in turn determine which devices can be passed
>   through to VMs and whether P2PDMA between them is possible.  The
>   iommu_groups are built at enumeration-time and are currently static.

Not quite, the iommu_groups don't have a lot to do with P2P. Even
devices in the same kernel group can still have non-working P2P.

Maybe:

 PCIe ACS settings control the level of isolation and the possible P2P
 paths between devices. With greater isolation the kernel will create
 smaller iommu_groups and with less isolation there is more HW that
 can achieve P2P transfers. From a virtualization perspective all
 devices in the same iommu_group must be assigned to the same VM as
 they lack security isolation.

 There is no way for the kernel to automatically know the correct
 ACS settings for any given system and workload. Existing command line
 options allow only for large scale change, disabling all
 isolation, but this is not sufficient for more complex cases.

 Add a kernel command-line option to directly control all the ACS bits
 for specific devices, which allows the operator to set up the right
 level of isolation to achieve the desired P2P configuration. The
 definition is future-proof: when new ACS bits are added to the spec,
 the open syntax can be extended.

 ACS needs to be set up early in the kernel boot as the ACS settings
 affect how iommu_groups are formed. iommu_group formation is a
 one-time event during initial device discovery, so changing ACS bits
 after kernel boot can result in an inaccurate view of the iommu_groups
 compared to the current isolation configuration.
 
 ACS applies to PCIe Downstream Ports and multi-function devices.
 The default ACS settings are strict and deny any direct traffic
 between two functions. This results in the smallest iommu_group the
 HW can support. Frequently these values result in slow or
 non-working P2PDMA.

 ACS offers a range of security choices controlling how traffic is
 allowed to go directly between two devices. Some popular choices:
   - Full prevention
   - Translated requests can be direct, with various options
   - Asymmetric direct traffic, A can reach B but not the reverse
   - All traffic can be direct
 Along with some other less common ones for special topologies.

 The intention is that this option would be used with expert knowledge
 of the HW capability and workload to achieve the desired
 configuration.
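
To make that concrete, a few illustrative encodings using the new
option (hypothetical BDF, right-most flag character is bit 0; the
exact values still need to be checked against the topology and root
complex, so treat these as sketches rather than recipes):

  Full prevention:              pci=config_acs=xx111x1@0000:03:00.0
  Direct only when translated:  pci=config_acs=1x111x1@0000:03:00.0
  All traffic direct:           pci=config_acs=xx000x0@0000:03:00.0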

Jason
Bjorn Helgaas June 13, 2024, 10:05 p.m. UTC | #9
On Wed, Jun 12, 2024 at 08:23:01PM -0300, Jason Gunthorpe wrote:
> On Wed, Jun 12, 2024 at 04:29:03PM -0500, Bjorn Helgaas wrote:
> > [+cc Alex since VFIO entered the conversation; thread at
> > https://lore.kernel.org/r/20240523063528.199908-1-vidyas@nvidia.com]
> > 
> > On Mon, Jun 10, 2024 at 08:38:49AM -0300, Jason Gunthorpe wrote:
> > > On Fri, Jun 07, 2024 at 02:30:55PM -0500, Bjorn Helgaas wrote:
> > > > "Correctly" is not quite the right word here; it's just a fact that
> > > > the ACS settings determined at boot time result in certain IOMMU
> > > > groups.  If the user desires different groups, it's not that something
> > > > is "incorrect"; it's just that the user may have to accept less
> > > > isolation to get the desired IOMMU groups.
> > > 
> > > That is not quite accurate. There are HW configurations where ACS
> > > needs to be a certain way for the HW to work with P2P at all. It isn't
> > > just an optimization or something the user accepts; if they want P2P
> > > at all they must get an ACS configuration appropriate for their system.
> > 
> > The current wording of "For iommu_groups to form correctly, the ACS
> > settings in the PCIe fabric need to be set up early" suggests that the
> > way we currently configure ACS is incorrect in general, regardless of
> > P2PDMA.
> 
> Yes, I'd agree with this. We don't have enough information to
> configure it properly in the kernel in an automatic way. We don't
> know if pairs of devices even have SW enablement to do P2P in the
> kernel and we don't accurately know what issues the root complex
> has. All of this information goes into choosing the right ACS bits.
> 
> > But my impression is that there's a trade-off between isolation and
> > the ability to do P2PDMA, and users have different requirements, and
> > the preference for less isolation/more P2PDMA is no more "correct"
> > than a preference for more isolation/less P2PDMA.
> 
> Sure, that makes sense
>  
> > Maybe something like this:
> > 
> >   PCIe ACS settings determine how devices are put into iommu_groups.
> >   The iommu_groups in turn determine which devices can be passed
> >   through to VMs and whether P2PDMA between them is possible.  The
> >   iommu_groups are built at enumeration-time and are currently static.
> 
> Not quite, the iommu_groups don't have a lot to do with P2P. Even
> devices in the same kernel group can still have non-working P2P.
> 
> Maybe:
> 
>  PCIe ACS settings control the level of isolation and the possible P2P
>  paths between devices. With greater isolation the kernel will create
>  smaller iommu_groups and with less isolation there is more HW that
>  can achieve P2P transfers. From a virtualization perspective all
>  devices in the same iommu_group must be assigned to the same VM as
>  they lack security isolation.
> 
>  There is no way for the kernel to automatically know the correct
>  ACS settings for any given system and workload. Existing command line
>  options allow only for large scale change, disabling all
>  isolation, but this is not sufficient for more complex cases.
> 
>  Add a kernel command-line option to directly control all the ACS bits
>  for specific devices, which allows the operator to set up the right
>  level of isolation to achieve the desired P2P configuration. The
>  definition is future-proof: when new ACS bits are added to the spec,
>  the open syntax can be extended.
> 
>  ACS needs to be set up early in the kernel boot as the ACS settings
>  affect how iommu_groups are formed. iommu_group formation is a
>  one-time event during initial device discovery, so changing ACS bits
>  after kernel boot can result in an inaccurate view of the iommu_groups
>  compared to the current isolation configuration.
>  
>  ACS applies to PCIe Downstream Ports and multi-function devices.
>  The default ACS settings are strict and deny any direct traffic
>  between two functions. This results in the smallest iommu_group the
>  HW can support. Frequently these values result in slow or
>  non-working P2PDMA.
> 
>  ACS offers a range of security choices controlling how traffic is
>  allowed to go directly between two devices. Some popular choices:
>    - Full prevention
>    - Translated requests can be direct, with various options
>    - Asymmetric direct traffic, A can reach B but not the reverse
>    - All traffic can be direct
>  Along with some other less common ones for special topologies.
> 
>  The intention is that this option would be used with expert knowledge
>  of the HW capability and workload to achieve the desired
>  configuration.

That all sounds good.  IIUC the current default is full prevention (I
guess you said that a few paragraphs up).

It's unfortunate that this requires so much expert knowledge to use,
but I guess we don't really have a good alternative.  The only way I
can think of to help would be some kind of white paper or examples in
Documentation/PCI/.

Bjorn
Alex Williamson June 13, 2024, 10:38 p.m. UTC | #10
On Wed, 12 Jun 2024 20:23:01 -0300
Jason Gunthorpe <jgg@nvidia.com> wrote:

> On Wed, Jun 12, 2024 at 04:29:03PM -0500, Bjorn Helgaas wrote:
> > [+cc Alex since VFIO entered the conversation; thread at
> > https://lore.kernel.org/r/20240523063528.199908-1-vidyas@nvidia.com]
> > 
> > On Mon, Jun 10, 2024 at 08:38:49AM -0300, Jason Gunthorpe wrote:  
> > > On Fri, Jun 07, 2024 at 02:30:55PM -0500, Bjorn Helgaas wrote:  
> > > > "Correctly" is not quite the right word here; it's just a fact that
> > > > the ACS settings determined at boot time result in certain IOMMU
> > > > groups.  If the user desires different groups, it's not that something
> > > > is "incorrect"; it's just that the user may have to accept less
> > > > isolation to get the desired IOMMU groups.  
> > > 
> > > That is not quite accurate. There are HW configurations where ACS
> > > needs to be a certain way for the HW to work with P2P at all. It isn't
> > > just an optimization or something the user accepts; if they want P2P
> > > at all they must get an ACS configuration appropriate for their system.
> > 
> > The current wording of "For iommu_groups to form correctly, the ACS
> > settings in the PCIe fabric need to be set up early" suggests that the
> > way we currently configure ACS is incorrect in general, regardless of
> > P2PDMA.  
> 
> Yes, I'd agree with this. We don't have enough information to
> configure it properly in the kernel in an automatic way. We don't
> know if pairs of devices even have SW enablement to do P2P in the
> kernel and we don't accurately know what issues the root complex
> has. All of this information goes into choosing the right ACS bits.
> 
> > But my impression is that there's a trade-off between isolation and
> > the ability to do P2PDMA, and users have different requirements, and
> > the preference for less isolation/more P2PDMA is no more "correct"
> > than a preference for more isolation/less P2PDMA.  
> 
> Sure, that makes sense
>  
> > Maybe something like this:
> > 
> >   PCIe ACS settings determine how devices are put into iommu_groups.
> >   The iommu_groups in turn determine which devices can be passed
> >   through to VMs and whether P2PDMA between them is possible.  The
> >   iommu_groups are built at enumeration-time and are currently static.  
> 
> Not quite, the iommu_groups don't have a lot to do with P2P. Even
> devices in the same kernel group can still have non-working P2P.
> 
> Maybe:
> 
>  PCIe ACS settings control the level of isolation and the possible P2P
>  paths between devices. With greater isolation the kernel will create
>  smaller iommu_groups and with less isolation there is more HW that
>  can achieve P2P transfers. From a virtualization perspective all
>  devices in the same iommu_group must be assigned to the same VM as
>  they lack security isolation.
> 
>  There is no way for the kernel to automatically know the correct
>  ACS settings for any given system and workload. Existing command line
>  options allow only for large scale change, disabling all
>  isolation, but this is not sufficient for more complex cases.
> 
>  Add a kernel command-line option to directly control all the ACS bits
>  for specific devices, which allows the operator to set up the right
>  level of isolation to achieve the desired P2P configuration. The
>  definition is future-proof: when new ACS bits are added to the spec,
>  the open syntax can be extended.
> 
>  ACS needs to be set up early in the kernel boot as the ACS settings
>  affect how iommu_groups are formed. iommu_group formation is a
>  one-time event during initial device discovery, so changing ACS bits
>  after kernel boot can result in an inaccurate view of the iommu_groups
>  compared to the current isolation configuration.
>  
>  ACS applies to PCIe Downstream Ports and multi-function devices.
>  The default ACS settings are strict and deny any direct traffic
>  between two functions. This results in the smallest iommu_group the
>  HW can support. Frequently these values result in slow or
>  non-working P2PDMA.
> 
>  ACS offers a range of security choices controlling how traffic is
>  allowed to go directly between two devices. Some popular choices:
>    - Full prevention
>    - Translated requests can be direct, with various options
>    - Asymmetric direct traffic, A can reach B but not the reverse
>    - All traffic can be direct
>  Along with some other less common ones for special topologies.
> 
>  The intention is that this option would be used with expert knowledge
>  of the HW capability and workload to achieve the desired
>  configuration.

FWIW, this sounds good to me too.  There certainly needed to be some
clarification that this controls the isolation of devices, and that
IOMMU groups are determined by aspects of that isolation rather than
this option directly and exclusively being used to configure grouping.
I think this does that.  Thanks,

Alex
Jason Gunthorpe June 13, 2024, 11:36 p.m. UTC | #11
On Thu, Jun 13, 2024 at 05:05:20PM -0500, Bjorn Helgaas wrote:

> It's unfortunate that this requires so much expert knowledge to use,
> but I guess we don't really have a good alternative.  The only way I
> can think of to help would be some kind of white paper or examples in
> Documentation/PCI/.

So far I am seeing the system supplier supply the appropriate
instructions. It is already this way for setpci, and yes, it is a
huge PITA.

At one point Stephen Bates was talking about some ACPI tables to give
the OS more information, but I don't think that went anywhere.

Jason

Patch

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 41644336e..b4a8207eb 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4456,6 +4456,28 @@ 
 				bridges without forcing it upstream. Note:
 				this removes isolation between devices and
 				may put more devices in an IOMMU group.
+		config_acs=
+				Format:
+				=<ACS flags>@<pci_dev>[; ...]
+				Specify one or more PCI devices (in the format
+				specified above) optionally prepended with flags
+				and separated by semicolons. The respective
+				capabilities will be enabled, disabled or unchanged
+				based on what is specified in flags.
+				ACS flags are defined as follows:
+				bit-0 : ACS Source Validation
+				bit-1 : ACS Translation Blocking
+				bit-2 : ACS P2P Request Redirect
+				bit-3 : ACS P2P Completion Redirect
+				bit-4 : ACS Upstream Forwarding
+				bit-5 : ACS P2P Egress Control
+				bit-6 : ACS Direct Translated P2P
+				Each bit can be marked as:
+				'0' - force disabled
+				'1' - force enabled
+				'x' - unchanged.
+				Note: this may remove isolation between devices
+				and may put more devices in an IOMMU group.
 		force_floating	[S390] Force usage of floating interrupts.
 		nomio		[S390] Do not use MIO instructions.
 		norid		[S390] ignore the RID field and force use of
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a607f277c..a46264f83 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -887,30 +887,67 @@  void pci_request_acs(void)
 }
 
 static const char *disable_acs_redir_param;
+static const char *config_acs_param;
 
-/**
- * pci_disable_acs_redir - disable ACS redirect capabilities
- * @dev: the PCI device
- *
- * For only devices specified in the disable_acs_redir parameter.
- */
-static void pci_disable_acs_redir(struct pci_dev *dev)
+struct pci_acs {
+	u16 cap;
+	u16 ctrl;
+	u16 fw_ctrl;
+};
+
+static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
+			     const char *p, u16 mask, u16 flags)
 {
+	char *delimit;
 	int ret = 0;
-	const char *p;
-	int pos;
-	u16 ctrl;
 
-	if (!disable_acs_redir_param)
+	if (!p)
 		return;
 
-	p = disable_acs_redir_param;
 	while (*p) {
+		if (!mask) {
+			/* Check for ACS flags */
+			delimit = strstr(p, "@");
+			if (delimit) {
+				int end;
+				u32 shift = 0;
+
+				end = delimit - p - 1;
+
+				while (end > -1) {
+					if (*(p + end) == '0') {
+						mask |= 1 << shift;
+						shift++;
+						end--;
+					} else if (*(p + end) == '1') {
+						mask |= 1 << shift;
+						flags |= 1 << shift;
+						shift++;
+						end--;
+					} else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
+						shift++;
+						end--;
+					} else {
+						pci_err(dev, "Invalid ACS flags... Ignoring\n");
+						return;
+					}
+				}
+				p = delimit + 1;
+			} else {
+				pci_err(dev, "ACS Flags missing\n");
+				return;
+			}
+		}
+
+		if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
+			    PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
+			pci_err(dev, "Invalid ACS flags specified\n");
+			return;
+		}
+
 		ret = pci_dev_str_match(dev, p, &p);
 		if (ret < 0) {
-			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
-				     disable_acs_redir_param);
-
+			pr_info_once("PCI: Can't parse acs command line parameter\n");
 			break;
 		} else if (ret == 1) {
 			/* Found a match */
@@ -930,56 +967,38 @@  static void pci_disable_acs_redir(struct pci_dev *dev)
 	if (!pci_dev_specific_disable_acs_redir(dev))
 		return;
 
-	pos = dev->acs_cap;
-	if (!pos) {
-		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
-		return;
-	}
-
-	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+	pci_dbg(dev, "ACS mask  = 0x%X\n", mask);
+	pci_dbg(dev, "ACS flags = 0x%X\n", flags);
 
-	/* P2P Request & Completion Redirect */
-	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
+	/* If mask is 0 then we copy the bit from the firmware setting. */
+	caps->ctrl = (caps->ctrl & ~mask) | (caps->fw_ctrl & mask);
+	caps->ctrl |= flags;
 
-	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
-
-	pci_info(dev, "disabled ACS redirect\n");
+	pci_info(dev, "Configured ACS to 0x%x\n", caps->ctrl);
 }
 
 /**
  * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
  * @dev: the PCI device
+ * @caps: default ACS controls
  */
-static void pci_std_enable_acs(struct pci_dev *dev)
+static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
 {
-	int pos;
-	u16 cap;
-	u16 ctrl;
-
-	pos = dev->acs_cap;
-	if (!pos)
-		return;
-
-	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
-	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
-
 	/* Source Validation */
-	ctrl |= (cap & PCI_ACS_SV);
+	caps->ctrl |= (caps->cap & PCI_ACS_SV);
 
 	/* P2P Request Redirect */
-	ctrl |= (cap & PCI_ACS_RR);
+	caps->ctrl |= (caps->cap & PCI_ACS_RR);
 
 	/* P2P Completion Redirect */
-	ctrl |= (cap & PCI_ACS_CR);
+	caps->ctrl |= (caps->cap & PCI_ACS_CR);
 
 	/* Upstream Forwarding */
-	ctrl |= (cap & PCI_ACS_UF);
+	caps->ctrl |= (caps->cap & PCI_ACS_UF);
 
 	/* Enable Translation Blocking for external devices and noats */
 	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
-		ctrl |= (cap & PCI_ACS_TB);
-
-	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+		caps->ctrl |= (caps->cap & PCI_ACS_TB);
 }
 
 /**
@@ -988,23 +1007,33 @@  static void pci_std_enable_acs(struct pci_dev *dev)
  */
 static void pci_enable_acs(struct pci_dev *dev)
 {
-	if (!pci_acs_enable)
-		goto disable_acs_redir;
+	struct pci_acs caps;
+	int pos;
+
+	pos = dev->acs_cap;
+	if (!pos)
+		return;
 
-	if (!pci_dev_specific_enable_acs(dev))
-		goto disable_acs_redir;
+	pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
+	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
+	caps.fw_ctrl = caps.ctrl;
 
-	pci_std_enable_acs(dev);
+	/* If an iommu is present we start with kernel default caps */
+	if (pci_acs_enable) {
+		if (pci_dev_specific_enable_acs(dev))
+			pci_std_enable_acs(dev, &caps);
+	}
 
-disable_acs_redir:
 	/*
-	 * Note: pci_disable_acs_redir() must be called even if ACS was not
-	 * enabled by the kernel because it may have been enabled by
-	 * platform firmware.  So if we are told to disable it, we should
-	 * always disable it after setting the kernel's default
-	 * preferences.
+	 * Always apply caps from the command line, even if there is no iommu.
+	 * Trust that the admin has a reason to change the ACS settings.
 	 */
-	pci_disable_acs_redir(dev);
+	__pci_config_acs(dev, &caps, disable_acs_redir_param,
+			 PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
+			 ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
+	__pci_config_acs(dev, &caps, config_acs_param, 0, 0);
+
+	pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
 }
 
 /**
@@ -7023,6 +7052,8 @@  static int __init pci_setup(char *str)
 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
 				disable_acs_redir_param = str + 18;
+			} else if (!strncmp(str, "config_acs=", 11)) {
+				config_acs_param = str + 11;
 			} else {
 				pr_err("PCI: Unknown option `%s'\n", str);
 			}
@@ -7047,6 +7078,7 @@  static int __init pci_realloc_setup_params(void)
 	resource_alignment_param = kstrdup(resource_alignment_param,
 					   GFP_KERNEL);
 	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+	config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
 
 	return 0;
 }