[v4,4/7] PCI: keystone: Add support for PVU-based DMA isolation on AM654

Message ID: 361441d35d781b3c474b05921634bcae08d1a7b4.1725444016.git.jan.kiszka@siemens.com (mailing list archive)
State: Superseded
Series: soc: ti: Add and use PVU on K3-AM65 for DMA isolation

Commit Message

Jan Kiszka Sept. 4, 2024, 10 a.m. UTC
From: Jan Kiszka <jan.kiszka@siemens.com>

The AM654 lacks an IOMMU, so DMA requests from untrusted PCI devices
cannot be isolated to selected memory regions that way. Use static
PVU-based protection instead.

For this, we use the presence of restricted-dma-pool memory regions as
the trigger and register those regions as valid DMA targets with the
PVU. In addition, we need to enable the mapping of requester IDs to
VirtIDs in the PCI RC. So far, we use only a single VirtID, catching
all devices. This may be extended later on.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
CC: Lorenzo Pieralisi <lpieralisi@kernel.org>
CC: "Krzysztof Wilczyński" <kw@linux.com>
CC: Bjorn Helgaas <bhelgaas@google.com>
CC: linux-pci@vger.kernel.org
---
 drivers/pci/controller/dwc/pci-keystone.c | 101 ++++++++++++++++++++++
 1 file changed, 101 insertions(+)

Comments

Bjorn Helgaas Sept. 5, 2024, 4:33 p.m. UTC | #1
[+cc Kishon, just in case you have time/interest ;)]

On Wed, Sep 04, 2024 at 12:00:13PM +0200, Jan Kiszka wrote:
> From: Jan Kiszka <jan.kiszka@siemens.com>
> 
> The AM654 lacks an IOMMU, so DMA requests from untrusted PCI devices
> cannot be isolated to selected memory regions that way. Use static
> PVU-based protection instead.
> 
> For this, we use the presence of restricted-dma-pool memory regions as
> the trigger and register those regions as valid DMA targets with the
> PVU.

I guess the implication is that DMA *outside* the restricted-dma-pool
just gets dropped, and the Requester would see Completion Timeouts or
something for reads?

> In addition, we need to enable the mapping of requester IDs to VirtIDs
> in the PCI RC. So far, we use only a single VirtID, catching all
> devices. This may be extended later on.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
> CC: Lorenzo Pieralisi <lpieralisi@kernel.org>
> CC: "Krzysztof Wilczyński" <kw@linux.com>
> CC: Bjorn Helgaas <bhelgaas@google.com>
> CC: linux-pci@vger.kernel.org

Regrettably we don't really have anybody taking care of pci-keystone.c
(at least per MAINTAINERS).

> ---
>  drivers/pci/controller/dwc/pci-keystone.c | 101 ++++++++++++++++++++++
>  1 file changed, 101 insertions(+)
> 
> diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
> index 2219b1a866fa..96b871656da4 100644
> --- a/drivers/pci/controller/dwc/pci-keystone.c
> +++ b/drivers/pci/controller/dwc/pci-keystone.c
> @@ -19,6 +19,7 @@
>  #include <linux/mfd/syscon.h>
>  #include <linux/msi.h>
>  #include <linux/of.h>
> +#include <linux/of_address.h>
>  #include <linux/of_irq.h>
>  #include <linux/of_pci.h>
>  #include <linux/phy/phy.h>
> @@ -26,6 +27,7 @@
>  #include <linux/regmap.h>
>  #include <linux/resource.h>
>  #include <linux/signal.h>
> +#include <linux/ti-pvu.h>
>  
>  #include "../../pci.h"
>  #include "pcie-designware.h"
> @@ -111,6 +113,16 @@
>  
>  #define PCI_DEVICE_ID_TI_AM654X		0xb00c
>  
> +#define KS_PCI_VIRTID			0
> +
> +#define PCIE_VMAP_xP_CTRL		0x0
> +#define PCIE_VMAP_xP_REQID		0x4
> +#define PCIE_VMAP_xP_VIRTID		0x8
> +
> +#define PCIE_VMAP_xP_CTRL_EN		BIT(0)
> +
> +#define PCIE_VMAP_xP_VIRTID_VID_MASK	0xfff
> +
>  struct ks_pcie_of_data {
>  	enum dw_pcie_device_mode mode;
>  	const struct dw_pcie_host_ops *host_ops;
> @@ -1125,6 +1137,89 @@ static const struct of_device_id ks_pcie_of_match[] = {
>  	{ },
>  };
>  
> +#ifdef CONFIG_TI_PVU
> +static const char *ks_vmap_res[] = {"vmap_lp", "vmap_hp"};
> +
> +static int ks_init_restricted_dma(struct platform_device *pdev)
> +{
> +	struct device *dev = &pdev->dev;
> +	struct of_phandle_iterator it;
> +	bool init_vmap = false;
> +	struct resource phys;
> +	struct resource *res;
> +	void __iomem *base;
> +	unsigned int n;
> +	u32 val;
> +	int err;
> +
> +	of_for_each_phandle(&it, err, dev->of_node, "memory-region",
> +			    NULL, 0) {
> +		if (!of_device_is_compatible(it.node, "restricted-dma-pool"))
> +			continue;
> +
> +		err = of_address_to_resource(it.node, 0, &phys);
> +		if (err < 0) {
> +			dev_err(dev, "failed to parse memory region %pOF: %d\n",
> +				it.node, err);
> +			continue;
> +		}
> +
> +		err = ti_pvu_create_region(KS_PCI_VIRTID, &phys);
> +		if (err < 0)
> +			return err;
> +
> +		init_vmap = true;
> +	}

  if (!init_vmap)
    return 0;

would unindent the following.

> +
> +	if (init_vmap) {
> +		for (n = 0; n < ARRAY_SIZE(ks_vmap_res); n++) {

Since the only use of ks_vmap_res is here, this might be more readable
if there were a helper that would be called twice with the constant
strings, e.g.,

  helper(pdev, "vmap_lp");
  helper(pdev, "vmap_hp");

> +			res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
> +							   ks_vmap_res[n]);

Seems like we should check "res" for error before using it?

> +			base = devm_pci_remap_cfg_resource(dev, res);
> +			if (IS_ERR(base))
> +				return PTR_ERR(base);
> +
> +			writel(0, base + PCIE_VMAP_xP_REQID);
> +
> +			val = readl(base + PCIE_VMAP_xP_VIRTID);
> +			val &= ~PCIE_VMAP_xP_VIRTID_VID_MASK;
> +			val |= KS_PCI_VIRTID;
> +			writel(val, base + PCIE_VMAP_xP_VIRTID);
> +
> +			val = readl(base + PCIE_VMAP_xP_CTRL);
> +			val |= PCIE_VMAP_xP_CTRL_EN;
> +			writel(val, base + PCIE_VMAP_xP_CTRL);

Since there's no explicit use of "restricted-dma-pool" elsewhere in
this patch, I assume the setup above causes the controller to drop any
DMA accesses outside that pool?  I think a comment about how the
controller behavior is being changed would be useful.  Basically the
same comment as for the commit log.
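
Maybe something along these lines (wording invented, adjust as needed):

  /*
   * Map all inbound requester IDs to a single VirtID and enable the
   * mapping. From this point on, the PVU only forwards DMA that
   * targets a registered restricted-dma-pool region; all other DMA
   * is blocked.
   */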

Would there be any value in a dmesg note about a restriction being
enforced?  Seems like it's dependent on both CONFIG_TI_PVU and some DT
properties, and since those are invisible in the log, maybe a note
would help understand/debug any issues?

> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static void ks_release_restricted_dma(struct platform_device *pdev)
> +{
> +	struct of_phandle_iterator it;
> +	struct resource phys;
> +	int err;
> +
> +	of_for_each_phandle(&it, err, pdev->dev.of_node, "memory-region",
> +			    NULL, 0) {
> +		if (of_device_is_compatible(it.node, "restricted-dma-pool") &&
> +		    of_address_to_resource(it.node, 0, &phys) == 0)
> +			ti_pvu_remove_region(KS_PCI_VIRTID, &phys);

I guess it's not important to undo the PCIE_VMAP_xP_CTRL_EN and
related setup that was done by ks_init_restricted_dma()?

> +	}
> +}
> +#else
> +static inline int ks_init_restricted_dma(struct platform_device *pdev)
> +{
> +	return 0;
> +}
> +
> +static inline void ks_release_restricted_dma(struct platform_device *pdev)
> +{
> +}
> +#endif
> +
>  static int ks_pcie_probe(struct platform_device *pdev)
>  {
>  	const struct dw_pcie_host_ops *host_ops;
> @@ -1273,6 +1368,10 @@ static int ks_pcie_probe(struct platform_device *pdev)
>  	if (ret < 0)
>  		goto err_get_sync;
>  
> +	ret = ks_init_restricted_dma(pdev);
> +	if (ret < 0)
> +		goto err_get_sync;
> +
>  	switch (mode) {
>  	case DW_PCIE_RC_TYPE:
>  		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
> @@ -1354,6 +1453,8 @@ static void ks_pcie_remove(struct platform_device *pdev)
>  	int num_lanes = ks_pcie->num_lanes;
>  	struct device *dev = &pdev->dev;
>  
> +	ks_release_restricted_dma(pdev);
> +
>  	pm_runtime_put(dev);
>  	pm_runtime_disable(dev);
>  	ks_pcie_disable_phy(ks_pcie);
> -- 
> 2.43.0
>
Jan Kiszka Sept. 5, 2024, 7:07 p.m. UTC | #2
On 05.09.24 18:33, Bjorn Helgaas wrote:
> [+cc Kishon, just in case you have time/interest ;)]
> 
> On Wed, Sep 04, 2024 at 12:00:13PM +0200, Jan Kiszka wrote:
>> From: Jan Kiszka <jan.kiszka@siemens.com>
>>
>> The AM654 lacks an IOMMU, so DMA requests from untrusted PCI devices
>> cannot be isolated to selected memory regions that way. Use static
>> PVU-based protection instead.
>>
>> For this, we use the presence of restricted-dma-pool memory regions as
>> the trigger and register those regions as valid DMA targets with the
>> PVU.
> 
> I guess the implication is that DMA *outside* the restricted-dma-pool
> just gets dropped, and the Requester would see Completion Timeouts or
> something for reads?

I cannot tell what happens on the PCI bus in that case; maybe someone
from TI can help out.

On the host side, the PVU will record an error and raise an interrupt 
which will make the driver report that to the kernel log. That's quite 
similar to what IOMMU drivers do on translation faults.

> 
>> In addition, we need to enable the mapping of requester IDs to VirtIDs
>> in the PCI RC. So far, we use only a single VirtID, catching all
>> devices. This may be extended later on.
>>
>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
>> ---
>> CC: Lorenzo Pieralisi <lpieralisi@kernel.org>
>> CC: "Krzysztof Wilczyński" <kw@linux.com>
>> CC: Bjorn Helgaas <bhelgaas@google.com>
>> CC: linux-pci@vger.kernel.org
> 
> Regrettably we don't really have anybody taking care of pci-keystone.c
> (at least per MAINTAINERS).
> 
>> ---
>>  drivers/pci/controller/dwc/pci-keystone.c | 101 ++++++++++++++++++++++
>>  1 file changed, 101 insertions(+)
>>
>> diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
>> index 2219b1a866fa..96b871656da4 100644
>> --- a/drivers/pci/controller/dwc/pci-keystone.c
>> +++ b/drivers/pci/controller/dwc/pci-keystone.c
>> @@ -19,6 +19,7 @@
>>  #include <linux/mfd/syscon.h>
>>  #include <linux/msi.h>
>>  #include <linux/of.h>
>> +#include <linux/of_address.h>
>>  #include <linux/of_irq.h>
>>  #include <linux/of_pci.h>
>>  #include <linux/phy/phy.h>
>> @@ -26,6 +27,7 @@
>>  #include <linux/regmap.h>
>>  #include <linux/resource.h>
>>  #include <linux/signal.h>
>> +#include <linux/ti-pvu.h>
>>  
>>  #include "../../pci.h"
>>  #include "pcie-designware.h"
>> @@ -111,6 +113,16 @@
>>  
>>  #define PCI_DEVICE_ID_TI_AM654X		0xb00c
>>  
>> +#define KS_PCI_VIRTID			0
>> +
>> +#define PCIE_VMAP_xP_CTRL		0x0
>> +#define PCIE_VMAP_xP_REQID		0x4
>> +#define PCIE_VMAP_xP_VIRTID		0x8
>> +
>> +#define PCIE_VMAP_xP_CTRL_EN		BIT(0)
>> +
>> +#define PCIE_VMAP_xP_VIRTID_VID_MASK	0xfff
>> +
>>  struct ks_pcie_of_data {
>>  	enum dw_pcie_device_mode mode;
>>  	const struct dw_pcie_host_ops *host_ops;
>> @@ -1125,6 +1137,89 @@ static const struct of_device_id ks_pcie_of_match[] = {
>>  	{ },
>>  };
>>  
>> +#ifdef CONFIG_TI_PVU
>> +static const char *ks_vmap_res[] = {"vmap_lp", "vmap_hp"};
>> +
>> +static int ks_init_restricted_dma(struct platform_device *pdev)
>> +{
>> +	struct device *dev = &pdev->dev;
>> +	struct of_phandle_iterator it;
>> +	bool init_vmap = false;
>> +	struct resource phys;
>> +	struct resource *res;
>> +	void __iomem *base;
>> +	unsigned int n;
>> +	u32 val;
>> +	int err;
>> +
>> +	of_for_each_phandle(&it, err, dev->of_node, "memory-region",
>> +			    NULL, 0) {
>> +		if (!of_device_is_compatible(it.node, "restricted-dma-pool"))
>> +			continue;
>> +
>> +		err = of_address_to_resource(it.node, 0, &phys);
>> +		if (err < 0) {
>> +			dev_err(dev, "failed to parse memory region %pOF: %d\n",
>> +				it.node, err);
>> +			continue;
>> +		}
>> +
>> +		err = ti_pvu_create_region(KS_PCI_VIRTID, &phys);
>> +		if (err < 0)
>> +			return err;
>> +
>> +		init_vmap = true;
>> +	}
> 
>   if (!init_vmap)
>     return 0;
> 
> would unindent the following.
> 
>> +
>> +	if (init_vmap) {
>> +		for (n = 0; n < ARRAY_SIZE(ks_vmap_res); n++) {
> 
> Since the only use of ks_vmap_res is here, this might be more readable
> if there were a helper that would be called twice with the constant
> strings, e.g.,
> 
>   helper(pdev, "vmap_lp");
>   helper(pdev, "vmap_hp");

OK.

> 
>> +			res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
>> +							   ks_vmap_res[n]);
> 
> Seems like we should check "res" for error before using it?

Oh, unfinished constructions.

> 
>> +			base = devm_pci_remap_cfg_resource(dev, res);
>> +			if (IS_ERR(base))
>> +				return PTR_ERR(base);
>> +
>> +			writel(0, base + PCIE_VMAP_xP_REQID);
>> +
>> +			val = readl(base + PCIE_VMAP_xP_VIRTID);
>> +			val &= ~PCIE_VMAP_xP_VIRTID_VID_MASK;
>> +			val |= KS_PCI_VIRTID;
>> +			writel(val, base + PCIE_VMAP_xP_VIRTID);
>> +
>> +			val = readl(base + PCIE_VMAP_xP_CTRL);
>> +			val |= PCIE_VMAP_xP_CTRL_EN;
>> +			writel(val, base + PCIE_VMAP_xP_CTRL);
> 
> Since there's no explicit use of "restricted-dma-pool" elsewhere in
> this patch, I assume the setup above causes the controller to drop any
> DMA accesses outside that pool?  I think a comment about how the
> controller behavior is being changed would be useful.  Basically the
> same comment as for the commit log.

Right, this is what will happen. Will add some comment.

> 
> Would there be any value in a dmesg note about a restriction being
> enforced?  Seems like it's dependent on both CONFIG_TI_PVU and some DT
> properties, and since those are invisible in the log, maybe a note
> would help understand/debug any issues?

This is what you will see when a reserved region and the PVU are in
play:

keystone-pcie 5600000.pcie: assigned reserved memory node restricted-dma@c0000000
ti-pvu 30f80000.iommu: created TLB entry 0.2: 0xc0000000, psize 4 (0x02000000)
ti-pvu 30f80000.iommu: created TLB entry 0.3: 0xc2000000, psize 4 (0x02000000)
...
ath9k 0000:01:00.0: assigned reserved memory node restricted-dma@c0000000

> 
>> +		}
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static void ks_release_restricted_dma(struct platform_device *pdev)
>> +{
>> +	struct of_phandle_iterator it;
>> +	struct resource phys;
>> +	int err;
>> +
>> +	of_for_each_phandle(&it, err, pdev->dev.of_node, "memory-region",
>> +			    NULL, 0) {
>> +		if (of_device_is_compatible(it.node, "restricted-dma-pool") &&
>> +		    of_address_to_resource(it.node, 0, &phys) == 0)
>> +			ti_pvu_remove_region(KS_PCI_VIRTID, &phys);
> 
> I guess it's not important to undo the PCIE_VMAP_xP_CTRL_EN and
> related setup that was done by ks_init_restricted_dma()?
> 

Right, I didn't find a reason to do that.

>> +	}
>> +}
>> +#else
>> +static inline int ks_init_restricted_dma(struct platform_device *pdev)
>> +{
>> +	return 0;
>> +}
>> +
>> +static inline void ks_release_restricted_dma(struct platform_device *pdev)
>> +{
>> +}
>> +#endif
>> +
>>  static int ks_pcie_probe(struct platform_device *pdev)
>>  {
>>  	const struct dw_pcie_host_ops *host_ops;
>> @@ -1273,6 +1368,10 @@ static int ks_pcie_probe(struct platform_device *pdev)
>>  	if (ret < 0)
>>  		goto err_get_sync;
>>  
>> +	ret = ks_init_restricted_dma(pdev);
>> +	if (ret < 0)
>> +		goto err_get_sync;
>> +
>>  	switch (mode) {
>>  	case DW_PCIE_RC_TYPE:
>>  		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
>> @@ -1354,6 +1453,8 @@ static void ks_pcie_remove(struct platform_device *pdev)
>>  	int num_lanes = ks_pcie->num_lanes;
>>  	struct device *dev = &pdev->dev;
>>  
>> +	ks_release_restricted_dma(pdev);
>> +
>>  	pm_runtime_put(dev);
>>  	pm_runtime_disable(dev);
>>  	ks_pcie_disable_phy(ks_pcie);
>> -- 
>> 2.43.0
>>

Thanks,
Jan
Bjorn Helgaas Sept. 5, 2024, 7:16 p.m. UTC | #3
On Thu, Sep 05, 2024 at 09:07:36PM +0200, Jan Kiszka wrote:
> On 05.09.24 18:33, Bjorn Helgaas wrote:
> > [+cc Kishon, just in case you have time/interest ;)]
> > 
> > On Wed, Sep 04, 2024 at 12:00:13PM +0200, Jan Kiszka wrote:
> >> From: Jan Kiszka <jan.kiszka@siemens.com>
> >>
> >> The AM654 lacks an IOMMU, so DMA requests from untrusted PCI devices
> >> cannot be isolated to selected memory regions that way. Use static
> >> PVU-based protection instead.
> >>
> >> For this, we use the presence of restricted-dma-pool memory regions as
> >> the trigger and register those regions as valid DMA targets with the
> >> PVU.
> > 
> > I guess the implication is that DMA *outside* the restricted-dma-pool
> > just gets dropped, and the Requester would see Completion Timeouts or
> > something for reads?
> 
> I cannot tell what happens on the PCI bus in that case; maybe someone
> from TI can help out.
> 
> On the host side, the PVU will record an error and raise an interrupt 
> which will make the driver report that to the kernel log. That's quite 
> similar to what IOMMU drivers do on translation faults.

The main thing is that the DMA doesn't complete, as you mentioned
below.

> > Since there's no explicit use of "restricted-dma-pool" elsewhere in
> > this patch, I assume the setup above causes the controller to drop any
> > DMA accesses outside that pool?  I think a comment about how the
> > controller behavior is being changed would be useful.  Basically the
> > same comment as for the commit log.
> 
> Right, this is what will happen. Will add some comment.
> 
> > Would there be any value in a dmesg note about a restriction being
> > enforced?  Seems like it's dependent on both CONFIG_TI_PVU and some DT
> > properties, and since those are invisible in the log, maybe a note
> > would help understand/debug any issues?
> 
> This is what you will see when a reserved region and the PVU are in
> play:
> 
> keystone-pcie 5600000.pcie: assigned reserved memory node restricted-dma@c0000000
> ti-pvu 30f80000.iommu: created TLB entry 0.2: 0xc0000000, psize 4 (0x02000000)
> ti-pvu 30f80000.iommu: created TLB entry 0.3: 0xc2000000, psize 4 (0x02000000)
> ...
> ath9k 0000:01:00.0: assigned reserved memory node restricted-dma@c0000000

Looks reasonable and solves my concern.

> >> +	of_for_each_phandle(&it, err, pdev->dev.of_node, "memory-region",
> >> +			    NULL, 0) {
> >> +		if (of_device_is_compatible(it.node, "restricted-dma-pool") &&
> >> +		    of_address_to_resource(it.node, 0, &phys) == 0)
> >> +			ti_pvu_remove_region(KS_PCI_VIRTID, &phys);
> > 
> > I guess it's not important to undo the PCIE_VMAP_xP_CTRL_EN and
> > related setup that was done by ks_init_restricted_dma()?
> > 
> 
> Right, I didn't find a reason to do that.

OK, as long as you considered it :)

Bjorn
Jan Kiszka Sept. 6, 2024, 6:24 a.m. UTC | #4
On 05.09.24 21:07, Jan Kiszka wrote:
> On 05.09.24 18:33, Bjorn Helgaas wrote:
>>> +			res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
>>> +							   ks_vmap_res[n]);
>>
>> Seems like we should check "res" for error before using it?
> 
> Oh, unfinished constructions.

In fact, devm_pci_remap_cfg_resource takes care of res == NULL.
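
For reference, the function starts out with roughly this check (from
drivers/pci/pci.c):

  	if (!res || resource_type(res) != IORESOURCE_MEM) {
  		dev_err(dev, "invalid resource\n");
  		return IOMEM_ERR_PTR(-EINVAL);
  	}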

Jan

Patch

diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 2219b1a866fa..96b871656da4 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -19,6 +19,7 @@ 
 #include <linux/mfd/syscon.h>
 #include <linux/msi.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
 #include <linux/phy/phy.h>
@@ -26,6 +27,7 @@ 
 #include <linux/regmap.h>
 #include <linux/resource.h>
 #include <linux/signal.h>
+#include <linux/ti-pvu.h>
 
 #include "../../pci.h"
 #include "pcie-designware.h"
@@ -111,6 +113,16 @@ 
 
 #define PCI_DEVICE_ID_TI_AM654X		0xb00c
 
+#define KS_PCI_VIRTID			0
+
+#define PCIE_VMAP_xP_CTRL		0x0
+#define PCIE_VMAP_xP_REQID		0x4
+#define PCIE_VMAP_xP_VIRTID		0x8
+
+#define PCIE_VMAP_xP_CTRL_EN		BIT(0)
+
+#define PCIE_VMAP_xP_VIRTID_VID_MASK	0xfff
+
 struct ks_pcie_of_data {
 	enum dw_pcie_device_mode mode;
 	const struct dw_pcie_host_ops *host_ops;
@@ -1125,6 +1137,89 @@  static const struct of_device_id ks_pcie_of_match[] = {
 	{ },
 };
 
+#ifdef CONFIG_TI_PVU
+static const char *ks_vmap_res[] = {"vmap_lp", "vmap_hp"};
+
+static int ks_init_restricted_dma(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct of_phandle_iterator it;
+	bool init_vmap = false;
+	struct resource phys;
+	struct resource *res;
+	void __iomem *base;
+	unsigned int n;
+	u32 val;
+	int err;
+
+	of_for_each_phandle(&it, err, dev->of_node, "memory-region",
+			    NULL, 0) {
+		if (!of_device_is_compatible(it.node, "restricted-dma-pool"))
+			continue;
+
+		err = of_address_to_resource(it.node, 0, &phys);
+		if (err < 0) {
+			dev_err(dev, "failed to parse memory region %pOF: %d\n",
+				it.node, err);
+			continue;
+		}
+
+		err = ti_pvu_create_region(KS_PCI_VIRTID, &phys);
+		if (err < 0)
+			return err;
+
+		init_vmap = true;
+	}
+
+	if (init_vmap) {
+		for (n = 0; n < ARRAY_SIZE(ks_vmap_res); n++) {
+			res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							   ks_vmap_res[n]);
+			base = devm_pci_remap_cfg_resource(dev, res);
+			if (IS_ERR(base))
+				return PTR_ERR(base);
+
+			writel(0, base + PCIE_VMAP_xP_REQID);
+
+			val = readl(base + PCIE_VMAP_xP_VIRTID);
+			val &= ~PCIE_VMAP_xP_VIRTID_VID_MASK;
+			val |= KS_PCI_VIRTID;
+			writel(val, base + PCIE_VMAP_xP_VIRTID);
+
+			val = readl(base + PCIE_VMAP_xP_CTRL);
+			val |= PCIE_VMAP_xP_CTRL_EN;
+			writel(val, base + PCIE_VMAP_xP_CTRL);
+		}
+	}
+
+	return 0;
+}
+
+static void ks_release_restricted_dma(struct platform_device *pdev)
+{
+	struct of_phandle_iterator it;
+	struct resource phys;
+	int err;
+
+	of_for_each_phandle(&it, err, pdev->dev.of_node, "memory-region",
+			    NULL, 0) {
+		if (of_device_is_compatible(it.node, "restricted-dma-pool") &&
+		    of_address_to_resource(it.node, 0, &phys) == 0)
+			ti_pvu_remove_region(KS_PCI_VIRTID, &phys);
+
+	}
+}
+#else
+static inline int ks_init_restricted_dma(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static inline void ks_release_restricted_dma(struct platform_device *pdev)
+{
+}
+#endif
+
 static int ks_pcie_probe(struct platform_device *pdev)
 {
 	const struct dw_pcie_host_ops *host_ops;
@@ -1273,6 +1368,10 @@  static int ks_pcie_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto err_get_sync;
 
+	ret = ks_init_restricted_dma(pdev);
+	if (ret < 0)
+		goto err_get_sync;
+
 	switch (mode) {
 	case DW_PCIE_RC_TYPE:
 		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
@@ -1354,6 +1453,8 @@  static void ks_pcie_remove(struct platform_device *pdev)
 	int num_lanes = ks_pcie->num_lanes;
 	struct device *dev = &pdev->dev;
 
+	ks_release_restricted_dma(pdev);
+
 	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
 	ks_pcie_disable_phy(ks_pcie);