
[v6,4/7] PCI: keystone: Add support for PVU-based DMA isolation on AM654

Message ID f6ea60ec075e981a9b587b42baec33649e3f3918.1725901439.git.jan.kiszka@siemens.com (mailing list archive)
State New, archived
Series soc: ti: Add and use PVU on K3-AM65 for DMA isolation

Commit Message

Jan Kiszka Sept. 9, 2024, 5:03 p.m. UTC
From: Jan Kiszka <jan.kiszka@siemens.com>

The AM654 lacks an IOMMU, so it cannot use one to restrict DMA requests
from untrusted PCI devices to selected memory regions. Use static
PVU-based protection instead. The PVU, when enabled, will only accept
DMA requests that address previously configured regions.

Use the availability of a restricted-dma-pool memory region as the
trigger and register that region as a valid DMA target with the PVU. In
addition, enable the mapping of requester IDs to VirtIDs in the PCI RC.
For now, use only a single VirtID that catches all devices; this may be
extended later on.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
CC: Lorenzo Pieralisi <lpieralisi@kernel.org>
CC: "Krzysztof Wilczyński" <kw@linux.com>
CC: Bjorn Helgaas <bhelgaas@google.com>
CC: linux-pci@vger.kernel.org
---
 drivers/pci/controller/dwc/pci-keystone.c | 108 ++++++++++++++++++++++
 1 file changed, 108 insertions(+)
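
For context, the trigger described above is a reserved-memory node with
the "restricted-dma-pool" compatible, referenced from the PCIe RC node
via its "memory-region" property (the property the driver below
iterates). A minimal device-tree sketch of that wiring; node names,
labels, addresses and sizes are illustrative placeholders, not taken
from this series:

	reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;

		/* placeholder address/size: SWIOTLB pool for PCIe DMA */
		pcie0_dma_pool: restricted-dma@9c000000 {
			compatible = "restricted-dma-pool";
			reg = <0x00 0x9c000000 0x00 0x00400000>;
		};
	};

	&pcie0_rc {
		memory-region = <&pcie0_dma_pool>;
	};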

Comments

Bjorn Helgaas Oct. 30, 2024, 8:57 p.m. UTC | #1
On Mon, Sep 09, 2024 at 07:03:57PM +0200, Jan Kiszka wrote:
> From: Jan Kiszka <jan.kiszka@siemens.com>
> [...]
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>

Acked-by: Bjorn Helgaas <bhelgaas@google.com>

> +	/* Only process the first restricted dma pool, more are not allowed */

s/dma/DMA/ to match other usage.
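
Applied to the patch, that comment would then read:

	/* Only process the first restricted DMA pool, more are not allowed */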
Vignesh Raghavendra Nov. 3, 2024, 6:15 a.m. UTC | #2
On 09/09/24 22:33, Jan Kiszka wrote:
> From: Jan Kiszka <jan.kiszka@siemens.com>
> [...]
> +#ifdef CONFIG_TI_PVU
> +static int ks_init_vmap(struct platform_device *pdev, const char *vmap_name)
> +{
> +	struct resource *res;
> +	void __iomem *base;
> +	u32 val;
> +

Nit:

	if (!IS_ENABLED(CONFIG_TI_PVU))
		return 0;


This looks cleaner than #ifdef .. #else .. #endif.
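
A sketch of how that could look at the top of ks_init_restricted_dma()
(illustrative only; as Jan notes in his reply below, this only builds
if the ti-pvu header provides stubs when CONFIG_TI_PVU is disabled):

	static int ks_init_restricted_dma(struct platform_device *pdev)
	{
		/* compiled out entirely when CONFIG_TI_PVU is off */
		if (!IS_ENABLED(CONFIG_TI_PVU))
			return 0;

		/* ... body unchanged from the patch ... */
	}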


Rest LGTM

Jan Kiszka Nov. 3, 2024, 9:50 a.m. UTC | #3
On 03.11.24 07:15, Vignesh Raghavendra wrote:
> 
> 
> On 09/09/24 22:33, Jan Kiszka wrote:
>> From: Jan Kiszka <jan.kiszka@siemens.com>
>> [...]
> 
> Nit:
> 
> 	if (!IS_ENABLED(CONFIG_TI_PVU))
> 		return 0;
> 
> 
> This looks cleaner than #ifdef .. #else .. #endif.
> 

Can be done, but it would move the stubbing into include/linux/ti-pvu.h,
i.e. to ti_pvu_create_region() and ti_pvu_remove_region().

Jan
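
A sketch of what those stubs in include/linux/ti-pvu.h could look like
(signatures inferred from the call sites in this patch; parameter names
are guesses):

	#ifdef CONFIG_TI_PVU
	int ti_pvu_create_region(unsigned int virt_id,
				 const struct resource *region);
	void ti_pvu_remove_region(unsigned int virt_id,
				  const struct resource *region);
	#else
	static inline int ti_pvu_create_region(unsigned int virt_id,
					       const struct resource *region)
	{
		return 0;
	}

	static inline void ti_pvu_remove_region(unsigned int virt_id,
						const struct resource *region)
	{
	}
	#endif

With such stubs in place, the driver could drop its local #ifdef block
and use the IS_ENABLED() guard suggested above.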


Patch

diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 2219b1a866fa..a5954cae6d5d 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -19,6 +19,7 @@ 
 #include <linux/mfd/syscon.h>
 #include <linux/msi.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
 #include <linux/phy/phy.h>
@@ -26,6 +27,7 @@ 
 #include <linux/regmap.h>
 #include <linux/resource.h>
 #include <linux/signal.h>
+#include <linux/ti-pvu.h>
 
 #include "../../pci.h"
 #include "pcie-designware.h"
@@ -111,6 +113,16 @@ 
 
 #define PCI_DEVICE_ID_TI_AM654X		0xb00c
 
+#define KS_PCI_VIRTID			0
+
+#define PCIE_VMAP_xP_CTRL		0x0
+#define PCIE_VMAP_xP_REQID		0x4
+#define PCIE_VMAP_xP_VIRTID		0x8
+
+#define PCIE_VMAP_xP_CTRL_EN		BIT(0)
+
+#define PCIE_VMAP_xP_VIRTID_VID_MASK	0xfff
+
 struct ks_pcie_of_data {
 	enum dw_pcie_device_mode mode;
 	const struct dw_pcie_host_ops *host_ops;
@@ -1125,6 +1137,96 @@ static const struct of_device_id ks_pcie_of_match[] = {
 	{ },
 };
 
+#ifdef CONFIG_TI_PVU
+static int ks_init_vmap(struct platform_device *pdev, const char *vmap_name)
+{
+	struct resource *res;
+	void __iomem *base;
+	u32 val;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, vmap_name);
+	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	writel(0, base + PCIE_VMAP_xP_REQID);
+
+	val = readl(base + PCIE_VMAP_xP_VIRTID);
+	val &= ~PCIE_VMAP_xP_VIRTID_VID_MASK;
+	val |= KS_PCI_VIRTID;
+	writel(val, base + PCIE_VMAP_xP_VIRTID);
+
+	val = readl(base + PCIE_VMAP_xP_CTRL);
+	val |= PCIE_VMAP_xP_CTRL_EN;
+	writel(val, base + PCIE_VMAP_xP_CTRL);
+
+	return 0;
+}
+
+static int ks_init_restricted_dma(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct of_phandle_iterator it;
+	struct resource phys;
+	int err;
+
+	/* Only process the first restricted dma pool, more are not allowed */
+	of_for_each_phandle(&it, err, dev->of_node, "memory-region",
+			    NULL, 0) {
+		if (of_device_is_compatible(it.node, "restricted-dma-pool"))
+			break;
+	}
+	if (err)
+		return err == -ENOENT ? 0 : err;
+
+	err = of_address_to_resource(it.node, 0, &phys);
+	if (err < 0) {
+		dev_err(dev, "failed to parse memory region %pOF: %d\n",
+			it.node, err);
+		return 0;
+	}
+
+	/* Map all incoming requests on low and high prio port to virtID 0 */
+	err = ks_init_vmap(pdev, "vmap_lp");
+	if (err)
+		return err;
+	err = ks_init_vmap(pdev, "vmap_hp");
+	if (err)
+		return err;
+
+	/*
+	 * Enforce DMA pool usage with the help of the PVU.
+	 * Any request outside will be dropped and raise an error at the PVU.
+	 */
+	return ti_pvu_create_region(KS_PCI_VIRTID, &phys);
+}
+
+static void ks_release_restricted_dma(struct platform_device *pdev)
+{
+	struct of_phandle_iterator it;
+	struct resource phys;
+	int err;
+
+	of_for_each_phandle(&it, err, pdev->dev.of_node, "memory-region",
+			    NULL, 0) {
+		if (of_device_is_compatible(it.node, "restricted-dma-pool") &&
+		    of_address_to_resource(it.node, 0, &phys) == 0) {
+			ti_pvu_remove_region(KS_PCI_VIRTID, &phys);
+			break;
+		}
+	}
+}
+#else
+static inline int ks_init_restricted_dma(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static inline void ks_release_restricted_dma(struct platform_device *pdev)
+{
+}
+#endif
+
 static int ks_pcie_probe(struct platform_device *pdev)
 {
 	const struct dw_pcie_host_ops *host_ops;
@@ -1273,6 +1375,10 @@ static int ks_pcie_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto err_get_sync;
 
+	ret = ks_init_restricted_dma(pdev);
+	if (ret < 0)
+		goto err_get_sync;
+
 	switch (mode) {
 	case DW_PCIE_RC_TYPE:
 		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
@@ -1354,6 +1460,8 @@ static void ks_pcie_remove(struct platform_device *pdev)
 	int num_lanes = ks_pcie->num_lanes;
 	struct device *dev = &pdev->dev;
 
+	ks_release_restricted_dma(pdev);
+
 	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
 	ks_pcie_disable_phy(ks_pcie);