@@ -14,6 +14,16 @@ config PCIE_DW_EP
bool
select PCIE_DW
+config PCIE_AMD_MDB
+ bool "AMD PCIe controller (host mode)"
+ depends on OF || COMPILE_TEST
+ depends on PCI && PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here to enable PCIe controller support on AMD SoCs. The
+ PCIe controller is based on DesignWare Hardware and uses AMD
+ hardware wrappers.
+
config PCIE_AL
bool "Amazon Annapurna Labs PCIe controller"
depends on OF && (ARM64 || COMPILE_TEST)
@@ -3,6 +3,7 @@ obj-$(CONFIG_PCIE_DW) += pcie-designware.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o
obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
new file mode 100644
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for AMD MDB PCIe Bridge
+ *
+ * Copyright (C) 2024-2025, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/*
+ * SLCR miscellaneous TLP interrupt registers (offsets from mdb_base).
+ * STATUS appears to be write-1-to-clear (see amd_mdb_pcie_rp_event_flow),
+ * ENABLE gates event generation (see amd_mdb_pcie_rp_init_port) — confirm
+ * against the SLCR register reference.
+ */
+#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0
+#define AMD_MDB_TLP_IR_MASK_MISC 0x4C4
+#define AMD_MDB_TLP_IR_ENABLE_MISC 0x4C8
+
+/* INTx bits live at bit (16 + hwirq) in the MISC registers above */
+#define AMD_MDB_PCIE_IDRN_SHIFT 16
+
+/* Interrupt registers definitions: bit positions of the bridge events */
+#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT 15
+#define AMD_MDB_PCIE_INTR_PM_PME_RCVD 24
+#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD 25
+#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE 26
+#define AMD_MDB_PCIE_INTR_NONFATAL 27
+#define AMD_MDB_PCIE_INTR_FATAL 28
+
+/* Bit mask for one named event, and the union of all handled events */
+#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x)
+#define AMD_MDB_PCIE_IMR_ALL_MASK \
+	( \
+		IMR(CMPL_TIMEOUT) | \
+		IMR(PM_PME_RCVD) | \
+		IMR(PME_TO_ACK_RCVD) | \
+		IMR(MISC_CORRECTABLE) | \
+		IMR(NONFATAL) | \
+		IMR(FATAL) \
+	)
+
+/**
+ * struct amd_mdb_pcie - PCIe port information
+ * @pci: DesignWare PCIe controller structure (must stay first so
+ *       container_of() in get_mdb_pcie() works)
+ * @mdb_base: MDB System Level Control and Status Register (SLCR) base
+ * @intx_domain: Legacy (INTx) IRQ domain pointer
+ * @mdb_domain: MDB bridge-event IRQ domain pointer
+ */
+struct amd_mdb_pcie {
+	struct dw_pcie pci;
+	void __iomem *mdb_base;
+	struct irq_domain *intx_domain;
+	struct irq_domain *mdb_domain;
+};
+
+/* Intentionally empty: the DesignWare core defaults are sufficient. */
+static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = {
+};
+
+/* Read a 32-bit SLCR register at offset @reg from mdb_base. */
+static inline u32 pcie_read(struct amd_mdb_pcie *pcie, u32 reg)
+{
+	return readl_relaxed(pcie->mdb_base + reg);
+}
+
+/* Write @val to the 32-bit SLCR register at offset @reg from mdb_base. */
+static inline void pcie_write(struct amd_mdb_pcie *pcie,
+			      u32 val, u32 reg)
+{
+	writel_relaxed(val, pcie->mdb_base + reg);
+}
+
+/* Recover the driver state from the embedded DWC root-port structure. */
+static inline struct amd_mdb_pcie *get_mdb_pcie(struct dw_pcie_rp *pp)
+{
+	return container_of(container_of(pp, struct dw_pcie, pp),
+			    struct amd_mdb_pcie, pci);
+}
+
+/* irq_chip .irq_mask callback for the legacy INTx interrupts. */
+static void amd_mdb_mask_leg_irq(struct irq_data *data)
+{
+	struct dw_pcie_rp *port = irq_data_get_irq_chip_data(data);
+	struct amd_mdb_pcie *pcie;
+	unsigned long flags;
+	u32 mask, val;
+
+	pcie = get_mdb_pcie(port);
+
+	/* INTx bits sit at bit (16 + hwirq) in the MISC registers */
+	mask = BIT(data->hwirq + AMD_MDB_PCIE_IDRN_SHIFT);
+	raw_spin_lock_irqsave(&port->lock, flags);
+
+	/*
+	 * NOTE(review): this read-modify-writes the *status* register, but
+	 * amd_mdb_pcie_rp_event_flow() treats STATUS_MISC as write-1-to-clear,
+	 * in which case this write would ack other pending bits instead of
+	 * masking this source. Confirm whether AMD_MDB_TLP_IR_ENABLE_MISC (or
+	 * a dedicated INTx mask register) should be used here.
+	 */
+	val = pcie_read(pcie, AMD_MDB_TLP_IR_STATUS_MISC);
+	pcie_write(pcie, (val & (~mask)), AMD_MDB_TLP_IR_STATUS_MISC);
+
+	raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* irq_chip .irq_unmask callback for the legacy INTx interrupts. */
+static void amd_mdb_unmask_leg_irq(struct irq_data *data)
+{
+	struct dw_pcie_rp *port = irq_data_get_irq_chip_data(data);
+	struct amd_mdb_pcie *pcie;
+	unsigned long flags;
+	u32 mask;
+	u32 val;
+
+	pcie = get_mdb_pcie(port);
+
+	/* INTx bits sit at bit (16 + hwirq) in the MISC registers */
+	mask = BIT(data->hwirq + AMD_MDB_PCIE_IDRN_SHIFT);
+	raw_spin_lock_irqsave(&port->lock, flags);
+
+	/*
+	 * NOTE(review): see amd_mdb_mask_leg_irq() — RMW on the (apparently
+	 * write-1-to-clear) status register likely cannot implement unmask;
+	 * confirm the intended enable/mask register for INTx.
+	 */
+	val = pcie_read(pcie, AMD_MDB_TLP_IR_STATUS_MISC);
+	pcie_write(pcie, (val | mask), AMD_MDB_TLP_IR_STATUS_MISC);
+
+	raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* irq_chip for the four legacy INTx interrupts */
+static struct irq_chip amd_mdb_leg_irq_chip = {
+	.name = "INTx",
+	.irq_mask = amd_mdb_mask_leg_irq,
+	.irq_unmask = amd_mdb_unmask_leg_irq,
+};
+
+/**
+ * amd_mdb_pcie_rp_intx_map - Set the handler for the INTx and mark IRQ
+ * as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int amd_mdb_pcie_rp_intx_map(struct irq_domain *domain,
+				    unsigned int irq, irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &amd_mdb_leg_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	/* INTx are level-triggered */
+	irq_set_status_flags(irq, IRQ_LEVEL);
+
+	return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops amd_intx_domain_ops = {
+	.map = amd_mdb_pcie_rp_intx_map,
+};
+
+/**
+ * amd_mdb_pcie_rp_init_port - Initialize hardware
+ * @pcie: PCIe port information
+ * @pdev: platform device (currently unused, kept for symmetry with the
+ *        other init helpers)
+ *
+ * Disables, acks any pending, then re-enables the bridge TLP
+ * miscellaneous event interrupts.
+ *
+ * Return: Always returns 0.
+ */
+static int amd_mdb_pcie_rp_init_port(struct amd_mdb_pcie *pcie,
+				     struct platform_device *pdev)
+{
+	u32 val;
+
+	/* Disable all TLP Interrupts */
+	val = pcie_read(pcie, AMD_MDB_TLP_IR_ENABLE_MISC);
+	pcie_write(pcie, val & ~AMD_MDB_PCIE_IMR_ALL_MASK,
+		   AMD_MDB_TLP_IR_ENABLE_MISC);
+
+	/* Clear pending TLP interrupts (status is write-1-to-clear) */
+	val = pcie_read(pcie, AMD_MDB_TLP_IR_STATUS_MISC);
+	pcie_write(pcie, val & AMD_MDB_PCIE_IMR_ALL_MASK,
+		   AMD_MDB_TLP_IR_STATUS_MISC);
+
+	/* Enable all TLP Interrupts */
+	val = pcie_read(pcie, AMD_MDB_TLP_IR_ENABLE_MISC);
+	pcie_write(pcie, (val | AMD_MDB_PCIE_IMR_ALL_MASK),
+		   AMD_MDB_TLP_IR_ENABLE_MISC);
+
+	return 0;
+}
+
+/*
+ * Main event interrupt handler: demultiplex pending, unmasked bridge
+ * events into the mdb_domain, then ack them (status is write-1-to-clear).
+ * Registered with IRQF_SHARED, so it must report IRQ_NONE when no event
+ * of ours is pending — otherwise the spurious-IRQ detector misbehaves.
+ */
+static irqreturn_t amd_mdb_pcie_rp_event_flow(int irq, void *args)
+{
+	struct dw_pcie_rp *port = args;
+	struct amd_mdb_pcie *pcie;
+	unsigned long val;
+	int i;
+
+	pcie = get_mdb_pcie(port);
+
+	val = pcie_read(pcie, AMD_MDB_TLP_IR_STATUS_MISC);
+	val &= ~pcie_read(pcie, AMD_MDB_TLP_IR_MASK_MISC);
+	if (!val)
+		return IRQ_NONE;
+
+	for_each_set_bit(i, &val, 32)
+		generic_handle_domain_irq(pcie->mdb_domain, i);
+	/* Ack only the events we just handled */
+	pcie_write(pcie, val, AMD_MDB_TLP_IR_STATUS_MISC);
+
+	return IRQ_HANDLED;
+}
+
+/* Build one intr_cause entry indexed by its AMD_MDB_PCIE_INTR_* bit */
+#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+/*
+ * Per-event symbol (used as the requested IRQ name) and human-readable
+ * description; unlisted bit positions have a NULL str and are skipped.
+ */
+static const struct {
+	const char *sym;
+	const char *str;
+} intr_cause[32] = {
+	_IC(CMPL_TIMEOUT, "completion timeout"),
+	_IC(PM_PME_RCVD, "PM_PME message received"),
+	_IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
+	_IC(MISC_CORRECTABLE, "Correctable error message"),
+	_IC(NONFATAL, "Non fatal error message"),
+	_IC(FATAL, "Fatal error message"),
+};
+
+static void amd_mdb_mask_event_irq(struct irq_data *d)
+{
+ struct dw_pcie_rp *port = irq_data_get_irq_chip_data(d);
+ struct amd_mdb_pcie *pcie;
+ u32 val;
+
+ pcie = get_mdb_pcie(port);
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(pcie, AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= ~BIT(d->hwirq);
+ pcie_write(pcie, val, AMD_MDB_TLP_IR_STATUS_MISC);
+ raw_spin_unlock(&port->lock);
+}
+
+/*
+ * irq_chip .irq_unmask for bridge events: set the event's enable bit in
+ * AMD_MDB_TLP_IR_ENABLE_MISC (the register amd_mdb_pcie_rp_init_port()
+ * uses), rather than RMW-ing the write-1-to-clear STATUS register.
+ */
+static void amd_mdb_unmask_event_irq(struct irq_data *d)
+{
+	struct dw_pcie_rp *port = irq_data_get_irq_chip_data(d);
+	struct amd_mdb_pcie *pcie;
+	u32 val;
+
+	pcie = get_mdb_pcie(port);
+
+	raw_spin_lock(&port->lock);
+	val = pcie_read(pcie, AMD_MDB_TLP_IR_ENABLE_MISC);
+	val |= BIT(d->hwirq);
+	pcie_write(pcie, val, AMD_MDB_TLP_IR_ENABLE_MISC);
+	raw_spin_unlock(&port->lock);
+}
+
+/* irq_chip for the demultiplexed bridge events */
+static struct irq_chip amd_mdb_event_irq_chip = {
+	.name = "RC-Event",
+	.irq_mask = amd_mdb_mask_event_irq,
+	.irq_unmask = amd_mdb_unmask_event_irq,
+};
+
+/**
+ * amd_mdb_pcie_event_map - Set chip/handler for a bridge-event IRQ
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number (bit position in the MISC registers)
+ *
+ * Return: Always returns 0.
+ */
+static int amd_mdb_pcie_event_map(struct irq_domain *domain,
+				  unsigned int irq, irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	/* Events are level-triggered */
+	irq_set_status_flags(irq, IRQ_LEVEL);
+	return 0;
+}
+
+/* Bridge-event IRQ domain operations */
+static const struct irq_domain_ops event_domain_ops = {
+	.map = amd_mdb_pcie_event_map,
+};
+
+/* Tear down whichever of the INTx and MDB event IRQ domains exist. */
+static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie)
+{
+	struct irq_domain *domain;
+
+	domain = pcie->intx_domain;
+	if (domain) {
+		pcie->intx_domain = NULL;
+		irq_domain_remove(domain);
+	}
+
+	domain = pcie->mdb_domain;
+	if (domain) {
+		pcie->mdb_domain = NULL;
+		irq_domain_remove(domain);
+	}
+}
+
+/**
+ * amd_mdb_pcie_rp_init_irq_domain - Initialize IRQ domain
+ * @pcie: PCIe port information
+ * @pdev: platform device
+ *
+ * Creates a 32-entry domain for bridge events and a PCI_NUM_INTX domain
+ * for legacy interrupts, both rooted at the controller's interrupt child
+ * node.
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int amd_mdb_pcie_rp_init_irq_domain(struct amd_mdb_pcie *pcie,
+					   struct platform_device *pdev)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	struct dw_pcie_rp *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct device_node *pcie_intc_node;
+
+	/*
+	 * Setup INTx.
+	 * NOTE(review): of_get_next_child(node, NULL) returns the *first*
+	 * child whatever its name — consider of_get_child_by_name() so an
+	 * unrelated child node cannot be picked up by mistake.
+	 */
+	pcie_intc_node = of_get_next_child(node, NULL);
+	if (!pcie_intc_node) {
+		dev_err(dev, "No PCIe Intc node found\n");
+		return -EINVAL;
+	}
+
+	pcie->mdb_domain = irq_domain_add_linear(pcie_intc_node, 32,
+						 &event_domain_ops,
+						 pp);
+	if (!pcie->mdb_domain)
+		goto out;
+
+	irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);
+
+	pcie->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+						  &amd_intx_domain_ops, pp);
+	if (!pcie->intx_domain)
+		goto mdb_out;
+
+	irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED);
+
+	of_node_put(pcie_intc_node);
+	/*
+	 * NOTE(review): pp->lock belongs to the DWC core and is initialized
+	 * again by dw_pcie_host_init() later in probe — verify that the
+	 * re-initialization cannot race with irq_chip callbacks that already
+	 * take this lock once the IRQs below are requested.
+	 */
+	raw_spin_lock_init(&pp->lock);
+
+	return 0;
+mdb_out:
+	amd_mdb_pcie_free_irq_domains(pcie);
+out:
+	of_node_put(pcie_intc_node);
+	dev_err(dev, "Failed to allocate IRQ domains\n");
+
+	return -ENOMEM;
+}
+
+/*
+ * Per-event handler for the virqs mapped in amd_mdb_setup_irq(): just
+ * logs the event. @irq is the Linux virq; the hwirq (bit position) is
+ * recovered through the mdb_domain. hwirq < 32, so indexing intr_cause
+ * is in bounds; entries without a description report "Unknown IRQ".
+ */
+static irqreturn_t amd_mdb_pcie_rp_intr_handler(int irq, void *dev_id)
+{
+	struct dw_pcie_rp *port = dev_id;
+	struct amd_mdb_pcie *pcie;
+	struct device *dev;
+	struct irq_data *d;
+
+	pcie = get_mdb_pcie(port);
+	dev = pcie->pci.dev;
+
+	d = irq_domain_get_irq_data(pcie->mdb_domain, irq);
+	if (intr_cause[d->hwirq].str)
+		dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+	else
+		dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * amd_mdb_setup_irq - Map and request the bridge event interrupts
+ * @pcie: PCIe port information
+ * @pdev: platform device
+ *
+ * Maps a virq for every named entry in intr_cause, requests a logging
+ * handler for each, then requests the main demultiplexing handler on
+ * the platform IRQ.
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
+			     struct platform_device *pdev)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	struct dw_pcie_rp *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int i, irq, err;
+
+	pp->irq = platform_get_irq(pdev, 0);
+	if (pp->irq < 0)
+		return pp->irq;
+
+	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+		if (!intr_cause[i].str)
+			continue;
+		/*
+		 * NOTE(review): mappings created here are never disposed on
+		 * the error paths below (or in the caller's cleanup), so
+		 * irq_domain_remove() will later run with live mappings —
+		 * consider irq_dispose_mapping() on unwind.
+		 */
+		irq = irq_create_mapping(pcie->mdb_domain, i);
+		if (!irq) {
+			dev_err(dev, "Failed to map mdb domain interrupt\n");
+			return -ENXIO;
+		}
+		err = devm_request_irq(dev, irq, amd_mdb_pcie_rp_intr_handler,
+				       IRQF_SHARED | IRQF_NO_THREAD,
+				       intr_cause[i].sym, pp);
+		if (err) {
+			dev_err(dev, "Failed to request IRQ %d\n", irq);
+			return err;
+		}
+	}
+
+	/* Plug the main event chained handler */
+	err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_rp_event_flow,
+			       IRQF_SHARED | IRQF_NO_THREAD, "pcie_irq", pp);
+	if (err) {
+		dev_err(dev, "Failed to request event IRQ %d\n", pp->irq);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * amd_mdb_add_pcie_port - Map SLCR space, set up IRQs and init the host
+ * @pcie: PCIe port information
+ * @pdev: platform device
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
+				 struct platform_device *pdev)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	struct dw_pcie_rp *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->ops = &amd_mdb_pcie_host_ops;
+
+	pcie->mdb_base = devm_platform_ioremap_resource_byname(pdev, "mdb_pcie_slcr");
+	if (IS_ERR(pcie->mdb_base))
+		return PTR_ERR(pcie->mdb_base);
+
+	ret = amd_mdb_pcie_rp_init_irq_domain(pcie, pdev);
+	if (ret)
+		return ret;
+
+	/* Return value ignored: amd_mdb_pcie_rp_init_port() always returns 0 */
+	amd_mdb_pcie_rp_init_port(pcie, pdev);
+
+	ret = amd_mdb_setup_irq(pcie, pdev);
+	if (ret) {
+		dev_err(dev, "Failed to set up interrupts\n");
+		goto out;
+	}
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "Failed to initialize host\n");
+		goto out;
+	}
+
+	return 0;
+
+out:
+	amd_mdb_pcie_free_irq_domains(pcie);
+	return ret;
+}
+
+/* Probe: allocate driver state, wire up the device, bring up the port. */
+static int amd_mdb_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct amd_mdb_pcie *pcie;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pcie->pci.dev = dev;
+	platform_set_drvdata(pdev, pcie);
+
+	return amd_mdb_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id amd_mdb_pcie_of_match[] = {
+	{
+		.compatible = "amd,versal2-mdb-host",
+	},
+	{},
+};
+
+/*
+ * Built-in only (PCIE_AMD_MDB is a bool), so no module device table or
+ * remove callback; bind/unbind via sysfs is suppressed.
+ */
+static struct platform_driver amd_mdb_pcie_driver = {
+	.driver = {
+		.name	= "amd-mdb-pcie",
+		.of_match_table = amd_mdb_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = amd_mdb_pcie_probe,
+};
+builtin_platform_driver(amd_mdb_pcie_driver);
Add support for the AMD MDB (Multimedia DMA Bridge) IP core as a Root Port. AMD Versal2 devices include the MDB module. The integrated MDB block, together with the integrated bridge, can function as a PCIe Root Port controller at Gen5 speed. Bridge error and legacy interrupts on Versal2 MDB are handled using a Versal2 MDB-specific interrupt line. Signed-off-by: Thippeswamy Havalige <thippeswamy.havalige@amd.com> --- drivers/pci/controller/dwc/Kconfig | 10 + drivers/pci/controller/dwc/Makefile | 1 + drivers/pci/controller/dwc/pcie-amd-mdb.c | 455 ++++++++++++++++++++++ 3 files changed, 466 insertions(+) create mode 100644 drivers/pci/controller/dwc/pcie-amd-mdb.c