@@ -378,7 +378,7 @@ static u32 local_events(struct mc_pcie *port)
return val;
}
-static u32 get_events(struct plda_pcie_rp *port)
+static u32 mc_get_events(struct plda_pcie_rp *port)
{
struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
u32 events = 0;
@@ -416,7 +416,7 @@ static void mc_handle_event(struct irq_desc *desc)
chained_irq_enter(chip, desc);
- events = get_events(port);
+ events = port->event_ops->get_events(port);
for_each_set_bit(bit, &events, NUM_EVENTS)
generic_handle_domain_irq(port->event_domain, bit);
@@ -519,7 +519,7 @@ static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
return 0;
}
-static const struct irq_domain_ops event_domain_ops = {
+static const struct irq_domain_ops mc_event_domain_ops = {
.map = mc_pcie_event_map,
};
@@ -562,11 +562,13 @@ static int mc_pcie_init_clks(struct device *dev)
return 0;
}
-static int mc_pcie_init_irq_domains(struct plda_pcie_rp *port)
+static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port,
+ const struct plda_event *event)
{
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
+ const struct irq_domain_ops *ops;
/* Setup INTx */
pcie_intc_node = of_get_next_child(node, NULL);
@@ -575,8 +577,9 @@ static int mc_pcie_init_irq_domains(struct plda_pcie_rp *port)
return -EINVAL;
}
- port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
- &event_domain_ops, port);
+ ops = event->domain_ops ? event->domain_ops : &mc_event_domain_ops;
+ port->event_domain = irq_domain_add_linear(pcie_intc_node, port->num_events,
+ ops, port);
if (!port->event_domain) {
dev_err(dev, "failed to get event domain\n");
of_node_put(pcie_intc_node);
@@ -661,14 +664,40 @@ static void mc_disable_interrupts(struct mc_pcie *port)
writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
}
-static int mc_init_interrupts(struct platform_device *pdev, struct plda_pcie_rp *port)
+static int mc_request_event_irq(struct plda_pcie_rp *plda, int event_irq,
+ int event)
+{
+ return devm_request_irq(plda->dev, event_irq, mc_event_handler,
+ 0, event_cause[event].sym, plda);
+}
+
+static const struct plda_event_ops mc_event_ops = {
+ .get_events = mc_get_events,
+};
+
+static const struct plda_event mc_event = {
+ .domain_ops = &mc_event_domain_ops,
+ .event_ops = &mc_event_ops,
+ .request_event_irq = mc_request_event_irq,
+ .intx_event = EVENT_LOCAL_PM_MSI_INT_INTX,
+ .msi_event = EVENT_LOCAL_PM_MSI_INT_MSI,
+};
+
+static int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event)
{
struct device *dev = &pdev->dev;
int irq;
int i, intx_irq, msi_irq, event_irq;
int ret;
- ret = mc_pcie_init_irq_domains(port);
+ if (!event->event_ops || !event->event_ops->get_events) {
+ dev_err(dev, "no get events ops\n");
+ return -EINVAL;
+ }
+
+ ret = plda_pcie_init_irq_domains(port, event);
if (ret) {
dev_err(dev, "failed creating IRQ domains\n");
return ret;
@@ -678,15 +707,20 @@ static int mc_init_interrupts(struct platform_device *pdev, struct plda_pcie_rp
if (irq < 0)
return -ENODEV;
- for (i = 0; i < NUM_EVENTS; i++) {
+ port->event_ops = event->event_ops;
+
+ for (i = 0; i < port->num_events; i++) {
event_irq = irq_create_mapping(port->event_domain, i);
if (!event_irq) {
dev_err(dev, "failed to map hwirq %d\n", i);
return -ENXIO;
}
- ret = devm_request_irq(dev, event_irq, mc_event_handler,
- 0, event_cause[i].sym, port);
+ if (event->request_event_irq)
+ ret = event->request_event_irq(port, event_irq, i);
+ else
+ ret = devm_request_irq(dev, event_irq, plda_event_handler,
+ 0, NULL, port);
if (ret) {
dev_err(dev, "failed to request IRQ %d\n", event_irq);
return ret;
@@ -694,7 +728,7 @@ static int mc_init_interrupts(struct platform_device *pdev, struct plda_pcie_rp
}
intx_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_INTX);
+ event->intx_event);
if (!intx_irq) {
dev_err(dev, "failed to map INTx interrupt\n");
return -ENXIO;
@@ -704,7 +738,7 @@ static int mc_init_interrupts(struct platform_device *pdev, struct plda_pcie_rp
irq_set_chained_handler_and_data(intx_irq, plda_handle_intx, port);
msi_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_MSI);
+ event->msi_event);
if (!msi_irq)
return -ENXIO;
@@ -740,7 +774,7 @@ static int mc_platform_init(struct pci_config_window *cfg)
return ret;
/* Address translation is up; safe to enable interrupts */
- ret = mc_init_interrupts(pdev, &port->plda);
+ ret = plda_init_interrupts(pdev, &port->plda, &mc_event);
if (ret)
return ret;
@@ -761,6 +795,7 @@ static int mc_host_probe(struct platform_device *pdev)
plda = &port->plda;
plda->dev = dev;
+ plda->num_events = NUM_EVENTS;
port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(port->axi_base_addr))
@@ -125,10 +125,20 @@ struct plda_pcie_rp {
struct irq_domain *event_domain;
raw_spinlock_t lock;
struct plda_msi msi;
+ const struct plda_event_ops *event_ops;
void __iomem *bridge_addr;
int num_events;
};
+struct plda_event {
+ const struct irq_domain_ops *domain_ops;
+ const struct plda_event_ops *event_ops;
+ int (*request_event_irq)(struct plda_pcie_rp *pcie,
+ int event_irq, int event);
+ int intx_event;
+ int msi_event;
+};
+
void plda_handle_msi(struct irq_desc *desc);
int plda_allocate_msi_domains(struct plda_pcie_rp *port);
irqreturn_t plda_event_handler(int irq, void *dev_id);
Rename the IRQ init functions and prepare them for re-use by other
PLDA-based controller drivers. Add a plda_event structure through which
a platform driver registers its IRQ init data.

rename list:
mc_init_interrupts()       -> plda_init_interrupts()
mc_pcie_init_irq_domains() -> plda_pcie_init_irq_domains()
mc_handle_event()          -> plda_handle_event()
get_events()               -> mc_get_events()

Signed-off-by: Minda Chen <minda.chen@starfivetech.com>
---
 .../pci/controller/plda/pcie-microchip-host.c | 63 ++++++++++++++-----
 drivers/pci/controller/plda/pcie-plda.h       | 10 +++
 2 files changed, 59 insertions(+), 14 deletions(-)
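
Note (not part of the commit): a minimal sketch of how another PLDA-based
host driver could plug into the renamed plda_init_interrupts() path. Every
starfive_* name, the register offset, the event numbers and the event count
below are hypothetical placeholders, and the sketch assumes
plda_init_interrupts() is visible to other drivers (in this patch it is
still static in pcie-microchip-host.c).

    /*
     * Hypothetical usage sketch only: starfive_* names, the register
     * offset, the event numbers and the event count are placeholders,
     * not code from this series.
     */
    #define EXAMPLE_ISTATUS		0x184	/* placeholder status register */

    static u32 starfive_get_events(struct plda_pcie_rp *port)
    {
    	/*
    	 * Translate the platform interrupt status into the PLDA event
    	 * bitmap consumed by the event IRQ domain.
    	 */
    	return readl_relaxed(port->bridge_addr + EXAMPLE_ISTATUS);
    }

    static const struct plda_event_ops starfive_event_ops = {
    	.get_events = starfive_get_events,
    };

    static const struct plda_event starfive_event = {
    	/*
    	 * .domain_ops and .request_event_irq are left NULL on purpose
    	 * to exercise the fallbacks in plda_pcie_init_irq_domains()
    	 * and plda_init_interrupts().
    	 */
    	.event_ops  = &starfive_event_ops,
    	.intx_event = 1,	/* placeholder INTx event number */
    	.msi_event  = 0,	/* placeholder MSI event number */
    };

    static int starfive_init_interrupts(struct platform_device *pdev,
    					struct plda_pcie_rp *plda)
    {
    	plda->num_events = 32;	/* placeholder event count */

    	return plda_init_interrupts(pdev, plda, &starfive_event);
    }

With .domain_ops and .request_event_irq left NULL, the event domain falls
back to mc_event_domain_ops and every event IRQ is requested with
plda_event_handler(), exactly as the fallback paths added above intend.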