diff mbox series

[06/13] irqchip/armada-370-xp: Put static variables into driver private structure

Message ID 20240715105156.18388-7-kabel@kernel.org (mailing list archive)
State New, archived
Headers show
Series armada-370-xp irqchip updates round 5 | expand

Commit Message

Marek Behún July 15, 2024, 10:51 a.m. UTC
In preparation for converting the driver to the modern style, put all of
the interrupt controller's private static variables into a driver
private structure.

Access to these variables changes as follows:
  main_int_base		mpic->base
  per_cpu_int_base	mpic->per_cpu
  mpic_domain		mpic->domain
  parent_irq		mpic->parent_irq
  ...

Signed-off-by: Marek Behún <kabel@kernel.org>
---
 drivers/irqchip/irq-armada-370-xp.c | 226 +++++++++++++++-------------
 1 file changed, 122 insertions(+), 104 deletions(-)

Comments

Thomas Gleixner July 28, 2024, 9:44 p.m. UTC | #1
On Mon, Jul 15 2024 at 12:51, Marek Behún wrote:
> +struct mpic {
> +	/* Register regions */
> +	void __iomem *base;
> +	void __iomem *per_cpu;
> +
> +	/* If MPIC is not top-level interrupt controller */
> +	int parent_irq;
> +
> +	/* Main IRQ domain */
> +	struct irq_domain *domain;
> +
> +#ifdef CONFIG_SMP
> +	/* IPI interrupts */
> +	struct irq_domain *ipi_domain;
> +#endif
> +
>  #ifdef CONFIG_PCI_MSI
> -static struct irq_domain *mpic_msi_domain;
> -static struct irq_domain *mpic_msi_inner_domain;
> -static DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
> -static DEFINE_MUTEX(msi_used_lock);
> -static phys_addr_t msi_doorbell_addr;
> +	/* MSI interrupts */
> +	struct irq_domain *msi_domain;
> +	struct irq_domain *msi_inner_domain;
> +	DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
> +	struct mutex msi_lock;
> +	phys_addr_t msi_doorbell_addr;
>  #endif
>  
> +	/* suspend & resume */
> +	u32 doorbell_mask;
> +};

Please see:

https://www.kernel.org/doc/html/latest/process/maintainer-tip.html#struct-declarations-and-initializers

Thanks,

        tglx
diff mbox series

Patch

diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 646a50d5724f..f688b038dc7b 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -66,11 +66,11 @@ 
  *
  * The "global interrupt mask/unmask" is modified using the
  * MPIC_INT_SET_ENABLE and MPIC_INT_CLEAR_ENABLE
- * registers, which are relative to "main_int_base".
+ * registers, which are relative to "mpic->base".
  *
  * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK
  * and MPIC_INT_CLEAR_MASK registers, which are relative to
- * "per_cpu_int_base". This base address points to a special address,
+ * "mpic->per_cpu". This base address points to a special address,
  * which automatically accesses the registers of the current CPU.
  *
  * The per-CPU mask/unmask can also be adjusted using the global
@@ -112,7 +112,7 @@ 
  *    at the per-CPU level.
  */
 
-/* Registers relative to main_int_base */
+/* Registers relative to mpic->base */
 #define MPIC_INT_CONTROL			0x00
 #define MPIC_INT_CONTROL_NUMINT_MASK		GENMASK(12, 2)
 #define MPIC_SW_TRIG_INT			0x04
@@ -122,7 +122,7 @@ 
 #define MPIC_INT_SOURCE_CPU_MASK		GENMASK(3, 0)
 #define MPIC_INT_IRQ_FIQ_MASK(cpuid)		((BIT(0) | BIT(8)) << (cpuid))
 
-/* Registers relative to per_cpu_int_base */
+/* Registers relative to mpic->per_cpu */
 #define MPIC_IN_DRBEL_CAUSE			0x08
 #define MPIC_IN_DRBEL_MASK			0x0c
 #define MPIC_PPI_CAUSE				0x10
@@ -149,19 +149,38 @@ 
 #define PCI_MSI_FULL_DOORBELL_SRC0_MASK		GENMASK(15, 0)
 #define PCI_MSI_FULL_DOORBELL_SRC1_MASK		GENMASK(31, 16)
 
-static void __iomem *per_cpu_int_base;
-static void __iomem *main_int_base;
-static struct irq_domain *mpic_domain;
-static u32 doorbell_mask_reg;
-static int parent_irq;
+struct mpic {
+	/* Register regions */
+	void __iomem *base;
+	void __iomem *per_cpu;
+
+	/* If MPIC is not top-level interrupt controller */
+	int parent_irq;
+
+	/* Main IRQ domain */
+	struct irq_domain *domain;
+
+#ifdef CONFIG_SMP
+	/* IPI interrupts */
+	struct irq_domain *ipi_domain;
+#endif
+
 #ifdef CONFIG_PCI_MSI
-static struct irq_domain *mpic_msi_domain;
-static struct irq_domain *mpic_msi_inner_domain;
-static DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
-static DEFINE_MUTEX(msi_used_lock);
-static phys_addr_t msi_doorbell_addr;
+	/* MSI interrupts */
+	struct irq_domain *msi_domain;
+	struct irq_domain *msi_inner_domain;
+	DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
+	struct mutex msi_lock;
+	phys_addr_t msi_doorbell_addr;
 #endif
 
+	/* suspend & resume */
+	u32 doorbell_mask;
+};
+
+static struct mpic mpic_data;
+static struct mpic * const mpic = &mpic_data;
+
 static inline bool mpic_is_ipi_available(void)
 {
 	/*
@@ -170,7 +189,7 @@  static inline bool mpic_is_ipi_available(void)
 	 * interrupt controller (e.g. GIC) that takes care of inter-processor
 	 * interrupts.
 	 */
-	return parent_irq <= 0;
+	return mpic->parent_irq <= 0;
 }
 
 static inline u32 msi_doorbell_mask(void)
@@ -206,9 +225,9 @@  static void mpic_irq_mask(struct irq_data *d)
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
 	if (!mpic_is_percpu_irq(hwirq))
-		writel(hwirq, main_int_base + MPIC_INT_CLEAR_ENABLE);
+		writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE);
 	else
-		writel(hwirq, per_cpu_int_base + MPIC_INT_SET_MASK);
+		writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK);
 }
 
 static void mpic_irq_unmask(struct irq_data *d)
@@ -216,9 +235,9 @@  static void mpic_irq_unmask(struct irq_data *d)
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
 	if (!mpic_is_percpu_irq(hwirq))
-		writel(hwirq, main_int_base + MPIC_INT_SET_ENABLE);
+		writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
 	else
-		writel(hwirq, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+		writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 }
 
 #ifdef CONFIG_PCI_MSI
@@ -239,8 +258,8 @@  static void mpic_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 {
 	unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
 
-	msg->address_lo = lower_32_bits(msi_doorbell_addr);
-	msg->address_hi = upper_32_bits(msi_doorbell_addr);
+	msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr);
+	msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr);
 	msg->data = BIT(cpu + 8) | (d->hwirq + msi_doorbell_start());
 }
 
@@ -273,10 +292,10 @@  static int mpic_msi_alloc(struct irq_domain *h, unsigned int virq,
 {
 	int hwirq;
 
-	mutex_lock(&msi_used_lock);
-	hwirq = bitmap_find_free_region(msi_used, msi_doorbell_size(),
+	mutex_lock(&mpic->msi_lock);
+	hwirq = bitmap_find_free_region(mpic->msi_used, msi_doorbell_size(),
 					order_base_2(nr_irqs));
-	mutex_unlock(&msi_used_lock);
+	mutex_unlock(&mpic->msi_lock);
 
 	if (hwirq < 0)
 		return -ENOSPC;
@@ -295,9 +314,9 @@  static void mpic_msi_free(struct irq_domain *h, unsigned int virq,
 {
 	struct irq_data *d = irq_domain_get_irq_data(h, virq);
 
-	mutex_lock(&msi_used_lock);
-	bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
-	mutex_unlock(&msi_used_lock);
+	mutex_lock(&mpic->msi_lock);
+	bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs));
+	mutex_unlock(&mpic->msi_lock);
 }
 
 static const struct irq_domain_ops mpic_msi_domain_ops = {
@@ -310,30 +329,32 @@  static void mpic_msi_reenable_percpu(void)
 	u32 reg;
 
 	/* Enable MSI doorbell mask and combined cpu local interrupt */
-	reg = readl(per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 	reg |= msi_doorbell_mask();
-	writel(reg, per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 
 	/* Unmask local doorbell interrupt */
-	writel(1, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+	writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 }
 
 static int __init mpic_msi_init(struct device_node *node,
 				phys_addr_t main_int_phys_base)
 {
-	msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT;
+	mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT;
 
-	mpic_msi_inner_domain = irq_domain_add_linear(NULL, msi_doorbell_size(),
-						      &mpic_msi_domain_ops,
-						      NULL);
-	if (!mpic_msi_inner_domain)
+	mutex_init(&mpic->msi_lock);
+
+	mpic->msi_inner_domain = irq_domain_add_linear(NULL, msi_doorbell_size(),
+						       &mpic_msi_domain_ops,
+						       NULL);
+	if (!mpic->msi_inner_domain)
 		return -ENOMEM;
 
-	mpic_msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
-						    &mpic_msi_domain_info,
-						    mpic_msi_inner_domain);
-	if (!mpic_msi_domain) {
-		irq_domain_remove(mpic_msi_inner_domain);
+	mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+						     &mpic_msi_domain_info,
+						     mpic->msi_inner_domain);
+	if (!mpic->msi_domain) {
+		irq_domain_remove(mpic->msi_inner_domain);
 		return -ENOMEM;
 	}
 
@@ -341,7 +362,7 @@  static int __init mpic_msi_init(struct device_node *node,
 
 	/* Unmask low 16 MSI irqs on non-IPI platforms */
 	if (!mpic_is_ipi_available())
-		writel(0, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+		writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 
 	return 0;
 }
@@ -369,29 +390,26 @@  static void mpic_perf_init(void)
 	cpuid = cpu_logical_map(smp_processor_id());
 
 	/* Enable Performance Counter Overflow interrupts */
-	writel(MPIC_INT_CAUSE_PERF(cpuid),
-	       per_cpu_int_base + MPIC_INT_FABRIC_MASK);
+	writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK);
 }
 
 #ifdef CONFIG_SMP
-static struct irq_domain *mpic_ipi_domain;
-
 static void mpic_ipi_mask(struct irq_data *d)
 {
 	u32 reg;
 
-	reg = readl(per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 	reg &= ~BIT(d->hwirq);
-	writel(reg, per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 }
 
 static void mpic_ipi_unmask(struct irq_data *d)
 {
 	u32 reg;
 
-	reg = readl(per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 	reg |= BIT(d->hwirq);
-	writel(reg, per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 }
 
 static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
@@ -410,12 +428,12 @@  static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 	dsb();
 
 	/* submit softirq */
-	writel((map << 8) | d->hwirq, main_int_base + MPIC_SW_TRIG_INT);
+	writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT);
 }
 
 static void mpic_ipi_ack(struct irq_data *d)
 {
-	writel(~BIT(d->hwirq), per_cpu_int_base + MPIC_IN_DRBEL_CAUSE);
+	writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
 }
 
 static struct irq_chip mpic_ipi_irqchip = {
@@ -453,13 +471,13 @@  static const struct irq_domain_ops mpic_ipi_domain_ops = {
 static void mpic_ipi_resume(void)
 {
 	for (irq_hw_number_t i = 0; i < IPI_DOORBELL_NR; i++) {
-		unsigned int virq = irq_find_mapping(mpic_ipi_domain, i);
+		unsigned int virq = irq_find_mapping(mpic->ipi_domain, i);
 		struct irq_data *d;
 
 		if (!virq || !irq_percpu_is_enabled(virq))
 			continue;
 
-		d = irq_domain_get_irq_data(mpic_ipi_domain, virq);
+		d = irq_domain_get_irq_data(mpic->ipi_domain, virq);
 		mpic_ipi_unmask(d);
 	}
 }
@@ -468,14 +486,14 @@  static int __init mpic_ipi_init(struct device_node *node)
 {
 	int base_ipi;
 
-	mpic_ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node),
-						   IPI_DOORBELL_NR,
-						   &mpic_ipi_domain_ops, NULL);
-	if (WARN_ON(!mpic_ipi_domain))
+	mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+						    IPI_DOORBELL_NR,
+						    &mpic_ipi_domain_ops, NULL);
+	if (WARN_ON(!mpic->ipi_domain))
 		return -ENOMEM;
 
-	irq_domain_update_bus_token(mpic_ipi_domain, DOMAIN_BUS_IPI);
-	base_ipi = irq_domain_alloc_irqs(mpic_ipi_domain, IPI_DOORBELL_NR,
+	irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI);
+	base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR,
 					 NUMA_NO_NODE, NULL);
 	if (WARN_ON(!base_ipi))
 		return -ENOMEM;
@@ -494,7 +512,7 @@  static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	/* Select a single core from the affinity mask which is online */
 	cpu = cpumask_any_and(mask_val, cpu_online_mask);
 
-	atomic_io_modify(main_int_base + MPIC_INT_SOURCE_CTL(hwirq),
+	atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq),
 			 MPIC_INT_SOURCE_CPU_MASK, BIT(cpu_logical_map(cpu)));
 
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -504,27 +522,27 @@  static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 
 static void mpic_smp_cpu_init(void)
 {
-	for (irq_hw_number_t i = 0; i < mpic_domain->hwirq_max; i++)
-		writel(i, per_cpu_int_base + MPIC_INT_SET_MASK);
+	for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++)
+		writel(i, mpic->per_cpu + MPIC_INT_SET_MASK);
 
 	if (!mpic_is_ipi_available())
 		return;
 
 	/* Disable all IPIs */
-	writel(0, per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 
 	/* Clear pending IPIs */
-	writel(0, per_cpu_int_base + MPIC_IN_DRBEL_CAUSE);
+	writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
 
 	/* Unmask IPI interrupt */
-	writel(0, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+	writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 }
 
 static void mpic_reenable_percpu(void)
 {
 	/* Re-enable per-CPU interrupts that were enabled before suspend */
 	for (irq_hw_number_t i = 0; i < MPIC_MAX_PER_CPU_IRQS; i++) {
-		unsigned int virq = irq_linear_revmap(mpic_domain, i);
+		unsigned int virq = irq_linear_revmap(mpic->domain, i);
 		struct irq_data *d;
 
 		if (!virq || !irq_percpu_is_enabled(virq))
@@ -553,7 +571,7 @@  static int mpic_cascaded_starting_cpu(unsigned int cpu)
 {
 	mpic_perf_init();
 	mpic_reenable_percpu();
-	enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+	enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE);
 
 	return 0;
 }
@@ -582,9 +600,9 @@  static int mpic_irq_map(struct irq_domain *h, unsigned int virq,
 
 	mpic_irq_mask(irq_get_irq_data(virq));
 	if (!mpic_is_percpu_irq(hwirq))
-		writel(hwirq, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+		writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 	else
-		writel(hwirq, main_int_base + MPIC_INT_SET_ENABLE);
+		writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
 	irq_set_status_flags(virq, IRQ_LEVEL);
 
 	if (mpic_is_percpu_irq(hwirq)) {
@@ -611,12 +629,12 @@  static void mpic_handle_msi_irq(void)
 	unsigned long cause;
 	unsigned int i;
 
-	cause = readl_relaxed(per_cpu_int_base + MPIC_IN_DRBEL_CAUSE);
+	cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
 	cause &= msi_doorbell_mask();
-	writel(~cause, per_cpu_int_base + MPIC_IN_DRBEL_CAUSE);
+	writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
 
 	for_each_set_bit(i, &cause, BITS_PER_LONG)
-		generic_handle_domain_irq(mpic_msi_inner_domain,
+		generic_handle_domain_irq(mpic->msi_inner_domain,
 					  i - msi_doorbell_start());
 }
 #else
@@ -629,11 +647,11 @@  static void mpic_handle_ipi_irq(void)
 	unsigned long cause;
 	irq_hw_number_t i;
 
-	cause = readl_relaxed(per_cpu_int_base + MPIC_IN_DRBEL_CAUSE);
+	cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
 	cause &= IPI_DOORBELL_MASK;
 
 	for_each_set_bit(i, &cause, IPI_DOORBELL_NR)
-		generic_handle_domain_irq(mpic_ipi_domain, i);
+		generic_handle_domain_irq(mpic->ipi_domain, i);
 }
 #else
 static inline void mpic_handle_ipi_irq(void) {}
@@ -648,11 +666,11 @@  static void mpic_handle_cascade_irq(struct irq_desc *desc)
 
 	chained_irq_enter(chip, desc);
 
-	cause = readl_relaxed(per_cpu_int_base + MPIC_PPI_CAUSE);
+	cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE);
 	cpuid = cpu_logical_map(smp_processor_id());
 
 	for_each_set_bit(i, &cause, BITS_PER_LONG) {
-		irqsrc = readl_relaxed(main_int_base + MPIC_INT_SOURCE_CTL(i));
+		irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i));
 
 		/* Check if the interrupt is not masked on current CPU.
 		 * Test IRQ (0-1) and FIQ (8-9) mask bits.
@@ -665,7 +683,7 @@  static void mpic_handle_cascade_irq(struct irq_desc *desc)
 			continue;
 		}
 
-		generic_handle_domain_irq(mpic_domain, i);
+		generic_handle_domain_irq(mpic->domain, i);
 	}
 
 	chained_irq_exit(chip, desc);
@@ -677,14 +695,14 @@  static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
 	u32 irqstat;
 
 	do {
-		irqstat = readl_relaxed(per_cpu_int_base + MPIC_CPU_INTACK);
+		irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK);
 		i = FIELD_GET(MPIC_CPU_INTACK_IID_MASK, irqstat);
 
 		if (i > 1022)
 			break;
 
 		if (i > 1)
-			generic_handle_domain_irq(mpic_domain, i);
+			generic_handle_domain_irq(mpic->domain, i);
 
 		/* MSI handling */
 		if (i == 1)
@@ -698,7 +716,7 @@  static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
 
 static int mpic_suspend(void)
 {
-	doorbell_mask_reg = readl(per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 
 	return 0;
 }
@@ -708,8 +726,8 @@  static void mpic_resume(void)
 	bool src0, src1;
 
 	/* Re-enable interrupts */
-	for (irq_hw_number_t i = 0; i < mpic_domain->hwirq_max; i++) {
-		unsigned int virq = irq_linear_revmap(mpic_domain, i);
+	for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) {
+		unsigned int virq = irq_linear_revmap(mpic->domain, i);
 		struct irq_data *d;
 
 		if (!virq)
@@ -719,12 +737,12 @@  static void mpic_resume(void)
 
 		if (!mpic_is_percpu_irq(i)) {
 			/* Non per-CPU interrupts */
-			writel(i, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+			writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 			if (!irqd_irq_disabled(d))
 				mpic_irq_unmask(d);
 		} else {
 			/* Per-CPU interrupts */
-			writel(i, main_int_base + MPIC_INT_SET_ENABLE);
+			writel(i, mpic->base + MPIC_INT_SET_ENABLE);
 
 			/*
 			 * Re-enable on the current CPU, mpic_reenable_percpu()
@@ -736,20 +754,20 @@  static void mpic_resume(void)
 	}
 
 	/* Reconfigure doorbells for IPIs and MSIs */
-	writel(doorbell_mask_reg, per_cpu_int_base + MPIC_IN_DRBEL_MASK);
+	writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
 
 	if (mpic_is_ipi_available()) {
-		src0 = doorbell_mask_reg & IPI_DOORBELL_MASK;
-		src1 = doorbell_mask_reg & PCI_MSI_DOORBELL_MASK;
+		src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK;
+		src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK;
 	} else {
-		src0 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
-		src1 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
+		src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
+		src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
 	}
 
 	if (src0)
-		writel(0, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+		writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 	if (src1)
-		writel(1, per_cpu_int_base + MPIC_INT_CLEAR_MASK);
+		writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 
 	if (mpic_is_ipi_available())
 		mpic_ipi_resume();
@@ -801,33 +819,33 @@  static int __init mpic_of_init(struct device_node *node,
 	unsigned int nr_irqs;
 	int err;
 
-	err = mpic_map_region(node, 0, &main_int_base, &phys_base);
+	err = mpic_map_region(node, 0, &mpic->base, &phys_base);
 	if (err)
 		return err;
 
-	err = mpic_map_region(node, 1, &per_cpu_int_base, NULL);
+	err = mpic_map_region(node, 1, &mpic->per_cpu, NULL);
 	if (err)
 		return err;
 
 	nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK,
-			    readl(main_int_base + MPIC_INT_CONTROL));
+			    readl(mpic->base + MPIC_INT_CONTROL));
 
 	for (irq_hw_number_t i = 0; i < nr_irqs; i++)
-		writel(i, main_int_base + MPIC_INT_CLEAR_ENABLE);
+		writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE);
 
-	mpic_domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, NULL);
-	if (!mpic_domain) {
+	mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, NULL);
+	if (!mpic->domain) {
 		pr_err("%pOF: Unable to add IRQ domain\n", node);
 		return -ENOMEM;
 	}
 
-	irq_domain_update_bus_token(mpic_domain, DOMAIN_BUS_WIRED);
+	irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED);
 
 	/*
-	 * Initialize parent_irq before calling any other functions, since it is
-	 * used to distinguish between IPI and non-IPI platforms.
+	 * Initialize mpic->parent_irq before calling any other functions, since
+	 * it is used to distinguish between IPI and non-IPI platforms.
 	 */
-	parent_irq = irq_of_parse_and_map(node, 0);
+	mpic->parent_irq = irq_of_parse_and_map(node, 0);
 
 	/* Setup for the boot CPU */
 	mpic_perf_init();
@@ -839,8 +857,8 @@  static int __init mpic_of_init(struct device_node *node,
 		return err;
 	}
 
-	if (parent_irq <= 0) {
-		irq_set_default_host(mpic_domain);
+	if (mpic->parent_irq <= 0) {
+		irq_set_default_host(mpic->domain);
 		set_handle_irq(mpic_handle_irq);
 #ifdef CONFIG_SMP
 		err = mpic_ipi_init(node);
@@ -859,7 +877,7 @@  static int __init mpic_of_init(struct device_node *node,
 					  "irqchip/armada/cascade:starting",
 					  mpic_cascaded_starting_cpu, NULL);
 #endif
-		irq_set_chained_handler(parent_irq, mpic_handle_cascade_irq);
+		irq_set_chained_handler(mpic->parent_irq, mpic_handle_cascade_irq);
 	}
 
 	register_syscore_ops(&mpic_syscore_ops);