diff mbox series

[2/6] irqchip/armada-370-xp: Implement SoC Error interrupts

Message ID 20220506134029.21470-3-pali@kernel.org (mailing list archive)
State New, archived
Headers show
Series PCI: mvebu: Add support for PME and AER interrupts | expand

Commit Message

Pali Rohár May 6, 2022, 1:40 p.m. UTC
MPIC IRQ 4 is used as SoC Error Summary interrupt and provides access to
another hierarchy of SoC Error interrupts. Implement a new IRQ chip and
domain for accessing this IRQ hierarchy.

Signed-off-by: Pali Rohár <pali@kernel.org>
---
 drivers/irqchip/irq-armada-370-xp.c | 213 +++++++++++++++++++++++++++-
 1 file changed, 210 insertions(+), 3 deletions(-)

Comments

Marc Zyngier May 6, 2022, 6:19 p.m. UTC | #1
On Fri, 06 May 2022 14:40:25 +0100,
Pali Rohár <pali@kernel.org> wrote:
> 
> MPIC IRQ 4 is used as SoC Error Summary interrupt and provides access to
> another hierarchy of SoC Error interrupts. Implement a new IRQ chip and
> domain for accessing this IRQ hierarchy.
> 
> Signed-off-by: Pali Rohár <pali@kernel.org>
> ---
>  drivers/irqchip/irq-armada-370-xp.c | 213 +++++++++++++++++++++++++++-
>  1 file changed, 210 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
> index ebd76ea1c69b..71578b65f5c8 100644
> --- a/drivers/irqchip/irq-armada-370-xp.c
> +++ b/drivers/irqchip/irq-armada-370-xp.c
> @@ -117,6 +117,8 @@
>  /* Registers relative to main_int_base */
>  #define ARMADA_370_XP_INT_CONTROL		(0x00)
>  #define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x04)
> +#define ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS	(0x20)
> +#define ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS	(0x24)
>  #define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
>  #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
>  #define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)
> @@ -130,6 +132,8 @@
>  #define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
>  #define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
>  #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
> +#define ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF	(0x50)
> +#define ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF	(0x54)
>  #define ARMADA_370_XP_INT_FABRIC_MASK_OFFS	(0x54)
>  #define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << cpu)
>  
> @@ -146,6 +150,8 @@
>  static void __iomem *per_cpu_int_base;
>  static void __iomem *main_int_base;
>  static struct irq_domain *armada_370_xp_mpic_domain;
> +static struct irq_domain *armada_370_xp_soc_err_domain;
> +static unsigned int soc_err_irq_num_regs;
>  static u32 doorbell_mask_reg;
>  static int parent_irq;
>  #ifdef CONFIG_PCI_MSI
> @@ -156,6 +162,8 @@ static DEFINE_MUTEX(msi_used_lock);
>  static phys_addr_t msi_doorbell_addr;
>  #endif
>  
> +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> +
>  static inline bool is_percpu_irq(irq_hw_number_t irq)
>  {
>  	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
>  		armada_370_xp_irq_unmask(data);
>  	}
>  
> +	/* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> +		struct irq_data *data;
> +		int virq;
> +
> +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> +		if (virq == 0)
> +			continue;
> +
> +		data = irq_get_irq_data(virq);
> +
> +		if (!irq_percpu_is_enabled(virq))
> +			continue;
> +
> +		armada_370_xp_soc_err_irq_unmask(data);
> +	}

So you do this loop and all these lookups, both here and in the resume
function (duplicated code!) just to be able to call the unmask
function?  This would be better served by two straight writes of the
mask register, which you'd conveniently save on suspend.

Yes, you have only duplicated the existing logic. But surely there is
something better to do.

> +
> +	/* Unmask summary SoC Error Interrupt */
> +	if (soc_err_irq_num_regs > 0)
> +		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
> +
>  	ipi_resume();
>  }
>  
> @@ -546,8 +575,8 @@ static struct irq_chip armada_370_xp_irq_chip = {
>  static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
>  				      unsigned int virq, irq_hw_number_t hw)
>  {
> -	/* IRQs 0 and 1 cannot be mapped, they are handled internally */
> -	if (hw <= 1)
> +	/* IRQs 0, 1 and 4 cannot be mapped, they are handled internally */
> +	if (hw <= 1 || hw == 4)
>  		return -EINVAL;
>  
>  	armada_370_xp_irq_mask(irq_get_irq_data(virq));
> @@ -577,6 +606,99 @@ static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
>  	.xlate = irq_domain_xlate_onecell,
>  };
>  
> +static DEFINE_RAW_SPINLOCK(armada_370_xp_soc_err_lock);
> +
> +static void armada_370_xp_soc_err_irq_mask(struct irq_data *d)
> +{
> +	irq_hw_number_t hwirq = irqd_to_hwirq(d);
> +	u32 reg, mask;
> +
> +	reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
> +			  : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
> +
> +	raw_spin_lock(&armada_370_xp_soc_err_lock);
> +	mask = readl(per_cpu_int_base + reg);
> +	mask &= ~BIT(hwirq % 32);
> +	writel(mask, per_cpu_int_base + reg);
> +	raw_spin_unlock(&armada_370_xp_soc_err_lock);
> +}
> +
> +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d)
> +{
> +	irq_hw_number_t hwirq = irqd_to_hwirq(d);
> +	u32 reg, mask;
> +
> +	reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
> +			  : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
> +
> +	raw_spin_lock(&armada_370_xp_soc_err_lock);
> +	mask = readl(per_cpu_int_base + reg);
> +	mask |= BIT(hwirq % 32);
> +	writel(mask, per_cpu_int_base + reg);
> +	raw_spin_unlock(&armada_370_xp_soc_err_lock);
> +}
> +
> +static int armada_370_xp_soc_err_irq_mask_on_cpu(void *par)
> +{
> +	struct irq_data *d = par;
> +	armada_370_xp_soc_err_irq_mask(d);
> +	return 0;
> +}
> +
> +static int armada_370_xp_soc_err_irq_unmask_on_cpu(void *par)
> +{
> +	struct irq_data *d = par;
> +	armada_370_xp_soc_err_irq_unmask(d);
> +	return 0;
> +}
> +
> +static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
> +					      const struct cpumask *mask,
> +					      bool force)
> +{
> +	unsigned int cpu;
> +
> +	cpus_read_lock();
> +
> +	/* First disable IRQ on all cores */
> +	for_each_online_cpu(cpu)
> +		smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
> +
> +	/* Select a single core from the affinity mask which is online */
> +	cpu = cpumask_any_and(mask, cpu_online_mask);
> +	smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
> +
> +	cpus_read_unlock();
> +
> +	irq_data_update_effective_affinity(d, cpumask_of(cpu));
> +
> +	return IRQ_SET_MASK_OK;
> +}

Aren't these per-CPU interrupts anyway? What does it mean to set their
affinity? /me rolls eyes...

> +
> +static struct irq_chip armada_370_xp_soc_err_irq_chip = {
> +	.name = "MPIC SOC",
> +	.irq_mask = armada_370_xp_soc_err_irq_mask,
> +	.irq_unmask = armada_370_xp_soc_err_irq_unmask,
> +	.irq_set_affinity = armada_xp_soc_err_irq_set_affinity,
> +};
> +
> +static int armada_370_xp_soc_err_irq_map(struct irq_domain *h,
> +					 unsigned int virq, irq_hw_number_t hw)
> +{
> +	armada_370_xp_soc_err_irq_mask(irq_get_irq_data(virq));
> +	irq_set_status_flags(virq, IRQ_LEVEL);
> +	irq_set_percpu_devid(virq);
> +	irq_set_chip_and_handler(virq, &armada_370_xp_soc_err_irq_chip,
> +				 handle_percpu_devid_irq);
> +	irq_set_probe(virq);
> +	return 0;
> +}
> +
> +static const struct irq_domain_ops armada_370_xp_soc_err_irq_ops = {
> +	.map = armada_370_xp_soc_err_irq_map,
> +	.xlate = irq_domain_xlate_onecell,
> +};
> +
>  #ifdef CONFIG_PCI_MSI
>  static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
>  {
> @@ -605,6 +727,32 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
>  static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
>  #endif
>  
> +static void armada_370_xp_handle_soc_err_irq(void)
> +{
> +	unsigned long status, bit;
> +	u32 mask, cause;
> +
> +	if (soc_err_irq_num_regs < 1)
> +		return;
> +
> +	mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF);
> +	cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS);
> +	status = cause & mask;
> +
> +	for_each_set_bit(bit, &status, 32)
> +		generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit);
> +
> +	if (soc_err_irq_num_regs < 2)
> +		return;
> +
> +	mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF);
> +	cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS);
> +	status = cause & mask;
> +
> +	for_each_set_bit(bit, &status, 32)
> +		generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit + 32);
> +}
> +
>  static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
>  {
>  	struct irq_chip *chip = irq_desc_get_chip(desc);
> @@ -630,6 +778,11 @@ static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
>  			continue;
>  		}
>  
> +		if (irqn == 4) {
> +			armada_370_xp_handle_soc_err_irq();
> +			continue;
> +		}
> +
>  		generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
>  	}
>  
> @@ -649,7 +802,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
>  		if (irqnr > 1022)
>  			break;
>  
> -		if (irqnr > 1) {
> +		if (irqnr > 1 && irqnr != 4) {
>  			generic_handle_domain_irq(armada_370_xp_mpic_domain,
>  						  irqnr);
>  			continue;
> @@ -659,6 +812,10 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
>  		if (irqnr == 1)
>  			armada_370_xp_handle_msi_irq(regs, false);
>  
> +		/* SoC Error handling */
> +		if (irqnr == 4)
> +			armada_370_xp_handle_soc_err_irq();
> +
>  #ifdef CONFIG_SMP
>  		/* IPI Handling */
>  		if (irqnr == 0) {
> @@ -722,6 +879,26 @@ static void armada_370_xp_mpic_resume(void)
>  		}
>  	}
>  
> +	/* Re-enable per-CPU SoC Error interrupts */
> +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> +		struct irq_data *data;
> +		int virq;
> +
> +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> +		if (virq == 0)
> +			continue;
> +
> +		data = irq_get_irq_data(virq);
> +
> +		/*
> +		 * Re-enable on the current CPU,
> +		 * armada_xp_mpic_reenable_percpu() will take
> +		 * care of secondary CPUs when they come up.
> +		 */
> +		if (irq_percpu_is_enabled(virq))
> +			armada_370_xp_soc_err_irq_unmask(data);
> +	}

As I said above, this is duplicated code that should be replaced with
a simple write to the corresponding MMIO registers.

> +
>  	/* Reconfigure doorbells for IPIs and MSIs */
>  	writel(doorbell_mask_reg,
>  	       per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
> @@ -730,6 +907,10 @@ static void armada_370_xp_mpic_resume(void)
>  	if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
>  		writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
>  
> +	/* Unmask summary SoC Error Interrupt */
> +	if (soc_err_irq_num_regs > 0)
> +		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);

Magic value?

Also, writing to this register tends to indicate that the whole thing
should really be a chained irqchip... Maybe that's overkill in this
instance, but the whole thing is rather oddly architected.

> +
>  	ipi_resume();
>  }
>  
> @@ -742,6 +923,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
>  					     struct device_node *parent)
>  {
>  	struct resource main_int_res, per_cpu_int_res;
> +	struct device_node *soc_err_node;
>  	int nr_irqs, i;
>  	u32 control;
>  
> @@ -775,12 +957,37 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
>  	BUG_ON(!armada_370_xp_mpic_domain);
>  	irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
>  
> +	soc_err_node = of_get_next_child(node, NULL);
> +	if (!soc_err_node) {
> +		pr_warn("Missing SoC Error Interrupt Controller node\n");
> +		pr_warn("Extended interrupts are not supported\n");
> +	} else {
> +		pr_info("Registering MPIC SoC Error Interrupt Controller\n");
> +		/*
> +		 * Armada 370 and XP have only 32 SoC Error IRQs in one register
> +		 * and other Armada platforms have 64 IRQs in two registers.
> +		 */
> +		soc_err_irq_num_regs =
> +			of_machine_is_compatible("marvell,armada-370-xp") ? 1 : 2;

Don't you have an actual compatible string for the interrupt
controller?  It seems odd to rely on the SoC name.

> +		armada_370_xp_soc_err_domain =
> +			irq_domain_add_hierarchy(armada_370_xp_mpic_domain, 0,
> +						 soc_err_irq_num_regs * 32,
> +						 soc_err_node,
> +						 &armada_370_xp_soc_err_irq_ops,
> +						 NULL);
> +		BUG_ON(!armada_370_xp_soc_err_domain);
> +	}
> +
>  	/* Setup for the boot CPU */
>  	armada_xp_mpic_perf_init();
>  	armada_xp_mpic_smp_cpu_init();
>  
>  	armada_370_xp_msi_init(node, main_int_res.start);
>  
> +	/* Unmask summary SoC Error Interrupt */
> +	if (soc_err_irq_num_regs > 0)
> +		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
> +

Magic value, duplicated this time?

>  	parent_irq = irq_of_parse_and_map(node, 0);
>  	if (parent_irq <= 0) {
>  		irq_set_default_host(armada_370_xp_mpic_domain);
> -- 
> 2.20.1
> 
> 

	M.
Pali Rohár May 6, 2022, 6:30 p.m. UTC | #2
On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> On Fri, 06 May 2022 14:40:25 +0100,
> Pali Rohár <pali@kernel.org> wrote:
> > 
> > MPIC IRQ 4 is used as SoC Error Summary interrupt and provides access to
> > another hierarchy of SoC Error interrupts. Implement a new IRQ chip and
> > domain for accessing this IRQ hierarchy.
> > 
> > Signed-off-by: Pali Rohár <pali@kernel.org>
> > ---
> >  drivers/irqchip/irq-armada-370-xp.c | 213 +++++++++++++++++++++++++++-
> >  1 file changed, 210 insertions(+), 3 deletions(-)
> > 
> > diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
> > index ebd76ea1c69b..71578b65f5c8 100644
> > --- a/drivers/irqchip/irq-armada-370-xp.c
> > +++ b/drivers/irqchip/irq-armada-370-xp.c
> > @@ -117,6 +117,8 @@
> >  /* Registers relative to main_int_base */
> >  #define ARMADA_370_XP_INT_CONTROL		(0x00)
> >  #define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x04)
> > +#define ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS	(0x20)
> > +#define ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS	(0x24)
> >  #define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
> >  #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
> >  #define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)
> > @@ -130,6 +132,8 @@
> >  #define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
> >  #define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
> >  #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
> > +#define ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF	(0x50)
> > +#define ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF	(0x54)
> >  #define ARMADA_370_XP_INT_FABRIC_MASK_OFFS	(0x54)
> >  #define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << cpu)
> >  
> > @@ -146,6 +150,8 @@
> >  static void __iomem *per_cpu_int_base;
> >  static void __iomem *main_int_base;
> >  static struct irq_domain *armada_370_xp_mpic_domain;
> > +static struct irq_domain *armada_370_xp_soc_err_domain;
> > +static unsigned int soc_err_irq_num_regs;
> >  static u32 doorbell_mask_reg;
> >  static int parent_irq;
> >  #ifdef CONFIG_PCI_MSI
> > @@ -156,6 +162,8 @@ static DEFINE_MUTEX(msi_used_lock);
> >  static phys_addr_t msi_doorbell_addr;
> >  #endif
> >  
> > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > +
> >  static inline bool is_percpu_irq(irq_hw_number_t irq)
> >  {
> >  	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> >  		armada_370_xp_irq_unmask(data);
> >  	}
> >  
> > +	/* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > +		struct irq_data *data;
> > +		int virq;
> > +
> > +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > +		if (virq == 0)
> > +			continue;
> > +
> > +		data = irq_get_irq_data(virq);
> > +
> > +		if (!irq_percpu_is_enabled(virq))
> > +			continue;
> > +
> > +		armada_370_xp_soc_err_irq_unmask(data);
> > +	}
> 
> So you do this loop and all these lookups, both here and in the resume
> function (duplicated code!) just to be able to call the unmask
> function?  This would be better served by two straight writes of the
> mask register, which you'd conveniently save on suspend.
> 
> Yes, you have only duplicated the existing logic. But surely there is
> something better to do.

Yes, I just used existing logic.

I'm not rewriting driver or doing big refactor of it, as this is not in
the scope of the PCIe AER interrupt support.

> > +
> > +	/* Unmask summary SoC Error Interrupt */
> > +	if (soc_err_irq_num_regs > 0)
> > +		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
> > +
> >  	ipi_resume();
> >  }
> >  
> > @@ -546,8 +575,8 @@ static struct irq_chip armada_370_xp_irq_chip = {
> >  static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
> >  				      unsigned int virq, irq_hw_number_t hw)
> >  {
> > -	/* IRQs 0 and 1 cannot be mapped, they are handled internally */
> > -	if (hw <= 1)
> > +	/* IRQs 0, 1 and 4 cannot be mapped, they are handled internally */
> > +	if (hw <= 1 || hw == 4)
> >  		return -EINVAL;
> >  
> >  	armada_370_xp_irq_mask(irq_get_irq_data(virq));
> > @@ -577,6 +606,99 @@ static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
> >  	.xlate = irq_domain_xlate_onecell,
> >  };
> >  
> > +static DEFINE_RAW_SPINLOCK(armada_370_xp_soc_err_lock);
> > +
> > +static void armada_370_xp_soc_err_irq_mask(struct irq_data *d)
> > +{
> > +	irq_hw_number_t hwirq = irqd_to_hwirq(d);
> > +	u32 reg, mask;
> > +
> > +	reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
> > +			  : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
> > +
> > +	raw_spin_lock(&armada_370_xp_soc_err_lock);
> > +	mask = readl(per_cpu_int_base + reg);
> > +	mask &= ~BIT(hwirq % 32);
> > +	writel(mask, per_cpu_int_base + reg);
> > +	raw_spin_unlock(&armada_370_xp_soc_err_lock);
> > +}
> > +
> > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d)
> > +{
> > +	irq_hw_number_t hwirq = irqd_to_hwirq(d);
> > +	u32 reg, mask;
> > +
> > +	reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
> > +			  : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
> > +
> > +	raw_spin_lock(&armada_370_xp_soc_err_lock);
> > +	mask = readl(per_cpu_int_base + reg);
> > +	mask |= BIT(hwirq % 32);
> > +	writel(mask, per_cpu_int_base + reg);
> > +	raw_spin_unlock(&armada_370_xp_soc_err_lock);
> > +}
> > +
> > +static int armada_370_xp_soc_err_irq_mask_on_cpu(void *par)
> > +{
> > +	struct irq_data *d = par;
> > +	armada_370_xp_soc_err_irq_mask(d);
> > +	return 0;
> > +}
> > +
> > +static int armada_370_xp_soc_err_irq_unmask_on_cpu(void *par)
> > +{
> > +	struct irq_data *d = par;
> > +	armada_370_xp_soc_err_irq_unmask(d);
> > +	return 0;
> > +}
> > +
> > +static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
> > +					      const struct cpumask *mask,
> > +					      bool force)
> > +{
> > +	unsigned int cpu;
> > +
> > +	cpus_read_lock();
> > +
> > +	/* First disable IRQ on all cores */
> > +	for_each_online_cpu(cpu)
> > +		smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
> > +
> > +	/* Select a single core from the affinity mask which is online */
> > +	cpu = cpumask_any_and(mask, cpu_online_mask);
> > +	smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
> > +
> > +	cpus_read_unlock();
> > +
> > +	irq_data_update_effective_affinity(d, cpumask_of(cpu));
> > +
> > +	return IRQ_SET_MASK_OK;
> > +}
> 
> Aren't these per-CPU interrupts anyway? What does it mean to set their
> affinity? /me rolls eyes...

Yes, they are per-CPU interrupts. But to mask or unmask particular
interrupt for specific CPU is possible only from that CPU. CPU 0 just
cannot move interrupt from CPU 0 to CPU 1. CPU 0 can only mask that
interrupt and CPU 1 has to unmask it.

> > +
> > +static struct irq_chip armada_370_xp_soc_err_irq_chip = {
> > +	.name = "MPIC SOC",
> > +	.irq_mask = armada_370_xp_soc_err_irq_mask,
> > +	.irq_unmask = armada_370_xp_soc_err_irq_unmask,
> > +	.irq_set_affinity = armada_xp_soc_err_irq_set_affinity,
> > +};
> > +
> > +static int armada_370_xp_soc_err_irq_map(struct irq_domain *h,
> > +					 unsigned int virq, irq_hw_number_t hw)
> > +{
> > +	armada_370_xp_soc_err_irq_mask(irq_get_irq_data(virq));
> > +	irq_set_status_flags(virq, IRQ_LEVEL);
> > +	irq_set_percpu_devid(virq);
> > +	irq_set_chip_and_handler(virq, &armada_370_xp_soc_err_irq_chip,
> > +				 handle_percpu_devid_irq);
> > +	irq_set_probe(virq);
> > +	return 0;
> > +}
> > +
> > +static const struct irq_domain_ops armada_370_xp_soc_err_irq_ops = {
> > +	.map = armada_370_xp_soc_err_irq_map,
> > +	.xlate = irq_domain_xlate_onecell,
> > +};
> > +
> >  #ifdef CONFIG_PCI_MSI
> >  static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
> >  {
> > @@ -605,6 +727,32 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
> >  static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
> >  #endif
> >  
> > +static void armada_370_xp_handle_soc_err_irq(void)
> > +{
> > +	unsigned long status, bit;
> > +	u32 mask, cause;
> > +
> > +	if (soc_err_irq_num_regs < 1)
> > +		return;
> > +
> > +	mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF);
> > +	cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS);
> > +	status = cause & mask;
> > +
> > +	for_each_set_bit(bit, &status, 32)
> > +		generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit);
> > +
> > +	if (soc_err_irq_num_regs < 2)
> > +		return;
> > +
> > +	mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF);
> > +	cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS);
> > +	status = cause & mask;
> > +
> > +	for_each_set_bit(bit, &status, 32)
> > +		generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit + 32);
> > +}
> > +
> >  static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
> >  {
> >  	struct irq_chip *chip = irq_desc_get_chip(desc);
> > @@ -630,6 +778,11 @@ static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
> >  			continue;
> >  		}
> >  
> > +		if (irqn == 4) {
> > +			armada_370_xp_handle_soc_err_irq();
> > +			continue;
> > +		}
> > +
> >  		generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
> >  	}
> >  
> > @@ -649,7 +802,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
> >  		if (irqnr > 1022)
> >  			break;
> >  
> > -		if (irqnr > 1) {
> > +		if (irqnr > 1 && irqnr != 4) {
> >  			generic_handle_domain_irq(armada_370_xp_mpic_domain,
> >  						  irqnr);
> >  			continue;
> > @@ -659,6 +812,10 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
> >  		if (irqnr == 1)
> >  			armada_370_xp_handle_msi_irq(regs, false);
> >  
> > +		/* SoC Error handling */
> > +		if (irqnr == 4)
> > +			armada_370_xp_handle_soc_err_irq();
> > +
> >  #ifdef CONFIG_SMP
> >  		/* IPI Handling */
> >  		if (irqnr == 0) {
> > @@ -722,6 +879,26 @@ static void armada_370_xp_mpic_resume(void)
> >  		}
> >  	}
> >  
> > +	/* Re-enable per-CPU SoC Error interrupts */
> > +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > +		struct irq_data *data;
> > +		int virq;
> > +
> > +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > +		if (virq == 0)
> > +			continue;
> > +
> > +		data = irq_get_irq_data(virq);
> > +
> > +		/*
> > +		 * Re-enable on the current CPU,
> > +		 * armada_xp_mpic_reenable_percpu() will take
> > +		 * care of secondary CPUs when they come up.
> > +		 */
> > +		if (irq_percpu_is_enabled(virq))
> > +			armada_370_xp_soc_err_irq_unmask(data);
> > +	}
> 
> As I said above, this is duplicated code that should be replaced with
> a simple write to the corresponding MMIO registers.
> 
> > +
> >  	/* Reconfigure doorbells for IPIs and MSIs */
> >  	writel(doorbell_mask_reg,
> >  	       per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
> > @@ -730,6 +907,10 @@ static void armada_370_xp_mpic_resume(void)
> >  	if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
> >  		writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
> >  
> > +	/* Unmask summary SoC Error Interrupt */
> > +	if (soc_err_irq_num_regs > 0)
> > +		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
> 
> Magic value?
> 
> Also, writing to this register tends to indicate that the whole thing
> should really be a chained irqchip... Maybe that's overkill in this
> instance, but the whole thing is rather oddly architected.

I used exactly same code style like it is used for MSI doorbell
interrupts.

Yes, it looks like chained irqchip, but it is overkill.

> > +
> >  	ipi_resume();
> >  }
> >  
> > @@ -742,6 +923,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
> >  					     struct device_node *parent)
> >  {
> >  	struct resource main_int_res, per_cpu_int_res;
> > +	struct device_node *soc_err_node;
> >  	int nr_irqs, i;
> >  	u32 control;
> >  
> > @@ -775,12 +957,37 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
> >  	BUG_ON(!armada_370_xp_mpic_domain);
> >  	irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
> >  
> > +	soc_err_node = of_get_next_child(node, NULL);
> > +	if (!soc_err_node) {
> > +		pr_warn("Missing SoC Error Interrupt Controller node\n");
> > +		pr_warn("Extended interrupts are not supported\n");
> > +	} else {
> > +		pr_info("Registering MPIC SoC Error Interrupt Controller\n");
> > +		/*
> > +		 * Armada 370 and XP have only 32 SoC Error IRQs in one register
> > +		 * and other Armada platforms have 64 IRQs in two registers.
> > +		 */
> > +		soc_err_irq_num_regs =
> > +			of_machine_is_compatible("marvell,armada-370-xp") ? 1 : 2;
> 
> Don't you have an actual compatible string for the interrupt
> controller?  It seems odd to rely on the SoC name.

Compatible string is same for all those 32-bit Armada SoCs. So it cannot
be used to distinguish between XP and 385. That is why I used
of_machine_is_compatible.

> > +		armada_370_xp_soc_err_domain =
> > +			irq_domain_add_hierarchy(armada_370_xp_mpic_domain, 0,
> > +						 soc_err_irq_num_regs * 32,
> > +						 soc_err_node,
> > +						 &armada_370_xp_soc_err_irq_ops,
> > +						 NULL);
> > +		BUG_ON(!armada_370_xp_soc_err_domain);
> > +	}
> > +
> >  	/* Setup for the boot CPU */
> >  	armada_xp_mpic_perf_init();
> >  	armada_xp_mpic_smp_cpu_init();
> >  
> >  	armada_370_xp_msi_init(node, main_int_res.start);
> >  
> > +	/* Unmask summary SoC Error Interrupt */
> > +	if (soc_err_irq_num_regs > 0)
> > +		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
> > +
> 
> Magic value, duplicated this time?
> 
> >  	parent_irq = irq_of_parse_and_map(node, 0);
> >  	if (parent_irq <= 0) {
> >  		irq_set_default_host(armada_370_xp_mpic_domain);
> > -- 
> > 2.20.1
> > 
> > 
> 
> 	M.
> 
> -- 
> Without deviation from the norm, progress is not possible.
Marc Zyngier May 6, 2022, 6:47 p.m. UTC | #3
On Fri, 06 May 2022 19:30:51 +0100,
Pali Rohár <pali@kernel.org> wrote:
> 
> On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> > On Fri, 06 May 2022 14:40:25 +0100,
> > Pali Rohár <pali@kernel.org> wrote:
> > > 
> > > MPIC IRQ 4 is used as SoC Error Summary interrupt and provides access to
> > > another hierarchy of SoC Error interrupts. Implement a new IRQ chip and
> > > domain for accessing this IRQ hierarchy.
> > > 
> > > Signed-off-by: Pali Rohár <pali@kernel.org>
> > > ---
> > >  drivers/irqchip/irq-armada-370-xp.c | 213 +++++++++++++++++++++++++++-
> > >  1 file changed, 210 insertions(+), 3 deletions(-)
> > > 
> > > diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
> > > index ebd76ea1c69b..71578b65f5c8 100644
> > > --- a/drivers/irqchip/irq-armada-370-xp.c
> > > +++ b/drivers/irqchip/irq-armada-370-xp.c
> > > @@ -117,6 +117,8 @@
> > >  /* Registers relative to main_int_base */
> > >  #define ARMADA_370_XP_INT_CONTROL		(0x00)
> > >  #define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x04)
> > > +#define ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS	(0x20)
> > > +#define ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS	(0x24)
> > >  #define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
> > >  #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
> > >  #define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)
> > > @@ -130,6 +132,8 @@
> > >  #define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
> > >  #define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
> > >  #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
> > > +#define ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF	(0x50)
> > > +#define ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF	(0x54)
> > >  #define ARMADA_370_XP_INT_FABRIC_MASK_OFFS	(0x54)
> > >  #define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << cpu)
> > >  
> > > @@ -146,6 +150,8 @@
> > >  static void __iomem *per_cpu_int_base;
> > >  static void __iomem *main_int_base;
> > >  static struct irq_domain *armada_370_xp_mpic_domain;
> > > +static struct irq_domain *armada_370_xp_soc_err_domain;
> > > +static unsigned int soc_err_irq_num_regs;
> > >  static u32 doorbell_mask_reg;
> > >  static int parent_irq;
> > >  #ifdef CONFIG_PCI_MSI
> > > @@ -156,6 +162,8 @@ static DEFINE_MUTEX(msi_used_lock);
> > >  static phys_addr_t msi_doorbell_addr;
> > >  #endif
> > >  
> > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > > +
> > >  static inline bool is_percpu_irq(irq_hw_number_t irq)
> > >  {
> > >  	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> > >  		armada_370_xp_irq_unmask(data);
> > >  	}
> > >  
> > > +	/* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > > +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > > +		struct irq_data *data;
> > > +		int virq;
> > > +
> > > +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > > +		if (virq == 0)
> > > +			continue;
> > > +
> > > +		data = irq_get_irq_data(virq);
> > > +
> > > +		if (!irq_percpu_is_enabled(virq))
> > > +			continue;
> > > +
> > > +		armada_370_xp_soc_err_irq_unmask(data);
> > > +	}
> > 
> > So you do this loop and all these lookups, both here and in the resume
> > function (duplicated code!) just to be able to call the unmask
> > function?  This would be better served by two straight writes of the
> > mask register, which you'd conveniently save on suspend.
> > 
> > Yes, you have only duplicated the existing logic. But surely there is
> > something better to do.
> 
> Yes, I just used existing logic.
> 
> I'm not rewriting driver or doing big refactor of it, as this is not in
> the scope of the PCIe AER interrupt support.

Fair enough. By the same logic, I'm not taking any change to the
driver until it is put in a better shape. Your call.

> > > +
> > > +	/* Unmask summary SoC Error Interrupt */
> > > +	if (soc_err_irq_num_regs > 0)
> > > +		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
> > > +
> > >  	ipi_resume();
> > >  }
> > >  
> > > @@ -546,8 +575,8 @@ static struct irq_chip armada_370_xp_irq_chip = {
> > >  static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
> > >  				      unsigned int virq, irq_hw_number_t hw)
> > >  {
> > > -	/* IRQs 0 and 1 cannot be mapped, they are handled internally */
> > > -	if (hw <= 1)
> > > +	/* IRQs 0, 1 and 4 cannot be mapped, they are handled internally */
> > > +	if (hw <= 1 || hw == 4)
> > >  		return -EINVAL;
> > >  
> > >  	armada_370_xp_irq_mask(irq_get_irq_data(virq));
> > > @@ -577,6 +606,99 @@ static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
> > >  	.xlate = irq_domain_xlate_onecell,
> > >  };
> > >  
> > > +static DEFINE_RAW_SPINLOCK(armada_370_xp_soc_err_lock);
> > > +
> > > +static void armada_370_xp_soc_err_irq_mask(struct irq_data *d)
> > > +{
> > > +	irq_hw_number_t hwirq = irqd_to_hwirq(d);
> > > +	u32 reg, mask;
> > > +
> > > +	reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
> > > +			  : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
> > > +
> > > +	raw_spin_lock(&armada_370_xp_soc_err_lock);
> > > +	mask = readl(per_cpu_int_base + reg);
> > > +	mask &= ~BIT(hwirq % 32);
> > > +	writel(mask, per_cpu_int_base + reg);
> > > +	raw_spin_unlock(&armada_370_xp_soc_err_lock);
> > > +}
> > > +
> > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d)
> > > +{
> > > +	irq_hw_number_t hwirq = irqd_to_hwirq(d);
> > > +	u32 reg, mask;
> > > +
> > > +	reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
> > > +			  : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
> > > +
> > > +	raw_spin_lock(&armada_370_xp_soc_err_lock);
> > > +	mask = readl(per_cpu_int_base + reg);
> > > +	mask |= BIT(hwirq % 32);
> > > +	writel(mask, per_cpu_int_base + reg);
> > > +	raw_spin_unlock(&armada_370_xp_soc_err_lock);
> > > +}
> > > +
> > > +static int armada_370_xp_soc_err_irq_mask_on_cpu(void *par)
> > > +{
> > > +	struct irq_data *d = par;
> > > +	armada_370_xp_soc_err_irq_mask(d);
> > > +	return 0;
> > > +}
> > > +
> > > +static int armada_370_xp_soc_err_irq_unmask_on_cpu(void *par)
> > > +{
> > > +	struct irq_data *d = par;
> > > +	armada_370_xp_soc_err_irq_unmask(d);
> > > +	return 0;
> > > +}
> > > +
> > > +static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
> > > +					      const struct cpumask *mask,
> > > +					      bool force)
> > > +{
> > > +	unsigned int cpu;
> > > +
> > > +	cpus_read_lock();
> > > +
> > > +	/* First disable IRQ on all cores */
> > > +	for_each_online_cpu(cpu)
> > > +		smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
> > > +
> > > +	/* Select a single core from the affinity mask which is online */
> > > +	cpu = cpumask_any_and(mask, cpu_online_mask);
> > > +	smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
> > > +
> > > +	cpus_read_unlock();
> > > +
> > > +	irq_data_update_effective_affinity(d, cpumask_of(cpu));
> > > +
> > > +	return IRQ_SET_MASK_OK;
> > > +}
> > 
> > Aren't these per-CPU interrupts anyway? What does it mean to set their
> > affinity? /me rolls eyes...
> 
> Yes, they are per-CPU interrupts. But to mask or unmask particular
> interrupt for specific CPU is possible only from that CPU. CPU 0 just
> cannot move interrupt from CPU 0 to CPU 1. CPU 0 can only mask that
> interrupt and CPU 1 has to unmask it.

And that's no different from other per-CPU interrupts that have the
exact same requirements. NAK to this sort of hack.

	M.
Pali Rohár May 6, 2022, 6:55 p.m. UTC | #4
On Friday 06 May 2022 19:47:25 Marc Zyngier wrote:
> On Fri, 06 May 2022 19:30:51 +0100,
> Pali Rohár <pali@kernel.org> wrote:
> > 
> > On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> > > On Fri, 06 May 2022 14:40:25 +0100,
> > > Pali Rohár <pali@kernel.org> wrote:
> > > > 
> > > > MPIC IRQ 4 is used as SoC Error Summary interrupt and provides access to
> > > > another hierarchy of SoC Error interrupts. Implement a new IRQ chip and
> > > > domain for accessing this IRQ hierarchy.
> > > > 
> > > > Signed-off-by: Pali Rohár <pali@kernel.org>
> > > > ---
> > > >  drivers/irqchip/irq-armada-370-xp.c | 213 +++++++++++++++++++++++++++-
> > > >  1 file changed, 210 insertions(+), 3 deletions(-)
> > > > 
> > > > diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
> > > > index ebd76ea1c69b..71578b65f5c8 100644
> > > > --- a/drivers/irqchip/irq-armada-370-xp.c
> > > > +++ b/drivers/irqchip/irq-armada-370-xp.c
> > > > @@ -117,6 +117,8 @@
> > > >  /* Registers relative to main_int_base */
> > > >  #define ARMADA_370_XP_INT_CONTROL		(0x00)
> > > >  #define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x04)
> > > > +#define ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS	(0x20)
> > > > +#define ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS	(0x24)
> > > >  #define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
> > > >  #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
> > > >  #define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)
> > > > @@ -130,6 +132,8 @@
> > > >  #define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
> > > >  #define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
> > > >  #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
> > > > +#define ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF	(0x50)
> > > > +#define ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF	(0x54)
> > > >  #define ARMADA_370_XP_INT_FABRIC_MASK_OFFS	(0x54)
> > > >  #define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << cpu)
> > > >  
> > > > @@ -146,6 +150,8 @@
> > > >  static void __iomem *per_cpu_int_base;
> > > >  static void __iomem *main_int_base;
> > > >  static struct irq_domain *armada_370_xp_mpic_domain;
> > > > +static struct irq_domain *armada_370_xp_soc_err_domain;
> > > > +static unsigned int soc_err_irq_num_regs;
> > > >  static u32 doorbell_mask_reg;
> > > >  static int parent_irq;
> > > >  #ifdef CONFIG_PCI_MSI
> > > > @@ -156,6 +162,8 @@ static DEFINE_MUTEX(msi_used_lock);
> > > >  static phys_addr_t msi_doorbell_addr;
> > > >  #endif
> > > >  
> > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > > > +
> > > >  static inline bool is_percpu_irq(irq_hw_number_t irq)
> > > >  {
> > > >  	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > > > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> > > >  		armada_370_xp_irq_unmask(data);
> > > >  	}
> > > >  
> > > > +	/* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > > > +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > > > +		struct irq_data *data;
> > > > +		int virq;
> > > > +
> > > > +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > > > +		if (virq == 0)
> > > > +			continue;
> > > > +
> > > > +		data = irq_get_irq_data(virq);
> > > > +
> > > > +		if (!irq_percpu_is_enabled(virq))
> > > > +			continue;
> > > > +
> > > > +		armada_370_xp_soc_err_irq_unmask(data);
> > > > +	}
> > > 
> > > So you do this loop and all these lookups, both here and in the resume
> > > function (duplicated code!) just to be able to call the unmask
> > > function?  This would be better served by two straight writes of the
> > > mask register, which you'd conveniently save on suspend.
> > > 
> > > Yes, you have only duplicated the existing logic. But surely there is
> > > something better to do.
> > 
> > Yes, I just used existing logic.
> > 
> > I'm not rewriting driver or doing big refactor of it, as this is not in
> > the scope of the PCIe AER interrupt support.
> 
> Fair enough. By the same logic, I'm not taking any change to the
> driver until it is put in a better shape. Your call.

If you are the maintainer of this code, then it is expected that _you_
move the current code into the _better shape_ you wrote about and
expect, and then show us exactly, with examples, how new changes in
this driver should look.

> > > > +
> > > > +	/* Unmask summary SoC Error Interrupt */
> > > > +	if (soc_err_irq_num_regs > 0)
> > > > +		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
> > > > +
> > > >  	ipi_resume();
> > > >  }
> > > >  
> > > > @@ -546,8 +575,8 @@ static struct irq_chip armada_370_xp_irq_chip = {
> > > >  static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
> > > >  				      unsigned int virq, irq_hw_number_t hw)
> > > >  {
> > > > -	/* IRQs 0 and 1 cannot be mapped, they are handled internally */
> > > > -	if (hw <= 1)
> > > > +	/* IRQs 0, 1 and 4 cannot be mapped, they are handled internally */
> > > > +	if (hw <= 1 || hw == 4)
> > > >  		return -EINVAL;
> > > >  
> > > >  	armada_370_xp_irq_mask(irq_get_irq_data(virq));
> > > > @@ -577,6 +606,99 @@ static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
> > > >  	.xlate = irq_domain_xlate_onecell,
> > > >  };
> > > >  
> > > > +static DEFINE_RAW_SPINLOCK(armada_370_xp_soc_err_lock);
> > > > +
> > > > +static void armada_370_xp_soc_err_irq_mask(struct irq_data *d)
> > > > +{
> > > > +	irq_hw_number_t hwirq = irqd_to_hwirq(d);
> > > > +	u32 reg, mask;
> > > > +
> > > > +	reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
> > > > +			  : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
> > > > +
> > > > +	raw_spin_lock(&armada_370_xp_soc_err_lock);
> > > > +	mask = readl(per_cpu_int_base + reg);
> > > > +	mask &= ~BIT(hwirq % 32);
> > > > +	writel(mask, per_cpu_int_base + reg);
> > > > +	raw_spin_unlock(&armada_370_xp_soc_err_lock);
> > > > +}
> > > > +
> > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d)
> > > > +{
> > > > +	irq_hw_number_t hwirq = irqd_to_hwirq(d);
> > > > +	u32 reg, mask;
> > > > +
> > > > +	reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
> > > > +			  : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
> > > > +
> > > > +	raw_spin_lock(&armada_370_xp_soc_err_lock);
> > > > +	mask = readl(per_cpu_int_base + reg);
> > > > +	mask |= BIT(hwirq % 32);
> > > > +	writel(mask, per_cpu_int_base + reg);
> > > > +	raw_spin_unlock(&armada_370_xp_soc_err_lock);
> > > > +}
> > > > +
> > > > +static int armada_370_xp_soc_err_irq_mask_on_cpu(void *par)
> > > > +{
> > > > +	struct irq_data *d = par;
> > > > +	armada_370_xp_soc_err_irq_mask(d);
> > > > +	return 0;
> > > > +}
> > > > +
> > > > +static int armada_370_xp_soc_err_irq_unmask_on_cpu(void *par)
> > > > +{
> > > > +	struct irq_data *d = par;
> > > > +	armada_370_xp_soc_err_irq_unmask(d);
> > > > +	return 0;
> > > > +}
> > > > +
> > > > +static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
> > > > +					      const struct cpumask *mask,
> > > > +					      bool force)
> > > > +{
> > > > +	unsigned int cpu;
> > > > +
> > > > +	cpus_read_lock();
> > > > +
> > > > +	/* First disable IRQ on all cores */
> > > > +	for_each_online_cpu(cpu)
> > > > +		smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
> > > > +
> > > > +	/* Select a single core from the affinity mask which is online */
> > > > +	cpu = cpumask_any_and(mask, cpu_online_mask);
> > > > +	smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
> > > > +
> > > > +	cpus_read_unlock();
> > > > +
> > > > +	irq_data_update_effective_affinity(d, cpumask_of(cpu));
> > > > +
> > > > +	return IRQ_SET_MASK_OK;
> > > > +}
> > > 
> > > Aren't these per-CPU interrupts anyway? What does it mean to set their
> > > affinity? /me rolls eyes...
> > 
> > Yes, they are per-CPU interrupts. But to mask or unmask particular
> > interrupt for specific CPU is possible only from that CPU. CPU 0 just
> > cannot move interrupt from CPU 0 to CPU 1. CPU 0 can only mask that
> > interrupt and CPU 1 has to unmask it.
> 
> And that's no different from other per-CPU interrupts that have the
> exact same requirements. NAK to this sort of hack.

You forgot to mention in your previous email how to do it, right? So we
are waiting...
Marc Zyngier May 7, 2022, 9:01 a.m. UTC | #5
On Fri, 06 May 2022 19:55:46 +0100,
Pali Rohár <pali@kernel.org> wrote:
> 
> On Friday 06 May 2022 19:47:25 Marc Zyngier wrote:
> > On Fri, 06 May 2022 19:30:51 +0100,
> > Pali Rohár <pali@kernel.org> wrote:
> > > 
> > > On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> > > > On Fri, 06 May 2022 14:40:25 +0100,
> > > > Pali Rohár <pali@kernel.org> wrote:
> > > > > 
> > > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > > > > +
> > > > >  static inline bool is_percpu_irq(irq_hw_number_t irq)
> > > > >  {
> > > > >  	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > > > > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> > > > >  		armada_370_xp_irq_unmask(data);
> > > > >  	}
> > > > >  
> > > > > +	/* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > > > > +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > > > > +		struct irq_data *data;
> > > > > +		int virq;
> > > > > +
> > > > > +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > > > > +		if (virq == 0)
> > > > > +			continue;
> > > > > +
> > > > > +		data = irq_get_irq_data(virq);
> > > > > +
> > > > > +		if (!irq_percpu_is_enabled(virq))
> > > > > +			continue;
> > > > > +
> > > > > +		armada_370_xp_soc_err_irq_unmask(data);
> > > > > +	}
> > > > 
> > > > So you do this loop and all these lookups, both here and in the resume
> > > > function (duplicated code!) just to be able to call the unmask
> > > > function?  This would be better served by two straight writes of the
> > > > mask register, which you'd conveniently save on suspend.
> > > > 
> > > > Yes, you have only duplicated the existing logic. But surely there is
> > > > something better to do.
> > > 
> > > Yes, I just used existing logic.
> > > 
> > > I'm not rewriting driver or doing big refactor of it, as this is not in
> > > the scope of the PCIe AER interrupt support.
> > 
> > Fair enough. By the same logic, I'm not taking any change to the
> > driver until it is put in a better shape. Your call.
> 
> If you are maintainer of this code then it is expected from _you_ to
> move the current code into _better shape_ as you wrote and expect. And
> then show us exactly, how new changes in this driver should look like,
> in examples.

Sorry, but that's not how this works. You are the one willing to
change a sub-par piece of code, you get to make it better. You
obviously have the means (the HW) and the incentive (these patches).
But you don't get to make something even more unmaintainable because
you're unwilling to do some extra work.

If you're unhappy with my position, that's fine. I suggest you take it
with Thomas, and maybe even Linus. As I suggested before, you can also
post a patch removing me as the irqchip maintainer. I'm sure that will
spark an interesting discussion.

> > > > > +static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
> > > > > +					      const struct cpumask *mask,
> > > > > +					      bool force)
> > > > > +{
> > > > > +	unsigned int cpu;
> > > > > +
> > > > > +	cpus_read_lock();
> > > > > +
> > > > > +	/* First disable IRQ on all cores */
> > > > > +	for_each_online_cpu(cpu)
> > > > > +		smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
> > > > > +
> > > > > +	/* Select a single core from the affinity mask which is online */
> > > > > +	cpu = cpumask_any_and(mask, cpu_online_mask);
> > > > > +	smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
> > > > > +
> > > > > +	cpus_read_unlock();
> > > > > +
> > > > > +	irq_data_update_effective_affinity(d, cpumask_of(cpu));
> > > > > +
> > > > > +	return IRQ_SET_MASK_OK;
> > > > > +}
> > > > 
> > > > Aren't these per-CPU interrupts anyway? What does it mean to set their
> > > > affinity? /me rolls eyes...
> > > 
> > > Yes, they are per-CPU interrupts. But to mask or unmask particular
> > > interrupt for specific CPU is possible only from that CPU. CPU 0 just
> > > cannot move interrupt from CPU 0 to CPU 1. CPU 0 can only mask that
> > > interrupt and CPU 1 has to unmask it.
> > 
> > And that's no different from other per-CPU interrupts that have the
> > exact same requirements. NAK to this sort of hack.
> 
> You forgot to mention in your previous email how to do it, right? So we
> are waiting...

I didn't forget. I explained that it should be handled just like any
other per-CPU interrupt. There are plenty of examples of how to do that
in the tree (timers, for example), and if you had even looked at them,
you'd have seen that your approach most probably results in an
arbitrary pointer dereference on anything but CPU0 because the
requesting driver knows nothing about per-CPU interrupts.

But you're obviously trying to make a very different point here. I'll
let you play that game for as long as you want, no skin off my nose.
Maybe in the future, you'll be more interested in actively
collaborating on the kernel code instead of throwing your toys out of
the pram.

Thanks,

	M.
Pali Rohár May 7, 2022, 9:20 a.m. UTC | #6
On Saturday 07 May 2022 10:01:52 Marc Zyngier wrote:
> On Fri, 06 May 2022 19:55:46 +0100,
> Pali Rohár <pali@kernel.org> wrote:
> > 
> > On Friday 06 May 2022 19:47:25 Marc Zyngier wrote:
> > > On Fri, 06 May 2022 19:30:51 +0100,
> > > Pali Rohár <pali@kernel.org> wrote:
> > > > 
> > > > On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> > > > > On Fri, 06 May 2022 14:40:25 +0100,
> > > > > Pali Rohár <pali@kernel.org> wrote:
> > > > > > 
> > > > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > > > > > +
> > > > > >  static inline bool is_percpu_irq(irq_hw_number_t irq)
> > > > > >  {
> > > > > >  	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > > > > > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> > > > > >  		armada_370_xp_irq_unmask(data);
> > > > > >  	}
> > > > > >  
> > > > > > +	/* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > > > > > +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > > > > > +		struct irq_data *data;
> > > > > > +		int virq;
> > > > > > +
> > > > > > +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > > > > > +		if (virq == 0)
> > > > > > +			continue;
> > > > > > +
> > > > > > +		data = irq_get_irq_data(virq);
> > > > > > +
> > > > > > +		if (!irq_percpu_is_enabled(virq))
> > > > > > +			continue;
> > > > > > +
> > > > > > +		armada_370_xp_soc_err_irq_unmask(data);
> > > > > > +	}
> > > > > 
> > > > > So you do this loop and all these lookups, both here and in the resume
> > > > > function (duplicated code!) just to be able to call the unmask
> > > > > function?  This would be better served by two straight writes of the
> > > > > mask register, which you'd conveniently save on suspend.
> > > > > 
> > > > > Yes, you have only duplicated the existing logic. But surely there is
> > > > > something better to do.
> > > > 
> > > > Yes, I just used existing logic.
> > > > 
> > > > I'm not rewriting driver or doing big refactor of it, as this is not in
> > > > the scope of the PCIe AER interrupt support.
> > > 
> > > Fair enough. By the same logic, I'm not taking any change to the
> > > driver until it is put in a better shape. Your call.
> > 
> > If you are maintainer of this code then it is expected from _you_ to
> > move the current code into _better shape_ as you wrote and expect. And
> > then show us exactly, how new changes in this driver should look like,
> > in examples.
> 
> Sorry, but that's not how this works. You are the one willing to
> change a sub-par piece of code, you get to make it better. You
> obviously have the means (the HW) and the incentive (these patches).
> But you don't get to make something even more unmaintainable because
> you're unwilling to do some extra work.
> 
> If you're unhappy with my position, that's fine. I suggest you take it
> with Thomas, and maybe even Linus. As I suggested before, you can also
> post a patch removing me as the irqchip maintainer. I'm sure that will
> spark an interesting discussion.

You have already suggested it in email [1], but apparently you are _not_
the maintainer of the mvebu PCI controller. get_maintainer.pl for the
part you talked about in [1] says:

$ ./scripts/get_maintainer.pl -f drivers/pci/controller/pci-aardvark.c
Thomas Petazzoni <thomas.petazzoni@bootlin.com> (maintainer:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
"Pali Rohár" <pali@kernel.org> (maintainer:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> (supporter:PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS)
Rob Herring <robh@kernel.org> (reviewer:PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS)
"Krzysztof Wilczyński" <kw@linux.com> (reviewer:PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS)
Bjorn Helgaas <bhelgaas@google.com> (supporter:PCI SUBSYSTEM)
linux-pci@vger.kernel.org (open list:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
linux-arm-kernel@lists.infradead.org (moderated list:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
linux-kernel@vger.kernel.org (open list)

So I do not have to remove anything, you are _not_ on that list.
On the other hand, Thomas Petazzoni is on this list...

> > > > > > +static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
> > > > > > +					      const struct cpumask *mask,
> > > > > > +					      bool force)
> > > > > > +{
> > > > > > +	unsigned int cpu;
> > > > > > +
> > > > > > +	cpus_read_lock();
> > > > > > +
> > > > > > +	/* First disable IRQ on all cores */
> > > > > > +	for_each_online_cpu(cpu)
> > > > > > +		smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
> > > > > > +
> > > > > > +	/* Select a single core from the affinity mask which is online */
> > > > > > +	cpu = cpumask_any_and(mask, cpu_online_mask);
> > > > > > +	smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
> > > > > > +
> > > > > > +	cpus_read_unlock();
> > > > > > +
> > > > > > +	irq_data_update_effective_affinity(d, cpumask_of(cpu));
> > > > > > +
> > > > > > +	return IRQ_SET_MASK_OK;
> > > > > > +}
> > > > > 
> > > > > Aren't these per-CPU interrupts anyway? What does it mean to set their
> > > > > affinity? /me rolls eyes...
> > > > 
> > > > Yes, they are per-CPU interrupts. But to mask or unmask particular
> > > > interrupt for specific CPU is possible only from that CPU. CPU 0 just
> > > > cannot move interrupt from CPU 0 to CPU 1. CPU 0 can only mask that
> > > > interrupt and CPU 1 has to unmask it.
> > > 
> > > And that's no different from other per-CPU interrupts that have the
> > > exact same requirements. NAK to this sort of hack.
> > 
> > You forgot to mention in your previous email how to do it, right? So we
> > are waiting...
> 
> I didn't forget. I explained that it should be handled just like any
> other per-CPU interrupt. There is plenty of example of how to do that
> in the tree (timers, for example), and if you had even looked at it,
> you'd have seen that your approach most probably results in an
> arbitrary pointer dereference on anything but CPU0 because the
> requesting driver knows nothing about per-CPU interrupts.
> 
> But you're obviously trying to make a very different point here. I'll
> let you play that game for as long as you want, no skin off my nose.
> Maybe in the future, you'll be more interested in actively
> collaborating on the kernel code instead of throwing your toys out of
> the pram.
> 
> Thanks,

The only _toy_ here is your broken mvebu board, which your ego was unable
to fix, so you put it into the recycling pile [2], and since then, for
months, you have been trying to reject every change or improvement in the
mvebu drivers and trying to find a way to remove all mvebu code — as if,
because you were not able to fix your toy, you would break it for all
other people too. You have already expressed this, but I'm not going to
search through more emails to find those statements of yours.

Sorry, I'm stopping here. This is just proof that you are not
qualified to review mvebu code.

[1] - https://lore.kernel.org/linux-pci/87mtk3tzum.wl-maz@kernel.org/
[2] - https://lore.kernel.org/linux-pci/87pmx1zjjt.wl-maz@kernel.org/
Marc Zyngier May 7, 2022, 9:42 a.m. UTC | #7
On Sat, 07 May 2022 10:20:54 +0100,
Pali Rohár <pali@kernel.org> wrote:
> 
> On Saturday 07 May 2022 10:01:52 Marc Zyngier wrote:
> > On Fri, 06 May 2022 19:55:46 +0100,
> > Pali Rohár <pali@kernel.org> wrote:
> > > 
> > > On Friday 06 May 2022 19:47:25 Marc Zyngier wrote:
> > > > On Fri, 06 May 2022 19:30:51 +0100,
> > > > Pali Rohár <pali@kernel.org> wrote:
> > > > > 
> > > > > On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> > > > > > On Fri, 06 May 2022 14:40:25 +0100,
> > > > > > Pali Rohár <pali@kernel.org> wrote:
> > > > > > > 
> > > > > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > > > > > > +
> > > > > > >  static inline bool is_percpu_irq(irq_hw_number_t irq)
> > > > > > >  {
> > > > > > >  	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > > > > > > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> > > > > > >  		armada_370_xp_irq_unmask(data);
> > > > > > >  	}
> > > > > > >  
> > > > > > > +	/* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > > > > > > +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > > > > > > +		struct irq_data *data;
> > > > > > > +		int virq;
> > > > > > > +
> > > > > > > +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > > > > > > +		if (virq == 0)
> > > > > > > +			continue;
> > > > > > > +
> > > > > > > +		data = irq_get_irq_data(virq);
> > > > > > > +
> > > > > > > +		if (!irq_percpu_is_enabled(virq))
> > > > > > > +			continue;
> > > > > > > +
> > > > > > > +		armada_370_xp_soc_err_irq_unmask(data);
> > > > > > > +	}
> > > > > > 
> > > > > > So you do this loop and all these lookups, both here and in the resume
> > > > > > function (duplicated code!) just to be able to call the unmask
> > > > > > function?  This would be better served by two straight writes of the
> > > > > > mask register, which you'd conveniently save on suspend.
> > > > > > 
> > > > > > Yes, you have only duplicated the existing logic. But surely there is
> > > > > > something better to do.
> > > > > 
> > > > > Yes, I just used existing logic.
> > > > > 
> > > > > I'm not rewriting driver or doing big refactor of it, as this is not in
> > > > > the scope of the PCIe AER interrupt support.
> > > > 
> > > > Fair enough. By the same logic, I'm not taking any change to the
> > > > driver until it is put in a better shape. Your call.
> > > 
> > > If you are maintainer of this code then it is expected from _you_ to
> > > move the current code into _better shape_ as you wrote and expect. And
> > > then show us exactly, how new changes in this driver should look like,
> > > in examples.
> > 
> > Sorry, but that's not how this works. You are the one willing to
> > change a sub-par piece of code, you get to make it better. You
> > obviously have the means (the HW) and the incentive (these patches).
> > But you don't get to make something even more unmaintainable because
> > you're unwilling to do some extra work.
> > 
> > If you're unhappy with my position, that's fine. I suggest you take it
> > with Thomas, and maybe even Linus. As I suggested before, you can also
> > post a patch removing me as the irqchip maintainer. I'm sure that will
> > spark an interesting discussion.
> 
> You have already suggested it in email [1] but apparently you are _not_
> maintainer of mvebu pci controller. get_maintainer.pl for part about
> which you have talked in [1] says:
> 
> $ ./scripts/get_maintainer.pl -f drivers/pci/controller/pci-aardvark.c

Remind me which file this patch is touching?

> The only _toy_ here is your broken mvebu board, which your ego was unable
> to fix, so you put it into the recycling pile [2], and since then, for
> months, you have been trying to reject every change or improvement in the
> mvebu drivers and trying to find a way to remove all mvebu code — as if,
> because you were not able to fix your toy, you would break it for all
> other people too. You have already expressed this, but I'm not going to
> search through more emails to find those statements of yours.

At this stage, this is pure paranoia. Do you think I am so emotionally
attached to HW purity that I would plot the annihilation of some ugly
platform?

> Sorry, I'm stopping here. This is just a prove that you are not
> qualified in reviewing mvebu code.

Happy not to have to review this code. Just stop Cc'ing me on your
patches, and don't expect me to merge any IRQ related patches coming
from you.

	M.
Pali Rohár May 7, 2022, 11:15 a.m. UTC | #8
On Saturday 07 May 2022 10:42:49 Marc Zyngier wrote:
> On Sat, 07 May 2022 10:20:54 +0100,
> Pali Rohár <pali@kernel.org> wrote:
> > 
> > On Saturday 07 May 2022 10:01:52 Marc Zyngier wrote:
> > > On Fri, 06 May 2022 19:55:46 +0100,
> > > Pali Rohár <pali@kernel.org> wrote:
> > > > 
> > > > On Friday 06 May 2022 19:47:25 Marc Zyngier wrote:
> > > > > On Fri, 06 May 2022 19:30:51 +0100,
> > > > > Pali Rohár <pali@kernel.org> wrote:
> > > > > > 
> > > > > > On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> > > > > > > On Fri, 06 May 2022 14:40:25 +0100,
> > > > > > > Pali Rohár <pali@kernel.org> wrote:
> > > > > > > > 
> > > > > > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > > > > > > > +
> > > > > > > >  static inline bool is_percpu_irq(irq_hw_number_t irq)
> > > > > > > >  {
> > > > > > > >  	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > > > > > > > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> > > > > > > >  		armada_370_xp_irq_unmask(data);
> > > > > > > >  	}
> > > > > > > >  
> > > > > > > > +	/* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > > > > > > > +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > > > > > > > +		struct irq_data *data;
> > > > > > > > +		int virq;
> > > > > > > > +
> > > > > > > > +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > > > > > > > +		if (virq == 0)
> > > > > > > > +			continue;
> > > > > > > > +
> > > > > > > > +		data = irq_get_irq_data(virq);
> > > > > > > > +
> > > > > > > > +		if (!irq_percpu_is_enabled(virq))
> > > > > > > > +			continue;
> > > > > > > > +
> > > > > > > > +		armada_370_xp_soc_err_irq_unmask(data);
> > > > > > > > +	}
> > > > > > > 
> > > > > > > So you do this loop and all these lookups, both here and in the resume
> > > > > > > function (duplicated code!) just to be able to call the unmask
> > > > > > > function?  This would be better served by two straight writes of the
> > > > > > > mask register, which you'd conveniently save on suspend.
> > > > > > > 
> > > > > > > Yes, you have only duplicated the existing logic. But surely there is
> > > > > > > something better to do.
> > > > > > 
> > > > > > Yes, I just used existing logic.
> > > > > > 
> > > > > > I'm not rewriting driver or doing big refactor of it, as this is not in
> > > > > > the scope of the PCIe AER interrupt support.
> > > > > 
> > > > > Fair enough. By the same logic, I'm not taking any change to the
> > > > > driver until it is put in a better shape. Your call.
> > > > 
> > > > If you are maintainer of this code then it is expected from _you_ to
> > > > move the current code into _better shape_ as you wrote and expect. And
> > > > then show us exactly, how new changes in this driver should look like,
> > > > in examples.
> > > 
> > > Sorry, but that's not how this works. You are the one willing to
> > > change a sub-par piece of code, you get to make it better. You
> > > obviously have the means (the HW) and the incentive (these patches).
> > > But you don't get to make something even more unmaintainable because
> > > you're unwilling to do some extra work.
> > > 
> > > If you're unhappy with my position, that's fine. I suggest you take it
> > > with Thomas, and maybe even Linus. As I suggested before, you can also
> > > post a patch removing me as the irqchip maintainer. I'm sure that will
> > > spark an interesting discussion.
> > 
> > You have already suggested it in email [1] but apparently you are _not_
> > maintainer of mvebu pci controller. get_maintainer.pl for part about
> > which you have talked in [1] says:
> > 
> > $ ./scripts/get_maintainer.pl -f drivers/pci/controller/pci-aardvark.c
> 
> Remind me which file this patch is touching?

So read again what you presented in the past, in the email I referenced. I sent
a link to that email of yours in my previous message.

Or are you absolutely incompetent, so that I should also have quoted the
previous email to which you wrote your reaction?

> > The only _toy_ here is your broken mvebu board which your ego was unable
> > to fix, and you have put it into recycling pile [2] and since than for
> > months you are trying to reject every change or improvement in mvebu
> > drivers and trying to find out a way how to remove all mvebu code, like
> > if you were not able to fix your toy, then broke it also to all other
> > people. You have already expressed this, but I'm not going to search
> > emails more and find these your statements.
> 
> At this stage, this is pure paranoia.

No, it is just the plain reality of your behavior — of what you are doing and
what you are saying.

> Do you think I am so emotionally
> attached to HW purity that I would plot the annihilation of some ugly
> platform?

I do not think so. You personally presented this statement, and I'm just
reminding you of it, as you asked.

> > Sorry, I'm stopping here. This is just a prove that you are not
> > qualified in reviewing mvebu code.
> 
> Happy not to have to review this code.

You have been doing it for more than a year. Are you happy with it? Apparently
so.

> Just stop Cc'ing me on your patches

As there has been no progress from your side, nor any change in your behavior
for more than a year, I'm accepting this offer.

This is my last email to you and I'm stopping right now to read your
emails.

I'm not obligated to remind you of everything you ask about just because you
are too lazy to look up what you wrote in the past.

> and don't expect me to merge any IRQ related patches coming
> from you.
> 
> 	M.
> 
> -- 
> Without deviation from the norm, progress is not possible.
Thomas Gleixner May 9, 2022, 8:51 a.m. UTC | #9
Pali,

On Fri, May 06 2022 at 20:55, Pali Rohár wrote:
> On Friday 06 May 2022 19:47:25 Marc Zyngier wrote:
>> > I'm not rewriting driver or doing big refactor of it, as this is not in
>> > the scope of the PCIe AER interrupt support.
>>
>> Fair enough. By the same logic, I'm not taking any change to the
>> driver until it is put in a better shape. Your call.
>
> If you are maintainer of this code then it is expected from _you_ to
> move the current code into _better shape_ as you wrote and expect. And
> then show us exactly, how new changes in this driver should look like,
> in examples.

this is not how kernel development works.

Maintainers are not the servants who mop up the mess which random people
dump into the tree. They are gatekeepers and one of their duties is to
prevent that mess is created or existing mess is proliferated.

You are asking the maintainer to take your changes, deal with the
fallout and maintain them for a long time free of charge. So it's a very
reasonable request from a maintainer to ask for refactoring of existing
code before adding new functionality to it.

With such a request the refactoring becomes scope of your work, whether
you and your manager like it or not. If you don't want to do that extra
work, then don't expect maintainers to care about your fancy new
features.

Marc gave you very reasonable and concise directions on how the code should
be reworked. He spent a lot of time explaining it to you. Again, free of
charge. Now you expect him to do your homework free of charge, so you
can get your feature merged? Nice try.

Thanks,

        Thomas
Rob Herring May 9, 2022, 11:12 p.m. UTC | #10
On Sat, May 07, 2022 at 11:20:54AM +0200, Pali Rohár wrote:
> On Saturday 07 May 2022 10:01:52 Marc Zyngier wrote:
> > On Fri, 06 May 2022 19:55:46 +0100,
> > Pali Rohár <pali@kernel.org> wrote:
> > > 
> > > On Friday 06 May 2022 19:47:25 Marc Zyngier wrote:
> > > > On Fri, 06 May 2022 19:30:51 +0100,
> > > > Pali Rohár <pali@kernel.org> wrote:
> > > > > 
> > > > > On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> > > > > > On Fri, 06 May 2022 14:40:25 +0100,
> > > > > > Pali Rohár <pali@kernel.org> wrote:
> > > > > > > 
> > > > > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > > > > > > +
> > > > > > >  static inline bool is_percpu_irq(irq_hw_number_t irq)
> > > > > > >  {
> > > > > > >  	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > > > > > > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> > > > > > >  		armada_370_xp_irq_unmask(data);
> > > > > > >  	}
> > > > > > >  
> > > > > > > +	/* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > > > > > > +	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > > > > > > +		struct irq_data *data;
> > > > > > > +		int virq;
> > > > > > > +
> > > > > > > +		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > > > > > > +		if (virq == 0)
> > > > > > > +			continue;
> > > > > > > +
> > > > > > > +		data = irq_get_irq_data(virq);
> > > > > > > +
> > > > > > > +		if (!irq_percpu_is_enabled(virq))
> > > > > > > +			continue;
> > > > > > > +
> > > > > > > +		armada_370_xp_soc_err_irq_unmask(data);
> > > > > > > +	}
> > > > > > 
> > > > > > So you do this loop and all these lookups, both here and in the resume
> > > > > > function (duplicated code!) just to be able to call the unmask
> > > > > > function?  This would be better served by two straight writes of the
> > > > > > mask register, which you'd conveniently save on suspend.
> > > > > > 
> > > > > > Yes, you have only duplicated the existing logic. But surely there is
> > > > > > something better to do.
> > > > > 
> > > > > Yes, I just used existing logic.
> > > > > 
> > > > > I'm not rewriting driver or doing big refactor of it, as this is not in
> > > > > the scope of the PCIe AER interrupt support.
> > > > 
> > > > Fair enough. By the same logic, I'm not taking any change to the
> > > > driver until it is put in a better shape. Your call.
> > > 
> > > If you are maintainer of this code then it is expected from _you_ to
> > > move the current code into _better shape_ as you wrote and expect. And
> > > then show us exactly, how new changes in this driver should look like,
> > > in examples.
> > 
> > Sorry, but that's not how this works. You are the one willing to
> > change a sub-par piece of code, you get to make it better. You
> > obviously have the means (the HW) and the incentive (these patches).
> > But you don't get to make something even more unmaintainable because
> > you're unwilling to do some extra work.
> > 
> > If you're unhappy with my position, that's fine. I suggest you take it
> > with Thomas, and maybe even Linus. As I suggested before, you can also
> > post a patch removing me as the irqchip maintainer. I'm sure that will
> > spark an interesting discussion.
> 
> You have already suggested it in email [1] but apparently you are _not_
> maintainer of mvebu pci controller. get_maintainer.pl for part about
> which you have talked in [1] says:
> 
> $ ./scripts/get_maintainer.pl -f drivers/pci/controller/pci-aardvark.c
> Thomas Petazzoni <thomas.petazzoni@bootlin.com> (maintainer:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
> "Pali Rohár" <pali@kernel.org> (maintainer:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
> Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> (supporter:PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS)
> Rob Herring <robh@kernel.org> (reviewer:PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS)

Please just refactor the code in question. You've wasted more time 
arguing about it than it would take to do. Having done a bit of PCI 
refactoring, I can tell you hardly anyone else does. I can barely even 
get comments/acks on refactoring until I break platforms (which happens 
a lot). Maintainers have no other leverage other than what Marc pointed 
out.

In any case, I think there's no way the PCI maintainers will take this 
as-is at this point.

Rob
diff mbox series

Patch

diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index ebd76ea1c69b..71578b65f5c8 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -117,6 +117,8 @@ 
 /* Registers relative to main_int_base */
 #define ARMADA_370_XP_INT_CONTROL		(0x00)
 #define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x04)
+#define ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS	(0x20)
+#define ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS	(0x24)
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
 #define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)
@@ -130,6 +132,8 @@ 
 #define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
 #define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
 #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
+#define ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF	(0x50)
+#define ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF	(0x54)
 #define ARMADA_370_XP_INT_FABRIC_MASK_OFFS	(0x54)
 #define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << cpu)
 
@@ -146,6 +150,8 @@ 
 static void __iomem *per_cpu_int_base;
 static void __iomem *main_int_base;
 static struct irq_domain *armada_370_xp_mpic_domain;
+static struct irq_domain *armada_370_xp_soc_err_domain;
+static unsigned int soc_err_irq_num_regs;
 static u32 doorbell_mask_reg;
 static int parent_irq;
 #ifdef CONFIG_PCI_MSI
@@ -156,6 +162,8 @@  static DEFINE_MUTEX(msi_used_lock);
 static phys_addr_t msi_doorbell_addr;
 #endif
 
+static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
+
 static inline bool is_percpu_irq(irq_hw_number_t irq)
 {
 	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
@@ -509,6 +517,27 @@  static void armada_xp_mpic_reenable_percpu(void)
 		armada_370_xp_irq_unmask(data);
 	}
 
+	/* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
+	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
+		struct irq_data *data;
+		int virq;
+
+		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
+		if (virq == 0)
+			continue;
+
+		data = irq_get_irq_data(virq);
+
+		if (!irq_percpu_is_enabled(virq))
+			continue;
+
+		armada_370_xp_soc_err_irq_unmask(data);
+	}
+
+	/* Unmask summary SoC Error Interrupt */
+	if (soc_err_irq_num_regs > 0)
+		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
 	ipi_resume();
 }
 
@@ -546,8 +575,8 @@  static struct irq_chip armada_370_xp_irq_chip = {
 static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 				      unsigned int virq, irq_hw_number_t hw)
 {
-	/* IRQs 0 and 1 cannot be mapped, they are handled internally */
-	if (hw <= 1)
+	/* IRQs 0, 1 and 4 cannot be mapped, they are handled internally */
+	if (hw <= 1 || hw == 4)
 		return -EINVAL;
 
 	armada_370_xp_irq_mask(irq_get_irq_data(virq));
@@ -577,6 +606,99 @@  static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
 	.xlate = irq_domain_xlate_onecell,
 };
 
+static DEFINE_RAW_SPINLOCK(armada_370_xp_soc_err_lock);
+
+static void armada_370_xp_soc_err_irq_mask(struct irq_data *d)
+{
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	u32 reg, mask;
+
+	reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
+			  : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
+
+	raw_spin_lock(&armada_370_xp_soc_err_lock);
+	mask = readl(per_cpu_int_base + reg);
+	mask &= ~BIT(hwirq % 32);
+	writel(mask, per_cpu_int_base + reg);
+	raw_spin_unlock(&armada_370_xp_soc_err_lock);
+}
+
+static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d)
+{
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	u32 reg, mask;
+
+	reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
+			  : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
+
+	raw_spin_lock(&armada_370_xp_soc_err_lock);
+	mask = readl(per_cpu_int_base + reg);
+	mask |= BIT(hwirq % 32);
+	writel(mask, per_cpu_int_base + reg);
+	raw_spin_unlock(&armada_370_xp_soc_err_lock);
+}
+
+static int armada_370_xp_soc_err_irq_mask_on_cpu(void *par)
+{
+	struct irq_data *d = par;
+	armada_370_xp_soc_err_irq_mask(d);
+	return 0;
+}
+
+static int armada_370_xp_soc_err_irq_unmask_on_cpu(void *par)
+{
+	struct irq_data *d = par;
+	armada_370_xp_soc_err_irq_unmask(d);
+	return 0;
+}
+
+static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
+					      const struct cpumask *mask,
+					      bool force)
+{
+	unsigned int cpu;
+
+	cpus_read_lock();
+
+	/* First disable IRQ on all cores */
+	for_each_online_cpu(cpu)
+		smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
+
+	/* Select a single core from the affinity mask which is online */
+	cpu = cpumask_any_and(mask, cpu_online_mask);
+	smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
+
+	cpus_read_unlock();
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip armada_370_xp_soc_err_irq_chip = {
+	.name = "MPIC SOC",
+	.irq_mask = armada_370_xp_soc_err_irq_mask,
+	.irq_unmask = armada_370_xp_soc_err_irq_unmask,
+	.irq_set_affinity = armada_xp_soc_err_irq_set_affinity,
+};
+
+static int armada_370_xp_soc_err_irq_map(struct irq_domain *h,
+					 unsigned int virq, irq_hw_number_t hw)
+{
+	armada_370_xp_soc_err_irq_mask(irq_get_irq_data(virq));
+	irq_set_status_flags(virq, IRQ_LEVEL);
+	irq_set_percpu_devid(virq);
+	irq_set_chip_and_handler(virq, &armada_370_xp_soc_err_irq_chip,
+				 handle_percpu_devid_irq);
+	irq_set_probe(virq);
+	return 0;
+}
+
+static const struct irq_domain_ops armada_370_xp_soc_err_irq_ops = {
+	.map = armada_370_xp_soc_err_irq_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
 #ifdef CONFIG_PCI_MSI
 static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
 {
@@ -605,6 +727,32 @@  static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
 static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
 #endif
 
+static void armada_370_xp_handle_soc_err_irq(void)
+{
+	unsigned long status, bit;
+	u32 mask, cause;
+
+	if (soc_err_irq_num_regs < 1)
+		return;
+
+	mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF);
+	cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS);
+	status = cause & mask;
+
+	for_each_set_bit(bit, &status, 32)
+		generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit);
+
+	if (soc_err_irq_num_regs < 2)
+		return;
+
+	mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF);
+	cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS);
+	status = cause & mask;
+
+	for_each_set_bit(bit, &status, 32)
+		generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit + 32);
+}
+
 static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -630,6 +778,11 @@  static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
 			continue;
 		}
 
+		if (irqn == 4) {
+			armada_370_xp_handle_soc_err_irq();
+			continue;
+		}
+
 		generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
 	}
 
@@ -649,7 +802,7 @@  armada_370_xp_handle_irq(struct pt_regs *regs)
 		if (irqnr > 1022)
 			break;
 
-		if (irqnr > 1) {
+		if (irqnr > 1 && irqnr != 4) {
 			generic_handle_domain_irq(armada_370_xp_mpic_domain,
 						  irqnr);
 			continue;
@@ -659,6 +812,10 @@  armada_370_xp_handle_irq(struct pt_regs *regs)
 		if (irqnr == 1)
 			armada_370_xp_handle_msi_irq(regs, false);
 
+		/* SoC Error handling */
+		if (irqnr == 4)
+			armada_370_xp_handle_soc_err_irq();
+
 #ifdef CONFIG_SMP
 		/* IPI Handling */
 		if (irqnr == 0) {
@@ -722,6 +879,26 @@  static void armada_370_xp_mpic_resume(void)
 		}
 	}
 
+	/* Re-enable per-CPU SoC Error interrupts */
+	for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
+		struct irq_data *data;
+		int virq;
+
+		virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
+		if (virq == 0)
+			continue;
+
+		data = irq_get_irq_data(virq);
+
+		/*
+		 * Re-enable on the current CPU,
+		 * armada_xp_mpic_reenable_percpu() will take
+		 * care of secondary CPUs when they come up.
+		 */
+		if (irq_percpu_is_enabled(virq))
+			armada_370_xp_soc_err_irq_unmask(data);
+	}
+
 	/* Reconfigure doorbells for IPIs and MSIs */
 	writel(doorbell_mask_reg,
 	       per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
@@ -730,6 +907,10 @@  static void armada_370_xp_mpic_resume(void)
 	if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
 		writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
 
+	/* Unmask summary SoC Error Interrupt */
+	if (soc_err_irq_num_regs > 0)
+		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
 	ipi_resume();
 }
 
@@ -742,6 +923,7 @@  static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 					     struct device_node *parent)
 {
 	struct resource main_int_res, per_cpu_int_res;
+	struct device_node *soc_err_node;
 	int nr_irqs, i;
 	u32 control;
 
@@ -775,12 +957,37 @@  static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 	BUG_ON(!armada_370_xp_mpic_domain);
 	irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
 
+	soc_err_node = of_get_next_child(node, NULL);
+	if (!soc_err_node) {
+		pr_warn("Missing SoC Error Interrupt Controller node\n");
+		pr_warn("Extended interrupts are not supported\n");
+	} else {
+		pr_info("Registering MPIC SoC Error Interrupt Controller\n");
+		/*
+		 * Armada 370 and XP have only 32 SoC Error IRQs in one register
+		 * and other Armada platforms have 64 IRQs in two registers.
+		 */
+		soc_err_irq_num_regs =
+			of_machine_is_compatible("marvell,armada-370-xp") ? 1 : 2;
+		armada_370_xp_soc_err_domain =
+			irq_domain_add_hierarchy(armada_370_xp_mpic_domain, 0,
+						 soc_err_irq_num_regs * 32,
+						 soc_err_node,
+						 &armada_370_xp_soc_err_irq_ops,
+						 NULL);
+		BUG_ON(!armada_370_xp_soc_err_domain);
+	}
+
 	/* Setup for the boot CPU */
 	armada_xp_mpic_perf_init();
 	armada_xp_mpic_smp_cpu_init();
 
 	armada_370_xp_msi_init(node, main_int_res.start);
 
+	/* Unmask summary SoC Error Interrupt */
+	if (soc_err_irq_num_regs > 0)
+		writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
 	parent_irq = irq_of_parse_and_map(node, 0);
 	if (parent_irq <= 0) {
 		irq_set_default_host(armada_370_xp_mpic_domain);