irqchip: Add support for Sunplus SP7021 interrupt controller

Message ID	20211022082328.432357-1-qinjian@cqplus1.com (mailing list archive)
State	New, archived
Series	irqchip: Add support for Sunplus SP7021 interrupt controller

Commit Message

Qin Jian Oct. 22, 2021, 8:23 a.m. UTC
Add interrupt driver for Sunplus SP7021 SoC.

Signed-off-by: qinjian <qinjian@cqplus1.com>
---
 MAINTAINERS                       |   1 +
 drivers/irqchip/Kconfig           |   9 +
 drivers/irqchip/Makefile          |   1 +
 drivers/irqchip/irq-sp7021-intc.c | 557 ++++++++++++++++++++++++++++++
 4 files changed, 568 insertions(+)
 create mode 100644 drivers/irqchip/irq-sp7021-intc.c

Comments

Marc Zyngier Oct. 22, 2021, 9:28 a.m. UTC | #1
On Fri, 22 Oct 2021 09:23:28 +0100,
qinjian <qinjian@cqplus1.com> wrote:
> 
> Add interrupt driver for Sunplus SP7021 SoC.

Some more explanations wouldn't hurt. What architecture does this
target? Basic functionalities?

And please send this as a series together with the DT binding, especially
since this driver cannot compile in isolation.

> 
> Signed-off-by: qinjian <qinjian@cqplus1.com>
> ---
>  MAINTAINERS                       |   1 +
>  drivers/irqchip/Kconfig           |   9 +
>  drivers/irqchip/Makefile          |   1 +
>  drivers/irqchip/irq-sp7021-intc.c | 557 ++++++++++++++++++++++++++++++
>  4 files changed, 568 insertions(+)
>  create mode 100644 drivers/irqchip/irq-sp7021-intc.c
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 123616bb9..65cd295e9 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -2663,6 +2663,7 @@ F:	Documentation/devicetree/bindings/arm/sunplus,sp7021.yaml
>  F:	Documentation/devicetree/bindings/clock/sunplus,sp7021-clkc.yaml
>  F:	Documentation/devicetree/bindings/interrupt-controller/sunplus,sp7021-intc.yaml
>  F:	Documentation/devicetree/bindings/reset/sunplus,reset.yaml
> +F:	drivers/irqchip/irq-sp7021-intc.c
>  F:	include/dt-bindings/clock/sp-sp7021.h
>  F:	include/dt-bindings/interrupt-controller/sp7021-intc.h
>  F:	include/dt-bindings/reset/sp-sp7021.h
> diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
> index aca7b595c..52bd16363 100644
> --- a/drivers/irqchip/Kconfig
> +++ b/drivers/irqchip/Kconfig
> @@ -602,4 +602,13 @@ config APPLE_AIC
>  	  Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
>  	  such as the M1.
>  
> +config SUNPLUS_SP7021_INTC
> +	bool "Sunplus SP7021 interrupt controller"
> +	help
> +	  Support for the Sunplus SP7021 Interrupt Controller IP core.
> +	  This is used as a primary controller with SP7021 ChipP and can also
> +	  be used as a secondary chained controller on SP7021 ChipC.
> +
> +	  If you don't know what to do here, say Y.

Or even better, select it from the platform config. Relying on the
user to get it right is not the best idea.

> +
>  endmenu
> diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
> index f88cbf36a..75411f654 100644
> --- a/drivers/irqchip/Makefile
> +++ b/drivers/irqchip/Makefile
> @@ -116,3 +116,4 @@ obj-$(CONFIG_MACH_REALTEK_RTL)		+= irq-realtek-rtl.o
>  obj-$(CONFIG_WPCM450_AIC)		+= irq-wpcm450-aic.o
>  obj-$(CONFIG_IRQ_IDT3243X)		+= irq-idt3243x.o
>  obj-$(CONFIG_APPLE_AIC)			+= irq-apple-aic.o
> +obj-$(CONFIG_SUNPLUS_SP7021_INTC)	+= irq-sp7021-intc.o
> diff --git a/drivers/irqchip/irq-sp7021-intc.c b/drivers/irqchip/irq-sp7021-intc.c
> new file mode 100644
> index 000000000..2d26681d3
> --- /dev/null
> +++ b/drivers/irqchip/irq-sp7021-intc.c
> @@ -0,0 +1,557 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (C) Sunplus Technology Co., Ltd.
> + *       All rights reserved.
> + */
> +#include <linux/irq.h>
> +#include <linux/irqdomain.h>
> +#include <linux/io.h>
> +#ifdef CONFIG_ARM
> +#include <asm/exception.h>
> +#include <asm/mach/irq.h>
> +#else
> +#define __exception_irq_entry
> +#endif

Why do we need this?

> +#include <linux/irqchip.h>
> +#include <linux/irqchip/chained_irq.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +#include <dt-bindings/interrupt-controller/sp7021-intc.h>
> +
> +#if defined(CONFIG_SOC_SP7021)

This symbol doesn't exist.

> +#define SUPPORT_IRQ_GRP_REG
> +#endif
> +
> +#define SP_INTC_HWIRQ_MIN     0
> +#define SP_INTC_HWIRQ_MAX     223
> +
> +struct intG0Reg_st {

Please avoid CaMelCaSe names.

> +	/* Interrupt G0 */
> +	u32 intr_type[7];
> +	u32 intr_polarity[7];
> +	u32 priority[7];
> +	u32 intr_mask[7];
> +	u32 g15_reserved_0[4];
> +};
> +
> +struct intG1Reg_st {
> +	/* Interrupt G1 */
> +	u32 intr_clr[7];
> +	u32 masked_fiqs[7];
> +	u32 masked_irqs[7];
> +	u32 g21_reserved_0[10];
> +#ifdef SUPPORT_IRQ_GRP_REG
> +	u32 intr_group;
> +#else
> +	u32 rsvd_31;
> +#endif
> +};
> +
> +static struct sp_intctl {
> +	__iomem struct intG0Reg_st *g0;
> +	__iomem struct intG1Reg_st *g1;
> +	int hwirq_start;
> +	int hwirq_end;   /* exclude */
> +	struct irq_domain *domain;
> +	struct device_node *node;
> +	spinlock_t lock;
> +	int virq[2];
> +} sp_intc;
> +
> +#define WORKAROUND_FOR_EDGE_TRIGGER_BUG 1
> +#ifdef WORKAROUND_FOR_EDGE_TRIGGER_BUG

Get rid of the #define. If the HW is buggy, the workaround has to be
permanently implemented.

> +#define HW_IRQ_GPIO_INT0                120
> +#define HW_IRQ_GPIO_INT7                127
> +#define SP_IRQ_TYPE_EDGE_NONE           0x00
> +#define SP_IRQ_TYPE_EDGE_RISING         0x01
> +#define SP_IRQ_TYPE_EDGE_FALLING        0x02
> +#define SP_IRQ_TYPE_EDGE_ACTIVE         0x80
> +static char edge_trigger[SP_INTC_HWIRQ_MAX-SP_INTC_HWIRQ_MIN];
> +#endif
> +
> +static void sp_intc_ack_irq(struct irq_data *data);
> +static void sp_intc_mask_irq(struct irq_data *data);
> +static void sp_intc_unmask_irq(struct irq_data *data);
> +static int sp_intc_set_type(struct irq_data *data, unsigned int flow_type);
> +
> +static struct irq_chip sp_intc_chip = {
> +	.name = "sp_intc",
> +	.irq_ack = sp_intc_ack_irq,
> +	.irq_mask = sp_intc_mask_irq,
> +	.irq_unmask = sp_intc_unmask_irq,
> +	.irq_set_type = sp_intc_set_type,
> +};

Place the structure after the relevant functions, and drop the early
declarations.

> +
> +static void sp_intc_ack_irq(struct irq_data *data)
> +{
> +	u32 idx, mask;
> +
> +	if ((data->hwirq < sp_intc.hwirq_start) || (data->hwirq >= sp_intc.hwirq_end))
> +		return;

Why do we need these checks all over the place?

> +
> +	idx = data->hwirq / 32;
> +	mask = (1 << (data->hwirq % 32));

	mask = BIT(data->hwirq % 32);

> +
> +	spin_lock(&sp_intc.lock);

This must be a raw spinlock.

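Something along these lines, i.e. a raw_spinlock_t together with the
irqsave variants (completely untested, just to illustrate the API):

	/* in struct sp_intctl */
	raw_spinlock_t lock;

	/* at init time */
	raw_spin_lock_init(&sp_intc.lock);

	/* in the callbacks */
	unsigned long flags;

	raw_spin_lock_irqsave(&sp_intc.lock, flags);
	writel_relaxed(mask, &sp_intc.g1->intr_clr[idx]);
	raw_spin_unlock_irqrestore(&sp_intc.lock, flags);
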
> +#ifdef WORKAROUND_FOR_EDGE_TRIGGER_BUG
> +	if (edge_trigger[data->hwirq] & (SP_IRQ_TYPE_EDGE_RISING|SP_IRQ_TYPE_EDGE_FALLING)) {

We already keep this information in the core code. Why do you need to
shadow it in the driver?

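The core already tracks it for you, e.g. (untested):

	u32 type = irqd_get_trigger_type(data);

	if (type & IRQ_TYPE_EDGE_BOTH) {
		/* reprogram the polarity for the edge workaround */
	}
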
> +		u32 trig_lvl = readl_relaxed(&sp_intc.g0->intr_polarity[idx]);

Urgh... Please don't do that. Use offsets from a base instead of
mapping things in a structure.

		u32 blah = readl_relaxed(base + BLAH_OFFSET);

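i.e. plain register offsets derived from the structure layout above
(untested sketch, the macro and base names are made up):

	#define REG_INTR_TYPE(i)	(0x00 + (i) * 4)
	#define REG_INTR_POLARITY(i)	(0x1c + (i) * 4)
	#define REG_INTR_PRIORITY(i)	(0x38 + (i) * 4)
	#define REG_INTR_MASK(i)	(0x54 + (i) * 4)

	u32 trig_lvl = readl_relaxed(g0_base + REG_INTR_POLARITY(idx));

	writel_relaxed(trig_lvl | mask, g0_base + REG_INTR_POLARITY(idx));
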
> +
> +		if (edge_trigger[data->hwirq] == SP_IRQ_TYPE_EDGE_RISING)
> +			trig_lvl |= mask;
> +		else
> +			trig_lvl &= ~mask;
> +
> +		writel_relaxed(trig_lvl, &sp_intc.g0->intr_polarity[idx]);
> +		edge_trigger[data->hwirq] |= SP_IRQ_TYPE_EDGE_ACTIVE;
> +	}
> +#endif
> +	writel_relaxed(mask, &sp_intc.g1->intr_clr[idx]);
> +	spin_unlock(&sp_intc.lock);
> +}
> +
> +static void sp_intc_mask_irq(struct irq_data *data)
> +{
> +	u32 idx, mask;
> +
> +	if ((data->hwirq < sp_intc.hwirq_start) || (data->hwirq >= sp_intc.hwirq_end))
> +		return;
> +
> +	idx = data->hwirq / 32;
> +
> +	spin_lock(&sp_intc.lock);

You can end up here with interrupts enabled if a driver calls
irq_disable(). If you take an interrupt here, you're dead.

> +	mask = readl_relaxed(&sp_intc.g0->intr_mask[idx]);
> +	mask &= ~(1 << (data->hwirq % 32));
> +	writel_relaxed(mask, &sp_intc.g0->intr_mask[idx]);
> +	spin_unlock(&sp_intc.lock);
> +}
> +
> +static void sp_intc_unmask_irq(struct irq_data *data)
> +{
> +	u32 idx, mask;
> +
> +	if ((data->hwirq < sp_intc.hwirq_start) || (data->hwirq >= sp_intc.hwirq_end))
> +		return;
> +
> +	idx = data->hwirq / 32;
> +	spin_lock(&sp_intc.lock);
> +	mask = readl_relaxed(&sp_intc.g0->intr_mask[idx]);
> +	mask |= (1 << (data->hwirq % 32));
> +	writel_relaxed(mask, &sp_intc.g0->intr_mask[idx]);
> +	spin_unlock(&sp_intc.lock);
> +}
> +
> +static int sp_intc_set_type(struct irq_data *data, unsigned int flow_type)
> +{
> +	u32 idx, mask;
> +	u32 edge_type; /* 0=level, 1=edge */
> +	u32 trig_lvl;  /* 0=high, 1=low */
> +	unsigned long flags;
> +
> +	if ((data->hwirq < sp_intc.hwirq_start) || (data->hwirq >= sp_intc.hwirq_end))
> +		return -EBADR;
> +
> +	/* update the chip/handler */
> +	if (flow_type & IRQ_TYPE_LEVEL_MASK)
> +		irq_set_chip_handler_name_locked(data, &sp_intc_chip,
> +						   handle_level_irq, NULL);
> +	else
> +		irq_set_chip_handler_name_locked(data, &sp_intc_chip,
> +						   handle_edge_irq, NULL);
> +
> +	idx = data->hwirq / 32;
> +
> +	spin_lock_irqsave(&sp_intc.lock, flags);
> +
> +	edge_type = readl_relaxed(&sp_intc.g0->intr_type[idx]);
> +	trig_lvl = readl_relaxed(&sp_intc.g0->intr_polarity[idx]);
> +	mask = (1 << (data->hwirq % 32));
> +
> +	switch (flow_type) {
> +	case IRQ_TYPE_EDGE_RISING:
> +#ifdef WORKAROUND_FOR_EDGE_TRIGGER_BUG
> +		if ((data->hwirq >= HW_IRQ_GPIO_INT0) && (data->hwirq <= HW_IRQ_GPIO_INT7)) {
> +			edge_trigger[data->hwirq] = SP_IRQ_TYPE_EDGE_RISING;
> +			writel_relaxed((edge_type & ~mask), &sp_intc.g0->intr_type[idx]);
> +		} else {
> +			writel_relaxed((edge_type | mask), &sp_intc.g0->intr_type[idx]);
> +		}
> +#else
> +		writel_relaxed((edge_type | mask), &sp_intc.g0->intr_type[idx]);
> +#endif
> +		writel_relaxed((trig_lvl & ~mask), &sp_intc.g0->intr_polarity[idx]);
> +		break;
> +	case IRQ_TYPE_EDGE_FALLING:
> +#ifdef WORKAROUND_FOR_EDGE_TRIGGER_BUG
> +		if ((data->hwirq >= HW_IRQ_GPIO_INT0) && (data->hwirq <= HW_IRQ_GPIO_INT7)) {
> +			edge_trigger[data->hwirq] = SP_IRQ_TYPE_EDGE_FALLING;
> +			writel_relaxed((edge_type & ~mask), &sp_intc.g0->intr_type[idx]);
> +		} else {
> +			writel_relaxed((edge_type | mask), &sp_intc.g0->intr_type[idx]);
> +		}
> +#else
> +		writel_relaxed((edge_type | mask), &sp_intc.g0->intr_type[idx]);
> +#endif
> +		writel_relaxed((trig_lvl | mask), &sp_intc.g0->intr_polarity[idx]);
> +		break;
> +	case IRQ_TYPE_LEVEL_HIGH:
> +#ifdef WORKAROUND_FOR_EDGE_TRIGGER_BUG
> +		edge_trigger[data->hwirq] = SP_IRQ_TYPE_EDGE_NONE;
> +#endif
> +		writel_relaxed((edge_type & ~mask), &sp_intc.g0->intr_type[idx]);
> +		writel_relaxed((trig_lvl & ~mask), &sp_intc.g0->intr_polarity[idx]);
> +		break;
> +	case IRQ_TYPE_LEVEL_LOW:
> +#ifdef WORKAROUND_FOR_EDGE_TRIGGER_BUG
> +		edge_trigger[data->hwirq] = SP_IRQ_TYPE_EDGE_NONE;
> +#endif
> +		writel_relaxed((edge_type & ~mask), &sp_intc.g0->intr_type[idx]);
> +		writel_relaxed((trig_lvl | mask), &sp_intc.g0->intr_polarity[idx]);

So each case ends up writing to both the type and polarity
registers. What does it tell you?

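That the whole switch is really just two read-modify-writes. A small
helper makes most of it go away (untested sketch; the helper, the base
pointers and the booleans are invented, and the GPIO_INT quirk is left
out):

	static void sp_intc_assign_bit(u32 hwirq, void __iomem *base, bool set)
	{
		u32 mask, val;

		base += (hwirq / 32) * 4;
		mask = BIT(hwirq % 32);

		val = readl_relaxed(base);
		writel_relaxed(set ? val | mask : val & ~mask, base);
	}

	/* in sp_intc_set_type() */
	bool is_edge = !(flow_type & IRQ_TYPE_LEVEL_MASK);
	bool is_low  = (flow_type == IRQ_TYPE_LEVEL_LOW ||
			flow_type == IRQ_TYPE_EDGE_FALLING);

	sp_intc_assign_bit(data->hwirq, type_base, is_edge);
	sp_intc_assign_bit(data->hwirq, polarity_base, is_low);
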
> +		break;
> +	default:
> +#ifdef WORKAROUND_FOR_EDGE_TRIGGER_BUG
> +		edge_trigger[data->hwirq] = SP_IRQ_TYPE_EDGE_NONE;
> +#endif
> +		spin_unlock_irqrestore(&sp_intc.lock, flags);
> +		pr_err("%s: type=%d\n", __func__, flow_type);

Drop this print.

> +		return -EBADR;
> +	}
> +
> +	spin_unlock_irqrestore(&sp_intc.lock, flags);
> +
> +	return IRQ_SET_MASK_OK;
> +}
> +
> +/* prio=1 (normal), prio=0 (dedicated) */

What does this mean?

> +static void sp_intc_set_prio(u32 hwirq, u32 prio)
> +{
> +	u32 idx, mask;
> +
> +	if ((hwirq < sp_intc.hwirq_start) || (hwirq >= sp_intc.hwirq_end))
> +		return;
> +
> +	idx = hwirq / 32;
> +
> +	spin_lock(&sp_intc.lock);
> +	mask = readl_relaxed(&sp_intc.g0->priority[idx]);
> +	if (prio)
> +		mask |= (1 << (hwirq % 32));
> +	else
> +		mask &= ~(1 << (hwirq % 32));
> +	writel_relaxed(mask, &sp_intc.g0->priority[idx]);
> +	spin_unlock(&sp_intc.lock);
> +}
> +
> +/* return -1 if no interrupt # */
> +static int sp_intc_get_ext0_irq(void)
> +{
> +	int hwirq, mask;
> +	int i;
> +
> +#ifdef SUPPORT_IRQ_GRP_REG
> +	mask = (readl_relaxed(&sp_intc.g1->intr_group) >> 8) & 0x7f; /* [14:8] for irq group */
> +	if (mask) {
> +		i = fls(mask) - 1;
> +#else
> +	for (i = 0; i < 7; i++) {
> +#endif

This is really tasteless. If you have different behaviours, treat them
separately. There is also no reason why this behaviour should be
defined at compile time.

> +		mask = readl_relaxed(&sp_intc.g1->masked_irqs[i]);
> +		if (mask) {
> +			hwirq = (i << 5) + fls(mask) - 1;
> +			return hwirq;
> +		}
> +	}
> +	return -1;
> +}
> +
> +static int sp_intc_get_ext1_irq(void)
> +{
> +	int hwirq, mask;
> +	int i;
> +
> +#ifdef SUPPORT_IRQ_GRP_REG
> +	mask = (readl_relaxed(&sp_intc.g1->intr_group) >> 0) & 0x7f; /* [6:0] for fiq group */
> +	if (mask) {
> +		i = fls(mask) - 1;
> +#else
> +	for (i = 0; i < 7; i++) {
> +#endif
> +		mask = readl_relaxed(&sp_intc.g1->masked_fiqs[i]);
> +		if (mask) {
> +			hwirq = (i << 5) + fls(mask) - 1;
> +			return hwirq;
> +		}
> +	}
> +	return -1;
> +}
> +
> +static void sp_intc_handle_ext0_cascaded(struct irq_desc *desc)
> +{
> +	struct irq_chip *host_chip = irq_desc_get_chip(desc);
> +	int hwirq;
> +
> +	chained_irq_enter(host_chip, desc);
> +
> +	while ((hwirq = sp_intc_get_ext0_irq()) >= 0) {
> +#ifdef WORKAROUND_FOR_EDGE_TRIGGER_BUG
> +		if (edge_trigger[hwirq] & SP_IRQ_TYPE_EDGE_ACTIVE) {
> +			u32 idx = hwirq / 32;
> +			u32 trig_lvl = readl_relaxed(&sp_intc.g0->intr_polarity[idx]);
> +			u32 mask = 1 << (hwirq % 32);
> +
> +			edge_trigger[hwirq] &= ~SP_IRQ_TYPE_EDGE_ACTIVE;
> +			if (edge_trigger[hwirq] == SP_IRQ_TYPE_EDGE_RISING)
> +				trig_lvl &= ~mask;
> +			else
> +				trig_lvl |= mask;
> +
> +			writel_relaxed(trig_lvl, &sp_intc.g0->intr_polarity[idx]);
> +		} else
> +			generic_handle_irq(irq_find_mapping(sp_intc.domain, (unsigned int)hwirq));
> +#else
> +		generic_handle_irq(irq_find_mapping(sp_intc.domain, (unsigned int)hwirq));

Use generic_handle_domain_irq().

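i.e.:

	generic_handle_domain_irq(sp_intc.domain, hwirq);

It also returns an error when there is no mapping, so the cast and the
irq_find_mapping() call both go away.
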
> +#endif
> +	}
> +
> +	chained_irq_exit(host_chip, desc);
> +}
> +
> +static void sp_intc_handle_ext1_cascaded(struct irq_desc *desc)
> +{
> +	struct irq_chip *host_chip = irq_desc_get_chip(desc);
> +	int hwirq;
> +
> +	chained_irq_enter(host_chip, desc);
> +
> +	while ((hwirq = sp_intc_get_ext1_irq()) >= 0) {
> +#ifdef WORKAROUND_FOR_EDGE_TRIGGER_BUG
> +		if (edge_trigger[hwirq] & SP_IRQ_TYPE_EDGE_ACTIVE) {
> +			u32 idx = hwirq / 32;
> +			u32 trig_lvl = readl_relaxed(&sp_intc.g0->intr_polarity[idx]);
> +			u32 mask = 1 << (hwirq % 32);
> +
> +			edge_trigger[hwirq] &= ~SP_IRQ_TYPE_EDGE_ACTIVE;
> +			if (edge_trigger[hwirq] == SP_IRQ_TYPE_EDGE_RISING)
> +				trig_lvl &= ~mask;
> +			else
> +				trig_lvl |= mask;
> +
> +			writel_relaxed(trig_lvl, &sp_intc.g0->intr_polarity[idx]);
> +		} else
> +			generic_handle_irq(irq_find_mapping(sp_intc.domain, (unsigned int)hwirq));
> +#else
> +		generic_handle_irq(irq_find_mapping(sp_intc.domain, (unsigned int)hwirq));
> +#endif
> +	}
> +
> +	chained_irq_exit(host_chip, desc);
> +}

This is almost the same thing twice. What does it tell you?

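That they should be a single handler, with the group number passed as
handler data. Roughly (untested; sp_intc_get_ext_irq() is an invented
merged helper, and the edge workaround is omitted):

	static void sp_intc_handle_ext_cascaded(struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		int ext_num = (int)(unsigned long)irq_desc_get_handler_data(desc);
		int hwirq;

		chained_irq_enter(chip, desc);

		while ((hwirq = sp_intc_get_ext_irq(ext_num)) >= 0)
			generic_handle_domain_irq(sp_intc.domain, hwirq);

		chained_irq_exit(chip, desc);
	}

	/* and at setup time */
	irq_set_chained_handler_and_data(sp_intc.virq[i],
					 sp_intc_handle_ext_cascaded,
					 (void *)(unsigned long)i);
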
> +
> +static int sp_intc_handle_one_round(struct pt_regs *regs)
> +{
> +	int ret = -EINVAL;
> +	int hwirq;
> +
> +	while ((hwirq = sp_intc_get_ext0_irq()) >= 0) {
> +		handle_domain_irq(sp_intc.domain, hwirq, regs);

This is going away, see 20211021180236.37428-1-mark.rutland@arm.com.

> +		ret = 0;
> +	}
> +
> +	return ret;
> +}
> +
> +/* 8388: level-triggered hwirq# may come slower than IRQ */
> +#define SPURIOUS_RETRY  30

Details, please.

> +
> +static void __exception_irq_entry sp_intc_handle_irq(struct pt_regs *regs)
> +{
> +	int err;
> +	int first_int = 1;
> +	int retry = 0;
> +
> +	do {
> +		err = sp_intc_handle_one_round(regs);
> +
> +		if (!err)
> +			first_int = 0;
> +
> +		if (first_int && err) { /* spurious irq */
> +			if (retry++ < SPURIOUS_RETRY)
> +				continue;
> +		}
> +	} while (!err);
> +}
> +
> +static int sp_intc_irq_domain_map(struct irq_domain *domain, unsigned int irq,
> +				  irq_hw_number_t hwirq)
> +{
> +	irq_set_chip_and_handler(irq, &sp_intc_chip, handle_level_irq);
> +	irq_set_chip_data(irq, &sp_intc_chip);
> +	irq_set_noprobe(irq);
> +
> +	return 0;
> +}
> +
> +static void __init sp_intc_chip_init(int hwirq_start, int hwirq_end,
> +				     void __iomem *base0, void __iomem *base1)
> +{
> +	int i;
> +
> +	sp_intc.g0 = base0;
> +	sp_intc.g1 = base1;
> +	sp_intc.hwirq_start = hwirq_start;
> +	sp_intc.hwirq_end = hwirq_end;
> +
> +	for (i = 0; i < 7; i++) {
> +		/* all mask */
> +		writel_relaxed(0, &sp_intc.g0->intr_mask[i]);
> +		/* all edge */
> +		writel_relaxed(~0, &sp_intc.g0->intr_type[i]);
> +		/* all high-active */
> +		writel_relaxed(0, &sp_intc.g0->intr_polarity[i]);
> +		/* all irq */
> +		writel_relaxed(~0, &sp_intc.g0->priority[i]);
> +		/* all clear */
> +		writel_relaxed(~0, &sp_intc.g1->intr_clr[i]);
> +	}
> +}
> +
> +int sp_intc_xlate_of(struct irq_domain *d, struct device_node *node,
> +			  const u32 *intspec, unsigned int intsize,
> +			  irq_hw_number_t *out_hwirq, unsigned int *out_type)
> +{
> +	int ret = 0;
> +	u32 ext_num;
> +
> +	if (WARN_ON(intsize < 2))
> +		return -EINVAL;
> +
> +	*out_hwirq = intspec[0];
> +	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
> +	ext_num = (intspec[1] & SP_INTC_EXT_INT_MASK) >> SP_INTC_EXT_INT_SHFIT;
> +
> +	/* set ext_int1 to high priority */

Why?

> +	if (ext_num != 1)
> +		ext_num = 0;
> +
> +	if (ext_num)
> +		sp_intc_set_prio(*out_hwirq, 0); /* priority=0 */

Changes to the HW should only happen at map time, not translate.

> +
> +	return ret;
> +}
> +
> +static const struct irq_domain_ops sp_intc_dm_ops = {
> +	.xlate = sp_intc_xlate_of,

Why can't you use irq_domain_xlate_twocell()?

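i.e., assuming the extra routing bits in the second cell go away:

	static const struct irq_domain_ops sp_intc_dm_ops = {
		.xlate = irq_domain_xlate_twocell,
		.map   = sp_intc_irq_domain_map,
	};
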
> +	.map = sp_intc_irq_domain_map,
> +};
> +
> +int __init sp_intc_init(int hwirq_start, int irqs, void __iomem *base0, void __iomem *base1)
> +{
> +	sp_intc_chip_init(hwirq_start, hwirq_start + irqs, base0, base1);
> +
> +	sp_intc.domain = irq_domain_add_legacy(NULL, irqs, hwirq_start,
> +			sp_intc.hwirq_start, &sp_intc_dm_ops, &sp_intc);

We don't take *any* new users of the legacy domains. This is new code,
not code that has been in the tree for 15 years. So get rid of your
hardcoded hwirq ranges, and start writing a driver that makes sense
for the current state of the upstream kernel. Which means using
irq_domain_add_linear(), for example. And using proper device nodes to
refer to the domain.

> +	if (!sp_intc.domain)
> +		panic("%s: unable to create legacy domain\n", __func__);
> +
> +	set_handle_irq(sp_intc_handle_irq);
> +
> +	return 0;
> +}
> +
> +#if defined(CONFIG_SOC_SP7021)
> +static cpumask_t *u2cpumask(u32 mask, cpumask_t *cpumask)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < 4; i++) {
> +		if (mask & (1 << i))
> +			cpumask_set_cpu(i, cpumask);
> +		else
> +			cpumask_clear_cpu(i, cpumask);
> +	}
> +	return cpumask;
> +}
> +
> +static int __init sp_intc_ext_adjust(void)
> +{
> +	u32 mask;
> +	static cpumask_t cpumask;
> +	struct device_node *node = sp_intc.node;
> +
> +	if (num_online_cpus() <= 1) {
> +		pr_info("single core: skip ext adjust\n");
> +		return 0;
> +	}
> +
> +	cpumask = CPU_MASK_NONE;
> +	if (!of_property_read_u32(node, "ext0-mask", &mask)) {
> +		pr_info("%d: ext0-mask=0x%x\n", sp_intc.virq[0], mask);
> +		if (irq_set_affinity(sp_intc.virq[0], u2cpumask(mask, &cpumask)))
> +			pr_err("failed to set ext0 cpumask=0x%x\n", mask);
> +	}
> +
> +	cpumask = CPU_MASK_NONE;
> +	if (!of_property_read_u32(node, "ext1-mask", &mask)) {
> +		pr_info("%d: ext1-mask=0x%x\n", sp_intc.virq[1], mask);
> +		if (irq_set_affinity(sp_intc.virq[1], u2cpumask(mask, &cpumask)))
> +			pr_err("failed to set ext1 cpumask=0x%x\n", mask);
> +	}
> +
> +	return 0;
> +}
> +arch_initcall(sp_intc_ext_adjust)

No, never. You don't call set_affinity from the driver, you don't add
extra initcalls, and you don't change the affinity of the chained
interrupt, period.

> +#endif
> +
> +#ifdef CONFIG_OF
> +int __init sp_intc_init_dt(struct device_node *node, struct device_node *parent)
> +{
> +	void __iomem *base0, *base1;
> +
> +	base0 = of_iomap(node, 0);
> +	if (!base0)
> +		panic("unable to map sp-intc base 0\n");
> +
> +	base1 = of_iomap(node, 1);
> +	if (!base1)
> +		panic("unable to map sp-intc base 1\n");
> +
> +	sp_intc.node = node;
> +
> +	sp_intc_chip_init(SP_INTC_HWIRQ_MIN, SP_INTC_HWIRQ_MAX, base0, base1);
> +
> +	sp_intc.domain = irq_domain_add_linear(node, sp_intc.hwirq_end - sp_intc.hwirq_start,
> +			&sp_intc_dm_ops, &sp_intc);

So what is the legacy domain above?

> +	if (!sp_intc.domain)
> +		panic("%s: unable to create linear domain\n", __func__);

No panic. Return an error.

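Something like this instead (untested sketch, error path only,
SP_INTC_NR_IRQS is made up):

	sp_intc.domain = irq_domain_add_linear(node, SP_INTC_NR_IRQS,
					       &sp_intc_dm_ops, &sp_intc);
	if (!sp_intc.domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

out_unmap:
	iounmap(base1);
	iounmap(base0);
	return ret;
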
> +
> +	spin_lock_init(&sp_intc.lock);
> +
> +	if (parent) {
> +		sp_intc.virq[0] = irq_of_parse_and_map(node, 0);
> +		if (sp_intc.virq[0]) {
> +			irq_set_chained_handler_and_data(sp_intc.virq[0],
> +				sp_intc_handle_ext0_cascaded, &sp_intc);
> +		} else {
> +			panic("%s: missed ext0 irq in DT\n", __func__);
> +		}
> +
> +		sp_intc.virq[1] = irq_of_parse_and_map(node, 1);
> +		if (sp_intc.virq[1]) {
> +			irq_set_chained_handler_and_data(sp_intc.virq[1],
> +				sp_intc_handle_ext1_cascaded, &sp_intc);
> +		} else {
> +			panic("%s: missed ext1 irq in DT\n", __func__);
> +		}
> +	} else {
> +		set_handle_irq(sp_intc_handle_irq);
> +	}
> +
> +	return 0;
> +}
> +IRQCHIP_DECLARE(sp_intc, "sunplus,sp7021-intc", sp_intc_init_dt);
> +#endif
> +
> +MODULE_AUTHOR("Qin Jian <qinjian@cqplus1.com>");
> +MODULE_DESCRIPTION("Sunplus SP7021 Interrupt Controller Driver");
> +MODULE_LICENSE("GPL v2");

This driver needs some major rework. I've only picked on the most
glaring issues, but there is a lot to say on every single line.

	M.