Message ID | 20211209043249.65474-7-marcan@marcan.st (mailing list archive)
---|---
State | New, archived
Series | irqchip/apple-aic: Add support for AICv2
On Thu, 09 Dec 2021 04:32:49 +0000, Hector Martin <marcan@marcan.st> wrote:
>
> Introduce support for the new AICv2 hardware block in t6000/t6001 SoCs.
>
> It seems these blocks are missing the information required to compute
> the event register offset in the capability registers, so we specify
> that in the DT.
>
> Signed-off-by: Hector Martin <marcan@marcan.st>
> ---
>  drivers/irqchip/irq-apple-aic.c | 146 ++++++++++++++++++++++++++++----
>  1 file changed, 128 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
> index 46b7750548a0..226d5232dd14 100644
> --- a/drivers/irqchip/irq-apple-aic.c
> +++ b/drivers/irqchip/irq-apple-aic.c
> @@ -101,6 +101,57 @@
>
>  #define AIC_MAX_IRQ		0x400
>
> +/*
> + * AIC v2 registers (MMIO)
> + */
> +
> +#define AIC2_VERSION		0x0000
> +#define AIC2_VERSION_VER	GENMASK(7, 0)
> +
> +#define AIC2_INFO1		0x0004
> +#define AIC2_INFO1_NR_IRQ	GENMASK(15, 0)
> +#define AIC2_INFO1_LAST_DIE	GENMASK(27, 24)
> +
> +#define AIC2_INFO2		0x0008
> +
> +#define AIC2_INFO3		0x000c
> +#define AIC2_INFO3_MAX_IRQ	GENMASK(15, 0)
> +#define AIC2_INFO3_MAX_DIE	GENMASK(27, 24)
> +
> +#define AIC2_RESET		0x0010
> +#define AIC2_RESET_RESET	BIT(0)
> +
> +#define AIC2_CONFIG		0x0014
> +#define AIC2_CONFIG_ENABLE	BIT(0)
> +#define AIC2_CONFIG_PREFER_PCPU	BIT(28)
> +
> +#define AIC2_TIMEOUT		0x0028
> +#define AIC2_CLUSTER_PRIO	0x0030
> +#define AIC2_DELAY_GROUPS	0x0100
> +
> +#define AIC2_IRQ_CFG		0x2000
> +
> +/*
> + * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
> + *
> + * Repeat for each die:
> + *   IRQ_CFG:  u32 * MAX_IRQS
> + *   SW_SET:   u32 * (MAX_IRQS / 32)
> + *   SW_CLR:   u32 * (MAX_IRQS / 32)
> + *   MASK_SET: u32 * (MAX_IRQS / 32)
> + *   MASK_CLR: u32 * (MAX_IRQS / 32)
> + *   HW_STATE: u32 * (MAX_IRQS / 32)
> + *
> + * This is followed by a set of event registers, each 16K page aligned.
> + * The first one is the AP event register we will use. Unfortunately,
> + * the actual implemented die count is not specified anywhere in the
> + * capability registers, so we have to explcitly specify the event

explicitly

> + * register offset in the device tree to remain forward-compatible.

Do the current machines actually have more than a single die?

> + */
> +
> +#define AIC2_IRQ_CFG_TARGET	GENMASK(3, 0)
> +#define AIC2_IRQ_CFG_DELAY_IDX	GENMASK(7, 5)
> +
>  #define MASK_REG(x)		(4 * ((x) >> 5))
>  #define MASK_BIT(x)		BIT((x) & GENMASK(4, 0))
>
> @@ -187,6 +238,7 @@ struct aic_info {
> 	/* Register offsets */
> 	u32 event;
> 	u32 target_cpu;
> +	u32 irq_cfg;
> 	u32 sw_set;
> 	u32 sw_clr;
> 	u32 mask_set;
> @@ -214,6 +266,14 @@ static const struct aic_info aic1_fipi_info = {
> 	.fast_ipi = true,
> };
>
> +static const struct aic_info aic2_info = {
> +	.version	= 2,
> +
> +	.irq_cfg	= AIC2_IRQ_CFG,
> +
> +	.fast_ipi	= true,
> +};
> +
>  static const struct of_device_id aic_info_match[] = {
> 	{
> 		.compatible = "apple,t8103-aic",
> @@ -223,6 +283,10 @@ static const struct of_device_id aic_info_match[] = {
> 		.compatible = "apple,aic",
> 		.data = &aic1_info,
> 	},
> +	{
> +		.compatible = "apple,aic2",
> +		.data = &aic2_info,
> +	},
> 	{}
>  };
>
> @@ -368,6 +432,14 @@ static struct irq_chip aic_chip = {
> 	.irq_set_type = aic_irq_set_type,
>  };
>
> +static struct irq_chip aic2_chip = {
> +	.name = "AIC2",
> +	.irq_mask = aic_irq_mask,
> +	.irq_unmask = aic_irq_unmask,
> +	.irq_eoi = aic_irq_eoi,
> +	.irq_set_type = aic_irq_set_type,
> +};

How is the affinity managed if you don't have a callback? A number of
things are bound to break if you don't have one. And a description of
how an interrupt gets routed wouldn't go amiss!

> +
>  /*
>   * FIQ irqchip
>   */
> @@ -524,10 +596,15 @@ static struct irq_chip fiq_chip = {
>  static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
> 			      irq_hw_number_t hw)
>  {
> +	struct aic_irq_chip *ic = id->host_data;
> 	u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
> +	struct irq_chip *chip = &aic_chip;
> +
> +	if (ic->info.version == 2)
> +		chip = &aic2_chip;
>
> 	if (type == AIC_EVENT_TYPE_HW) {
> -		irq_domain_set_info(id, irq, hw, &aic_chip, id->host_data,
> +		irq_domain_set_info(id, irq, hw, chip, id->host_data,
> 				    handle_fasteoi_irq, NULL, NULL);
> 		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
> 	} else if (type == AIC_EVENT_TYPE_FIQ) {
> @@ -882,23 +959,25 @@ static int aic_init_cpu(unsigned int cpu)
> 	/* Commit all of the above */
> 	isb();
>
> -	/*
> -	 * Make sure the kernel's idea of logical CPU order is the same as AIC's
> -	 * If we ever end up with a mismatch here, we will have to introduce
> -	 * a mapping table similar to what other irqchip drivers do.
> -	 */
> -	WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
> +	if (aic_irqc->info.version == 1) {
> +		/*
> +		 * Make sure the kernel's idea of logical CPU order is the same as AIC's
> +		 * If we ever end up with a mismatch here, we will have to introduce
> +		 * a mapping table similar to what other irqchip drivers do.
> +		 */
> +		WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
>
> -	/*
> -	 * Always keep IPIs unmasked at the hardware level (except auto-masking
> -	 * by AIC during processing). We manage masks at the vIPI level.
> -	 */
> -	aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
> -	if (!aic_irqc->info.fast_ipi) {
> -		aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
> -		aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
> -	} else {
> -		aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
> +		/*
> +		 * Always keep IPIs unmasked at the hardware level (except auto-masking
> +		 * by AIC during processing). We manage masks at the vIPI level.
> +		 */
> +		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
> +		if (!aic_irqc->info.fast_ipi) {
> +			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
> +			aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
> +		} else {
> +			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
> +		}

Why is this specific to v1 and not affecting v2? I'm sure there is a
good reason, but documenting these differences would certainly help
reviewing (which version implements which registers, for example).

Thanks,

	M.
On 13/12/2021 03.47, Marc Zyngier wrote:
>> + * This is followed by a set of event registers, each 16K page aligned.
>> + * The first one is the AP event register we will use. Unfortunately,
>> + * the actual implemented die count is not specified anywhere in the
>> + * capability registers, so we have to explcitly specify the event
>
> explicitly

Thanks, fixed!

>> + * register offset in the device tree to remain forward-compatible.
>
> Do the current machines actually have more than a single die?

Not the current ones, but there are loud rumors everywhere of multi-die
products... might as well try to support them ahead of time. The current
machines *do* have two register sets, implying support for 2-die
configurations, and although no IRQs are ever asserted from hardware,
SW_GEN mode works and you can trigger die-ID 1 events.

The interpretation of the capability registers comes from what the macOS
driver does (that's the only part I looked at it for, since it's kind of
hard to divine with only a single data point from the hardware). Their
driver is definitely designed for multi-die machines already. The
register layout I worked out by probing the hardware; it was blatantly
obvious that there was a second set of IRQ mask arrays after the first,
which macOS didn't use (yet)...

>> +static struct irq_chip aic2_chip = {
>> +	.name = "AIC2",
>> +	.irq_mask = aic_irq_mask,
>> +	.irq_unmask = aic_irq_unmask,
>> +	.irq_eoi = aic_irq_eoi,
>> +	.irq_set_type = aic_irq_set_type,
>> +};
>
> How is the affinity managed if you don't have a callback? A number of
> things are bound to break if you don't have one. And a description of
> how an interrupt gets routed wouldn't go amiss!

It isn't... we don't know all the details yet, but it seems to be Some
Kind Of Magic™.

There is definitely no way of individually mapping IRQs to specific
CPUs; there just aren't enough implemented register bits to allow that.

What we do have is a per-IRQ config consisting of:

- Target CPU, 4 bits. This seems to be for pointing IRQs at coprocessors
  (there's actually an init dance to map a few IRQs to specific
  coprocessors; m1n1 takes care of that right now*). Only 0 sends IRQs
  to the AP here, so this is not useful to us.

- IRQ config group, 3 bits. This selects one of 8 IRQ config registers.
  These do indirectly control how the IRQ is delivered; at least they
  have some kind of delay value (coalescing?), and I suspect they may do
  some kind of priority control too, though the details of that aren't
  clear yet. I don't recall seeing macOS do anything interesting with
  these groups; I think it always uses group 0.

Then each CPU has an IMP-DEF sysreg that allows it to opt in to or out
of receiving IRQs (!). It actually has two bits, so there may be some
kind of priority/subset control here too. By default all other CPUs are
opted out, which isn't great... so m1n1 initializes it to opt all CPUs
in to IRQ delivery.

The actual delivery flow seems to be something like this: AIC/something
picks a CPU for initial delivery (using some kind of heuristic/CPU
state? I noticed WFI seems to have an effect here), and if the IRQ isn't
acked in a timely manner, it punts and broadcasts the IRQ to all CPUs.
The IRQ ack register is shared by all CPUs; I don't know if there is
some kind of per-CPU difference in what it can return, but I haven't
observed that yet, so I guess whatever CPU gets the IRQ gets to handle
anything that is pending.

There are also some extra features; e.g. there is definitely a set of
registers for measuring IRQ latency (they tell you how long it took from
IRQ assertion to the CPU acking it). There's also some kind of global
control over which CPU *cluster* is tried first for delivery (it
defaults to the e-cluster, but you can change it to either p-cluster).
We don't use those right now.

So there is definitely room for further research here, but the current
state of affairs is that the driver doesn't do affinity at all, and IRQs
are handled by "some" CPU. In practice, I see a decent (but not
completely uniform) spread of which CPU handles any given IRQ. I assume
it's something like preferring a busy CPU, to avoid waking up a core
just to handle an IRQ.

* I don't know how masks are supposed to be managed for those IRQs used
by copros; I guess we'll find out when we get there and notice something
is broken if we don't unmask them... but given the IRQ handling flow
here, I guess the copro should be doing the masking/unmasking itself.

>> +	aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
>> +	if (!aic_irqc->info.fast_ipi) {
>> +		aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
>> +		aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
>> +	} else {
>> +		aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
>> +	}
>
> Why is this specific to v1 and not affecting v2? I'm sure there is a
> good reason, but documenting these differences would certainly help
> reviewing (which version implements which registers, for example).

Only v1 has legacy IPIs (which is why we had to implement Fast IPIs for
this one). The AIC_IPI_* registers are for AICv1 specifically; other
than the event register fields, which are the same (but not the register
offset itself), and the general concept of the mask/sw_gen/hw_state
register arrays, there aren't really any shared registers between AICv1
and AICv2. I'll add a comment to clarify this.
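For concreteness, the two per-IRQ fields Hector describes correspond to the
AIC2_IRQ_CFG_TARGET and AIC2_IRQ_CFG_DELAY_IDX masks defined in the patch.
A minimal sketch of composing such a config word with the kernel's bitfield
helpers follows; the helper name is invented for illustration and is not
part of the driver:

	#include <linux/types.h>
	#include <linux/bitfield.h>

	/*
	 * Sketch only: build a per-IRQ config word from the two fields
	 * described above. Target 0 routes the IRQ to the AP; nonzero
	 * targets point it at a coprocessor. The delay group selects one
	 * of the 8 config registers (presumably those at AIC2_DELAY_GROUPS).
	 */
	static u32 aic2_irq_cfg_word(u32 target, u32 delay_group)
	{
		return FIELD_PREP(AIC2_IRQ_CFG_TARGET, target) |
		       FIELD_PREP(AIC2_IRQ_CFG_DELAY_IDX, delay_group);
	}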
On Sat, 18 Dec 2021 06:02:24 +0000, Hector Martin <marcan@marcan.st> wrote:

[...]

> So there is definitely room for further research here, but the current
> state of affairs is that the driver doesn't do affinity at all, and
> IRQs are handled by "some" CPU. In practice, I see a decent (but not
> completely uniform) spread of which CPU handles any given IRQ. I assume
> it's something like preferring a busy CPU, to avoid waking up a core
> just to handle an IRQ.

The main issue with such magic is that a number of things will break in
a spectacular way for a bunch of drivers. We have a whole class of
(mostly PCI) devices that have per-queue interrupts, each one bound to a
CPU core. The drivers fully expect the interrupt for a given queue to
fire on a given CPU, and *only* that one, as they would, for example,
use per-CPU data to get the context of the queue.

With an automatic spread of interrupts, this totally breaks, probably
because the core will refuse to use managed interrupts given the lack of
an affinity-setting callback. And even if you provide a dummy one, it is
the endpoint drivers that will explode.

The only way I can imagine to make this work is to force these
interrupts to be threaded, so that the thread can run on a different CPU
than the one the interrupt has been taken on. Performance-wise, this is
likely to be a pig.

I guess we will have to find ways to live with this in the long run, and
maybe teach the core code about these weird behaviours.

Thanks,

	M.
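As a rough sketch of the threaded workaround Marc describes (all names here
are hypothetical, and this says nothing about what the AIC driver will
actually do): the hard handler merely wakes a thread, and that thread runs
in a kthread whose affinity the core can manage independently of wherever
the hard IRQ happened to fire.

	#include <linux/interrupt.h>

	/* Hard handler: runs on whichever CPU the AIC delivered to. */
	static irqreturn_t demo_hardirq(int irq, void *dev_id)
	{
		return IRQ_WAKE_THREAD;	/* defer the real work to the thread */
	}

	/* Thread: can be scheduled on the CPU the queue is bound to. */
	static irqreturn_t demo_thread_fn(int irq, void *dev_id)
	{
		/* per-queue work that expects a stable CPU goes here */
		return IRQ_HANDLED;
	}

	/*
	 * ret = request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
	 *			      IRQF_ONESHOT, "demo-queue", dev);
	 */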
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 46b7750548a0..226d5232dd14 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -101,6 +101,57 @@
 
 #define AIC_MAX_IRQ		0x400
 
+/*
+ * AIC v2 registers (MMIO)
+ */
+
+#define AIC2_VERSION		0x0000
+#define AIC2_VERSION_VER	GENMASK(7, 0)
+
+#define AIC2_INFO1		0x0004
+#define AIC2_INFO1_NR_IRQ	GENMASK(15, 0)
+#define AIC2_INFO1_LAST_DIE	GENMASK(27, 24)
+
+#define AIC2_INFO2		0x0008
+
+#define AIC2_INFO3		0x000c
+#define AIC2_INFO3_MAX_IRQ	GENMASK(15, 0)
+#define AIC2_INFO3_MAX_DIE	GENMASK(27, 24)
+
+#define AIC2_RESET		0x0010
+#define AIC2_RESET_RESET	BIT(0)
+
+#define AIC2_CONFIG		0x0014
+#define AIC2_CONFIG_ENABLE	BIT(0)
+#define AIC2_CONFIG_PREFER_PCPU	BIT(28)
+
+#define AIC2_TIMEOUT		0x0028
+#define AIC2_CLUSTER_PRIO	0x0030
+#define AIC2_DELAY_GROUPS	0x0100
+
+#define AIC2_IRQ_CFG		0x2000
+
+/*
+ * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
+ *
+ * Repeat for each die:
+ *   IRQ_CFG:  u32 * MAX_IRQS
+ *   SW_SET:   u32 * (MAX_IRQS / 32)
+ *   SW_CLR:   u32 * (MAX_IRQS / 32)
+ *   MASK_SET: u32 * (MAX_IRQS / 32)
+ *   MASK_CLR: u32 * (MAX_IRQS / 32)
+ *   HW_STATE: u32 * (MAX_IRQS / 32)
+ *
+ * This is followed by a set of event registers, each 16K page aligned.
+ * The first one is the AP event register we will use. Unfortunately,
+ * the actual implemented die count is not specified anywhere in the
+ * capability registers, so we have to explcitly specify the event
+ * register offset in the device tree to remain forward-compatible.
+ */
+
+#define AIC2_IRQ_CFG_TARGET	GENMASK(3, 0)
+#define AIC2_IRQ_CFG_DELAY_IDX	GENMASK(7, 5)
+
 #define MASK_REG(x)		(4 * ((x) >> 5))
 #define MASK_BIT(x)		BIT((x) & GENMASK(4, 0))
 
@@ -187,6 +238,7 @@ struct aic_info {
 	/* Register offsets */
 	u32 event;
 	u32 target_cpu;
+	u32 irq_cfg;
 	u32 sw_set;
 	u32 sw_clr;
 	u32 mask_set;
@@ -214,6 +266,14 @@ static const struct aic_info aic1_fipi_info = {
 	.fast_ipi = true,
 };
 
+static const struct aic_info aic2_info = {
+	.version	= 2,
+
+	.irq_cfg	= AIC2_IRQ_CFG,
+
+	.fast_ipi	= true,
+};
+
 static const struct of_device_id aic_info_match[] = {
 	{
 		.compatible = "apple,t8103-aic",
@@ -223,6 +283,10 @@ static const struct of_device_id aic_info_match[] = {
 		.compatible = "apple,aic",
 		.data = &aic1_info,
 	},
+	{
+		.compatible = "apple,aic2",
+		.data = &aic2_info,
+	},
 	{}
 };
 
@@ -368,6 +432,14 @@ static struct irq_chip aic_chip = {
 	.irq_set_type = aic_irq_set_type,
 };
 
+static struct irq_chip aic2_chip = {
+	.name = "AIC2",
+	.irq_mask = aic_irq_mask,
+	.irq_unmask = aic_irq_unmask,
+	.irq_eoi = aic_irq_eoi,
+	.irq_set_type = aic_irq_set_type,
+};
+
 /*
  * FIQ irqchip
  */
@@ -524,10 +596,15 @@ static struct irq_chip fiq_chip = {
 static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
 			      irq_hw_number_t hw)
 {
+	struct aic_irq_chip *ic = id->host_data;
 	u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
+	struct irq_chip *chip = &aic_chip;
+
+	if (ic->info.version == 2)
+		chip = &aic2_chip;
 
 	if (type == AIC_EVENT_TYPE_HW) {
-		irq_domain_set_info(id, irq, hw, &aic_chip, id->host_data,
+		irq_domain_set_info(id, irq, hw, chip, id->host_data,
 				    handle_fasteoi_irq, NULL, NULL);
 		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
 	} else if (type == AIC_EVENT_TYPE_FIQ) {
@@ -882,23 +959,25 @@ static int aic_init_cpu(unsigned int cpu)
 	/* Commit all of the above */
 	isb();
 
-	/*
-	 * Make sure the kernel's idea of logical CPU order is the same as AIC's
-	 * If we ever end up with a mismatch here, we will have to introduce
-	 * a mapping table similar to what other irqchip drivers do.
-	 */
-	WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
+	if (aic_irqc->info.version == 1) {
+		/*
+		 * Make sure the kernel's idea of logical CPU order is the same as AIC's
+		 * If we ever end up with a mismatch here, we will have to introduce
+		 * a mapping table similar to what other irqchip drivers do.
+		 */
+		WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
 
-	/*
-	 * Always keep IPIs unmasked at the hardware level (except auto-masking
-	 * by AIC during processing). We manage masks at the vIPI level.
-	 */
-	aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
-	if (!aic_irqc->info.fast_ipi) {
-		aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
-		aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
-	} else {
-		aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
+		/*
+		 * Always keep IPIs unmasked at the hardware level (except auto-masking
+		 * by AIC during processing). We manage masks at the vIPI level.
+		 */
+		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
+		if (!aic_irqc->info.fast_ipi) {
+			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
+			aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
+		} else {
+			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
+		}
 	}
 
 	/* Initialize the local mask state */
@@ -953,6 +1032,29 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
 		break;
 	}
 
+	case 2: {
+		u32 info1, info3;
+
+		info1 = aic_ic_read(irqc, AIC2_INFO1);
+		info3 = aic_ic_read(irqc, AIC2_INFO3);
+
+		irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
+		irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
+		irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
+		irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);
+
+		off = start_off = irqc->info.irq_cfg;
+		off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */
+
+		if (of_property_read_u32(node, "apple,event-reg", &irqc->info.event) < 0) {
+			pr_err("Failed to get apple,event-reg property");
+			iounmap(irqc->base);
+			kfree(irqc);
+			return -ENODEV;
+		}
+
+		break;
+	}
 	}
 
 	irqc->info.sw_set = off;
@@ -999,6 +1101,13 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
 		off += irqc->info.die_stride;
 	}
 
+	if (irqc->info.version == 2) {
+		u32 config = aic_ic_read(irqc, AIC2_CONFIG);
+
+		config |= AIC2_CONFIG_ENABLE;
+		aic_ic_write(irqc, AIC2_CONFIG, config);
+	}
+
 	if (!is_kernel_in_hyp_mode())
 		pr_info("Kernel running in EL1, mapping interrupts");
 
@@ -1017,4 +1126,5 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
 	return 0;
 }
 
-IRQCHIP_DECLARE(apple_m1_aic, "apple,aic", aic_of_ic_init);
+IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
+IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);
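The die-block arithmetic implied by the layout comment in the patch works
out as follows. This is an illustrative helper, hypothetical and not part
of the patch (which instead computes the equivalent offsets incrementally
in aic_of_ic_init):

	#include <linux/types.h>

	/*
	 * Each die block holds one u32 of config space per IRQ, followed
	 * by five bitmap arrays of max_irq / 32 words each, so:
	 *
	 *   die_stride = 4 * (max_irq + 5 * (max_irq / 32))
	 */
	static u32 aic2_mask_set_offset(u32 irq_cfg_base, u32 max_irq, u32 die)
	{
		u32 words = max_irq / 32;
		u32 die_stride = sizeof(u32) * (max_irq + 5 * words);

		/* skip IRQ_CFG, SW_SET and SW_CLR within the die block */
		return irq_cfg_base + die * die_stride +
		       sizeof(u32) * (max_irq + 2 * words);
	}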
Introduce support for the new AICv2 hardware block in t6000/t6001 SoCs.

It seems these blocks are missing the information required to compute
the event register offset in the capability registers, so we specify
that in the DT.

Signed-off-by: Hector Martin <marcan@marcan.st>
---
 drivers/irqchip/irq-apple-aic.c | 146 ++++++++++++++++++++++++++++----
 1 file changed, 128 insertions(+), 18 deletions(-)