
[v3,6/6] irqchip: sifive-plic: Implement irq_set_affinity() for SMP host

Message ID 20181130080207.20505-7-anup@brainfault.org (mailing list archive)
State New, archived
Series IRQ affinity support in PLIC driver

Commit Message

Anup Patel Nov. 30, 2018, 8:02 a.m. UTC
Currently on an SMP host, all CPUs take external interrupts routed via
the PLIC. Every CPU tries to claim a given external interrupt, but only
one of them succeeds while the other CPUs simply resume whatever they
were doing before. This means that if we have N CPUs, then for every
external interrupt N-1 CPUs always fail to claim it and waste their
CPU time.
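
For context, each CPU's external-interrupt handler claims work from the
PLIC roughly as follows (a simplified sketch modeled on the driver's
existing claim loop; "claim" stands for that hart's claim/complete
register and the names are illustrative):

	static void plic_claim_loop_sketch(void __iomem *claim)
	{
		irq_hw_number_t hwirq;

		while ((hwirq = readl(claim))) {
			/* Only one CPU reads a nonzero interrupt ID here;
			 * the others read 0, fall out of the loop, and
			 * resume whatever they were doing. */
			int irq = irq_find_mapping(plic_irqdomain, hwirq);

			if (irq > 0)
				generic_handle_irq(irq);
			writel(hwirq, claim);	/* complete the interrupt */
		}
	}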

Instead, an external interrupt should be taken by only one CPU, and we
should have a provision to explicitly specify the IRQ affinity from
kernel space or user space.

This patch provides an irq_set_affinity() implementation for the PLIC
driver. It also updates irq_enable() such that PLIC interrupts are
enabled for only one of the CPUs specified in the IRQ affinity mask.

With this patch in place, we can change the IRQ affinity at any time
from user space using procfs.

Example:

/ # cat /proc/interrupts
           CPU0       CPU1       CPU2       CPU3
  8:         44          0          0          0  SiFive PLIC   8  virtio0
 10:         48          0          0          0  SiFive PLIC  10  ttyS0
IPI0:        55        663         58        363  Rescheduling interrupts
IPI1:         0          1          3         16  Function call interrupts
/ #
/ #
/ # echo 4 > /proc/irq/10/smp_affinity
/ #
/ # cat /proc/interrupts
           CPU0       CPU1       CPU2       CPU3
  8:         45          0          0          0  SiFive PLIC   8  virtio0
 10:        160          0         17          0  SiFive PLIC  10  ttyS0
IPI0:        68        693         77        410  Rescheduling interrupts
IPI1:         0          2          3         16  Function call interrupts
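
Note: the value written to smp_affinity is a hexadecimal CPU bitmask,
so writing 4 (binary 100) selects CPU2; the ttyS0 interrupt counts then
start accumulating in the CPU2 column.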

Signed-off-by: Anup Patel <anup@brainfault.org>
---
 drivers/irqchip/irq-sifive-plic.c | 35 +++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)

Comments

Christoph Hellwig Dec. 17, 2018, 6:32 p.m. UTC | #1
On Fri, Nov 30, 2018 at 01:32:07PM +0530, Anup Patel wrote:
> This patch provides an irq_set_affinity() implementation for the PLIC
> driver. It also updates irq_enable() such that PLIC interrupts are
> enabled for only one of the CPUs specified in the IRQ affinity mask.

But normally our affinity masks are just that - masks of CPUs that can
take the interrupt.  It seems a bit odd to then just pick the first
one, as this means that with the default all-CPU mask we'll have all
interrupts handled by the first CPU only.


> --- a/drivers/irqchip/irq-sifive-plic.c
> +++ b/drivers/irqchip/irq-sifive-plic.c
> @@ -106,14 +106,42 @@ static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)
>  
>  static void plic_irq_enable(struct irq_data *d)
>  {
> -	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 1);
> +	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
> +					   cpu_online_mask);
> +	WARN_ON(cpu >= nr_cpu_ids);

I think this should be WARN_ON_ONCE and actually return instead of then
proceeding using the invalid cpu index. 
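
For reference, the suggested change could look roughly like this (a
sketch of the review feedback, not the actual follow-up code):

	static void plic_irq_enable(struct irq_data *d)
	{
		unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
						   cpu_online_mask);

		/* Warn once and bail out rather than enabling the
		 * interrupt with an invalid CPU index. */
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;
		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
	}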

> +#ifdef CONFIG_SMP
> +static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
> +			    bool force)
> +{
> +	unsigned int cpu;
> +
> +	if (!force)
> +		cpu = cpumask_any_and(mask_val, cpu_online_mask);
> +	else
> +		cpu = cpumask_first(mask_val);

maybe swap the two branches around to avoid the inversion of the force
flag?
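
I.e., something like this sketch:

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);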
Anup Patel Dec. 18, 2018, 10:32 a.m. UTC | #2
On Tue, Dec 18, 2018 at 12:02 AM Christoph Hellwig <hch@infradead.org> wrote:
>
> On Fri, Nov 30, 2018 at 01:32:07PM +0530, Anup Patel wrote:
> > This patch provides an irq_set_affinity() implementation for the PLIC
> > driver. It also updates irq_enable() such that PLIC interrupts are
> > enabled for only one of the CPUs specified in the IRQ affinity mask.
>
> But normally our affinity masks are just that - masks of CPUs that can
> take the interrupt.  It seems a bit odd to then just pick the first
> one, as this means that with the default all-CPU mask we'll have all
> interrupts handled by the first CPU only.

Yes, the affinity mask is the set of CPUs which can take the IRQ, but
there is also the effective affinity mask, which represents the CPUs
that will actually receive the IRQ.

Some interrupt controllers (unlike the PLIC) support hardware IRQ
balancing. For such interrupt controllers, we inform the hardware of
all the CPUs that can take an IRQ, but the controller delivers each
IRQ to only one of those CPUs.

There are also quite a few interrupt controllers which only allow an
IRQ to be taken by exactly one CPU. For such interrupt controllers,
the driver has to pick one CPU out of the CPUs which can take the IRQ
(for example, GICv2, GICv3, etc.).
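
(The patch below follows this single-target pattern: it picks one CPU
from the requested mask and reports it back via
irq_data_update_effective_affinity().)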

>
>
> > --- a/drivers/irqchip/irq-sifive-plic.c
> > +++ b/drivers/irqchip/irq-sifive-plic.c
> > @@ -106,14 +106,42 @@ static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)
> >
> >  static void plic_irq_enable(struct irq_data *d)
> >  {
> > -     plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 1);
> > +     unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
> > +                                        cpu_online_mask);
> > +     WARN_ON(cpu >= nr_cpu_ids);
>
> I think this should be WARN_ON_ONCE and actually return instead of then
> proceeding using the invalid cpu index.

Sure, will update.

>
> > +#ifdef CONFIG_SMP
> > +static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
> > +                         bool force)
> > +{
> > +     unsigned int cpu;
> > +
> > +     if (!force)
> > +             cpu = cpumask_any_and(mask_val, cpu_online_mask);
> > +     else
> > +             cpu = cpumask_first(mask_val);
>
> maybe swap the two branches around to avoid the inversion of the force
> flag?

Sure, will update.

Regards,
Anup

Patch

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 17269622be21..c5bbb12e74e0 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -106,14 +106,42 @@ static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)
 
 static void plic_irq_enable(struct irq_data *d)
 {
-	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 1);
+	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+					   cpu_online_mask);
+	WARN_ON(cpu >= nr_cpu_ids);
+	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 }
 
 static void plic_irq_disable(struct irq_data *d)
 {
-	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 0);
+	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
 }
 
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	unsigned int cpu;
+
+	if (!force)
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	else
+		cpu = cpumask_first(mask_val);
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (!irqd_irq_disabled(d)) {
+		plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	}
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
 static struct irq_chip plic_chip = {
 	.name		= "SiFive PLIC",
 	/*
@@ -122,6 +150,9 @@ static struct irq_chip plic_chip = {
 	 */
 	.irq_enable	= plic_irq_enable,
 	.irq_disable	= plic_irq_disable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = plic_set_affinity,
+#endif
 };
 
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,