
[v2,4/4] irqchip: sifive-plic: Implement irq_set_affinity() for SMP host

Message ID 20181127100317.12809-5-anup@brainfault.org (mailing list archive)
State New, archived
Series IRQ affinity support in PLIC driver

Commit Message

Anup Patel Nov. 27, 2018, 10:03 a.m. UTC
Currently on an SMP host, all CPUs take external interrupts routed via
the PLIC. All CPUs will try to claim a given external interrupt, but only
one of them will succeed while the other CPUs simply resume whatever
they were doing before. This means that if we have N CPUs, then for every
external interrupt N-1 CPUs will always fail to claim it and waste
CPU time.
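
For context, a simplified sketch of the claim loop described above,
modeled on the driver's existing plic_handle_irq() (the symbols
plic_handlers, hart_base, CONTEXT_CLAIM and plic_irqdomain come from
irq-sifive-plic.c; the function itself is illustrative and not part of
this patch):

/*
 * Every CPU that takes the external interrupt reads its context's
 * claim register. The PLIC hands the hwirq to exactly one reader and
 * returns 0 to all the others, so with all harts enabled N-1 CPUs
 * trap in and out of here for nothing.
 */
static void plic_claim_loop_sketch(void)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	while ((hwirq = readl(claim))) {
		int irq = irq_find_mapping(plic_irqdomain, hwirq);

		if (irq > 0)
			generic_handle_irq(irq);
		/* Completion write lets the PLIC raise this source again. */
		writel(hwirq, claim);
	}
}

Restricting the enable bits to a single hart, as this patch does, means
only that hart ever runs this loop for a given interrupt source.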

Instead of the above, external interrupts should be taken by only one
CPU, and we should have a provision to explicitly specify the IRQ
affinity from kernel space or user space.

This patch provides an irq_set_affinity() implementation for the PLIC
driver. It also updates irq_enable() such that a PLIC interrupt is
enabled only for one of the CPUs specified in its IRQ affinity mask.

With this patch in place, we can change the IRQ affinity at any time
from user space using procfs.

Example:

/ # cat /proc/interrupts
           CPU0       CPU1       CPU2       CPU3
  8:         44          0          0          0  SiFive PLIC   8  virtio0
 10:         48          0          0          0  SiFive PLIC  10  ttyS0
IPI0:        55        663         58        363  Rescheduling interrupts
IPI1:         0          1          3         16  Function call interrupts
/ #
/ #
/ # echo 4 > /proc/irq/10/smp_affinity
/ #
/ # cat /proc/interrupts
           CPU0       CPU1       CPU2       CPU3
  8:         45          0          0          0  SiFive PLIC   8  virtio0
 10:        160          0         17          0  SiFive PLIC  10  ttyS0
IPI0:        68        693         77        410  Rescheduling interrupts
IPI1:         0          2          3         16  Function call interrupts

Signed-off-by: Anup Patel <anup@brainfault.org>
---
 drivers/irqchip/irq-sifive-plic.c | 35 +++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)

Comments

Atish Patra Nov. 30, 2018, 5:59 a.m. UTC | #1
On 11/27/18 2:04 AM, Anup Patel wrote:
> Currently on SMP host, all CPUs take external interrupts routed via
> PLIC. All CPUs will try to claim a given external interrupt but only
> one of them will succeed while other CPUs would simply resume whatever
> they were doing before. This means if we have N CPUs then for every
> external interrupt N-1 CPUs will always fail to claim it and waste
> their CPU time.
> 
> Instead of above, external interrupts should be taken by only one CPU
> and we should have provision to explicity specify IRQ affinity from
s/explicity/explicitly

> kernel-space or user-space.
> 
> This patch provides irq_set_affinity() implementation for PLIC driver.
> It also updates irq_enable() such that PLIC interrupts are only enabled
> for one of CPUs specified in IRQ affinity mask.
> 
> With this patch in-place, we can change IRQ affinity at any-time from
> user-space using procfs.
> 
> Example:
> 
> / # cat /proc/interrupts
>             CPU0       CPU1       CPU2       CPU3
>    8:         44          0          0          0  SiFive PLIC   8  virtio0
>   10:         48          0          0          0  SiFive PLIC  10  ttyS0
> IPI0:        55        663         58        363  Rescheduling interrupts
> IPI1:         0          1          3         16  Function call interrupts
> / #
> / #
> / # echo 4 > /proc/irq/10/smp_affinity
> / #
> / # cat /proc/interrupts
>             CPU0       CPU1       CPU2       CPU3
>    8:         45          0          0          0  SiFive PLIC   8  virtio0
>   10:        160          0         17          0  SiFive PLIC  10  ttyS0
> IPI0:        68        693         77        410  Rescheduling interrupts
> IPI1:         0          2          3         16  Function call interrupts
> 
> Signed-off-by: Anup Patel <anup@brainfault.org>
> ---
>   drivers/irqchip/irq-sifive-plic.c | 35 +++++++++++++++++++++++++++++--
>   1 file changed, 33 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
> index ffd4deaca057..fec7da3797fa 100644
> --- a/drivers/irqchip/irq-sifive-plic.c
> +++ b/drivers/irqchip/irq-sifive-plic.c
> @@ -98,14 +98,42 @@ static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)
>   
>   static void plic_irq_enable(struct irq_data *d)
>   {
> -	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 1);
> +	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
> +					   cpu_online_mask);
> +	WARN_ON(cpu >= nr_cpu_ids);
> +	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
>   }
>   
>   static void plic_irq_disable(struct irq_data *d)
>   {
> -	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 0);
> +	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
>   }
>   
> +#ifdef CONFIG_SMP
> +static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
> +			    bool force)
> +{
> +	unsigned int cpu;
> +
> +	if (!force)
> +		cpu = cpumask_any_and(mask_val, cpu_online_mask);
> +	else
> +		cpu = cpumask_first(mask_val);
> +
> +	if (cpu >= nr_cpu_ids)
> +		return -EINVAL;
> +
> +	if (!irqd_irq_disabled(d)) {
> +		plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
> +		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);

The irq is disabled for a fraction of time on the target cpu as well.
You can use cpumask_andnot to avoid that.
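
Something along these lines (an untested sketch against the patch's
plic_irq_toggle(); the on-stack cpumask is for brevity only, a real
patch would likely use a cpumask_var_t) would keep the interrupt
enabled somewhere for the whole move:

	if (!irqd_irq_disabled(d)) {
		struct cpumask tmp;

		/* Enable on the new target first, so the interrupt is
		 * never fully masked while it moves... */
		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);

		/* ...then clear the enable bit on every other CPU. */
		cpumask_andnot(&tmp, cpu_possible_mask, cpumask_of(cpu));
		plic_irq_toggle(&tmp, d->hwirq, 0);
	}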


Moreover, something is weird here. I tested the patch on the Unleashed
board with a debug statement.

Here are the cpumasks plic_set_affinity receives.

# echo 0 > /proc[  280.810000] plic: plic_set_affinity: set affinity [0-1]
[  280.810000] plic: plic_set_affinity: cpu = [0] irq = 4
# echo 1 > /proc[  286.290000] plic: plic_set_affinity: set affinity [0]
[  286.290000] plic: plic_set_affinity: cpu = [0] irq = 4
# echo 2 > /proc[  292.130000] plic: plic_set_affinity: set affinity [1]
[  292.130000] plic: plic_set_affinity: cpu = [1] irq = 4
# echo 3 > /proc[  297.750000] plic: plic_set_affinity: set affinity [0-1]
[  297.750000] plic: plic_set_affinity: cpu = [0] irq = 4

# echo 2 > /proc/irq/4/smp_affinity
[  322.850000] plic: plic_set_affinity: set affinity [1]
[  322.850000] plic: plic_set_affinity: cpu = [1] irq = 4

I have not figured out why it receives those cpu masks for 0 & 3.
Not sure if the logical cpu id to hart id mapping is responsible for the
other two cases. I will continue to test tomorrow.

Regards,
Atish
> +	}
> +

> +	irq_data_update_effective_affinity(d, cpumask_of(cpu));
> +
> +	return IRQ_SET_MASK_OK_DONE;
> +}
> +#endif
> +
>   static struct irq_chip plic_chip = {
>   	.name		= "SiFive PLIC",
>   	/*
> @@ -114,6 +142,9 @@ static struct irq_chip plic_chip = {
>   	 */
>   	.irq_enable	= plic_irq_enable,
>   	.irq_disable	= plic_irq_disable,
> +#ifdef CONFIG_SMP
> +	.irq_set_affinity = plic_set_affinity,
> +#endif
>   };
>   
>   static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
>
Anup Patel Nov. 30, 2018, 7:51 a.m. UTC | #2
On Fri, Nov 30, 2018 at 11:29 AM Atish Patra <atish.patra@wdc.com> wrote:
>
> On 11/27/18 2:04 AM, Anup Patel wrote:
> > Currently on SMP host, all CPUs take external interrupts routed via
> > PLIC. All CPUs will try to claim a given external interrupt but only
> > one of them will succeed while other CPUs would simply resume whatever
> > they were doing before. This means if we have N CPUs then for every
> > external interrupt N-1 CPUs will always fail to claim it and waste
> > their CPU time.
> >
> > Instead of above, external interrupts should be taken by only one CPU
> > and we should have provision to explicity specify IRQ affinity from
> s/explicity/explicitly

Sure, I will update it.

>
> > kernel-space or user-space.
> >
> > This patch provides irq_set_affinity() implementation for PLIC driver.
> > It also updates irq_enable() such that PLIC interrupts are only enabled
> > for one of CPUs specified in IRQ affinity mask.
> >
> > With this patch in-place, we can change IRQ affinity at any-time from
> > user-space using procfs.
> >
> > Example:
> >
> > / # cat /proc/interrupts
> >             CPU0       CPU1       CPU2       CPU3
> >    8:         44          0          0          0  SiFive PLIC   8  virtio0
> >   10:         48          0          0          0  SiFive PLIC  10  ttyS0
> > IPI0:        55        663         58        363  Rescheduling interrupts
> > IPI1:         0          1          3         16  Function call interrupts
> > / #
> > / #
> > / # echo 4 > /proc/irq/10/smp_affinity
> > / #
> > / # cat /proc/interrupts
> >             CPU0       CPU1       CPU2       CPU3
> >    8:         45          0          0          0  SiFive PLIC   8  virtio0
> >   10:        160          0         17          0  SiFive PLIC  10  ttyS0
> > IPI0:        68        693         77        410  Rescheduling interrupts
> > IPI1:         0          2          3         16  Function call interrupts
> >
> > Signed-off-by: Anup Patel <anup@brainfault.org>
> > ---
> >   drivers/irqchip/irq-sifive-plic.c | 35 +++++++++++++++++++++++++++++--
> >   1 file changed, 33 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
> > index ffd4deaca057..fec7da3797fa 100644
> > --- a/drivers/irqchip/irq-sifive-plic.c
> > +++ b/drivers/irqchip/irq-sifive-plic.c
> > @@ -98,14 +98,42 @@ static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)
> >
> >   static void plic_irq_enable(struct irq_data *d)
> >   {
> > -     plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 1);
> > +     unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
> > +                                        cpu_online_mask);
> > +     WARN_ON(cpu >= nr_cpu_ids);
> > +     plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
> >   }
> >
> >   static void plic_irq_disable(struct irq_data *d)
> >   {
> > -     plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 0);
> > +     plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
> >   }
> >
> > +#ifdef CONFIG_SMP
> > +static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
> > +                         bool force)
> > +{
> > +     unsigned int cpu;
> > +
> > +     if (!force)
> > +             cpu = cpumask_any_and(mask_val, cpu_online_mask);
> > +     else
> > +             cpu = cpumask_first(mask_val);
> > +
> > +     if (cpu >= nr_cpu_ids)
> > +             return -EINVAL;
> > +
> > +     if (!irqd_irq_disabled(d)) {
> > +             plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
> > +             plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
>
> The irq is disabled for a fraction of time on the target cpu as well.
> You can use cpumask_andnot to avoid that.
>
>
> Moreover, something is weird here. I tested the patch on the Unleashed
> board with a debug statement.
>
> Here are the cpumasks plic_set_affinity receives.

The smp_affinity file in procfs takes a hex CPU bitmask as input; each
set bit N selects CPUN.

1 = CPU0
2 = CPU1
3 = CPU0-1
4 = CPU2
... and so on ...
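
For reference, a tiny stand-alone C program (illustrative only, not
part of the patch) that decodes such a mask the same way:

#include <stdio.h>

/* Decode a /proc/irq/N/smp_affinity hex bitmask: set bit N => CPUN. */
int main(void)
{
	unsigned long mask = 0x4;	/* "echo 4" sets bit 2, i.e. CPU2 */
	unsigned int cpu;

	for (cpu = 0; cpu < 8 * sizeof(mask); cpu++)
		if (mask & (1UL << cpu))
			printf("CPU%u\n", cpu);
	return 0;
}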

>
> # echo 0 > /proc[  280.810000] plic: plic_set_affinity: set affinity [0-1]
> [  280.810000] plic: plic_set_affinity: cpu = [0] irq = 4

OK, this is strange.

> # echo 1 > /proc[  286.290000] plic: plic_set_affinity: set affinity [0]
> [  286.290000] plic: plic_set_affinity: cpu = [0] irq = 4

This is correct.

> # echo 2 > /proc[  292.130000] plic: plic_set_affinity: set affinity [1]
> [  292.130000] plic: plic_set_affinity: cpu = [1] irq = 4

This is correct.

> # echo 3 > /proc[  297.750000] plic: plic_set_affinity: set affinity [0-1]
> [  297.750000] plic: plic_set_affinity: cpu = [0] irq = 4

This is correct.

>
> # echo 2 > /proc/irq/4/smp_affinity
> [  322.850000] plic: plic_set_affinity: set affinity [1]
> [  322.850000] plic: plic_set_affinity: cpu = [1] irq = 4

This is correct.

>
> I have not figured out why it receives those cpu masks for 0 & 3.
> Not sure if the logical cpu id to hart id mapping is responsible for the
> other two cases. I will continue to test tomorrow.

Except for the value '0', all cases are correct.

Regards,
Anup
Atish Patra Nov. 30, 2018, 7:54 a.m. UTC | #3
On 11/29/18 9:59 PM, Atish Patra wrote:
> On 11/27/18 2:04 AM, Anup Patel wrote:
>> Currently on SMP host, all CPUs take external interrupts routed via
>> PLIC. All CPUs will try to claim a given external interrupt but only
>> one of them will succeed while other CPUs would simply resume whatever
>> they were doing before. This means if we have N CPUs then for every
>> external interrupt N-1 CPUs will always fail to claim it and waste
>> their CPU time.
>>
>> Instead of above, external interrupts should be taken by only one CPU
>> and we should have provision to explicity specify IRQ affinity from
> s/explicity/explicitly
> 
>> kernel-space or user-space.
>>
>> This patch provides irq_set_affinity() implementation for PLIC driver.
>> It also updates irq_enable() such that PLIC interrupts are only enabled
>> for one of CPUs specified in IRQ affinity mask.
>>
>> With this patch in-place, we can change IRQ affinity at any-time from
>> user-space using procfs.
>>
>> Example:
>>
>> / # cat /proc/interrupts
>>              CPU0       CPU1       CPU2       CPU3
>>     8:         44          0          0          0  SiFive PLIC   8  virtio0
>>    10:         48          0          0          0  SiFive PLIC  10  ttyS0
>> IPI0:        55        663         58        363  Rescheduling interrupts
>> IPI1:         0          1          3         16  Function call interrupts
>> / #
>> / #
>> / # echo 4 > /proc/irq/10/smp_affinity
>> / #
>> / # cat /proc/interrupts
>>              CPU0       CPU1       CPU2       CPU3
>>     8:         45          0          0          0  SiFive PLIC   8  virtio0
>>    10:        160          0         17          0  SiFive PLIC  10  ttyS0
>> IPI0:        68        693         77        410  Rescheduling interrupts
>> IPI1:         0          2          3         16  Function call interrupts
>>
>> Signed-off-by: Anup Patel <anup@brainfault.org>
>> ---
>>    drivers/irqchip/irq-sifive-plic.c | 35 +++++++++++++++++++++++++++++--
>>    1 file changed, 33 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
>> index ffd4deaca057..fec7da3797fa 100644
>> --- a/drivers/irqchip/irq-sifive-plic.c
>> +++ b/drivers/irqchip/irq-sifive-plic.c
>> @@ -98,14 +98,42 @@ static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)
>>    
>>    static void plic_irq_enable(struct irq_data *d)
>>    {
>> -	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 1);
>> +	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
>> +					   cpu_online_mask);
>> +	WARN_ON(cpu >= nr_cpu_ids);
>> +	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
>>    }
>>    
>>    static void plic_irq_disable(struct irq_data *d)
>>    {
>> -	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 0);
>> +	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
>>    }
>>    
>> +#ifdef CONFIG_SMP
>> +static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
>> +			    bool force)
>> +{
>> +	unsigned int cpu;
>> +
>> +	if (!force)
>> +		cpu = cpumask_any_and(mask_val, cpu_online_mask);
>> +	else
>> +		cpu = cpumask_first(mask_val);
>> +
>> +	if (cpu >= nr_cpu_ids)
>> +		return -EINVAL;
>> +
>> +	if (!irqd_irq_disabled(d)) {
>> +		plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
>> +		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
> 
> The irq is disabled for a fraction of time on the target cpu as well.
> You can use cpumask_andnot to avoid that.
> 
> 
> Moreover, something is weird here. I tested the patch on the Unleashed
> board with a debug statement.
> 
> Here are the cpumasks plic_set_affinity receives.
> 
> # echo 0 > /proc[  280.810000] plic: plic_set_affinity: set affinity [0-1]
> [  280.810000] plic: plic_set_affinity: cpu = [0] irq = 4
> # echo 1 > /proc[  286.290000] plic: plic_set_affinity: set affinity [0]
> [  286.290000] plic: plic_set_affinity: cpu = [0] irq = 4
> # echo 2 > /proc[  292.130000] plic: plic_set_affinity: set affinity [1]
> [  292.130000] plic: plic_set_affinity: cpu = [1] irq = 4
> # echo 3 > /proc[  297.750000] plic: plic_set_affinity: set affinity [0-1]
> [  297.750000] plic: plic_set_affinity: cpu = [0] irq = 4
> 
> # echo 2 > /proc/irq/4/smp_affinity
> [  322.850000] plic: plic_set_affinity: set affinity [1]
> [  322.850000] plic: plic_set_affinity: cpu = [1] irq = 4
> 
> I have not figured out why it receives those cpu masks for 0 & 3.
> Not sure if the logical cpu id to hart id mapping is responsible for the
> other two cases. I will continue to test tomorrow.
> 

Never mind.
The input is in hex, which explains the cpumasks in all cases except 0.
If we pass zero, it just passes along the previous affinity mask.

Regards,
Atish
> Regards,
> Atish
>> +	}
>> +
> 
>> +	irq_data_update_effective_affinity(d, cpumask_of(cpu));
>> +
>> +	return IRQ_SET_MASK_OK_DONE;
>> +}
>> +#endif
>> +
>>    static struct irq_chip plic_chip = {
>>    	.name		= "SiFive PLIC",
>>    	/*
>> @@ -114,6 +142,9 @@ static struct irq_chip plic_chip = {
>>    	 */
>>    	.irq_enable	= plic_irq_enable,
>>    	.irq_disable	= plic_irq_disable,
>> +#ifdef CONFIG_SMP
>> +	.irq_set_affinity = plic_set_affinity,
>> +#endif
>>    };
>>    
>>    static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
>>
> 
>

Patch

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index ffd4deaca057..fec7da3797fa 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -98,14 +98,42 @@  static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)
 
 static void plic_irq_enable(struct irq_data *d)
 {
-	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 1);
+	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+					   cpu_online_mask);
+	WARN_ON(cpu >= nr_cpu_ids);
+	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 }
 
 static void plic_irq_disable(struct irq_data *d)
 {
-	plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 0);
+	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
 }
 
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	unsigned int cpu;
+
+	if (!force)
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	else
+		cpu = cpumask_first(mask_val);
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (!irqd_irq_disabled(d)) {
+		plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	}
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
 static struct irq_chip plic_chip = {
 	.name		= "SiFive PLIC",
 	/*
@@ -114,6 +142,9 @@  static struct irq_chip plic_chip = {
 	 */
 	.irq_enable	= plic_irq_enable,
 	.irq_disable	= plic_irq_disable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = plic_set_affinity,
+#endif
 };
 
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,