@@ -26,7 +26,6 @@ static bool_t tasklets_initialised;
DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);
static DEFINE_PER_CPU(struct list_head, tasklet_list);
-static DEFINE_PER_CPU(struct list_head, softirq_tasklet_list);
/* Protects all lists and tasklet structures. */
static DEFINE_SPINLOCK(tasklet_lock);
@@ -56,7 +55,7 @@ static void percpu_tasklet_feed(void *arg)
dst_list = &__get_cpu_var(softirq_list);
list_add_tail(&t->list, dst_list);
}
- raise_softirq(TASKLET_SOFTIRQ_PERCPU);
+ raise_softirq(TASKLET_SOFTIRQ);
out:
spin_unlock_irqrestore(&feeder_lock, flags);
}
@@ -89,18 +88,14 @@ static void tasklet_enqueue(struct tasklet *t)
list = &__get_cpu_var(softirq_list);
list_add_tail(&t->list, list);
- raise_softirq(TASKLET_SOFTIRQ_PERCPU);
+ raise_softirq(TASKLET_SOFTIRQ);
local_irq_restore(flags);
return;
}
if ( t->is_softirq )
{
- struct list_head *list = &per_cpu(softirq_tasklet_list, cpu);
- bool_t was_empty = list_empty(list);
- list_add_tail(&t->list, list);
- if ( was_empty )
- cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ BUG();
}
else
{
@@ -218,7 +213,7 @@ void do_tasklet_work_percpu(void)
t->func(t->data);
tasklet_unlock(t);
if ( poke )
- raise_softirq(TASKLET_SOFTIRQ_PERCPU);
+ raise_softirq(TASKLET_SOFTIRQ);
/* We could reinit the t->list but tasklet_enqueue does it for us. */
return;
}
@@ -228,7 +223,7 @@ void do_tasklet_work_percpu(void)
INIT_LIST_HEAD(&t->list);
list_add_tail(&t->list, &__get_cpu_var(softirq_list));
smp_wmb();
- raise_softirq(TASKLET_SOFTIRQ_PERCPU);
+ raise_softirq(TASKLET_SOFTIRQ);
local_irq_enable();
}
@@ -259,24 +254,9 @@ void do_tasklet(void)
spin_unlock_irq(&tasklet_lock);
}
-/* Softirq context work */
-static void tasklet_softirq_action(void)
-{
- unsigned int cpu = smp_processor_id();
- struct list_head *list = &per_cpu(softirq_tasklet_list, cpu);
-
- spin_lock_irq(&tasklet_lock);
-
- do_tasklet_work(cpu, list);
-
- if ( !list_empty(list) && !cpu_is_offline(cpu) )
- raise_softirq(TASKLET_SOFTIRQ);
-
- spin_unlock_irq(&tasklet_lock);
-}
/* Per CPU softirq context work. */
-static void tasklet_softirq_percpu_action(void)
+static void tasklet_softirq_action(void)
{
do_tasklet_work_percpu();
}
@@ -365,14 +345,12 @@ static int cpu_callback(
{
case CPU_UP_PREPARE:
INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
- INIT_LIST_HEAD(&per_cpu(softirq_tasklet_list, cpu));
INIT_LIST_HEAD(&per_cpu(softirq_list, cpu));
INIT_LIST_HEAD(&per_cpu(tasklet_feeder, cpu));
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
migrate_tasklets_from_cpu(cpu, &per_cpu(tasklet_list, cpu));
- migrate_tasklets_from_cpu(cpu, &per_cpu(softirq_tasklet_list, cpu));
migrate_tasklets_from_cpu(cpu, &per_cpu(softirq_list, cpu));
migrate_tasklets_from_cpu(cpu, &per_cpu(tasklet_feeder, cpu));
break;
@@ -394,7 +372,6 @@ void __init tasklet_subsys_init(void)
cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
register_cpu_notifier(&cpu_nfb);
open_softirq(TASKLET_SOFTIRQ, tasklet_softirq_action);
- open_softirq(TASKLET_SOFTIRQ_PERCPU, tasklet_softirq_percpu_action);
tasklets_initialised = 1;
}
@@ -7,7 +7,6 @@ enum {
SCHEDULE_SOFTIRQ,
NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
RCU_SOFTIRQ,
- TASKLET_SOFTIRQ_PERCPU,
TASKLET_SOFTIRQ,
NR_COMMON_SOFTIRQS
};
@@ -34,7 +34,7 @@ struct tasklet
#define DECLARE_TASKLET(name, func, data) \
_DECLARE_TASKLET(name, func, data, 0, 0)
#define DECLARE_SOFTIRQ_TASKLET(name, func, data) \
- _DECLARE_TASKLET(name, func, data, 1, 0)
+ _DECLARE_TASKLET(name, func, data, 1, 1)
/* Indicates status of tasklet work on each CPU. */
DECLARE_PER_CPU(unsigned long, tasklet_work_to_do);
With the new percpu tasklet (see "tasklet: Introduce per-cpu tasklet." and
"tasklet: Add cross CPU feeding of per-cpu-tasklets") we now have in place a
working version of per-cpu softirq tasklets. We can therefore remove the old
implementation of the softirq tasklet. We also remove the temporary
scaffolding of TASKLET_SOFTIRQ_PERCPU. Further code removal will be done in
"tasklet: Remove the old scaffolding" once the schedule tasklet code is in.

This could have been squashed into "tasklet: Introduce per-cpu tasklet for
softirq.", but the author thought that splitting these parts out would make
the code easier to understand.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
RFC: First version
v1: Posted; reviewers asked whether ticket locks fixed it.
v2: Intel confirmed at XPDS 2016 that the problem is still present with large guests.

Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: "Lan, Tianyu" <tianyu.lan@intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tim Deegan <tim@xen.org>
Cc: Wei Liu <wei.liu2@citrix.com>
---
 xen/common/tasklet.c      | 35 ++++++-----------------------------
 xen/include/xen/softirq.h |  1 -
 xen/include/xen/tasklet.h |  2 +-
 3 files changed, 7 insertions(+), 31 deletions(-)