--- a/xen/common/tasklet.c
+++ b/xen/common/tasklet.c
@@ -49,7 +49,6 @@ static void percpu_tasklet_feed(void *arg)
while ( !list_empty(list) )
{
t = list_entry(list->next, struct tasklet, list);
- BUG_ON(!t->is_percpu);
list_del(&t->list);
if ( t->is_softirq )
@@ -76,59 +75,44 @@ out:
static void tasklet_enqueue(struct tasklet *t)
{
unsigned int cpu = t->scheduled_on;
+ unsigned long flags;
+ struct list_head *list;
- if ( t->is_percpu )
- {
- unsigned long flags;
- struct list_head *list;
-
- INIT_LIST_HEAD(&t->list);
-
- if ( cpu != smp_processor_id() )
- {
- spin_lock_irqsave(&feeder_lock, flags);
-
- list = &per_cpu(tasklet_feeder, cpu);
- list_add_tail(&t->list, list);
-
- spin_unlock_irqrestore(&feeder_lock, flags);
- on_selected_cpus(cpumask_of(cpu), percpu_tasklet_feed, NULL, 1);
- return;
- }
- if ( t->is_softirq )
- {
-
- local_irq_save(flags);
-
- list = &__get_cpu_var(softirq_list);
- list_add_tail(&t->list, list);
- raise_softirq(TASKLET_SOFTIRQ);
+ INIT_LIST_HEAD(&t->list);
- local_irq_restore(flags);
- return;
- }
- else
- {
- unsigned long *work_to_do = &__get_cpu_var(tasklet_work_to_do);
+ if ( cpu != smp_processor_id() )
+ {
+ spin_lock_irqsave(&feeder_lock, flags);
- local_irq_save(flags);
+ list = &per_cpu(tasklet_feeder, cpu);
+ list_add_tail(&t->list, list);
- list = &__get_cpu_var(tasklet_list);
- list_add_tail(&t->list, list);
- if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
- raise_softirq(SCHEDULE_SOFTIRQ);
+ spin_unlock_irqrestore(&feeder_lock, flags);
+ on_selected_cpus(cpumask_of(cpu), percpu_tasklet_feed, NULL, 1);
+ return;
+ }
+ if ( t->is_softirq )
+ {
+ local_irq_save(flags);
+
+ list = &__get_cpu_var(softirq_list);
+ list_add_tail(&t->list, list);
+ raise_softirq(TASKLET_SOFTIRQ);
+
+ local_irq_restore(flags);
+ }
+ else
+ {
+ unsigned long *work_to_do = &__get_cpu_var(tasklet_work_to_do);
+
+ local_irq_save(flags);
+
+ list = &__get_cpu_var(tasklet_list);
+ list_add_tail(&t->list, list);
+ if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
+ raise_softirq(SCHEDULE_SOFTIRQ);
- local_irq_restore(flags);
- return;
- }
- }
- if ( t->is_softirq )
- {
- BUG();
- }
- else
- {
- BUG();
+ local_irq_restore(flags);
}
}
@@ -137,16 +121,11 @@ void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
if ( !tasklets_initialised || t->is_dead )
return;
- if ( t->is_percpu )
+ if ( !test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
{
- if ( !test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
- {
- t->scheduled_on = cpu;
- tasklet_enqueue(t);
- }
- return;
+ t->scheduled_on = cpu;
+ tasklet_enqueue(t);
}
- BUG();
}
void tasklet_schedule(struct tasklet *t)
@@ -306,19 +285,15 @@ static void tasklet_softirq_action(void)
void tasklet_kill(struct tasklet *t)
{
- if ( t->is_percpu )
+ while ( test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
{
- while ( test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
- {
- do {
+ do {
process_pending_softirqs();
- } while ( test_bit(TASKLET_STATE_SCHED, &t->state) );
- }
- tasklet_unlock_wait(t);
- clear_bit(TASKLET_STATE_SCHED, &t->state);
- t->is_dead = 1;
- return;
+ } while ( test_bit(TASKLET_STATE_SCHED, &t->state) );
}
+ tasklet_unlock_wait(t);
+ clear_bit(TASKLET_STATE_SCHED, &t->state);
+ t->is_dead = 1;
}
static void migrate_tasklets_from_cpu(unsigned int cpu, struct list_head *list)
@@ -348,7 +323,6 @@ void tasklet_init(
t->scheduled_on = -1;
t->func = func;
t->data = data;
- t->is_percpu = 1;
}
void softirq_tasklet_init(
--- a/xen/include/xen/tasklet.h
+++ b/xen/include/xen/tasklet.h
@@ -22,19 +22,18 @@ struct tasklet
bool_t is_softirq;
bool_t is_running;
bool_t is_dead;
- bool_t is_percpu;
void (*func)(unsigned long);
unsigned long data;
};
-#define _DECLARE_TASKLET(name, func, data, softirq, percpu) \
+#define _DECLARE_TASKLET(name, func, data, softirq) \
struct tasklet name = { \
- LIST_HEAD_INIT(name.list), 0, -1, softirq, 0, 0, percpu, \
+ LIST_HEAD_INIT(name.list), 0, -1, softirq, 0, 0, \
func, data }
#define DECLARE_TASKLET(name, func, data) \
- _DECLARE_TASKLET(name, func, data, 0, 0)
+ _DECLARE_TASKLET(name, func, data, 0)
#define DECLARE_SOFTIRQ_TASKLET(name, func, data) \
- _DECLARE_TASKLET(name, func, data, 1, 1)
+ _DECLARE_TASKLET(name, func, data, 1)
/* Indicates status of tasklet work on each CPU. */
DECLARE_PER_CPU(unsigned long, tasklet_work_to_do);
To keep any issues bisectable, we had been replacing parts of the
tasklet code one piece of functionality at a time, each layered on top
of the previous one. Now that all of it is per-cpu and working, we can
remove the old scaffolding and collapse the functions. We also remove
the 'is_percpu' flag, which is no longer needed.

Most of this is code deletion and code motion. No new functionality is
added.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
RFC: First version
v1: Posted, folks asked if ticketlocks fixed it.
v2: Intel confirmed at XPDS 2016 that the problem is still present with
    large guests.

Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: "Lan, Tianyu" <tianyu.lan@intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tim Deegan <tim@xen.org>
Cc: Wei Liu <wei.liu2@citrix.com>
---
 xen/common/tasklet.c      | 110 ++++++++++++++++++----------------------------
 xen/include/xen/tasklet.h |   9 ++--
 2 files changed, 46 insertions(+), 73 deletions(-)
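
For reviewers less familiar with the tasklet interface, below is a minimal
usage sketch of how the API reads once this series is applied. It is
illustrative only and not part of the patch: the callback, the tasklet name
and the trigger/teardown helpers (example_work, example_tasklet,
example_trigger, example_teardown) are made up for the example; only
DECLARE_SOFTIRQ_TASKLET(), tasklet_schedule_on_cpu() and tasklet_kill()
come from the code touched above.

#include <xen/tasklet.h>

/* Runs in TASKLET_SOFTIRQ context on the CPU the tasklet was scheduled on. */
static void example_work(unsigned long data)
{
    /* ... do the deferred work ... */
}

static DECLARE_SOFTIRQ_TASKLET(example_tasklet, example_work, 0);

static void example_trigger(unsigned int cpu)
{
    /* Sets TASKLET_STATE_SCHED and enqueues on @cpu's per-cpu list
     * (going through the feeder list and an IPI if @cpu is remote). */
    tasklet_schedule_on_cpu(&example_tasklet, cpu);
}

static void example_teardown(void)
{
    /* Waits for any pending/running instance to finish, then marks the
     * tasklet dead so it cannot be scheduled again. */
    tasklet_kill(&example_tasklet);
}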