From a29af79e4ecf404bbd084b7b39cf139b963982ea Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Tue, 15 Apr 2008 10:19:58 +0100
Subject: [PATCH] Change tasklet implementation so that tasklet_kill() does
 not have to busy-wait for softirq work to start. This reduces the
 possibility of deadlocks, but the implementation is now less efficient.
 For the current few users of tasklets this does not matter.

Signed-off-by: Keir Fraser
---
 xen/common/softirq.c      | 61 +++++++++++++++++++++++----------------
 xen/include/xen/softirq.h |  4 +--
 2 files changed, 38 insertions(+), 27 deletions(-)

diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index c3618eb7ca..1888d17112 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -52,65 +52,76 @@ void open_softirq(int nr, softirq_handler handler)
     softirq_handlers[nr] = handler;
 }
 
-static DEFINE_PER_CPU(struct tasklet *, tasklet_list);
+static LIST_HEAD(tasklet_list);
+static DEFINE_SPINLOCK(tasklet_lock);
 
 void tasklet_schedule(struct tasklet *t)
 {
     unsigned long flags;
 
-    if ( test_and_set_bool(t->is_scheduled) )
-        return;
+    spin_lock_irqsave(&tasklet_lock, flags);
 
-    local_irq_save(flags);
-    t->next = this_cpu(tasklet_list);
-    this_cpu(tasklet_list) = t;
-    local_irq_restore(flags);
+    if ( !t->is_scheduled )
+    {
+        list_add(&t->list, &tasklet_list);
+        t->is_scheduled = 1;
+    }
+
+    spin_unlock_irqrestore(&tasklet_lock, flags);
 
     raise_softirq(TASKLET_SOFTIRQ);
 }
 
 static void tasklet_action(void)
 {
-    struct tasklet *list, *t;
+    struct tasklet *t;
 
-    local_irq_disable();
-    list = this_cpu(tasklet_list);
-    this_cpu(tasklet_list) = NULL;
-    local_irq_enable();
+    spin_lock_irq(&tasklet_lock);
 
-    while ( (t = list) != NULL )
+    while ( !list_empty(&tasklet_list) )
     {
-        list = list->next;
-
-        BUG_ON(t->is_running);
-        t->is_running = 1;
-        smp_wmb();
+        t = list_entry(tasklet_list.next, struct tasklet, list);
+        list_del(&t->list);
 
         BUG_ON(!t->is_scheduled);
         t->is_scheduled = 0;
 
-        smp_mb();
+        BUG_ON(t->is_running);
+        t->is_running = 1;
+
+        spin_unlock_irq(&tasklet_lock);
         t->func(t->data);
-        smp_mb();
+        spin_lock_irq(&tasklet_lock);
 
         t->is_running = 0;
     }
+
+    spin_unlock_irq(&tasklet_lock);
 }
 
 void tasklet_kill(struct tasklet *t)
 {
-    /* Prevent tasklet from re-scheduling itself. */
-    while ( t->is_scheduled || test_and_set_bool(t->is_scheduled) )
-        cpu_relax();
-    smp_mb();
+    unsigned long flags;
+
+    spin_lock_irqsave(&tasklet_lock, flags);
+
+    /* De-schedule the tasklet and prevent it from re-scheduling itself. */
+    if ( !list_empty(&t->list) )
+        list_del(&t->list);
+    t->is_scheduled = 1;
 
     /* Wait for tasklet to complete. */
     while ( t->is_running )
+    {
+        spin_unlock_irqrestore(&tasklet_lock, flags);
         cpu_relax();
-    smp_mb();
+        spin_lock_irqsave(&tasklet_lock, flags);
+    }
 
     /* Clean up and we're done. */
     t->is_scheduled = 0;
+
+    spin_unlock_irqrestore(&tasklet_lock, flags);
 }
 
 void tasklet_init(
diff --git a/xen/include/xen/softirq.h b/xen/include/xen/softirq.h
index dbf4606261..4fa7a39b0c 100644
--- a/xen/include/xen/softirq.h
+++ b/xen/include/xen/softirq.h
@@ -59,7 +59,7 @@ static inline void raise_softirq(unsigned int nr)
  */
 struct tasklet
 {
-    struct tasklet *next;
+    struct list_head list;
     bool_t is_scheduled;
     bool_t is_running;
     void (*func)(unsigned long);
@@ -67,7 +67,7 @@ struct tasklet
 };
 
 #define DECLARE_TASKLET(name, func, data) \
-    struct tasklet name = { NULL, 0, 0, func, data }
+    struct tasklet name = { LIST_HEAD_INIT(name.list), 0, 0, func, data }
 
 void tasklet_schedule(struct tasklet *t);
 void tasklet_kill(struct tasklet *t);
-- 
2.39.5
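
For reference, a minimal caller sketch showing the tasklet API as it stands
after this patch. The `my_work` handler and `example()` function are
hypothetical, invented for illustration; only DECLARE_TASKLET,
tasklet_schedule() and tasklet_kill() come from the patch itself:

    #include <xen/softirq.h>

    /* Hypothetical handler, for illustration only. It runs in
     * TASKLET_SOFTIRQ context with tasklet_lock dropped, so it may
     * legitimately call tasklet_schedule() on itself or on others. */
    static void my_work(unsigned long data)
    {
        /* ... deferred work ... */
    }

    /* Static initialisation: LIST_HEAD_INIT() makes the list node point
     * at itself, so the !list_empty(&t->list) check in tasklet_kill()
     * is well defined even for a tasklet that was never scheduled. */
    static DECLARE_TASKLET(my_tasklet, my_work, 0);

    static void example(void)
    {
        /* Safe to call repeatedly: scheduling is a no-op while the
         * tasklet is already pending on tasklet_list. */
        tasklet_schedule(&my_tasklet);

        /* Synchronously de-schedules the tasklet, then spins (dropping
         * the lock around cpu_relax()) until any in-flight run finishes;
         * it must therefore not be called from my_work() itself. */
        tasklet_kill(&my_tasklet);
    }

Note the trade-off the commit message refers to: all CPUs now serialise on
the single tasklet_lock/tasklet_list pair, where the old code used
interrupt-masked per-CPU lists, which is acceptable for the handful of
current users.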