xenbits.xensource.com Git - xen.git/commitdiff
Change tasklet implementation so that tasklet_kill() does not have to
busy-wait for softirq work to start. This reduces the possibility of
deadlocks, but the implementation is now less efficient. For the
current few users of tasklets this does not matter.

author:    Keir Fraser <keir.fraser@citrix.com>  Tue, 15 Apr 2008 09:19:58 +0000 (10:19 +0100)
committer: Keir Fraser <keir.fraser@citrix.com>  Tue, 15 Apr 2008 09:19:58 +0000 (10:19 +0100)

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/common/softirq.c
xen/include/xen/softirq.h

index c3618eb7caade0d7d526b7e1f0945a3557a110d7..1888d1711280fe5604725299ba8b63a07a3df5b4 100644 (file)
@@ -52,65 +52,76 @@ void open_softirq(int nr, softirq_handler handler)
     softirq_handlers[nr] = handler;
 }
 
-static DEFINE_PER_CPU(struct tasklet *, tasklet_list);
+static LIST_HEAD(tasklet_list);
+static DEFINE_SPINLOCK(tasklet_lock);
 
 void tasklet_schedule(struct tasklet *t)
 {
     unsigned long flags;
 
-    if ( test_and_set_bool(t->is_scheduled) )
-        return;
+    spin_lock_irqsave(&tasklet_lock, flags);
 
-    local_irq_save(flags);
-    t->next = this_cpu(tasklet_list);
-    this_cpu(tasklet_list) = t;
-    local_irq_restore(flags);
+    if ( !t->is_scheduled )
+    {
+        list_add(&t->list, &tasklet_list);
+        t->is_scheduled = 1;
+    }
+
+    spin_unlock_irqrestore(&tasklet_lock, flags);
 
     raise_softirq(TASKLET_SOFTIRQ);
 }
 
 static void tasklet_action(void)
 {
-    struct tasklet *list, *t;
+    struct tasklet *t;
 
-    local_irq_disable();
-    list = this_cpu(tasklet_list);
-    this_cpu(tasklet_list) = NULL;
-    local_irq_enable();
+    spin_lock_irq(&tasklet_lock);
 
-    while ( (t = list) != NULL )
+    while ( !list_empty(&tasklet_list) )
     {
-        list = list->next;
-
-        BUG_ON(t->is_running);
-        t->is_running = 1;
-        smp_wmb();
+        t = list_entry(tasklet_list.next, struct tasklet, list);
+        list_del(&t->list);
 
         BUG_ON(!t->is_scheduled);
         t->is_scheduled = 0;
 
-        smp_mb();
+        BUG_ON(t->is_running);
+        t->is_running = 1;
+
+        spin_unlock_irq(&tasklet_lock);
         t->func(t->data);
-        smp_mb();
+        spin_lock_irq(&tasklet_lock);
 
         t->is_running = 0;
     }
+
+    spin_unlock_irq(&tasklet_lock);
 }
 
 void tasklet_kill(struct tasklet *t)
 {
-    /* Prevent tasklet from re-scheduling itself. */
-    while ( t->is_scheduled || test_and_set_bool(t->is_scheduled) )
-        cpu_relax();
-    smp_mb();
+    unsigned long flags;
+
+    spin_lock_irqsave(&tasklet_lock, flags);
+
+    /* De-schedule the tasklet and prevent it from re-scheduling itself. */
+    if ( !list_empty(&t->list) )
+        list_del(&t->list);
+    t->is_scheduled = 1;
 
     /* Wait for tasklet to complete. */
     while ( t->is_running )
+    {
+        spin_unlock_irqrestore(&tasklet_lock, flags);
         cpu_relax();
-    smp_mb();
+        spin_lock_irqsave(&tasklet_lock, flags);
+    }
 
     /* Clean up and we're done. */
     t->is_scheduled = 0;
+
+    spin_unlock_irqrestore(&tasklet_lock, flags);
 }
 
 void tasklet_init(
index dbf460626135e8866adbc30d24fec5186eb24c9d..4fa7a39b0cbe713626fb00f2c96f355e125e4975 100644 (file)
@@ -59,7 +59,7 @@ static inline void raise_softirq(unsigned int nr)
  */
 struct tasklet
 {
-    struct tasklet *next;
+    struct list_head list;
     bool_t is_scheduled;
     bool_t is_running;
     void (*func)(unsigned long);
@@ -67,7 +67,7 @@ struct tasklet
 };
 
 #define DECLARE_TASKLET(name, func, data) \
-    struct tasklet name = { NULL, 0, 0, func, data }
+    struct tasklet name = { LIST_HEAD_INIT(name.list), 0, 0, func, data }
 
 void tasklet_schedule(struct tasklet *t);
 void tasklet_kill(struct tasklet *t);