xenbits.xensource.com Git - xen.git/commitdiff
timers: Simplify implementation logic.
author     Keir Fraser <keir.fraser@citrix.com>
           Wed, 18 Aug 2010 13:56:01 +0000 (14:56 +0100)
committer  Keir Fraser <keir.fraser@citrix.com>
           Wed, 18 Aug 2010 13:56:01 +0000 (14:56 +0100)
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/acpi/cpu_idle.c
xen/arch/x86/acpi/cpuidle_menu.c
xen/arch/x86/hpet.c
xen/arch/x86/time.c
xen/common/timer.c
xen/include/xen/timer.h
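In short: the per-cpu timer_deadline_start/timer_deadline_end pair collapses into a single timer_deadline, struct timer loses its expires_end field, struct timers loses its overflow flag (a non-empty overflow list now carries the same information), and each cpu's deadline becomes simply the earliest expiry on its heap or list plus timer_slop, with 0 meaning no pending timer.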

xen/arch/x86/acpi/cpu_idle.c
index 6c79d17f834296de9991e164c554fc7fe8b2af8a..cdf0f492b487e698240584e079399be4d06636d4 100644
@@ -252,7 +252,7 @@ void cpuidle_wakeup_mwait(cpumask_t *mask)
 static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
     unsigned int cpu = smp_processor_id();
-    s_time_t expires = per_cpu(timer_deadline_start, cpu);
+    s_time_t expires = per_cpu(timer_deadline, cpu);
 
     __monitor((void *)&mwait_wakeup(cpu), 0, 0);
     smp_mb();
xen/arch/x86/acpi/cpuidle_menu.c
index 38a5543c00fc4f41e2f0123d121d963faabf9a72..69527766d6b04cc112ae5106586d4ea837da4f63 100644
@@ -173,7 +173,7 @@ static inline s_time_t avg_intr_interval_us(void)
 
 static unsigned int get_sleep_length_us(void)
 {
-    s_time_t us = (this_cpu(timer_deadline_start) - NOW()) / 1000;
+    s_time_t us = (this_cpu(timer_deadline) - NOW()) / 1000;
     /*
      * while us < 0 or us > (u32)-1, return a large u32,
      * choose (unsigned int)-2000 to avoid wrapping while added with exit
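A sketch of the clamp that the (truncated) comment above describes; the exact return expression is an assumption for illustration, not necessarily the literal cpuidle_menu.c code:

/* Sketch (assumption): clamp (deadline - NOW())/1000 into a u32 that can
 * safely have an exit latency added without wrapping. */
static unsigned int sleep_length_us_sketch(void)
{
    s_time_t us = (this_cpu(timer_deadline) - NOW()) / 1000;

    if ( (us < 0) || (us > (unsigned int)-2000) )
        return (unsigned int)-2000;
    return (unsigned int)us;
}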
xen/arch/x86/hpet.c
index addabc7e1ec8cfb01e3f3245be6501aa3f7f2d08..b26f677967eb46a147311201ae76d1eab7dd8e7d 100644
@@ -36,14 +36,14 @@ struct hpet_event_channel
     cpumask_t     cpumask;
     /*
      * cpumask_lock is used to prevent hpet intr handler from accessing other
-     * cpu's timer_deadline_start/end after the other cpu's mask was cleared --
-     * mask cleared means cpu waken up, then accessing timer_deadline_xxx from
+     * cpu's timer_deadline after the other cpu's mask was cleared --
+     * a cleared mask means the cpu has woken up, so accessing timer_deadline from the
      * other cpu is not safe.
      * It is not used for protecting cpumask, so set ops needn't take it.
     * Multiple cpus clearing cpumask simultaneously is ok due to the atomic
     * nature of cpu_clear, so hpet_broadcast_exit() can take the read lock to
     * clear cpumask, while handle_hpet_broadcast() has to take the write lock
-     * for read cpumask & access timer_deadline_xxx.
+     * to read cpumask & access timer_deadline.
      */
     rwlock_t      cpumask_lock;
     spinlock_t    lock;
@@ -212,10 +212,10 @@ again:
 
         if ( cpu_isset(cpu, ch->cpumask) )
         {
-            if ( per_cpu(timer_deadline_start, cpu) <= now )
+            if ( per_cpu(timer_deadline, cpu) <= now )
                 cpu_set(cpu, mask);
-            else if ( per_cpu(timer_deadline_end, cpu) < next_event )
-                next_event = per_cpu(timer_deadline_end, cpu);
+            else if ( per_cpu(timer_deadline, cpu) < next_event )
+                next_event = per_cpu(timer_deadline, cpu);
         }
 
         write_unlock_irq(&ch->cpumask_lock);
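Read together with the cpumask_lock comment in the struct above, the intended protocol is roughly the following sketch (illustrative only; broadcast_scan_sketch and broadcast_exit_sketch are invented names, and the real functions do more work):

/* Writer side, as in handle_hpet_broadcast(): hold the write lock so a
 * cpu cannot clear its mask bit while we read its timer_deadline. */
static void broadcast_scan_sketch(struct hpet_event_channel *ch, s_time_t now)
{
    unsigned int cpu;
    cpumask_t mask = CPU_MASK_NONE;

    for_each_cpu_mask ( cpu, ch->cpumask )
    {
        write_lock_irq(&ch->cpumask_lock);
        if ( cpu_isset(cpu, ch->cpumask) &&
             (per_cpu(timer_deadline, cpu) <= now) )
            cpu_set(cpu, mask);       /* collect cpus that need waking */
        write_unlock_irq(&ch->cpumask_lock);
    }
    /* ... wake everything collected in mask ... */
}

/* Reader side, as in hpet_broadcast_exit(): cpu_clear() is atomic, so the
 * read lock suffices for clearing our own mask bit. */
static void broadcast_exit_sketch(struct hpet_event_channel *ch, int cpu)
{
    read_lock_irq(&ch->cpumask_lock);
    cpu_clear(cpu, ch->cpumask);
    read_unlock_irq(&ch->cpumask_lock);
}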
@@ -661,7 +661,7 @@ void hpet_broadcast_enter(void)
     int cpu = smp_processor_id();
     struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
 
-    if ( this_cpu(timer_deadline_start) == 0 )
+    if ( this_cpu(timer_deadline) == 0 )
         return;
 
     if ( !ch )
@@ -682,8 +682,8 @@ void hpet_broadcast_enter(void)
 
     spin_lock(&ch->lock);
     /* Reprogram if the current cpu's expiry time is nearer. */
-    if ( this_cpu(timer_deadline_end) < ch->next_event )
-        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);
+    if ( this_cpu(timer_deadline) < ch->next_event )
+        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);
     spin_unlock(&ch->lock);
 }
 
@@ -692,7 +692,7 @@ void hpet_broadcast_exit(void)
     int cpu = smp_processor_id();
     struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
 
-    if ( this_cpu(timer_deadline_start) == 0 )
+    if ( this_cpu(timer_deadline) == 0 )
         return;
 
     if ( !ch )
@@ -700,7 +700,7 @@ void hpet_broadcast_exit(void)
 
     /* Reprogram the deadline; trigger timer work now if it has passed. */
     enable_APIC_timer();
-    if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
+    if ( !reprogram_timer(this_cpu(timer_deadline)) )
         raise_softirq(TIMER_SOFTIRQ);
 
     read_lock_irq(&ch->cpumask_lock);
xen/arch/x86/time.c
index 8a08a53791f397a83a97ddeb1f0b728a1b80ee48..3fdcca9b732120fd20ed0a0ffc882e1c73698f73 100644
@@ -1488,7 +1488,7 @@ void pit_broadcast_exit(void)
     int cpu = smp_processor_id();
 
     if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
-        reprogram_timer(per_cpu(timer_deadline_start, cpu));
+        reprogram_timer(this_cpu(timer_deadline));
 }
 
 int pit_broadcast_is_available(void)
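This hunk also swaps per_cpu(timer_deadline_start, cpu) for this_cpu(timer_deadline); on the local processor the two accessors are equivalent. A sketch of the assumed relationship (the real this_cpu() is arch-defined):

/* Assumed relationship, for illustration only. */
#define this_cpu_sketch(var) per_cpu(var, smp_processor_id())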
xen/common/timer.c
index 52283cfad07939c052c5c7ad0f2b7880209276dd..648f299dfd539ee9ba84d9196f750cfd0322b3fa 100644
 #include <asm/system.h>
 #include <asm/desc.h>
 
-/*
- * We pull handlers off the timer list this far in future,
- * rather than reprogramming the time hardware.
- */
+/* We program the time hardware this far behind the closest deadline. */
 static unsigned int timer_slop __read_mostly = 50000; /* 50 us */
 integer_param("timer_slop", timer_slop);
 
 struct timers {
     spinlock_t     lock;
-    bool_t         overflow;
     struct timer **heap;
     struct timer  *list;
     struct timer  *running;
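Since timer_slop is registered with integer_param(), it can be overridden from the Xen command line in nanoseconds; e.g. timer_slop=100000 (a hypothetical value) would widen the batching window to 100 us. As the reworked timer_softirq_action() below shows, the slop is now applied once, when the per-cpu deadline is computed.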
@@ -43,8 +39,7 @@ static DEFINE_PER_CPU(struct timers, timers);
 
 static cpumask_t timer_valid_cpumask;
 
-DEFINE_PER_CPU(s_time_t, timer_deadline_start);
-DEFINE_PER_CPU(s_time_t, timer_deadline_end);
+DEFINE_PER_CPU(s_time_t, timer_deadline);
 
 /****************************************************************************
  * HEAP OPERATIONS.
@@ -210,7 +205,6 @@ static int add_entry(struct timer *t)
         return rc;
 
     /* Fall back to adding to the slower linked list. */
-    timers->overflow = 1;
     t->status = TIMER_STATUS_in_list;
     return add_to_list(&timers->list, t);
 }
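With the overflow flag gone, a non-empty ts->list is itself the overflow signal: timer_softirq_action() below tests ts->list != NULL where it previously tested ts->overflow.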
@@ -311,7 +305,6 @@ void set_timer(struct timer *timer, s_time_t expires)
         deactivate_timer(timer);
 
     timer->expires = expires;
-    timer->expires_end = expires + timer_slop;
 
     activate_timer(timer);
 
@@ -427,13 +420,13 @@ static void timer_softirq_action(void)
 {
     struct timer  *t, **heap, *next;
     struct timers *ts;
-    s_time_t       now;
+    s_time_t       now, deadline;
 
     ts = &this_cpu(timers);
     heap = ts->heap;
 
     /* If we overflowed the heap, try to allocate a larger heap. */
-    if ( unlikely(ts->overflow) )
+    if ( unlikely(ts->list != NULL) )
     {
         /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
         int old_limit = GET_HEAP_LIMIT(heap);
@@ -481,46 +474,16 @@ static void timer_softirq_action(void)
         add_entry(t);
     }
 
-    ts->overflow = (ts->list != NULL);
-    if ( unlikely(ts->overflow) )
-    {
-        /* Find earliest deadline at head of list or top of heap. */
-        this_cpu(timer_deadline_start) = ts->list->expires;
-        if ( (GET_HEAP_SIZE(heap) != 0) &&
-             ((t = heap[1])->expires < this_cpu(timer_deadline_start)) )
-            this_cpu(timer_deadline_start) = t->expires;
-        this_cpu(timer_deadline_end) = this_cpu(timer_deadline_start);
-    }
-    else
-    {
-        /*
-         * Find the earliest deadline that encompasses largest number of timers
-         * on the heap. To do this we take timers from the heap while their
-         * valid deadline ranges continue to intersect.
-         */
-        s_time_t start = 0, end = STIME_MAX;
-        struct timer **list_tail = &ts->list;
-
-        while ( (GET_HEAP_SIZE(heap) != 0) &&
-                ((t = heap[1])->expires <= end) )
-        {
-            remove_entry(t);
-
-            t->status = TIMER_STATUS_in_list;
-            t->list_next = NULL;
-            *list_tail = t;
-            list_tail = &t->list_next;
-
-            start = t->expires;
-            if ( end > t->expires_end )
-                end = t->expires_end;
-        }
-
-        this_cpu(timer_deadline_start) = start;
-        this_cpu(timer_deadline_end) = end;
-    }
+    /* Find earliest deadline from head of linked list and top of heap. */
+    deadline = STIME_MAX;
+    if ( GET_HEAP_SIZE(heap) != 0 )
+        deadline = heap[1]->expires;
+    if ( (ts->list != NULL) && (ts->list->expires < deadline) )
+        deadline = ts->list->expires;
+    this_cpu(timer_deadline) =
+        (deadline == STIME_MAX) ? 0 : deadline + timer_slop;
 
-    if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
+    if ( !reprogram_timer(this_cpu(timer_deadline)) )
         raise_softirq(TIMER_SOFTIRQ);
 
     spin_unlock_irq(&ts->lock);
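The net effect of the hunk above can be restated as a compact helper (a sketch for illustration; next_deadline_sketch is not in the patch):

/* Sketch: the new per-cpu deadline is the earliest expiry across the heap
 * and the overflow list, padded by timer_slop; 0 encodes "no timer
 * pending". */
static s_time_t next_deadline_sketch(struct timers *ts)
{
    s_time_t deadline = STIME_MAX;

    if ( GET_HEAP_SIZE(ts->heap) != 0 )
        deadline = ts->heap[1]->expires;          /* top of heap */
    if ( (ts->list != NULL) && (ts->list->expires < deadline) )
        deadline = ts->list->expires;             /* head of list */

    return (deadline == STIME_MAX) ? 0 : deadline + timer_slop;
}

This replaces the removed start/end interval-merging loop: instead of batching timers up front by intersecting their [expires, expires_end] ranges, batching now falls out of the single slop applied to the earliest deadline.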
xen/include/xen/timer.h
index 53b3c3cc17e560d2156adbd81965655245147c1c..e8b80f248e53e083f05123c9a43c54ec28f85853 100644
@@ -16,7 +16,6 @@
 struct timer {
     /* System time expiry value (nanoseconds since boot). */
     s_time_t expires;
-    s_time_t expires_end;
 
     /* Position in active-timer data structure. */
     union {
@@ -82,12 +81,8 @@ void kill_timer(struct timer *timer);
 /* Bootstrap initialisation. Must be called before any other timer function. */
 void timer_init(void);
 
-/*
- * Next timer deadline for each CPU.
- * Modified only by the local CPU and never in interrupt context.
- */
-DECLARE_PER_CPU(s_time_t, timer_deadline_start);
-DECLARE_PER_CPU(s_time_t, timer_deadline_end);
+/* Next timer deadline for each CPU. */
+DECLARE_PER_CPU(s_time_t, timer_deadline);
 
 /* Arch-defined function to reprogram timer hardware for new deadline. */
 int reprogram_timer(s_time_t timeout);
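The contract, as exercised in timer.c and hpet.c above: a zero return means the requested deadline has already passed, so the caller must trigger timer work itself. For example (set_hw_deadline_sketch is an illustrative wrapper, not part of the patch):

/* Caller pattern from the patch: if the hardware cannot be programmed for
 * the deadline (it already passed), run the timer softirq instead. */
static void set_hw_deadline_sketch(void)
{
    if ( !reprogram_timer(this_cpu(timer_deadline)) )
        raise_softirq(TIMER_SOFTIRQ);
}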