ia64/xen-unstable
changeset 18752:f12d9595d07c
Change timer implementation to allow variable 'slop' in how late
timers are fired. The default continues to be 50us, but this can be
configured on Xen's command line.
Signed-off-by: Yu Ke <ke.yu@intel.com>
Signed-off-by: Wei Gang <gang.wei@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Fri Oct 31 14:02:39 2008 +0000 (2008-10-31) |
parents | 85ba96069dfb |
children | 91a2b9309a72 |
files | xen/arch/x86/hpet.c xen/common/timer.c xen/include/xen/time.h xen/include/xen/timer.h |
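For reference, the behaviour described in the commit message is driven by the `timer_slop` boot parameter introduced in xen/common/timer.c below (registered via `integer_param("timer_slop", timer_slop)`). The value is in nanoseconds and defaults to 50000 (50us); for example, appending `timer_slop=100000` to the hypervisor command line would allow timers to fire up to 100us late. The exact bootloader syntax depends on your setup and is mentioned here only as an illustration.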
line diff
--- a/xen/arch/x86/hpet.c	Thu Oct 30 15:04:27 2008 +0000
+++ b/xen/arch/x86/hpet.c	Fri Oct 31 14:02:39 2008 +0000
@@ -14,8 +14,6 @@
 #include <asm/div64.h>
 #include <asm/hpet.h>
 
-#define STIME_MAX ((s_time_t)((uint64_t)~0ull>>1))
-
 #define MAX_DELTA_NS MILLISECS(10*1000)
 #define MIN_DELTA_NS MICROSECS(20)
 
--- a/xen/common/timer.c	Thu Oct 30 15:04:27 2008 +0000
+++ b/xen/common/timer.c	Fri Oct 31 14:02:39 2008 +0000
@@ -25,10 +25,12 @@
  * We pull handlers off the timer list this far in future,
  * rather than reprogramming the time hardware.
  */
-#define TIMER_SLOP (50*1000) /* ns */
+static unsigned int timer_slop __read_mostly = 50000; /* 50 us */
+integer_param("timer_slop", timer_slop);
 
 struct timers {
     spinlock_t lock;
+    bool_t overflow;
     struct timer **heap;
     struct timer *list;
     struct timer *running;
@@ -200,6 +202,7 @@ static int add_entry(struct timers *time
         return rc;
 
     /* Fall back to adding to the slower linked list. */
+    timers->overflow = 1;
     t->status = TIMER_STATUS_in_list;
     return add_to_list(&timers->list, t);
 }
@@ -258,6 +261,7 @@ void set_timer(struct timer *timer, s_ti
         __stop_timer(timer);
 
     timer->expires = expires;
+    timer->expires_end = expires + timer_slop;
 
     if ( likely(timer->status != TIMER_STATUS_killed) )
         __add_timer(timer);
@@ -344,19 +348,30 @@ void kill_timer(struct timer *timer)
 }
 
 
+static void execute_timer(struct timers *ts, struct timer *t)
+{
+    void (*fn)(void *) = t->function;
+    void *data = t->data;
+
+    ts->running = t;
+    spin_unlock_irq(&ts->lock);
+    (*fn)(data);
+    spin_lock_irq(&ts->lock);
+    ts->running = NULL;
+}
+
+
 static void timer_softirq_action(void)
 {
     struct timer *t, **heap, *next;
     struct timers *ts;
-    s_time_t now, deadline;
-    void (*fn)(void *);
-    void *data;
+    s_time_t now;
 
     ts = &this_cpu(timers);
     heap = ts->heap;
 
-    /* If we are using overflow linked list, try to allocate a larger heap. */
-    if ( unlikely(ts->list != NULL) )
+    /* If we overflowed the heap, try to allocate a larger heap. */
+    if ( unlikely(ts->overflow) )
     {
         /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
         int old_limit = GET_HEAP_LIMIT(heap);
@@ -377,7 +392,26 @@ static void timer_softirq_action(void)
 
     spin_lock_irq(&ts->lock);
 
-    /* Try to move timers from overflow linked list to more efficient heap. */
+    now = NOW();
+
+    /* Execute ready heap timers. */
+    while ( (GET_HEAP_SIZE(heap) != 0) &&
+            ((t = heap[1])->expires_end < now) )
+    {
+        remove_from_heap(heap, t);
+        t->status = TIMER_STATUS_inactive;
+        execute_timer(ts, t);
+    }
+
+    /* Execute ready list timers. */
+    while ( ((t = ts->list) != NULL) && (t->expires_end < now) )
+    {
+        ts->list = t->list_next;
+        t->status = TIMER_STATUS_inactive;
+        execute_timer(ts, t);
+    }
+
+    /* Try to move timers from linked list to more efficient heap. */
     next = ts->list;
     ts->list = NULL;
     while ( unlikely((t = next) != NULL) )
@@ -387,51 +421,44 @@ static void timer_softirq_action(void)
         add_entry(ts, t);
     }
 
-    now = NOW();
-
-    while ( (GET_HEAP_SIZE(heap) != 0) &&
-            ((t = heap[1])->expires < (now + TIMER_SLOP)) )
+    ts->overflow = (ts->list != NULL);
+    if ( unlikely(ts->overflow) )
     {
-        remove_entry(ts, t);
+        /* Find earliest deadline at head of list or top of heap. */
+        this_cpu(timer_deadline) = ts->list->expires;
+        if ( (GET_HEAP_SIZE(heap) != 0) &&
+             ((t = heap[1])->expires < this_cpu(timer_deadline)) )
+            this_cpu(timer_deadline) = t->expires;
+    }
+    else
+    {
+        /*
+         * Find the earliest deadline that encompasses largest number of timers
+         * on the heap. To do this we take timers from the heap while their
+         * valid deadline ranges continue to intersect.
+         */
+        s_time_t start = 0, end = STIME_MAX;
+        struct timer **list_tail = &ts->list;
 
-        ts->running = t;
+        while ( (GET_HEAP_SIZE(heap) != 0) &&
+                ((t = heap[1])->expires <= end) )
+        {
+            remove_entry(ts, t);
 
-        fn = t->function;
-        data = t->data;
+            t->status = TIMER_STATUS_in_list;
+            t->list_next = NULL;
+            *list_tail = t;
+            list_tail = &t->list_next;
 
-        spin_unlock_irq(&ts->lock);
-        (*fn)(data);
-        spin_lock_irq(&ts->lock);
+            start = t->expires;
+            if ( end > t->expires_end )
+                end = t->expires_end;
+        }
+
+        this_cpu(timer_deadline) = start;
     }
 
-    deadline = GET_HEAP_SIZE(heap) ? heap[1]->expires : 0;
-
-    while ( unlikely((t = ts->list) != NULL) )
-    {
-        if ( t->expires >= (now + TIMER_SLOP) )
-        {
-            if ( (deadline == 0) || (deadline > t->expires) )
-                deadline = t->expires;
-            break;
-        }
-
-        ts->list = t->list_next;
-        t->status = TIMER_STATUS_inactive;
-
-        ts->running = t;
-
-        fn = t->function;
-        data = t->data;
-
-        spin_unlock_irq(&ts->lock);
-        (*fn)(data);
-        spin_lock_irq(&ts->lock);
-    }
-
-    ts->running = NULL;
-
-    this_cpu(timer_deadline) = deadline;
-    if ( !reprogram_timer(deadline) )
+    if ( !reprogram_timer(this_cpu(timer_deadline)) )
         raise_softirq(TIMER_SOFTIRQ);
 
     spin_unlock_irq(&ts->lock);
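The interesting part of the change is the coalescing loop in the non-overflow path above: each timer now carries a window [expires, expires_end], where expires_end = expires + timer_slop, and the softirq keeps pulling timers off the heap while their windows still intersect, then programs the hardware once for the latest `expires` in that batch. Below is a minimal, self-contained sketch of that idea (not Xen code; names such as `fake_timer` and `coalesce_deadline` are invented for illustration):

```c
/* Standalone model of slop-based deadline coalescing, assuming timers are
 * already sorted by 'expires' (as they come off the heap in the patch). */
#include <stdint.h>
#include <stdio.h>

typedef int64_t s_time_t;

struct fake_timer {
    s_time_t expires;      /* earliest acceptable firing time (ns) */
    s_time_t expires_end;  /* latest acceptable firing time (ns)   */
};

/* Take timers while their windows keep intersecting; return one deadline
 * that satisfies all of them, and report how many were batched. */
static s_time_t coalesce_deadline(const struct fake_timer *timers,
                                  unsigned int n, unsigned int *batched)
{
    s_time_t start = 0, end = INT64_MAX;
    unsigned int i;

    for ( i = 0; (i < n) && (timers[i].expires <= end); i++ )
    {
        start = timers[i].expires;            /* latest start so far   */
        if ( end > timers[i].expires_end )
            end = timers[i].expires_end;      /* tightest end so far   */
    }

    *batched = i;
    return start;   /* fire once here; every batched timer is within slop */
}

int main(void)
{
    /* With 50us slop, three timers 20us apart share one interrupt. */
    struct fake_timer t[] = {
        { 1000000, 1000000 + 50000 },
        { 1020000, 1020000 + 50000 },
        { 1040000, 1040000 + 50000 },
    };
    unsigned int batched;
    s_time_t deadline = coalesce_deadline(t, 3, &batched);

    printf("deadline=%lld ns, timers batched=%u\n",
           (long long)deadline, batched);
    return 0;
}
```

In this example all three timers are fired from a single reprogramming of the hardware at the third timer's `expires`, which is what lets a larger slop trade timer precision for fewer interrupts.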
--- a/xen/include/xen/time.h	Thu Oct 30 15:04:27 2008 +0000
+++ b/xen/include/xen/time.h	Fri Oct 31 14:02:39 2008 +0000
@@ -52,6 +52,7 @@ struct tm gmtime(unsigned long t);
 #define SECONDS(_s) ((s_time_t)((_s) * 1000000000ULL))
 #define MILLISECS(_ms) ((s_time_t)((_ms) * 1000000ULL))
 #define MICROSECS(_us) ((s_time_t)((_us) * 1000ULL))
+#define STIME_MAX ((s_time_t)((uint64_t)~0ull>>1))
 
 extern void update_vcpu_system_time(struct vcpu *v);
 extern void update_domain_wallclock_time(struct domain *d);
--- a/xen/include/xen/timer.h	Thu Oct 30 15:04:27 2008 +0000
+++ b/xen/include/xen/timer.h	Fri Oct 31 14:02:39 2008 +0000
@@ -15,12 +15,13 @@
 struct timer {
     /* System time expiry value (nanoseconds since boot). */
     s_time_t expires;
+    s_time_t expires_end;
 
     /* Position in active-timer data structure. */
     union {
         /* Timer-heap offset. */
         unsigned int heap_offset;
-        /* Overflow linked list. */
+        /* Linked list. */
         struct timer *list_next;
 
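The timer.h hunk is what makes the per-timer window possible: each struct timer now records both the requested expiry and the latest acceptable firing time. A hedged sketch of how the two fields relate (simplified stand-ins, not the actual Xen definitions):

```c
#include <stdint.h>
#include <stdio.h>

typedef int64_t s_time_t;                    /* ns since boot, as in Xen */

/* Illustrative constant; in the patch this is the boot-tunable timer_slop. */
static const unsigned int slop_ns = 50000;   /* 50 us */

struct toy_timer {
    s_time_t expires;      /* earliest time the handler should run   */
    s_time_t expires_end;  /* latest acceptable time: expires + slop */
};

/* Mirrors the lines added to set_timer(): arming a timer for time T now
 * means "run me anywhere in [T, T + slop]". */
static void toy_set_timer(struct toy_timer *t, s_time_t expires)
{
    t->expires     = expires;
    t->expires_end = expires + slop_ns;
}

int main(void)
{
    struct toy_timer t;
    toy_set_timer(&t, 1000000);              /* ask for 1ms after boot */
    printf("window: [%lld, %lld] ns\n",
           (long long)t.expires, (long long)t.expires_end);
    return 0;
}
```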