ia64/xen-unstable: xen/common/timer.c @ 18392:070688cdf62c

Fall back to a timer linked list when the timer heap overflows.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author   Keir Fraser <keir.fraser@citrix.com>
date     Wed Aug 27 13:24:35 2008 +0100 (2008-08-27)
parents  d795e15b85a7
children ecdbcd27490f

/******************************************************************************
 * timer.c
 *
 * Copyright (c) 2002-2003 Rolf Neugebauer
 * Copyright (c) 2002-2005 K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/smp.h>
#include <xen/perfc.h>
#include <xen/time.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <xen/percpu.h>
#include <asm/system.h>
#include <asm/desc.h>

/*
 * We pull handlers off the timer list this far in future,
 * rather than reprogramming the time hardware.
 */
#define TIMER_SLOP (50*1000) /* ns */
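
/*
 * Each CPU owns one 'struct timers' queue. Pending timers normally live in
 * the binary heap; if the heap cannot be grown (see add_to_heap()) they
 * spill onto the sorted overflow 'list'. 'running' records the timer whose
 * handler is currently executing, allowing kill_timer() to wait for it.
 */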
struct timers {
    spinlock_t     lock;
    struct timer **heap;
    struct timer  *list;
    struct timer  *running;
} __cacheline_aligned;

static DEFINE_PER_CPU(struct timers, timers);

DEFINE_PER_CPU(s_time_t, timer_deadline);

/****************************************************************************
 * HEAP OPERATIONS.
 */

#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))
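
/*
 * Note that the heap array is 1-based: element 0 holds no timer pointer but
 * is instead overlaid with two u16 metadata fields, the current size and
 * the allocated limit, accessed via the GET/SET macros above.
 */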

/* Sink down element @pos of @heap. */
static void down_heap(struct timer **heap, int pos)
{
    int sz = GET_HEAP_SIZE(heap), nxt;
    struct timer *t = heap[pos];

    while ( (nxt = (pos << 1)) <= sz )
    {
        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
            nxt++;
        if ( heap[nxt]->expires > t->expires )
            break;
        heap[pos] = heap[nxt];
        heap[pos]->heap_offset = pos;
        pos = nxt;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Float element @pos up @heap. */
static void up_heap(struct timer **heap, int pos)
{
    struct timer *t = heap[pos];

    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
    {
        heap[pos] = heap[pos>>1];
        heap[pos]->heap_offset = pos;
        pos >>= 1;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}
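
/*
 * Each queued timer caches its array index in 'heap_offset', so it can be
 * found and removed from the middle of the heap in O(log n) time without
 * searching.
 */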
/* Delete @t from @heap. Return TRUE if new top of heap. */
static int remove_from_heap(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);
    int pos = t->heap_offset;

    if ( unlikely(pos == sz) )
    {
        SET_HEAP_SIZE(heap, sz-1);
        goto out;
    }

    heap[pos] = heap[sz];
    heap[pos]->heap_offset = pos;

    SET_HEAP_SIZE(heap, --sz);

    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
        up_heap(heap, pos);
    else
        down_heap(heap, pos);

 out:
    return (pos == 1);
}

/* Add new entry @t to @heap. Return TRUE if new top of heap. */
static int add_to_heap(struct timer ***pheap, struct timer *t)
{
    struct timer **heap = *pheap;
    int sz = GET_HEAP_SIZE(heap);

    /* Copy the heap if it is full. */
    if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
    {
        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
        int old_limit = GET_HEAP_LIMIT(heap);
        int new_limit = ((old_limit + 1) << 4) - 1;
        if ( in_irq() )
            goto out;
        heap = xmalloc_array(struct timer *, new_limit + 1);
        if ( heap == NULL )
            goto out;
        memcpy(heap, *pheap, (old_limit + 1) * sizeof(*heap));
        SET_HEAP_LIMIT(heap, new_limit);
        if ( old_limit != 0 )
            xfree(*pheap);
        *pheap = heap;
    }

    SET_HEAP_SIZE(heap, ++sz);
    heap[sz] = t;
    t->heap_offset = sz;
    up_heap(heap, sz);
 out:
    return (t->heap_offset == 1);
}
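
/*
 * Note the growth policy above: the limit goes from (2^n)-1 entries to
 * (2^(n+4))-1, i.e. roughly a 16x enlargement each time, and no growth is
 * attempted in IRQ context or when allocation fails. In those cases
 * t->heap_offset remains 0, which tells add_entry() to fall back to the
 * linked list.
 */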

/****************************************************************************
 * LINKED LIST OPERATIONS.
 */
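
/*
 * The overflow list is kept sorted by expiry time. As with the heap
 * helpers, both functions return TRUE when they changed the list head,
 * i.e. when the earliest pending deadline may have moved.
 */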
static int remove_from_list(struct timer **pprev, struct timer *t)
{
    struct timer *curr, **_pprev = pprev;

    while ( (curr = *_pprev) != t )
        _pprev = &curr->list_next;

    *_pprev = t->list_next;

    return (_pprev == pprev);
}

static int add_to_list(struct timer **pprev, struct timer *t)
{
    struct timer *curr, **_pprev = pprev;

    while ( ((curr = *_pprev) != NULL) && (curr->expires <= t->expires) )
        _pprev = &curr->list_next;

    t->list_next = curr;
    *_pprev = t;

    return (_pprev == pprev);
}

/****************************************************************************
 * TIMER OPERATIONS.
 */

static int remove_entry(struct timers *timers, struct timer *t)
{
    int rc;

    switch ( t->status )
    {
    case TIMER_STATUS_in_heap:
        rc = remove_from_heap(timers->heap, t);
        break;
    case TIMER_STATUS_in_list:
        rc = remove_from_list(&timers->list, t);
        break;
    default:
        rc = 0;
        BUG();
    }

    t->status = TIMER_STATUS_inactive;
    return rc;
}

static int add_entry(struct timers *timers, struct timer *t)
{
    int rc;

    ASSERT(t->status == TIMER_STATUS_inactive);

    /* Try to add to heap. t->heap_offset indicates whether we succeed. */
    t->heap_offset = 0;
    t->status = TIMER_STATUS_in_heap;
    rc = add_to_heap(&timers->heap, t);
    if ( t->heap_offset != 0 )
        return rc;

    /* Fall back to adding to the slower linked list. */
    t->status = TIMER_STATUS_in_list;
    return add_to_list(&timers->list, t);
}
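
/*
 * Add or remove a timer on its owning CPU's queue. If the front of that
 * queue changed (add_entry()/remove_entry() returned TRUE), raise
 * TIMER_SOFTIRQ on that CPU so the hardware timer deadline is recomputed.
 */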
static inline void __add_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( add_entry(&per_cpu(timers, cpu), timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

static inline void __stop_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( remove_entry(&per_cpu(timers, cpu), timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}
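
/*
 * Lock the queue of the CPU that currently owns @timer. The owner can
 * change concurrently via migrate_timer(), so re-check timer->cpu after
 * taking the lock and retry if the timer has moved.
 */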
static inline void timer_lock(struct timer *timer)
{
    unsigned int cpu;

    for ( ; ; )
    {
        cpu = timer->cpu;
        spin_lock(&per_cpu(timers, cpu).lock);
        if ( likely(timer->cpu == cpu) )
            break;
        spin_unlock(&per_cpu(timers, cpu).lock);
    }
}

#define timer_lock_irq(t) \
    do { local_irq_disable(); timer_lock(t); } while ( 0 )
#define timer_lock_irqsave(t, flags) \
    do { local_irq_save(flags); timer_lock(t); } while ( 0 )

static inline void timer_unlock(struct timer *timer)
{
    spin_unlock(&per_cpu(timers, timer->cpu).lock);
}

#define timer_unlock_irq(t) \
    do { timer_unlock(t); local_irq_enable(); } while ( 0 )
#define timer_unlock_irqrestore(t, flags) \
    do { timer_unlock(t); local_irq_restore(flags); } while ( 0 )

void set_timer(struct timer *timer, s_time_t expires)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer->expires = expires;

    if ( likely(timer->status != TIMER_STATUS_killed) )
        __add_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}
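
/*
 * Illustrative usage only (not part of this file): a caller typically
 * initialises a timer once and then (re)arms it with an absolute
 * system-time deadline, e.g.
 *
 *     static struct timer t;
 *     init_timer(&t, handler_fn, handler_data, smp_processor_id());
 *     set_timer(&t, NOW() + MILLISECS(10));
 *
 * init_timer() is declared in xen/timer.h; handler_fn and handler_data are
 * placeholder names.
 */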

void stop_timer(struct timer *timer)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}
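
/*
 * Move @timer to @new_cpu's queue. The two per-CPU locks are always taken
 * in ascending CPU-number order so that concurrent migrations cannot
 * deadlock, and timer->cpu is re-checked after locking in case another
 * migration won the race.
 */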
void migrate_timer(struct timer *timer, unsigned int new_cpu)
{
    int old_cpu;
    unsigned long flags;

    for ( ; ; )
    {
        if ( (old_cpu = timer->cpu) == new_cpu )
            return;

        if ( old_cpu < new_cpu )
        {
            spin_lock_irqsave(&per_cpu(timers, old_cpu).lock, flags);
            spin_lock(&per_cpu(timers, new_cpu).lock);
        }
        else
        {
            spin_lock_irqsave(&per_cpu(timers, new_cpu).lock, flags);
            spin_lock(&per_cpu(timers, old_cpu).lock);
        }

        if ( likely(timer->cpu == old_cpu) )
            break;

        spin_unlock(&per_cpu(timers, old_cpu).lock);
        spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
    }

    if ( active_timer(timer) )
    {
        __stop_timer(timer);
        timer->cpu = new_cpu;
        __add_timer(timer);
    }
    else
    {
        timer->cpu = new_cpu;
    }

    spin_unlock(&per_cpu(timers, old_cpu).lock);
    spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
}
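
/*
 * Deactivate @timer and mark it killed so it cannot be re-armed, then wait
 * until no CPU is still executing its handler. After this returns the
 * timer structure can safely be freed or reused.
 */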
void kill_timer(struct timer *timer)
{
    int           cpu;
    unsigned long flags;

    BUG_ON(this_cpu(timers).running == timer);

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);
    timer->status = TIMER_STATUS_killed;

    timer_unlock_irqrestore(timer, flags);

    for_each_online_cpu ( cpu )
        while ( per_cpu(timers, cpu).running == timer )
            cpu_relax();
}

static void timer_softirq_action(void)
{
    struct timer  *t, **heap, *next;
    struct timers *ts;
    s_time_t       now, deadline;
    void         (*fn)(void *);
    void          *data;

    ts = &this_cpu(timers);

    spin_lock_irq(&ts->lock);

    /* Try to move timers from overflow linked list to more efficient heap. */
    next = ts->list;
    ts->list = NULL;
    while ( unlikely((t = next) != NULL) )
    {
        next = t->list_next;
        t->status = TIMER_STATUS_inactive;
        add_entry(ts, t);
    }

    heap = ts->heap;
    now  = NOW();
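
    /*
     * Execute heap timers whose deadline falls within TIMER_SLOP of now.
     * Each handler runs with the queue lock dropped; ts->running marks it
     * so kill_timer() can wait for the handler to complete.
     */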
    while ( (GET_HEAP_SIZE(heap) != 0) &&
            ((t = heap[1])->expires < (now + TIMER_SLOP)) )
    {
        remove_entry(ts, t);

        ts->running = t;

        fn   = t->function;
        data = t->data;

        spin_unlock_irq(&ts->lock);
        (*fn)(data);
        spin_lock_irq(&ts->lock);

        /* Heap may have grown while the lock was released. */
        heap = ts->heap;
    }
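
    /*
     * The next deadline is the earlier of the heap head and the head of the
     * (sorted) overflow list; any list entries that have already expired are
     * executed here as well.
     */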
    deadline = GET_HEAP_SIZE(heap) ? heap[1]->expires : 0;

    while ( unlikely((t = ts->list) != NULL) )
    {
        if ( t->expires >= (now + TIMER_SLOP) )
        {
            if ( (deadline == 0) || (deadline > t->expires) )
                deadline = t->expires;
            break;
        }

        ts->list = t->list_next;
        t->status = TIMER_STATUS_inactive;

        ts->running = t;

        fn   = t->function;
        data = t->data;

        spin_unlock_irq(&ts->lock);
        (*fn)(data);
        spin_lock_irq(&ts->lock);
    }

    ts->running = NULL;
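
    /*
     * Record and program the new deadline. If reprogram_timer() declines
     * (typically because the deadline has already passed), re-raise the
     * softirq so this handler runs again straight away.
     */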
    this_cpu(timer_deadline) = deadline;
    if ( !reprogram_timer(deadline) )
        raise_softirq(TIMER_SOFTIRQ);

    spin_unlock_irq(&ts->lock);
}

void process_pending_timers(void)
{
    unsigned int cpu = smp_processor_id();
    ASSERT(!in_irq() && local_irq_is_enabled());
    if ( test_and_clear_bit(TIMER_SOFTIRQ, &softirq_pending(cpu)) )
        timer_softirq_action();
}

static void dump_timerq(unsigned char key)
{
    struct timer  *t;
    struct timers *ts;
    unsigned long  flags;
    s_time_t       now = NOW();
    int            i, j;

    printk("Dumping timer queues: NOW=0x%08X%08X\n",
           (u32)(now>>32), (u32)now);

    for_each_online_cpu( i )
    {
        ts = &per_cpu(timers, i);

        printk("CPU[%02d] ", i);
        spin_lock_irqsave(&ts->lock, flags);
        for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ )
        {
            t = ts->heap[j];
            printk("  %d : %p ex=0x%08X%08X %p\n",
                   j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
        }
        for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ )
            printk(" L%d : %p ex=0x%08X%08X %p\n",
                   j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
        spin_unlock_irqrestore(&ts->lock, flags);
        printk("\n");
    }
}

void __init timer_init(void)
{
    static struct timer *dummy_heap;
    int i;

    open_softirq(TIMER_SOFTIRQ, timer_softirq_action);

    /*
     * All CPUs initially share an empty dummy heap. Only those CPUs that
     * are brought online will be dynamically allocated their own heap.
     */
    SET_HEAP_SIZE(&dummy_heap, 0);
    SET_HEAP_LIMIT(&dummy_heap, 0);

    for_each_cpu ( i )
    {
        spin_lock_init(&per_cpu(timers, i).lock);
        per_cpu(timers, i).heap = &dummy_heap;
    }

    register_keyhandler('a', dump_timerq, "dump timer queues");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */