
view xen/common/timer.c @ 19835:edfdeb150f27

Fix buildsystem to detect udev > version 124

udev removed the udevinfo symlink from versions higher than 123 and
xen's build-system could not detect if udev is in place and has the
required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100 (2009-06-25)
parents d9480422034b
/******************************************************************************
 * timer.c
 *
 * Copyright (c) 2002-2003 Rolf Neugebauer
 * Copyright (c) 2002-2005 K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/smp.h>
#include <xen/perfc.h>
#include <xen/time.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <xen/percpu.h>
#include <asm/system.h>
#include <asm/desc.h>

/*
 * We pull handlers off the timer list this far in the future,
 * rather than reprogramming the timer hardware.
 */
static unsigned int timer_slop __read_mostly = 50000; /* 50 us */
integer_param("timer_slop", timer_slop);

struct timers {
    spinlock_t     lock;
    bool_t         overflow;
    struct timer **heap;
    struct timer  *list;
    struct timer  *running;
} __cacheline_aligned;

static DEFINE_PER_CPU(struct timers, timers);

DEFINE_PER_CPU(s_time_t, timer_deadline);
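
/*
 * timer_deadline holds the deadline most recently passed to
 * reprogram_timer() by timer_softirq_action() below.  It is deliberately
 * non-static, presumably so that architecture-specific code can consult the
 * next expected timer interrupt on this CPU.
 */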

/****************************************************************************
 * HEAP OPERATIONS.
 */

#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))
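
/*
 * Slot 0 of the heap array is not a timer pointer: its storage is reused to
 * hold two u16 fields, the current heap size ([0]) and the allocated limit
 * ([1]).  Valid timer entries therefore occupy indices 1..GET_HEAP_SIZE().
 */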

/* Sink down element @pos of @heap. */
static void down_heap(struct timer **heap, int pos)
{
    int sz = GET_HEAP_SIZE(heap), nxt;
    struct timer *t = heap[pos];

    while ( (nxt = (pos << 1)) <= sz )
    {
        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
            nxt++;
        if ( heap[nxt]->expires > t->expires )
            break;
        heap[pos] = heap[nxt];
        heap[pos]->heap_offset = pos;
        pos = nxt;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Float element @pos up @heap. */
static void up_heap(struct timer **heap, int pos)
{
    struct timer *t = heap[pos];

    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
    {
        heap[pos] = heap[pos>>1];
        heap[pos]->heap_offset = pos;
        pos >>= 1;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Delete @t from @heap. Return TRUE if new top of heap. */
static int remove_from_heap(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);
    int pos = t->heap_offset;

    if ( unlikely(pos == sz) )
    {
        SET_HEAP_SIZE(heap, sz-1);
        goto out;
    }

    heap[pos] = heap[sz];
    heap[pos]->heap_offset = pos;

    SET_HEAP_SIZE(heap, --sz);

    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
        up_heap(heap, pos);
    else
        down_heap(heap, pos);

 out:
    return (pos == 1);
}

/* Add new entry @t to @heap. Return TRUE if new top of heap. */
static int add_to_heap(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);

    /* Fail if the heap is full. */
    if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
        return 0;

    SET_HEAP_SIZE(heap, ++sz);
    heap[sz] = t;
    t->heap_offset = sz;
    up_heap(heap, sz);

    return (t->heap_offset == 1);
}

/****************************************************************************
 * LINKED LIST OPERATIONS.
 */
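
/*
 * The per-CPU list is a fallback used only when the heap is full: entries
 * are kept sorted by expiry time, and both operations below return TRUE
 * when the head of the list (i.e. the earliest deadline) changed.
 */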

static int remove_from_list(struct timer **pprev, struct timer *t)
{
    struct timer *curr, **_pprev = pprev;

    while ( (curr = *_pprev) != t )
        _pprev = &curr->list_next;

    *_pprev = t->list_next;

    return (_pprev == pprev);
}

static int add_to_list(struct timer **pprev, struct timer *t)
{
    struct timer *curr, **_pprev = pprev;

    while ( ((curr = *_pprev) != NULL) && (curr->expires <= t->expires) )
        _pprev = &curr->list_next;

    t->list_next = curr;
    *_pprev = t;

    return (_pprev == pprev);
}

/****************************************************************************
 * TIMER OPERATIONS.
 */

static int remove_entry(struct timers *timers, struct timer *t)
{
    int rc;

    switch ( t->status )
    {
    case TIMER_STATUS_in_heap:
        rc = remove_from_heap(timers->heap, t);
        break;
    case TIMER_STATUS_in_list:
        rc = remove_from_list(&timers->list, t);
        break;
    default:
        rc = 0;
        BUG();
    }

    t->status = TIMER_STATUS_inactive;
    return rc;
}

static int add_entry(struct timers *timers, struct timer *t)
{
    int rc;

    ASSERT(t->status == TIMER_STATUS_inactive);

    /* Try to add to heap. t->heap_offset indicates whether we succeed. */
    t->heap_offset = 0;
    t->status = TIMER_STATUS_in_heap;
    rc = add_to_heap(timers->heap, t);
    if ( t->heap_offset != 0 )
        return rc;

    /* Fall back to adding to the slower linked list. */
    timers->overflow = 1;
    t->status = TIMER_STATUS_in_list;
    return add_to_list(&timers->list, t);
}

static inline void __add_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( add_entry(&per_cpu(timers, cpu), timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

static inline void __stop_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( remove_entry(&per_cpu(timers, cpu), timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

static inline void timer_lock(struct timer *timer)
{
    unsigned int cpu;

    for ( ; ; )
    {
        cpu = timer->cpu;
        spin_lock(&per_cpu(timers, cpu).lock);
        if ( likely(timer->cpu == cpu) )
            break;
        spin_unlock(&per_cpu(timers, cpu).lock);
    }
}
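
/*
 * timer_lock() must re-check timer->cpu after taking the per-CPU lock: a
 * concurrent migrate_timer() may have moved the timer to another CPU between
 * the read of timer->cpu and the spin_lock(), in which case the lock is
 * dropped and the loop retries against the new CPU's queue.
 */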

#define timer_lock_irq(t) \
    do { local_irq_disable(); timer_lock(t); } while ( 0 )
#define timer_lock_irqsave(t, flags) \
    do { local_irq_save(flags); timer_lock(t); } while ( 0 )

static inline void timer_unlock(struct timer *timer)
{
    spin_unlock(&per_cpu(timers, timer->cpu).lock);
}

#define timer_unlock_irq(t) \
    do { timer_unlock(t); local_irq_enable(); } while ( 0 )
#define timer_unlock_irqrestore(t, flags) \
    do { timer_unlock(t); local_irq_restore(flags); } while ( 0 )

void set_timer(struct timer *timer, s_time_t expires)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer->expires = expires;
    timer->expires_end = expires + timer_slop;

    if ( likely(timer->status != TIMER_STATUS_killed) )
        __add_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}

void stop_timer(struct timer *timer)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}
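
/*
 * migrate_timer() must hold both the old and the new CPU's timer lock.  The
 * two locks are always taken in ascending CPU order, so two concurrent
 * migrations between the same pair of CPUs cannot deadlock; if the timer
 * moved while the locks were being acquired, both are dropped and we retry.
 */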

void migrate_timer(struct timer *timer, unsigned int new_cpu)
{
    int old_cpu;
    unsigned long flags;

    for ( ; ; )
    {
        if ( (old_cpu = timer->cpu) == new_cpu )
            return;

        if ( old_cpu < new_cpu )
        {
            spin_lock_irqsave(&per_cpu(timers, old_cpu).lock, flags);
            spin_lock(&per_cpu(timers, new_cpu).lock);
        }
        else
        {
            spin_lock_irqsave(&per_cpu(timers, new_cpu).lock, flags);
            spin_lock(&per_cpu(timers, old_cpu).lock);
        }

        if ( likely(timer->cpu == old_cpu) )
            break;

        spin_unlock(&per_cpu(timers, old_cpu).lock);
        spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
    }

    if ( active_timer(timer) )
    {
        __stop_timer(timer);
        timer->cpu = new_cpu;
        __add_timer(timer);
    }
    else
    {
        timer->cpu = new_cpu;
    }

    spin_unlock(&per_cpu(timers, old_cpu).lock);
    spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
}
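
/*
 * kill_timer() marks the timer TIMER_STATUS_killed, so that subsequent
 * set_timer() calls will not re-activate it, and then waits until no CPU is
 * still running its handler.  It must not be called from the timer's own
 * handler (enforced for the local CPU by the BUG_ON below).
 */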

void kill_timer(struct timer *timer)
{
    int cpu;
    unsigned long flags;

    BUG_ON(this_cpu(timers).running == timer);

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);
    timer->status = TIMER_STATUS_killed;

    timer_unlock_irqrestore(timer, flags);

    for_each_online_cpu ( cpu )
        while ( per_cpu(timers, cpu).running == timer )
            cpu_relax();
}
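
/*
 * The handler is invoked with the per-CPU timer lock dropped (and interrupts
 * re-enabled), so a handler is free to call set_timer(), stop_timer() or
 * migrate_timer() on any timer, including itself.  ts->running is the field
 * that kill_timer() polls to wait for a handler to finish.
 */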

static void execute_timer(struct timers *ts, struct timer *t)
{
    void (*fn)(void *) = t->function;
    void *data = t->data;

    ts->running = t;
    spin_unlock_irq(&ts->lock);
    (*fn)(data);
    spin_lock_irq(&ts->lock);
    ts->running = NULL;
}

static void timer_softirq_action(void)
{
    struct timer *t, **heap, *next;
    struct timers *ts;
    s_time_t now;

    ts = &this_cpu(timers);
    heap = ts->heap;

    /* If we overflowed the heap, try to allocate a larger heap. */
    if ( unlikely(ts->overflow) )
    {
        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
        int old_limit = GET_HEAP_LIMIT(heap);
        int new_limit = ((old_limit + 1) << 4) - 1;
        struct timer **newheap = xmalloc_array(struct timer *, new_limit + 1);
        if ( newheap != NULL )
        {
            spin_lock_irq(&ts->lock);
            memcpy(newheap, heap, (old_limit + 1) * sizeof(*heap));
            SET_HEAP_LIMIT(newheap, new_limit);
            ts->heap = newheap;
            spin_unlock_irq(&ts->lock);
            if ( old_limit != 0 )
                xfree(heap);
            heap = newheap;
        }
    }

    spin_lock_irq(&ts->lock);

    now = NOW();

    /* Execute ready heap timers. */
    while ( (GET_HEAP_SIZE(heap) != 0) &&
            ((t = heap[1])->expires < now) )
    {
        remove_from_heap(heap, t);
        t->status = TIMER_STATUS_inactive;
        execute_timer(ts, t);
    }

    /* Execute ready list timers. */
    while ( ((t = ts->list) != NULL) && (t->expires < now) )
    {
        ts->list = t->list_next;
        t->status = TIMER_STATUS_inactive;
        execute_timer(ts, t);
    }

    /* Try to move timers from linked list to more efficient heap. */
    next = ts->list;
    ts->list = NULL;
    while ( unlikely((t = next) != NULL) )
    {
        next = t->list_next;
        t->status = TIMER_STATUS_inactive;
        add_entry(ts, t);
    }

    ts->overflow = (ts->list != NULL);
    if ( unlikely(ts->overflow) )
    {
        /* Find earliest deadline at head of list or top of heap. */
        this_cpu(timer_deadline) = ts->list->expires;
        if ( (GET_HEAP_SIZE(heap) != 0) &&
             ((t = heap[1])->expires < this_cpu(timer_deadline)) )
            this_cpu(timer_deadline) = t->expires;
    }
    else
    {
        /*
         * Find the earliest deadline that encompasses largest number of timers
         * on the heap. To do this we take timers from the heap while their
         * valid deadline ranges continue to intersect.
         */
        s_time_t start = 0, end = STIME_MAX;
        struct timer **list_tail = &ts->list;

        while ( (GET_HEAP_SIZE(heap) != 0) &&
                ((t = heap[1])->expires <= end) )
        {
            remove_entry(ts, t);

            t->status = TIMER_STATUS_in_list;
            t->list_next = NULL;
            *list_tail = t;
            list_tail = &t->list_next;

            start = t->expires;
            if ( end > t->expires_end )
                end = t->expires_end;
        }

        this_cpu(timer_deadline) = start;
    }

    if ( !reprogram_timer(this_cpu(timer_deadline)) )
        raise_softirq(TIMER_SOFTIRQ);

    spin_unlock_irq(&ts->lock);
}

void process_pending_timers(void)
{
    unsigned int cpu = smp_processor_id();
    ASSERT(!in_irq() && local_irq_is_enabled());
    if ( test_and_clear_bit(TIMER_SOFTIRQ, &softirq_pending(cpu)) )
        timer_softirq_action();
}
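
/*
 * align_timer() rounds @firsttick up to the next multiple of @period; values
 * already on a period boundary are returned unchanged.  For example, with
 * firsttick=250 and period=100 the result is 250 + 99 - (249 % 100) = 300,
 * while firsttick=300 yields 300.
 */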
s_time_t align_timer(s_time_t firsttick, uint64_t period)
{
    if ( !period )
        return firsttick;

    return firsttick + (period - 1) - ((firsttick - 1) % period);
}

static void dump_timerq(unsigned char key)
{
    struct timer  *t;
    struct timers *ts;
    unsigned long  flags;
    s_time_t       now = NOW();
    int            i, j;

    printk("Dumping timer queues: NOW=0x%08X%08X\n",
           (u32)(now>>32), (u32)now);

    for_each_online_cpu( i )
    {
        ts = &per_cpu(timers, i);

        printk("CPU[%02d] ", i);
        spin_lock_irqsave(&ts->lock, flags);
        for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ )
        {
            t = ts->heap[j];
            printk(" %d : %p ex=0x%08X%08X %p %p\n",
                   j, t, (u32)(t->expires>>32), (u32)t->expires,
                   t->data, t->function);
        }
        for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ )
            printk(" L%d : %p ex=0x%08X%08X %p %p\n",
                   j, t, (u32)(t->expires>>32), (u32)t->expires,
                   t->data, t->function);
        spin_unlock_irqrestore(&ts->lock, flags);
        printk("\n");
    }
}

void __init timer_init(void)
{
    static struct timer *dummy_heap;
    int i;

    open_softirq(TIMER_SOFTIRQ, timer_softirq_action);

    /*
     * All CPUs initially share an empty dummy heap. Only those CPUs that
     * are brought online will be dynamically allocated their own heap.
     */
    SET_HEAP_SIZE(&dummy_heap, 0);
    SET_HEAP_LIMIT(&dummy_heap, 0);

    for_each_cpu ( i )
    {
        spin_lock_init(&per_cpu(timers, i).lock);
        per_cpu(timers, i).heap = &dummy_heap;
    }

    register_keyhandler('a', dump_timerq, "dump timer queues");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */