ia64/xen-unstable

xen/common/timer.c @ 9776:72f9c751d3ea

Replace &foo[0] with foo where the latter seems cleaner
(which it usually is, particularly when it's an argument
to one of the bitops functions).
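
For example, with the usual set_bit(int nr, volatile void *addr) bitop
prototype, both calls below pass the same address once the array decays
to a pointer (an illustrative sketch; the names are made up, not taken
from the patch):

    static unsigned long pending[4];

    static void example(void)
    {
        set_bit(0, &pending[0]);  /* old style */
        set_bit(0, pending);      /* new style: same address, less noise */
    }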

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Apr 19 18:32:20 2006 +0100 (2006-04-19)
parents 974ed9f73641
children b2323eefb79e
/******************************************************************************
 * timer.c
 *
 * Copyright (c) 2002-2003 Rolf Neugebauer
 * Copyright (c) 2002-2005 K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/smp.h>
#include <xen/perfc.h>
#include <xen/time.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <asm/system.h>
#include <asm/desc.h>

/*
 * We pull handlers off the timer list this far in the future,
 * rather than reprogramming the timer hardware.
 */
#define TIMER_SLOP (50*1000) /* ns */

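/*
 * Per-CPU timer state. The lock protects the heap of pending timers;
 * 'running' is the timer whose handler is currently executing on this
 * CPU, if any.
 */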
struct timers {
    spinlock_t     lock;
    struct timer **heap;
    struct timer  *running;
} __cacheline_aligned;

struct timers timers[NR_CPUS];

extern int reprogram_timer(s_time_t timeout);

/****************************************************************************
 * HEAP OPERATIONS.
 */

#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))

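/*
 * The heap is 1-based: heap[1] is the root. heap[0] is never used as a
 * timer pointer; its storage instead holds two u16 fields, the current
 * heap size and the allocated limit, accessed via the macros above.
 */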
/* Sink down element @pos of @heap. */
static void down_heap(struct timer **heap, int pos)
{
    int sz = GET_HEAP_SIZE(heap), nxt;
    struct timer *t = heap[pos];

    while ( (nxt = (pos << 1)) <= sz )
    {
        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
            nxt++;
        if ( heap[nxt]->expires > t->expires )
            break;
        heap[pos] = heap[nxt];
        heap[pos]->heap_offset = pos;
        pos = nxt;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Float element @pos up @heap. */
static void up_heap(struct timer **heap, int pos)
{
    struct timer *t = heap[pos];

    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
    {
        heap[pos] = heap[pos>>1];
        heap[pos]->heap_offset = pos;
        pos >>= 1;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Delete @t from @heap. Return TRUE if new top of heap. */
static int remove_entry(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);
    int pos = t->heap_offset;

    t->heap_offset = 0;

    if ( unlikely(pos == sz) )
    {
        SET_HEAP_SIZE(heap, sz-1);
        goto out;
    }

    heap[pos] = heap[sz];
    heap[pos]->heap_offset = pos;

    SET_HEAP_SIZE(heap, --sz);

    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
        up_heap(heap, pos);
    else
        down_heap(heap, pos);

 out:
    return (pos == 1);
}

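/*
 * The heap array grows 16-fold whenever it fills: the limit goes from
 * (2^n)-1 to (2^(n+4))-1 entries. The initial dummy heap has limit 0
 * and is static (see timer_init()), which is why add_entry() only
 * xfree()s the old array when old_limit != 0.
 */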
/* Add new entry @t to @heap. Return TRUE if new top of heap. */
static int add_entry(struct timer ***pheap, struct timer *t)
{
    struct timer **heap = *pheap;
    int sz = GET_HEAP_SIZE(heap);

    /* Copy the heap if it is full. */
    if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
    {
        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
        int old_limit = GET_HEAP_LIMIT(heap);
        int new_limit = ((old_limit + 1) << 4) - 1;
        heap = xmalloc_array(struct timer *, new_limit + 1);
        BUG_ON(heap == NULL);
        memcpy(heap, *pheap, (old_limit + 1) * sizeof(*heap));
        SET_HEAP_LIMIT(heap, new_limit);
        if ( old_limit != 0 )
            xfree(*pheap);
        *pheap = heap;
    }

    SET_HEAP_SIZE(heap, ++sz);
    heap[sz] = t;
    t->heap_offset = sz;
    up_heap(heap, sz);
    return (t->heap_offset == 1);
}

/****************************************************************************
 * TIMER OPERATIONS.
 */

static inline void __add_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( add_entry(&timers[cpu].heap, timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

static inline void __stop_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( remove_entry(timers[cpu].heap, timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

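/*
 * Lock the queue that @timer currently belongs to. Since the timer may
 * be migrated to another CPU concurrently, re-check timer->cpu after
 * acquiring the lock and retry if it changed.
 */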
static inline void timer_lock(struct timer *timer)
{
    unsigned int cpu;

    for ( ; ; )
    {
        cpu = timer->cpu;
        spin_lock(&timers[cpu].lock);
        if ( likely(timer->cpu == cpu) )
            break;
        spin_unlock(&timers[cpu].lock);
    }
}

#define timer_lock_irq(t) \
    do { local_irq_disable(); timer_lock(t); } while ( 0 )
#define timer_lock_irqsave(t, flags) \
    do { local_irq_save(flags); timer_lock(t); } while ( 0 )

static inline void timer_unlock(struct timer *timer)
{
    spin_unlock(&timers[timer->cpu].lock);
}

#define timer_unlock_irq(t) \
    do { timer_unlock(t); local_irq_enable(); } while ( 0 )
#define timer_unlock_irqrestore(t, flags) \
    do { timer_unlock(t); local_irq_restore(flags); } while ( 0 )

void set_timer(struct timer *timer, s_time_t expires)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer->expires = expires;

    if ( likely(!timer->killed) )
        __add_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}

void stop_timer(struct timer *timer)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}

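/*
 * Moving a timer between CPUs requires both per-CPU locks. They are
 * taken in ascending CPU-index order so that two concurrent migrations
 * cannot deadlock, and timer->cpu is re-checked after locking in case
 * the timer moved in the meantime.
 */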
void migrate_timer(struct timer *timer, unsigned int new_cpu)
{
    int old_cpu;
    unsigned long flags;

    for ( ; ; )
    {
        if ( (old_cpu = timer->cpu) == new_cpu )
            return;

        if ( old_cpu < new_cpu )
        {
            spin_lock_irqsave(&timers[old_cpu].lock, flags);
            spin_lock(&timers[new_cpu].lock);
        }
        else
        {
            spin_lock_irqsave(&timers[new_cpu].lock, flags);
            spin_lock(&timers[old_cpu].lock);
        }

        if ( likely(timer->cpu == old_cpu) )
            break;

        spin_unlock(&timers[old_cpu].lock);
        spin_unlock_irqrestore(&timers[new_cpu].lock, flags);
    }

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer->cpu = new_cpu;

    if ( likely(!timer->killed) )
        __add_timer(timer);

    spin_unlock(&timers[old_cpu].lock);
    spin_unlock_irqrestore(&timers[new_cpu].lock, flags);
}

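/*
 * Mark a timer as dead: set_timer() will no longer re-add it, and we
 * wait until no CPU is still running its handler, so the caller can
 * safely free the timer structure afterwards.
 */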
void kill_timer(struct timer *timer)
{
    int cpu;
    unsigned long flags;

    BUG_ON(timers[smp_processor_id()].running == timer);

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);
    timer->killed = 1;

    timer_unlock_irqrestore(timer, flags);

    for_each_online_cpu ( cpu )
        while ( timers[cpu].running == timer )
            cpu_relax();
}

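/*
 * TIMER_SOFTIRQ handler: execute every timer on this CPU's heap that
 * expires within TIMER_SLOP of now, dropping the queue lock around
 * each handler invocation, then program the hardware for the next
 * pending expiry. The outer loop retries if reprogram_timer() fails,
 * e.g. because the chosen deadline has already passed.
 */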
static void timer_softirq_action(void)
{
    int cpu = smp_processor_id();
    struct timer *t, **heap;
    s_time_t now;
    void (*fn)(void *);
    void *data;

    spin_lock_irq(&timers[cpu].lock);

    do {
        heap = timers[cpu].heap;
        now  = NOW();

        while ( (GET_HEAP_SIZE(heap) != 0) &&
                ((t = heap[1])->expires < (now + TIMER_SLOP)) )
        {
            remove_entry(heap, t);

            timers[cpu].running = t;

            fn   = t->function;
            data = t->data;

            spin_unlock_irq(&timers[cpu].lock);
            (*fn)(data);
            spin_lock_irq(&timers[cpu].lock);

            /* Heap may have grown while the lock was released. */
            heap = timers[cpu].heap;
        }

        timers[cpu].running = NULL;
    }
    while ( !reprogram_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );

    spin_unlock_irq(&timers[cpu].lock);
}

static void dump_timerq(unsigned char key)
{
    struct timer *t;
    unsigned long flags;
    s_time_t now = NOW();
    int i, j;

    printk("Dumping timer queues: NOW=0x%08X%08X\n",
           (u32)(now>>32), (u32)now);

    for_each_online_cpu( i )
    {
        printk("CPU[%02d] ", i);
        spin_lock_irqsave(&timers[i].lock, flags);
        for ( j = 1; j <= GET_HEAP_SIZE(timers[i].heap); j++ )
        {
            t = timers[i].heap[j];
            printk("  %d : %p ex=0x%08X%08X %p\n",
                   j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
        }
        spin_unlock_irqrestore(&timers[i].lock, flags);
        printk("\n");
    }
}

void __init timer_init(void)
{
    static struct timer *dummy_heap;
    int i;

    open_softirq(TIMER_SOFTIRQ, timer_softirq_action);

    /*
     * All CPUs initially share an empty dummy heap. Only those CPUs that
     * are brought online will be dynamically allocated their own heap.
     */
    SET_HEAP_SIZE(&dummy_heap, 0);
    SET_HEAP_LIMIT(&dummy_heap, 0);

    for ( i = 0; i < NR_CPUS; i++ )
    {
        spin_lock_init(&timers[i].lock);
        timers[i].heap = &dummy_heap;
    }

    register_keyhandler('a', dump_timerq, "dump timer queues");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */