direct-io.hg

view xen/common/timer.c @ 11135:88e6bd5e2b54

Whitespace clean-ups.

Signed-off-by: Steven Hand <steven@xensource.com>
author shand@kneesaa.uk.xensource.com
date Wed Aug 16 11:36:13 2006 +0100 (2006-08-16)
parents e8bdf6a8b74a
children 80c5350a68f1
/******************************************************************************
 * timer.c
 *
 * Copyright (c) 2002-2003 Rolf Neugebauer
 * Copyright (c) 2002-2005 K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/smp.h>
#include <xen/perfc.h>
#include <xen/time.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <xen/percpu.h>
#include <asm/system.h>
#include <asm/desc.h>

/*
 * We pull handlers off the timer list this far in the future,
 * rather than reprogramming the timer hardware.
 */
#define TIMER_SLOP (50*1000) /* ns */

struct timers {
    spinlock_t     lock;
    struct timer **heap;
    struct timer  *running;
} __cacheline_aligned;

static DEFINE_PER_CPU(struct timers, timers);

extern int reprogram_timer(s_time_t timeout);

/****************************************************************************
 * HEAP OPERATIONS.
 */

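/*
 * The heap's current size and allocated limit are stored in the otherwise
 * unused element zero of the heap array, overlaid as two u16 fields.
 * Real heap entries therefore live at indices 1..size, with heap[1] the
 * earliest-expiring timer.
 */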
#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))

/* Sink down element @pos of @heap. */
static void down_heap(struct timer **heap, int pos)
{
    int sz = GET_HEAP_SIZE(heap), nxt;
    struct timer *t = heap[pos];

    while ( (nxt = (pos << 1)) <= sz )
    {
        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
            nxt++;
        if ( heap[nxt]->expires > t->expires )
            break;
        heap[pos] = heap[nxt];
        heap[pos]->heap_offset = pos;
        pos = nxt;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Float element @pos up @heap. */
static void up_heap(struct timer **heap, int pos)
{
    struct timer *t = heap[pos];

    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
    {
        heap[pos] = heap[pos>>1];
        heap[pos]->heap_offset = pos;
        pos >>= 1;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Delete @t from @heap. Return TRUE if new top of heap. */
static int remove_entry(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);
    int pos = t->heap_offset;

    t->heap_offset = 0;

    if ( unlikely(pos == sz) )
    {
        SET_HEAP_SIZE(heap, sz-1);
        goto out;
    }

    heap[pos] = heap[sz];
    heap[pos]->heap_offset = pos;

    SET_HEAP_SIZE(heap, --sz);

    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
        up_heap(heap, pos);
    else
        down_heap(heap, pos);

 out:
    return (pos == 1);
}

/* Add new entry @t to @heap. Return TRUE if new top of heap. */
static int add_entry(struct timer ***pheap, struct timer *t)
{
    struct timer **heap = *pheap;
    int sz = GET_HEAP_SIZE(heap);

    /* Copy the heap if it is full. */
    if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
    {
        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
        int old_limit = GET_HEAP_LIMIT(heap);
        int new_limit = ((old_limit + 1) << 4) - 1;
        heap = xmalloc_array(struct timer *, new_limit + 1);
        BUG_ON(heap == NULL);
        memcpy(heap, *pheap, (old_limit + 1) * sizeof(*heap));
        SET_HEAP_LIMIT(heap, new_limit);
        if ( old_limit != 0 )
            xfree(*pheap);
        *pheap = heap;
    }

    SET_HEAP_SIZE(heap, ++sz);
    heap[sz] = t;
    t->heap_offset = sz;
    up_heap(heap, sz);
    return (t->heap_offset == 1);
}

/****************************************************************************
 * TIMER OPERATIONS.
 */

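/*
 * Insert @timer into its target CPU's heap. If it becomes the new head of
 * the heap (i.e. the earliest-expiring timer), raise TIMER_SOFTIRQ on that
 * CPU so that the hardware timer gets reprogrammed.
 */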
static inline void __add_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( add_entry(&per_cpu(timers, cpu).heap, timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

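/*
 * Remove @timer from its CPU's heap. Raise TIMER_SOFTIRQ if the head of
 * the heap changed, so the next deadline can be reprogrammed.
 */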
static inline void __stop_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( remove_entry(per_cpu(timers, cpu).heap, timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

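/*
 * Lock the per-CPU timer state that @timer currently belongs to. The timer
 * may be migrated to another CPU concurrently, so re-check timer->cpu after
 * acquiring the lock and retry if it has moved.
 */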
static inline void timer_lock(struct timer *timer)
{
    unsigned int cpu;

    for ( ; ; )
    {
        cpu = timer->cpu;
        spin_lock(&per_cpu(timers, cpu).lock);
        if ( likely(timer->cpu == cpu) )
            break;
        spin_unlock(&per_cpu(timers, cpu).lock);
    }
}

#define timer_lock_irq(t) \
    do { local_irq_disable(); timer_lock(t); } while ( 0 )
#define timer_lock_irqsave(t, flags) \
    do { local_irq_save(flags); timer_lock(t); } while ( 0 )

static inline void timer_unlock(struct timer *timer)
{
    spin_unlock(&per_cpu(timers, timer->cpu).lock);
}

#define timer_unlock_irq(t) \
    do { timer_unlock(t); local_irq_enable(); } while ( 0 )
#define timer_unlock_irqrestore(t, flags) \
    do { timer_unlock(t); local_irq_restore(flags); } while ( 0 )

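/*
 * (Re)arm @timer to fire at @expires. If the timer is already active it is
 * first removed from its heap; a timer that has been killed is never
 * re-added.
 */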
void set_timer(struct timer *timer, s_time_t expires)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer->expires = expires;

    if ( likely(!timer->killed) )
        __add_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}

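/*
 * Deactivate @timer without destroying it. The structure stays valid and
 * may be re-armed later with set_timer().
 */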
void stop_timer(struct timer *timer)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}

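/*
 * Move @timer to @new_cpu's heap. Both per-CPU locks are required; they are
 * always taken in ascending CPU order to avoid deadlock, and timer->cpu is
 * re-checked after locking in case of a concurrent migration.
 */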
void migrate_timer(struct timer *timer, unsigned int new_cpu)
{
    int old_cpu;
    unsigned long flags;

    for ( ; ; )
    {
        if ( (old_cpu = timer->cpu) == new_cpu )
            return;

        if ( old_cpu < new_cpu )
        {
            spin_lock_irqsave(&per_cpu(timers, old_cpu).lock, flags);
            spin_lock(&per_cpu(timers, new_cpu).lock);
        }
        else
        {
            spin_lock_irqsave(&per_cpu(timers, new_cpu).lock, flags);
            spin_lock(&per_cpu(timers, old_cpu).lock);
        }

        if ( likely(timer->cpu == old_cpu) )
            break;

        spin_unlock(&per_cpu(timers, old_cpu).lock);
        spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
    }

    if ( active_timer(timer) )
    {
        __stop_timer(timer);
        timer->cpu = new_cpu;
        __add_timer(timer);
    }
    else
    {
        timer->cpu = new_cpu;
    }

    spin_unlock(&per_cpu(timers, old_cpu).lock);
    spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
}

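/*
 * Permanently deactivate @timer: remove it from its heap, mark it killed so
 * that set_timer() will not re-add it, and then wait for any handler still
 * running on another CPU to complete.
 */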
void kill_timer(struct timer *timer)
{
    int cpu;
    unsigned long flags;

    BUG_ON(this_cpu(timers).running == timer);

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);
    timer->killed = 1;

    timer_unlock_irqrestore(timer, flags);

    for_each_online_cpu ( cpu )
        while ( per_cpu(timers, cpu).running == timer )
            cpu_relax();
}

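/*
 * TIMER_SOFTIRQ handler: run every timer on this CPU's heap whose deadline
 * falls within TIMER_SLOP of now, dropping the lock around each handler
 * invocation. The loop repeats until reprogram_timer() accepts the next
 * deadline, re-scanning the heap in case it expired while handlers ran.
 */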
static void timer_softirq_action(void)
{
    struct timer  *t, **heap;
    struct timers *ts;
    s_time_t       now;
    void         (*fn)(void *);
    void          *data;

    ts = &this_cpu(timers);

    spin_lock_irq(&ts->lock);

    do {
        heap = ts->heap;
        now  = NOW();

        while ( (GET_HEAP_SIZE(heap) != 0) &&
                ((t = heap[1])->expires < (now + TIMER_SLOP)) )
        {
            remove_entry(heap, t);

            ts->running = t;

            fn   = t->function;
            data = t->data;

            spin_unlock_irq(&ts->lock);
            (*fn)(data);
            spin_lock_irq(&ts->lock);

            /* Heap may have grown while the lock was released. */
            heap = ts->heap;
        }

        ts->running = NULL;
    }
    while ( !reprogram_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );

    spin_unlock_irq(&ts->lock);
}

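/*
 * Explicitly drain any pending TIMER_SOFTIRQ work on the current CPU. Must
 * be called with interrupts enabled and outside interrupt context (see the
 * ASSERT below).
 */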
void process_pending_timers(void)
{
    unsigned int cpu = smp_processor_id();
    ASSERT(!in_irq() && local_irq_is_enabled());
    if ( test_and_clear_bit(TIMER_SOFTIRQ, &softirq_pending(cpu)) )
        timer_softirq_action();
}

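/* 'a' debug-key handler: dump every online CPU's timer heap to the console. */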
static void dump_timerq(unsigned char key)
{
    struct timer  *t;
    struct timers *ts;
    unsigned long  flags;
    s_time_t       now = NOW();
    int            i, j;

    printk("Dumping timer queues: NOW=0x%08X%08X\n",
           (u32)(now>>32), (u32)now);

    for_each_online_cpu( i )
    {
        ts = &per_cpu(timers, i);

        printk("CPU[%02d] ", i);
        spin_lock_irqsave(&ts->lock, flags);
        for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ )
        {
            t = ts->heap[j];
            printk(" %d : %p ex=0x%08X%08X %p\n",
                   j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
        }
        spin_unlock_irqrestore(&ts->lock, flags);
        printk("\n");
    }
}

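/*
 * Initialise the timer subsystem: register the softirq and debug-key
 * handlers and point every CPU at the shared, empty dummy heap. A real heap
 * is allocated by add_entry() the first time a CPU adds a timer.
 */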
void __init timer_init(void)
{
    static struct timer *dummy_heap;
    int i;

    open_softirq(TIMER_SOFTIRQ, timer_softirq_action);

    /*
     * All CPUs initially share an empty dummy heap. Only those CPUs that
     * are brought online will be dynamically allocated their own heap.
     */
    SET_HEAP_SIZE(&dummy_heap, 0);
    SET_HEAP_LIMIT(&dummy_heap, 0);

    for ( i = 0; i < NR_CPUS; i++ )
    {
        spin_lock_init(&per_cpu(timers, i).lock);
        per_cpu(timers, i).heap = &dummy_heap;
    }

    register_keyhandler('a', dump_timerq, "dump timer queues");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */