ia64/xen-unstable

view xen/common/timer.c @ 10570:8dc4af3f192c

[IA64] Implement and use DOM0_DOMAIN_SETUP.

DOM0_GETMEMLIST now reads PTEs and uses gpfns.
Domain builder reworked: calls DOMAIN_SETUP and sets up the start_info page.
SAL data are now in domain memory.
is_vti field added to domain.arch.
Many cleanups (indentation, static qualifiers, warnings).

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Wed Jul 05 09:28:32 2006 -0600 (2006-07-05)
parents b2323eefb79e
children 53f552ad4042
/******************************************************************************
 * timer.c
 *
 * Copyright (c) 2002-2003 Rolf Neugebauer
 * Copyright (c) 2002-2005 K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/smp.h>
#include <xen/perfc.h>
#include <xen/time.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <asm/system.h>
#include <asm/desc.h>

/*
 * We pull handlers off the timer list this far in the future, rather than
 * reprogramming the timer hardware.
 */
#define TIMER_SLOP (50*1000) /* ns */

struct timers {
    spinlock_t     lock;
    struct timer **heap;
    struct timer  *running;
} __cacheline_aligned;

struct timers timers[NR_CPUS];

extern int reprogram_timer(s_time_t timeout);

/****************************************************************************
 * HEAP OPERATIONS.
 */

#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))

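/*
 * Layout note (derived from the macros above): heap[0] is not a timer
 * pointer at all -- its storage is overlaid with two u16 fields holding
 * the current size and the allocated limit. Real entries occupy
 * heap[1]..heap[size], forming a 1-based binary min-heap ordered on
 * ->expires, so the soonest-expiring timer is always heap[1]. A
 * heap_offset of 0 therefore means "not in any heap" (remove_entry()
 * below resets it to 0), which is presumably what active_timer() tests.
 */
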
/* Sink down element @pos of @heap. */
static void down_heap(struct timer **heap, int pos)
{
    int sz = GET_HEAP_SIZE(heap), nxt;
    struct timer *t = heap[pos];

    while ( (nxt = (pos << 1)) <= sz )
    {
        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
            nxt++;
        if ( heap[nxt]->expires > t->expires )
            break;
        heap[pos] = heap[nxt];
        heap[pos]->heap_offset = pos;
        pos = nxt;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Float element @pos up @heap. */
static void up_heap(struct timer **heap, int pos)
{
    struct timer *t = heap[pos];

    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
    {
        heap[pos] = heap[pos>>1];
        heap[pos]->heap_offset = pos;
        pos >>= 1;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Delete @t from @heap. Return TRUE if new top of heap. */
static int remove_entry(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);
    int pos = t->heap_offset;

    t->heap_offset = 0;

    if ( unlikely(pos == sz) )
    {
        SET_HEAP_SIZE(heap, sz-1);
        goto out;
    }

    heap[pos] = heap[sz];
    heap[pos]->heap_offset = pos;

    SET_HEAP_SIZE(heap, --sz);

    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
        up_heap(heap, pos);
    else
        down_heap(heap, pos);

 out:
    return (pos == 1);
}

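/*
 * Growth note for add_entry() below: heap limits follow the pattern
 * (2^n)-1, and each reallocation steps from (2^n)-1 to (2^(n+4))-1, a
 * 16x capacity jump, with the old contents copied across. The initial
 * zero-limit dummy heap (see timer_init() at the end of this file) is
 * static rather than xmalloc'd, which is why old_limit == 0 skips the
 * xfree().
 */
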
/* Add new entry @t to @heap. Return TRUE if new top of heap. */
static int add_entry(struct timer ***pheap, struct timer *t)
{
    struct timer **heap = *pheap;
    int sz = GET_HEAP_SIZE(heap);

    /* Copy the heap if it is full. */
    if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
    {
        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
        int old_limit = GET_HEAP_LIMIT(heap);
        int new_limit = ((old_limit + 1) << 4) - 1;
        heap = xmalloc_array(struct timer *, new_limit + 1);
        BUG_ON(heap == NULL);
        memcpy(heap, *pheap, (old_limit + 1) * sizeof(*heap));
        SET_HEAP_LIMIT(heap, new_limit);
        if ( old_limit != 0 )
            xfree(*pheap);
        *pheap = heap;
    }

    SET_HEAP_SIZE(heap, ++sz);
    heap[sz] = t;
    t->heap_offset = sz;
    up_heap(heap, sz);
    return (t->heap_offset == 1);
}

/****************************************************************************
 * TIMER OPERATIONS.
 */

static inline void __add_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( add_entry(&timers[cpu].heap, timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

static inline void __stop_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( remove_entry(timers[cpu].heap, timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

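/*
 * timer_lock() must loop: timer->cpu can be changed at any moment by a
 * concurrent migrate_timer() on another CPU. We therefore lock the CPU
 * we last observed, re-check that timer->cpu still matches, and retry
 * with the new CPU if the timer moved in the meantime.
 */
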
static inline void timer_lock(struct timer *timer)
{
    unsigned int cpu;

    for ( ; ; )
    {
        cpu = timer->cpu;
        spin_lock(&timers[cpu].lock);
        if ( likely(timer->cpu == cpu) )
            break;
        spin_unlock(&timers[cpu].lock);
    }
}

#define timer_lock_irq(t) \
    do { local_irq_disable(); timer_lock(t); } while ( 0 )
#define timer_lock_irqsave(t, flags) \
    do { local_irq_save(flags); timer_lock(t); } while ( 0 )

static inline void timer_unlock(struct timer *timer)
{
    spin_unlock(&timers[timer->cpu].lock);
}

#define timer_unlock_irq(t) \
    do { timer_unlock(t); local_irq_enable(); } while ( 0 )
#define timer_unlock_irqrestore(t, flags) \
    do { timer_unlock(t); local_irq_restore(flags); } while ( 0 )

void set_timer(struct timer *timer, s_time_t expires)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer->expires = expires;

    if ( likely(!timer->killed) )
        __add_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}

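/*
 * Typical usage sketch (illustrative only, not part of this file; it
 * assumes the init_timer() helper declared in xen/timer.h with its
 * usual (function, data, cpu) signature, and MILLISECS() from
 * xen/time.h):
 *
 *     static struct timer my_timer;
 *
 *     static void my_fn(void *data)
 *     {
 *         set_timer(&my_timer, NOW() + MILLISECS(10));   (re-arm)
 *     }
 *
 *     init_timer(&my_timer, my_fn, NULL, smp_processor_id());
 *     set_timer(&my_timer, NOW() + MILLISECS(10));
 *     ...
 *     kill_timer(&my_timer);
 *
 * Handlers run from the timer softirq with the per-CPU lock dropped,
 * so calling set_timer() from within a handler is safe.
 */
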
void stop_timer(struct timer *timer)
{
    unsigned long flags;

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}

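/*
 * migrate_timer() below must hold both the old and the new CPU's timer
 * lock at once. To avoid ABBA deadlock against a concurrent migration
 * in the opposite direction, the two locks are always taken in a fixed
 * global order: lower CPU index first.
 */
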
void migrate_timer(struct timer *timer, unsigned int new_cpu)
{
    int old_cpu;
    unsigned long flags;

    for ( ; ; )
    {
        if ( (old_cpu = timer->cpu) == new_cpu )
            return;

        if ( old_cpu < new_cpu )
        {
            spin_lock_irqsave(&timers[old_cpu].lock, flags);
            spin_lock(&timers[new_cpu].lock);
        }
        else
        {
            spin_lock_irqsave(&timers[new_cpu].lock, flags);
            spin_lock(&timers[old_cpu].lock);
        }

        if ( likely(timer->cpu == old_cpu) )
            break;

        spin_unlock(&timers[old_cpu].lock);
        spin_unlock_irqrestore(&timers[new_cpu].lock, flags);
    }

    if ( active_timer(timer) )
    {
        __stop_timer(timer);
        timer->cpu = new_cpu;
        __add_timer(timer);
    }
    else
    {
        timer->cpu = new_cpu;
    }

    spin_unlock(&timers[old_cpu].lock);
    spin_unlock_irqrestore(&timers[new_cpu].lock, flags);
}

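/*
 * kill_timer() marks the timer as killed, so a racing set_timer() can
 * no longer re-add it, and then spins until no CPU is still executing
 * its handler (tracked via timers[cpu].running). Once it returns, the
 * timer structure can safely be freed.
 */
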
void kill_timer(struct timer *timer)
{
    int cpu;
    unsigned long flags;

    BUG_ON(timers[smp_processor_id()].running == timer);

    timer_lock_irqsave(timer, flags);

    if ( active_timer(timer) )
        __stop_timer(timer);
    timer->killed = 1;

    timer_unlock_irqrestore(timer, flags);

    for_each_online_cpu ( cpu )
        while ( timers[cpu].running == timer )
            cpu_relax();
}

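/*
 * The softirq action below pops every timer due within TIMER_SLOP of
 * now and invokes its handler with the per-CPU lock dropped, using
 * timers[cpu].running as the marker that kill_timer() synchronises
 * against. The outer loop retries until reprogram_timer() accepts the
 * next deadline (0 when the heap is empty); a failure presumably means
 * the deadline has already passed, so the expiry scan must run again.
 */
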
static void timer_softirq_action(void)
{
    int cpu = smp_processor_id();
    struct timer *t, **heap;
    s_time_t now;
    void (*fn)(void *);
    void *data;

    spin_lock_irq(&timers[cpu].lock);

    do {
        heap = timers[cpu].heap;
        now = NOW();

        while ( (GET_HEAP_SIZE(heap) != 0) &&
                ((t = heap[1])->expires < (now + TIMER_SLOP)) )
        {
            remove_entry(heap, t);

            timers[cpu].running = t;

            fn = t->function;
            data = t->data;

            spin_unlock_irq(&timers[cpu].lock);
            (*fn)(data);
            spin_lock_irq(&timers[cpu].lock);

            /* Heap may have grown while the lock was released. */
            heap = timers[cpu].heap;
        }

        timers[cpu].running = NULL;
    }
    while ( !reprogram_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );

    spin_unlock_irq(&timers[cpu].lock);
}

static void dump_timerq(unsigned char key)
{
    struct timer *t;
    unsigned long flags;
    s_time_t      now = NOW();
    int           i, j;

    printk("Dumping timer queues: NOW=0x%08X%08X\n",
           (u32)(now>>32), (u32)now);

    for_each_online_cpu( i )
    {
        printk("CPU[%02d] ", i);
        spin_lock_irqsave(&timers[i].lock, flags);
        for ( j = 1; j <= GET_HEAP_SIZE(timers[i].heap); j++ )
        {
            t = timers[i].heap[j];
            printk (" %d : %p ex=0x%08X%08X %p\n",
                    j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
        }
        spin_unlock_irqrestore(&timers[i].lock, flags);
        printk("\n");
    }
}

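/*
 * Subtlety in timer_init() below: dummy_heap is a single static
 * pointer, and SET_HEAP_SIZE()/SET_HEAP_LIMIT() write their u16
 * metadata into that pointer's own storage (via &dummy_heap). All CPUs
 * start out sharing this zero-sized, zero-limit heap; the first
 * add_entry() on a CPU then finds size == limit and allocates that CPU
 * a real heap of its own.
 */
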
void __init timer_init(void)
{
    static struct timer *dummy_heap;
    int i;

    open_softirq(TIMER_SOFTIRQ, timer_softirq_action);

    /*
     * All CPUs initially share an empty dummy heap. Only those CPUs that
     * are brought online will be dynamically allocated their own heap.
     */
    SET_HEAP_SIZE(&dummy_heap, 0);
    SET_HEAP_LIMIT(&dummy_heap, 0);

    for ( i = 0; i < NR_CPUS; i++ )
    {
        spin_lock_init(&timers[i].lock);
        timers[i].heap = &dummy_heap;
    }

    register_keyhandler('a', dump_timerq, "dump timer queues");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */