ia64/xen-unstable

view xen/common/timer.c @ 8609:85d693e6f61a

Arch-specific per-vcpu info should be initialised to zero
when allocating a new vcpu structure, not copied from
CPU0's idle VCPU. Especially now that the idle VCPU itself
is dynamically allocated.

This should fix assertions people have been seeing in
getdomain_info_ctxt() relating to IOPL in eflags.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Jan 14 21:26:40 2006 +0100 (2006-01-14)
parents cc2f35c83b4c
children 974ed9f73641
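
The change itself lives in the arch-specific vcpu allocation path, not in this
file. As a minimal sketch of the idea described above (the function name, the
'arch' field and the idle_vcpu reference are assumptions for illustration, not
the actual patch), allocation would clear the whole structure instead of
copying CPU0's idle VCPU:

/* Hypothetical sketch only; names are illustrative. */
struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
{
    struct vcpu *v;

    if ( (v = xmalloc(struct vcpu)) == NULL )
        return NULL;

    /* Zero everything, including the arch-specific per-vcpu state ... */
    memset(v, 0, sizeof(*v));

    /* ... rather than inheriting it from the idle VCPU, e.g.:
     *   memcpy(&v->arch, &idle_vcpu[0]->arch, sizeof(v->arch)); */

    v->domain  = d;
    v->vcpu_id = vcpu_id;

    return v;
}
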
/******************************************************************************
 * timer.c
 *
 * Copyright (c) 2002-2003 Rolf Neugebauer
 * Copyright (c) 2002-2005 K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/smp.h>
#include <xen/perfc.h>
#include <xen/time.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <asm/system.h>
#include <asm/desc.h>

/*
 * We pull handlers off the timer list this far in the future,
 * rather than reprogramming the timer hardware.
 */
#define TIMER_SLOP (50*1000) /* ns */

struct timers {
    spinlock_t     lock;
    struct timer **heap;
    struct timer  *running;
} __cacheline_aligned;

struct timers timers[NR_CPUS];

extern int reprogram_timer(s_time_t timeout);

/****************************************************************************
 * HEAP OPERATIONS.
 */

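/*
 * Each heap is a 1-indexed array of timer pointers. Element 0 is overloaded
 * to hold the heap metadata: the current size in the first u16 and the
 * allocated limit in the second u16, accessed via the macros below.
 */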
#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))

/* Sink down element @pos of @heap. */
static void down_heap(struct timer **heap, int pos)
{
    int sz = GET_HEAP_SIZE(heap), nxt;
    struct timer *t = heap[pos];

    while ( (nxt = (pos << 1)) <= sz )
    {
        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
            nxt++;
        if ( heap[nxt]->expires > t->expires )
            break;
        heap[pos] = heap[nxt];
        heap[pos]->heap_offset = pos;
        pos = nxt;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Float element @pos up @heap. */
static void up_heap(struct timer **heap, int pos)
{
    struct timer *t = heap[pos];

    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
    {
        heap[pos] = heap[pos>>1];
        heap[pos]->heap_offset = pos;
        pos >>= 1;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Delete @t from @heap. Return TRUE if new top of heap. */
static int remove_entry(struct timer **heap, struct timer *t)
{
    int sz = GET_HEAP_SIZE(heap);
    int pos = t->heap_offset;

    t->heap_offset = 0;

    if ( unlikely(pos == sz) )
    {
        SET_HEAP_SIZE(heap, sz-1);
        goto out;
    }

    heap[pos] = heap[sz];
    heap[pos]->heap_offset = pos;

    SET_HEAP_SIZE(heap, --sz);

    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
        up_heap(heap, pos);
    else
        down_heap(heap, pos);

 out:
    return (pos == 1);
}

/* Add new entry @t to @heap. Return TRUE if new top of heap. */
static int add_entry(struct timer ***pheap, struct timer *t)
{
    struct timer **heap = *pheap;
    int sz = GET_HEAP_SIZE(heap);

    /* Copy the heap if it is full. */
    if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
    {
        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
        int old_limit = GET_HEAP_LIMIT(heap);
        int new_limit = ((old_limit + 1) << 4) - 1;
        heap = xmalloc_array(struct timer *, new_limit + 1);
        BUG_ON(heap == NULL);
        memcpy(heap, *pheap, (old_limit + 1) * sizeof(*heap));
        SET_HEAP_LIMIT(heap, new_limit);
        if ( old_limit != 0 )
            xfree(*pheap);
        *pheap = heap;
    }

    SET_HEAP_SIZE(heap, ++sz);
    heap[sz] = t;
    t->heap_offset = sz;
    up_heap(heap, sz);
    return (t->heap_offset == 1);
}

/****************************************************************************
 * TIMER OPERATIONS.
 */

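/*
 * The helpers below raise TIMER_SOFTIRQ on the owning CPU whenever the head
 * of its heap changes, so that timer_softirq_action() can reprogram the
 * hardware timer for the new earliest deadline.
 */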
static inline void __add_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( add_entry(&timers[cpu].heap, timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

static inline void __stop_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    if ( remove_entry(timers[cpu].heap, timer) )
        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
}

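/* (Re)arm @timer to fire at @expires, unless it has already been killed. */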
void set_timer(struct timer *timer, s_time_t expires)
{
    int cpu = timer->cpu;
    unsigned long flags;

    spin_lock_irqsave(&timers[cpu].lock, flags);
    if ( active_timer(timer) )
        __stop_timer(timer);
    timer->expires = expires;
    if ( likely(!timer->killed) )
        __add_timer(timer);
    spin_unlock_irqrestore(&timers[cpu].lock, flags);
}

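/* Deactivate @timer. It may subsequently be re-armed with set_timer(). */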
void stop_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    unsigned long flags;

    spin_lock_irqsave(&timers[cpu].lock, flags);
    if ( active_timer(timer) )
        __stop_timer(timer);
    spin_unlock_irqrestore(&timers[cpu].lock, flags);
}

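/*
 * Deactivate @timer and mark it killed so it can never be re-armed, then
 * spin until its handler is no longer running on any CPU. After this the
 * caller may safely free the timer structure.
 */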
void kill_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    unsigned long flags;

    BUG_ON(timers[cpu].running == timer);

    spin_lock_irqsave(&timers[cpu].lock, flags);
    if ( active_timer(timer) )
        __stop_timer(timer);
    timer->killed = 1;
    spin_unlock_irqrestore(&timers[cpu].lock, flags);

    for_each_online_cpu ( cpu )
        while ( timers[cpu].running == timer )
            cpu_relax();
}

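/*
 * Execute all expired timers on this CPU. The lock is dropped around each
 * handler invocation, and the outer loop repeats until the hardware timer
 * has been successfully reprogrammed for the next pending deadline.
 */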
static void timer_softirq_action(void)
{
    int cpu = smp_processor_id();
    struct timer *t, **heap;
    s_time_t now;
    void (*fn)(void *);
    void *data;

    spin_lock_irq(&timers[cpu].lock);

    do {
        heap = timers[cpu].heap;
        now  = NOW();

        while ( (GET_HEAP_SIZE(heap) != 0) &&
                ((t = heap[1])->expires < (now + TIMER_SLOP)) )
        {
            remove_entry(heap, t);

            timers[cpu].running = t;

            fn   = t->function;
            data = t->data;

            spin_unlock_irq(&timers[cpu].lock);
            (*fn)(data);
            spin_lock_irq(&timers[cpu].lock);

            /* Heap may have grown while the lock was released. */
            heap = timers[cpu].heap;
        }

        timers[cpu].running = NULL;
    }
    while ( !reprogram_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );

    spin_unlock_irq(&timers[cpu].lock);
}

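/* 'a' debug-key handler: dump every online CPU's timer heap to the console. */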
static void dump_timerq(unsigned char key)
{
    struct timer *t;
    unsigned long flags;
    s_time_t now = NOW();
    int i, j;

    printk("Dumping timer queues: NOW=0x%08X%08X\n",
           (u32)(now>>32), (u32)now);

    for_each_online_cpu( i )
    {
        printk("CPU[%02d] ", i);
        spin_lock_irqsave(&timers[i].lock, flags);
        for ( j = 1; j <= GET_HEAP_SIZE(timers[i].heap); j++ )
        {
            t = timers[i].heap[j];
            printk("  %d : %p ex=0x%08X%08X %p\n",
                   j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
        }
        spin_unlock_irqrestore(&timers[i].lock, flags);
        printk("\n");
    }
}

void __init timer_init(void)
{
    static struct timer *dummy_heap;
    int i;

    open_softirq(TIMER_SOFTIRQ, timer_softirq_action);

    /*
     * All CPUs initially share an empty dummy heap. Only those CPUs that
     * are brought online will be dynamically allocated their own heap.
     */
    SET_HEAP_SIZE(&dummy_heap, 0);
    SET_HEAP_LIMIT(&dummy_heap, 0);

    for ( i = 0; i < NR_CPUS; i++ )
    {
        spin_lock_init(&timers[i].lock);
        timers[i].heap = &dummy_heap;
    }

    register_keyhandler('a', dump_timerq, "dump timer queues");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */