ia64/xen-unstable

view xen/common/ac_timer.c @ 6832:5959fae4722a

Set NE bit for VMX guest CR0. VMCS guest CR0.NE bit must
be set, else it will cause "vm-entry failed".

Signed-off-by: Chengyuan Li <chengyuan.li@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Sep 14 13:37:50 2005 +0000 (2005-09-14)
parents dd668f7527cb
children b2f4823b6ff0 b35215021b32 9af349b055e5 3233e7ecfa9f
line source
1 /******************************************************************************
2 * ac_timer.c
3 *
4 * Copyright (c) 2002-2003 Rolf Neugebauer
5 * Copyright (c) 2002-2005 K A Fraser
6 */
8 #include <xen/config.h>
9 #include <xen/init.h>
10 #include <xen/types.h>
11 #include <xen/errno.h>
12 #include <xen/sched.h>
13 #include <xen/lib.h>
14 #include <xen/smp.h>
15 #include <xen/perfc.h>
16 #include <xen/time.h>
17 #include <xen/softirq.h>
18 #include <xen/ac_timer.h>
19 #include <xen/keyhandler.h>
20 #include <asm/system.h>
21 #include <asm/desc.h>
/*
 * We pull handlers off the timer list this far in future,
 * rather than reprogramming the time hardware.
 * Firing a timer up to 50us early is cheaper than reprogramming the
 * hardware for a deadline that is already (almost) due.
 */
#define TIMER_SLOP (50*1000) /* ns */
/* Per-CPU timer state; one slot per possible CPU. */
struct ac_timers {
    spinlock_t lock;         /* protects this CPU's timer heap */
    struct ac_timer **heap;  /* 1-indexed binary min-heap, keyed on 'expires' */
    unsigned int softirqs;   /* NOTE(review): not referenced in this file -- confirm use elsewhere */
} __cacheline_aligned;       /* one cache line per CPU: avoids false sharing */

struct ac_timers ac_timers[NR_CPUS];

/* Arch-specific: program the hardware timer for @timeout.
 * The softirq loop below retries while this returns zero. */
extern int reprogram_ac_timer(s_time_t timeout);
/****************************************************************************
 * HEAP OPERATIONS.
 *
 * The heap's bookkeeping is stored in slot 0 of the heap array itself:
 * the pointer-sized heap[0] is reinterpreted as an array of u16s, where
 * element [0] is the current number of entries and element [1] is the
 * allocated capacity. Real timer entries occupy heap[1..size].
 */

#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))
49 /* Sink down element @pos of @heap. */
50 static void down_heap(struct ac_timer **heap, int pos)
51 {
52 int sz = GET_HEAP_SIZE(heap), nxt;
53 struct ac_timer *t = heap[pos];
55 while ( (nxt = (pos << 1)) <= sz )
56 {
57 if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
58 nxt++;
59 if ( heap[nxt]->expires > t->expires )
60 break;
61 heap[pos] = heap[nxt];
62 heap[pos]->heap_offset = pos;
63 pos = nxt;
64 }
66 heap[pos] = t;
67 t->heap_offset = pos;
68 }
70 /* Float element @pos up @heap. */
71 static void up_heap(struct ac_timer **heap, int pos)
72 {
73 struct ac_timer *t = heap[pos];
75 while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
76 {
77 heap[pos] = heap[pos>>1];
78 heap[pos]->heap_offset = pos;
79 pos >>= 1;
80 }
82 heap[pos] = t;
83 t->heap_offset = pos;
84 }
87 /* Delete @t from @heap. Return TRUE if new top of heap. */
88 static int remove_entry(struct ac_timer **heap, struct ac_timer *t)
89 {
90 int sz = GET_HEAP_SIZE(heap);
91 int pos = t->heap_offset;
93 t->heap_offset = 0;
95 if ( unlikely(pos == sz) )
96 {
97 SET_HEAP_SIZE(heap, sz-1);
98 goto out;
99 }
101 heap[pos] = heap[sz];
102 heap[pos]->heap_offset = pos;
104 SET_HEAP_SIZE(heap, --sz);
106 if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
107 up_heap(heap, pos);
108 else
109 down_heap(heap, pos);
111 out:
112 return (pos == 1);
113 }
116 /* Add new entry @t to @heap. Return TRUE if new top of heap. */
117 static int add_entry(struct ac_timer ***pheap, struct ac_timer *t)
118 {
119 struct ac_timer **heap = *pheap;
120 int sz = GET_HEAP_SIZE(heap);
122 /* Copy the heap if it is full. */
123 if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
124 {
125 /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
126 int old_limit = GET_HEAP_LIMIT(heap);
127 int new_limit = ((old_limit + 1) << 4) - 1;
128 heap = xmalloc_array(struct ac_timer *, new_limit + 1);
129 BUG_ON(heap == NULL);
130 memcpy(heap, *pheap, (old_limit + 1) * sizeof(*heap));
131 SET_HEAP_LIMIT(heap, new_limit);
132 if ( old_limit != 0 )
133 xfree(*pheap);
134 *pheap = heap;
135 }
137 SET_HEAP_SIZE(heap, ++sz);
138 heap[sz] = t;
139 t->heap_offset = sz;
140 up_heap(heap, sz);
141 return (t->heap_offset == 1);
142 }
145 /****************************************************************************
146 * TIMER OPERATIONS.
147 */
149 static inline void __add_ac_timer(struct ac_timer *timer)
150 {
151 int cpu = timer->cpu;
152 if ( add_entry(&ac_timers[cpu].heap, timer) )
153 cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
154 }
157 static inline void __rem_ac_timer(struct ac_timer *timer)
158 {
159 int cpu = timer->cpu;
160 if ( remove_entry(ac_timers[cpu].heap, timer) )
161 cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
162 }
165 void set_ac_timer(struct ac_timer *timer, s_time_t expires)
166 {
167 int cpu = timer->cpu;
168 unsigned long flags;
170 spin_lock_irqsave(&ac_timers[cpu].lock, flags);
171 ASSERT(timer != NULL);
172 if ( active_ac_timer(timer) )
173 __rem_ac_timer(timer);
174 timer->expires = expires;
175 __add_ac_timer(timer);
176 spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
177 }
180 void rem_ac_timer(struct ac_timer *timer)
181 {
182 int cpu = timer->cpu;
183 unsigned long flags;
185 spin_lock_irqsave(&ac_timers[cpu].lock, flags);
186 ASSERT(timer != NULL);
187 if ( active_ac_timer(timer) )
188 __rem_ac_timer(timer);
189 spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
190 }
/*
 * Softirq handler: run every timer on this CPU's heap whose deadline is
 * within TIMER_SLOP of now, then reprogram the hardware timer for the
 * next pending deadline (0 if the heap is empty).
 */
static void ac_timer_softirq_action(void)
{
    int cpu = smp_processor_id();
    struct ac_timer *t, **heap;
    s_time_t now;
    void (*fn)(void *);

    spin_lock_irq(&ac_timers[cpu].lock);

    do {
        heap = ac_timers[cpu].heap;
        now  = NOW();

        /* heap[1] holds the earliest deadline: pop while it is (nearly) due. */
        while ( (GET_HEAP_SIZE(heap) != 0) &&
                ((t = heap[1])->expires < (now + TIMER_SLOP)) )
        {
            remove_entry(heap, t);

            if ( (fn = t->function) != NULL )
            {
                /* Snapshot data, then drop the lock: the handler may itself
                 * set or remove timers (including this one). */
                void *data = t->data;
                spin_unlock_irq(&ac_timers[cpu].lock);
                (*fn)(data);
                spin_lock_irq(&ac_timers[cpu].lock);
            }

            /* Heap may have grown (been reallocated) while the lock was
             * released, so reload the heap pointer. */
            heap = ac_timers[cpu].heap;
        }
    }
    /* Loop again if reprogramming failed -- presumably because the next
     * deadline is already (nearly) due; TODO confirm against the arch's
     * reprogram_ac_timer() contract. */
    while ( !reprogram_ac_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );

    spin_unlock_irq(&ac_timers[cpu].lock);
}
229 static void dump_timerq(unsigned char key)
230 {
231 struct ac_timer *t;
232 unsigned long flags;
233 s_time_t now = NOW();
234 int i, j;
236 printk("Dumping ac_timer queues: NOW=0x%08X%08X\n",
237 (u32)(now>>32), (u32)now);
239 for_each_online_cpu( i )
240 {
241 printk("CPU[%02d] ", i);
242 spin_lock_irqsave(&ac_timers[i].lock, flags);
243 for ( j = 1; j <= GET_HEAP_SIZE(ac_timers[i].heap); j++ )
244 {
245 t = ac_timers[i].heap[j];
246 printk (" %d : %p ex=0x%08X%08X %p\n",
247 j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
248 }
249 spin_unlock_irqrestore(&ac_timers[i].lock, flags);
250 printk("\n");
251 }
252 }
/*
 * One-time boot initialisation: register the timer softirq handler and
 * the 'a' debug keyhandler, and point every CPU at a shared empty heap.
 */
void __init ac_timer_init(void)
{
    /*
     * The storage of this pointer variable itself is (ab)used as a heap
     * header: SET_HEAP_SIZE/SET_HEAP_LIMIT below write u16s into its
     * first bytes. With size == limit == 0, the first add_entry() on any
     * CPU allocates that CPU a real heap, and (because old_limit == 0)
     * never attempts to xfree() this static dummy.
     */
    static struct ac_timer *dummy_heap;
    int i;

    open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_action);

    /*
     * All CPUs initially share an empty dummy heap. Only those CPUs that
     * are brought online will be dynamically allocated their own heap.
     */
    SET_HEAP_SIZE(&dummy_heap, 0);
    SET_HEAP_LIMIT(&dummy_heap, 0);

    for ( i = 0; i < NR_CPUS; i++ )
    {
        spin_lock_init(&ac_timers[i].lock);
        ac_timers[i].heap = &dummy_heap;
    }

    register_keyhandler('a', dump_timerq, "dump ac_timer queues");
}
278 /*
279 * Local variables:
280 * mode: C
281 * c-set-style: "BSD"
282 * c-basic-offset: 4
283 * tab-width: 4
284 * indent-tabs-mode: nil
285 * End:
286 */