direct-io.hg

view extras/mini-os/sched.c @ 12477:7f7aeaa0cba6

[HVMLOADER] Reserve MMIO 0xa0000 to 0xc0000 in ACPI DSDT.

Avoids possible vga driver loading problem in HVM Windows guest.
Also fixes a Makefile bug in hvmloader directory.

Signed-off-by: Qing He <qing.he@intel.com>
author kfraser@localhost.localdomain
date Fri Nov 17 10:02:54 2006 +0000 (2006-11-17)
parents 0839db0aa611
children a3c6479c87ef
line source
1 /*
2 ****************************************************************************
3 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
4 ****************************************************************************
5 *
6 * File: sched.c
7 * Author: Grzegorz Milos
8 * Changes: Robert Kaiser
9 *
10 * Date: Aug 2005
11 *
12 * Environment: Xen Minimal OS
13 * Description: simple scheduler for Mini-Os
14 *
15 * The scheduler is non-preemptive (cooperative), and schedules according
16 * to Round Robin algorithm.
17 *
18 ****************************************************************************
19 * Permission is hereby granted, free of charge, to any person obtaining a copy
20 * of this software and associated documentation files (the "Software"), to
21 * deal in the Software without restriction, including without limitation the
22 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
23 * sell copies of the Software, and to permit persons to whom the Software is
24 * furnished to do so, subject to the following conditions:
25 *
26 * The above copyright notice and this permission notice shall be included in
27 * all copies or substantial portions of the Software.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
34 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
35 * DEALINGS IN THE SOFTWARE.
36 */
38 #include <os.h>
39 #include <hypervisor.h>
40 #include <time.h>
41 #include <mm.h>
42 #include <types.h>
43 #include <lib.h>
44 #include <xmalloc.h>
45 #include <list.h>
46 #include <sched.h>
47 #include <semaphore.h>
/*
 * Debug tracing: compiled in only when SCHED_DEBUG is defined, so release
 * builds pay nothing for the trace statements scattered through this file.
 */
#ifdef SCHED_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=sched.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...)    ((void)0)
#endif
/* Flag bits stored in struct thread->flags. */
#define RUNNABLE_FLAG   0x00000001

/* Macro argument parenthesised so complex expressions expand safely. */
#define is_runnable(_thread)    ((_thread)->flags &   RUNNABLE_FLAG)
#define set_runnable(_thread)   ((_thread)->flags |=  RUNNABLE_FLAG)
#define clear_runnable(_thread) ((_thread)->flags &= ~RUNNABLE_FLAG)
65 struct thread *idle_thread = NULL;
66 LIST_HEAD(exited_threads);
68 void idle_thread_fn(void *unused);
70 void dump_stack(struct thread *thread)
71 {
72 unsigned long *bottom = (unsigned long *)(thread->stack + 2*4*1024);
73 unsigned long *pointer = (unsigned long *)thread->sp;
74 int count;
75 if(thread == current)
76 {
77 #ifdef __i386__
78 asm("movl %%esp,%0"
79 : "=r"(pointer));
80 #else
81 asm("movq %%rsp,%0"
82 : "=r"(pointer));
83 #endif
84 }
85 printk("The stack for \"%s\"\n", thread->name);
86 for(count = 0; count < 25 && pointer < bottom; count ++)
87 {
88 printk("[0x%lx] 0x%lx\n", pointer, *pointer);
89 pointer++;
90 }
92 if(pointer < bottom) printk(" ... continues.\n");
93 }
/*
 * switch_threads(prev, next): cooperative context switch.  Saves the
 * callee-visible context of 'prev' (EFLAGS and frame pointer on the
 * stack, then SP, then a resume IP at local label 1:), loads 'next's
 * saved SP and ret-jumps to its saved IP.  ESI/EDI (RSI/RDI) appear as
 * outputs so the compiler treats them as clobbered across the switch;
 * "2" (prev) / "d" (next) keep the struct pointers in known registers.
 */
#ifdef __i386__
#define switch_threads(prev, next) do {                                 \
    unsigned long esi,edi;                                              \
    __asm__ __volatile__("pushfl\n\t"                                   \
                         "pushl %%ebp\n\t"                              \
                         "movl %%esp,%0\n\t"         /* save ESP */     \
                         "movl %4,%%esp\n\t"        /* restore ESP */   \
                         "movl $1f,%1\n\t"          /* save EIP */      \
                         "pushl %5\n\t"             /* restore EIP */   \
                         "ret\n\t"                                      \
                         "1:\t"                                         \
                         "popl %%ebp\n\t"                               \
                         "popfl"                                        \
                         :"=m" (prev->sp),"=m" (prev->ip),              \
                          "=S" (esi),"=D" (edi)                         \
                         :"m" (next->sp),"m" (next->ip),                \
                          "2" (prev), "d" (next));                      \
} while (0)
#elif __x86_64__
#define switch_threads(prev, next) do {                                 \
    unsigned long rsi,rdi;                                              \
    __asm__ __volatile__("pushfq\n\t"                                   \
                         "pushq %%rbp\n\t"                              \
                         "movq %%rsp,%0\n\t"         /* save RSP */     \
                         "movq %4,%%rsp\n\t"        /* restore RSP */   \
                         "movq $1f,%1\n\t"          /* save RIP */      \
                         "pushq %5\n\t"             /* restore RIP */   \
                         "ret\n\t"                                      \
                         "1:\t"                                         \
                         "popq %%rbp\n\t"                               \
                         "popfq"                                        \
                         :"=m" (prev->sp),"=m" (prev->ip),              \
                          "=S" (rsi),"=D" (rdi)                         \
                         :"m" (next->sp),"m" (next->ip),                \
                          "2" (prev), "d" (next));                      \
} while (0)
#endif
133 void inline print_runqueue(void)
134 {
135 struct list_head *it;
136 struct thread *th;
137 list_for_each(it, &idle_thread->thread_list)
138 {
139 th = list_entry(it, struct thread, thread_list);
140 printk(" Thread \"%s\", runnable=%d\n", th->name, is_runnable(th));
141 }
142 printk("\n");
143 }
145 /* Find the time when the next timeout expires. If this is more than
146 10 seconds from now, return 10 seconds from now. */
147 static s_time_t blocking_time(void)
148 {
149 struct thread *thread;
150 struct list_head *iterator;
151 s_time_t min_wakeup_time;
152 unsigned long flags;
153 local_irq_save(flags);
154 /* default-block the domain for 10 seconds: */
155 min_wakeup_time = NOW() + SECONDS(10);
157 /* Thread list needs to be protected */
158 list_for_each(iterator, &idle_thread->thread_list)
159 {
160 thread = list_entry(iterator, struct thread, thread_list);
161 if(!is_runnable(thread) && thread->wakeup_time != 0LL)
162 {
163 if(thread->wakeup_time < min_wakeup_time)
164 {
165 min_wakeup_time = thread->wakeup_time;
166 }
167 }
168 }
169 local_irq_restore(flags);
170 return(min_wakeup_time);
171 }
173 /* Wake up all threads with expired timeouts. */
174 static void wake_expired(void)
175 {
176 struct thread *thread;
177 struct list_head *iterator;
178 s_time_t now = NOW();
179 unsigned long flags;
180 local_irq_save(flags);
181 /* Thread list needs to be protected */
182 list_for_each(iterator, &idle_thread->thread_list)
183 {
184 thread = list_entry(iterator, struct thread, thread_list);
185 if(!is_runnable(thread) && thread->wakeup_time != 0LL)
186 {
187 if(thread->wakeup_time <= now)
188 wake(thread);
189 }
190 }
191 local_irq_restore(flags);
192 }
194 void schedule(void)
195 {
196 struct thread *prev, *next, *thread;
197 struct list_head *iterator;
198 unsigned long flags;
199 prev = current;
200 local_irq_save(flags);
201 list_for_each(iterator, &exited_threads)
202 {
203 thread = list_entry(iterator, struct thread, thread_list);
204 if(thread != prev)
205 {
206 list_del(&thread->thread_list);
207 free_pages(thread->stack, 1);
208 xfree(thread);
209 }
210 }
211 next = idle_thread;
212 /* Thread list needs to be protected */
213 list_for_each(iterator, &idle_thread->thread_list)
214 {
215 thread = list_entry(iterator, struct thread, thread_list);
216 if(is_runnable(thread))
217 {
218 next = thread;
219 /* Put this thread on the end of the list */
220 list_del(&thread->thread_list);
221 list_add_tail(&thread->thread_list, &idle_thread->thread_list);
222 break;
223 }
224 }
225 local_irq_restore(flags);
226 /* Interrupting the switch is equivalent to having the next thread
227 inturrupted at the return instruction. And therefore at safe point. */
228 if(prev != next) switch_threads(prev, next);
229 }
232 /* Gets run when a new thread is scheduled the first time ever,
233 defined in x86_[32/64].S */
234 extern void thread_starter(void);
237 void exit_thread(void)
238 {
239 unsigned long flags;
240 struct thread *thread = current;
241 printk("Thread \"%s\" exited.\n", thread->name);
242 local_irq_save(flags);
243 /* Remove from the thread list */
244 list_del(&thread->thread_list);
245 clear_runnable(thread);
246 /* Put onto exited list */
247 list_add(&thread->thread_list, &exited_threads);
248 local_irq_restore(flags);
249 /* Schedule will free the resources */
250 schedule();
251 }
253 /* Pushes the specified value onto the stack of the specified thread */
254 static void stack_push(struct thread *thread, unsigned long value)
255 {
256 thread->sp -= sizeof(unsigned long);
257 *((unsigned long *)thread->sp) = value;
258 }
260 struct thread* create_thread(char *name, void (*function)(void *), void *data)
261 {
262 struct thread *thread;
263 unsigned long flags;
265 thread = xmalloc(struct thread);
266 /* Allocate 2 pages for stack, stack will be 2pages aligned */
267 thread->stack = (char *)alloc_pages(1);
268 thread->name = name;
269 printk("Thread \"%s\": pointer: 0x%lx, stack: 0x%lx\n", name, thread,
270 thread->stack);
272 thread->sp = (unsigned long)thread->stack + 4096 * 2;
273 /* Save pointer to the thread on the stack, used by current macro */
274 *((unsigned long *)thread->stack) = (unsigned long)thread;
276 stack_push(thread, (unsigned long) function);
277 stack_push(thread, (unsigned long) data);
278 thread->ip = (unsigned long) thread_starter;
280 /* Not runable, not exited, not sleeping */
281 thread->flags = 0;
282 thread->wakeup_time = 0LL;
283 set_runnable(thread);
284 local_irq_save(flags);
285 if(idle_thread != NULL) {
286 list_add_tail(&thread->thread_list, &idle_thread->thread_list);
287 } else if(function != idle_thread_fn)
288 {
289 printk("BUG: Not allowed to create thread before initialising scheduler.\n");
290 BUG();
291 }
292 local_irq_restore(flags);
293 return thread;
294 }
297 void block(struct thread *thread)
298 {
299 thread->wakeup_time = 0LL;
300 clear_runnable(thread);
301 }
303 void sleep(u32 millisecs)
304 {
305 struct thread *thread = get_current();
306 thread->wakeup_time = NOW() + MILLISECS(millisecs);
307 clear_runnable(thread);
308 schedule();
309 }
311 void wake(struct thread *thread)
312 {
313 thread->wakeup_time = 0LL;
314 set_runnable(thread);
315 }
317 void idle_thread_fn(void *unused)
318 {
319 s_time_t until;
320 for(;;)
321 {
322 schedule();
323 /* block until the next timeout expires, or for 10 secs, whichever comes first */
324 until = blocking_time();
325 block_domain(until);
326 wake_expired();
327 }
328 }
330 void run_idle_thread(void)
331 {
332 /* Switch stacks and run the thread */
333 #if defined(__i386__)
334 __asm__ __volatile__("mov %0,%%esp\n\t"
335 "push %1\n\t"
336 "ret"
337 :"=m" (idle_thread->sp)
338 :"m" (idle_thread->ip));
339 #elif defined(__x86_64__)
340 __asm__ __volatile__("mov %0,%%rsp\n\t"
341 "push %1\n\t"
342 "ret"
343 :"=m" (idle_thread->sp)
344 :"m" (idle_thread->ip));
345 #endif
346 }
/* Semaphore shared by the test threads below. */
DECLARE_MUTEX(mutex);
352 void th_f1(void *data)
353 {
354 struct timeval tv1, tv2;
356 for(;;)
357 {
358 down(&mutex);
359 printk("Thread \"%s\" got semaphore, runnable %d\n", current->name, is_runnable(current));
360 schedule();
361 printk("Thread \"%s\" releases the semaphore\n", current->name);
362 up(&mutex);
365 gettimeofday(&tv1);
366 for(;;)
367 {
368 gettimeofday(&tv2);
369 if(tv2.tv_sec - tv1.tv_sec > 2) break;
370 }
373 schedule();
374 }
375 }
/* Test thread: prints its argument and yields, forever. */
void th_f2(void *data)
{
    for(;;)
    {
        /* Cast: %lx expects unsigned long, not void *. */
        printk("Thread OTHER executing, data 0x%lx\n", (unsigned long)data);
        schedule();
    }
}
388 void init_sched(void)
389 {
390 printk("Initialising scheduler\n");
392 idle_thread = create_thread("Idle", idle_thread_fn, NULL);
393 INIT_LIST_HEAD(&idle_thread->thread_list);
394 }