ia64/xen-unstable

view extras/mini-os/sched.c @ 17042:a905c582a406

Add stubdomain support. See stubdom/README for usage details.

- Move PAGE_SIZE and STACK_SIZE into __PAGE_SIZE and __STACK_SIZE in
  arch_limits.h so that they can be obtained from there without pulling
  in all the internal Mini-OS defines.
- Set up a xen-elf cross-compilation environment in stubdom/cross-root
- Add a POSIX layer on top of Mini-OS by linking against the newlib C
  library and lwIP, and implementing the Unixish part in mini-os/lib/sys.c
- Cross-compile zlib and libpci too.
- Add an xs.h-compatible layer on top of Mini-OS' xenbus.
- Cross-compile libxc with an additional xc_minios.c and a few things
  disabled.
- Cross-compile ioemu with an additional block-vbd, but without sound,
  tpm and other details. A few hacks are needed:
  - Align ide and scsi buffers at least on the sector size to permit
    direct transmission to the block backend. While we are at it, just
    page-align them to possibly save a segment. Also, limit the scsi
    buffer size because of limitations of the block paravirtualization
    protocol. (A sketch of such an aligned allocation appears after this
    list.)
  - Allocate big tables dynamically rather than letting them go to
    bss: when Mini-OS gets installed in memory, bss is not lazily
    allocated, and making it so in Mini-OS would be unnecessarily tricky
    when we can simply use malloc. (See the second sketch after this
    list.)
  - The Mini-OS build had to be changed somewhat so as to export the
    Mini-OS compilation flags to the Makefiles of libxc and ioemu.
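
As an illustration of the buffer-alignment hack above, here is a minimal
sketch. It is not the actual ioemu change: alloc_io_buffer and its use are
invented, and it assumes posix_memalign() is available through the newlib
POSIX layer.

    #include <stdlib.h>     /* posix_memalign, assumed provided by newlib */
    #include <string.h>

    #define PAGE_SIZE 4096  /* assumed page size; Mini-OS exports __PAGE_SIZE */

    /* Hypothetical helper: page-aligning the buffer both satisfies the
     * sector-size requirement of the block backend and lets a buffer of n
     * pages occupy exactly n segments. */
    static void *alloc_io_buffer(size_t size)
    {
        void *buf = NULL;
        if (posix_memalign(&buf, PAGE_SIZE, size))
            return NULL;
        memset(buf, 0, size);
        return buf;
    }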
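The point about big tables amounts to replacing a large static array, which
would be part of the eagerly backed bss, with an allocation made at
initialisation time. A hedged sketch with invented names (struct entry,
TABLE_ENTRIES, table_init):

    #include <stdlib.h>

    struct entry { unsigned long key, val; };  /* illustrative element type */
    #define TABLE_ENTRIES 65536                /* illustrative table size */

    /* Before: "static struct entry table[TABLE_ENTRIES];" would sit in bss,
     * which is backed by real memory as soon as the Mini-OS image is loaded.
     * Allocating at start-up keeps the image and its bss small. */
    static struct entry *table;

    static int table_init(void)
    {
        table = calloc(TABLE_ENTRIES, sizeof(*table));
        return table ? 0 : -1;
    }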

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Feb 12 14:35:39 2008 +0000 (2008-02-12)
parents 945820bfedb6
children 6cd0d4d1baa3
/*
 ****************************************************************************
 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: sched.c
 *      Author: Grzegorz Milos
 *     Changes: Robert Kaiser
 *
 *        Date: Aug 2005
 *
 * Environment: Xen Minimal OS
 * Description: simple scheduler for Mini-OS
 *
 * The scheduler is non-preemptive (cooperative), and schedules according
 * to a round-robin algorithm.
 *
 ****************************************************************************
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <os.h>
#include <hypervisor.h>
#include <time.h>
#include <mm.h>
#include <types.h>
#include <lib.h>
#include <xmalloc.h>
#include <list.h>
#include <sched.h>
#include <semaphore.h>

#ifdef SCHED_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=sched.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...)    ((void)0)
#endif

struct thread *idle_thread = NULL;
LIST_HEAD(exited_threads);
static int threads_started;

void inline print_runqueue(void)
{
    struct list_head *it;
    struct thread *th;
    list_for_each(it, &idle_thread->thread_list)
    {
        th = list_entry(it, struct thread, thread_list);
        printk(" Thread \"%s\", runnable=%d\n", th->name, is_runnable(th));
    }
    printk("\n");
}

/* Find the time when the next timeout expires. If this is more than
   10 seconds from now, return 10 seconds from now. */
static s_time_t blocking_time(void)
{
    struct thread *thread;
    struct list_head *iterator;
    s_time_t min_wakeup_time;
    unsigned long flags;
    local_irq_save(flags);
    /* default-block the domain for 10 seconds: */
    min_wakeup_time = NOW() + SECONDS(10);

    /* Thread list needs to be protected */
    list_for_each(iterator, &idle_thread->thread_list)
    {
        thread = list_entry(iterator, struct thread, thread_list);
        if(!is_runnable(thread) && thread->wakeup_time != 0LL)
        {
            if(thread->wakeup_time < min_wakeup_time)
            {
                min_wakeup_time = thread->wakeup_time;
            }
        }
    }
    local_irq_restore(flags);
    return(min_wakeup_time);
}

/* Wake up all threads with expired timeouts. */
static void wake_expired(void)
{
    struct thread *thread;
    struct list_head *iterator;
    s_time_t now = NOW();
    unsigned long flags;
    local_irq_save(flags);
    /* Thread list needs to be protected */
    list_for_each(iterator, &idle_thread->thread_list)
    {
        thread = list_entry(iterator, struct thread, thread_list);
        if(!is_runnable(thread) && thread->wakeup_time != 0LL)
        {
            if(thread->wakeup_time <= now)
                wake(thread);
        }
    }
    local_irq_restore(flags);
}

void schedule(void)
{
    struct thread *prev, *next, *thread;
    struct list_head *iterator;
    unsigned long flags;
    prev = current;
    local_irq_save(flags);
    if (in_callback) {
        printk("Must not call schedule() from a callback\n");
        BUG();
    }
    if (flags) {
        printk("Must not call schedule() with IRQs disabled\n");
        BUG();
    }
    list_for_each(iterator, &exited_threads)
    {
        thread = list_entry(iterator, struct thread, thread_list);
        if(thread != prev)
        {
            list_del(&thread->thread_list);
            free_pages(thread->stack, STACK_SIZE_PAGE_ORDER);
            xfree(thread);
        }
    }
    next = idle_thread;
    /* Thread list needs to be protected */
    list_for_each(iterator, &idle_thread->thread_list)
    {
        thread = list_entry(iterator, struct thread, thread_list);
        if(is_runnable(thread))
        {
            next = thread;
            /* Put this thread on the end of the list */
            list_del(&thread->thread_list);
            list_add_tail(&thread->thread_list, &idle_thread->thread_list);
            break;
        }
    }
    local_irq_restore(flags);
    /* Interrupting the switch is equivalent to having the next thread
       interrupted at the return instruction, and is therefore a safe point. */
    if(prev != next) switch_threads(prev, next);
}

struct thread* create_thread(char *name, void (*function)(void *), void *data)
{
    struct thread *thread;
    unsigned long flags;
    /* Call architecture specific setup. */
    thread = arch_create_thread(name, function, data);
    /* Not runnable, not exited, not sleeping */
    thread->flags = 0;
    thread->wakeup_time = 0LL;
#ifdef HAVE_LIBC
    _REENT_INIT_PTR((&thread->reent))
#endif
    set_runnable(thread);
    local_irq_save(flags);
    if(idle_thread != NULL) {
        list_add_tail(&thread->thread_list, &idle_thread->thread_list);
    } else if(function != idle_thread_fn)
    {
        printk("BUG: Not allowed to create thread before initialising scheduler.\n");
        BUG();
    }
    local_irq_restore(flags);
    return thread;
}

#ifdef HAVE_LIBC
static struct _reent callback_reent;
struct _reent *__getreent(void)
{
    struct _reent *_reent;

    if (!threads_started)
        _reent = _impure_ptr;
    else if (in_callback)
        _reent = &callback_reent;
    else
        _reent = &get_current()->reent;

#ifndef NDEBUG
#if defined(__x86_64__) || defined(__x86__)
    {
#ifdef __x86_64__
        register unsigned long sp asm ("rsp");
#else
        register unsigned long sp asm ("esp");
#endif
        if ((sp & (STACK_SIZE-1)) < STACK_SIZE / 16) {
            static int overflowing;
            if (!overflowing) {
                overflowing = 1;
                printk("stack overflow\n");
                BUG();
            }
        }
    }
#endif
#endif
    return _reent;
}
#endif

void exit_thread(void)
{
    unsigned long flags;
    struct thread *thread = current;
    printk("Thread \"%s\" exited.\n", thread->name);
    local_irq_save(flags);
    /* Remove from the thread list */
    list_del(&thread->thread_list);
    clear_runnable(thread);
    /* Put onto exited list */
    list_add(&thread->thread_list, &exited_threads);
    local_irq_restore(flags);
    /* Schedule will free the resources */
    while(1)
    {
        schedule();
        printk("schedule() returned! Trying again\n");
    }
}

void block(struct thread *thread)
{
    thread->wakeup_time = 0LL;
    clear_runnable(thread);
}

void msleep(u32 millisecs)
{
    struct thread *thread = get_current();
    thread->wakeup_time = NOW() + MILLISECS(millisecs);
    clear_runnable(thread);
    schedule();
}

void wake(struct thread *thread)
{
    thread->wakeup_time = 0LL;
    set_runnable(thread);
}

void idle_thread_fn(void *unused)
{
    s_time_t until;
    threads_started = 1;
    unsigned long flags;
    struct list_head *iterator;
    struct thread *next, *thread;
    for(;;)
    {
        schedule();
        next = NULL;
        local_irq_save(flags);
        list_for_each(iterator, &idle_thread->thread_list)
        {
            thread = list_entry(iterator, struct thread, thread_list);
            if(is_runnable(thread))
            {
                next = thread;
                break;
            }
        }
        if (!next) {
            /* block until the next timeout expires, or for 10 secs, whichever comes first */
            until = blocking_time();
            block_domain(until);
        }
        local_irq_restore(flags);
        wake_expired();
    }
}

DECLARE_MUTEX(mutex);

void th_f1(void *data)
{
    struct timeval tv1, tv2;

    for(;;)
    {
        down(&mutex);
        printk("Thread \"%s\" got semaphore, runnable %d\n", current->name, is_runnable(current));
        schedule();
        printk("Thread \"%s\" releases the semaphore\n", current->name);
        up(&mutex);

        gettimeofday(&tv1, NULL);
        for(;;)
        {
            gettimeofday(&tv2, NULL);
            if(tv2.tv_sec - tv1.tv_sec > 2) break;
        }

        schedule();
    }
}

void th_f2(void *data)
{
    for(;;)
    {
        printk("Thread OTHER executing, data 0x%lx\n", data);
        schedule();
    }
}

void init_sched(void)
{
    printk("Initialising scheduler\n");

#ifdef HAVE_LIBC
    _REENT_INIT_PTR((&callback_reent))
#endif
    idle_thread = create_thread("Idle", idle_thread_fn, NULL);
    INIT_LIST_HEAD(&idle_thread->thread_list);
}
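
For readers new to the cooperative model implemented above, a minimal usage
sketch follows. It is not part of sched.c: periodic_fn and its messages are
invented, while create_thread(), msleep(), get_current() and printk() are the
interfaces that appear in the listing.

    /* A cooperative Mini-OS thread: since the scheduler never preempts, the
     * thread must yield by itself, either by blocking (msleep(), semaphores)
     * or by calling schedule() directly. */
    static void periodic_fn(void *arg)
    {
        for(;;)
        {
            printk("Thread \"%s\" running\n", get_current()->name);
            msleep(1000);   /* sets wakeup_time, clears runnable, then yields */
        }
    }

    /* Typical start-up, once init_sched() has created the idle thread:
     *     create_thread("periodic", periodic_fn, NULL);
     */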