
xen/arch/i386/process.c @ 722:7a9d47fea66c

bitkeeper revision 1.428 (3f677454_j81KDQLm_L7AscjYn2nYg)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/anfs/scratch/labyrinth/iap10/xeno-clone/xeno.bk
author iap10@labyrinth.cl.cam.ac.uk
date Tue Sep 16 20:36:36 2003 +0000 (2003-09-16)
parents 93c7dcf4a80e ec38a236c5db
children b45bc774c22c
/*
 *  linux/arch/i386/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#define __KERNEL_SYSCALLS__
#include <xeno/config.h>
#include <xeno/lib.h>
#include <xeno/errno.h>
#include <xeno/sched.h>
#include <xeno/smp.h>
#include <asm/ptrace.h>
#include <xeno/delay.h>
#include <xeno/interrupt.h>
#include <asm/mc146818rtc.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/i387.h>

#include <xeno/irq.h>
#include <xeno/event.h>

#define GET_SYSCALL_REGS(_p) \
    (((struct pt_regs *)(THREAD_SIZE + (unsigned long)(_p))) - 1)
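
/*
 * Layout assumed by GET_SYSCALL_REGS (a sketch; it matches the usual
 * Linux arrangement where the task_struct sits at the bottom of its
 * own THREAD_SIZE-byte stack area):
 *
 *   (unsigned long)(_p) + THREAD_SIZE  <-- top of stack
 *     [ struct pt_regs ]               <-- guest frame saved on entry;
 *                                          GET_SYSCALL_REGS(_p) points here
 *     [ stack grows down... ]
 *   (unsigned long)(_p)                <-- task_struct itself
 */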

asmlinkage void ret_from_newdomain(void) __asm__("ret_from_newdomain");

int hlt_counter; /* non-zero if HLT has been vetoed (see disable_hlt) */

void disable_hlt(void)
{
    hlt_counter++;
}

void enable_hlt(void)
{
    hlt_counter--;
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
static void default_idle(void)
{
    if (!hlt_counter) {
        __cli();
        if (!current->hyp_events && !softirq_pending(smp_processor_id()))
            /*
             * safe_halt() is "sti; hlt": interrupts become deliverable
             * only at the HLT, so an event raised after the check above
             * still wakes the CPU.
             */
            safe_halt();
        else
            __sti();
    }
}

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
    int cpu = smp_processor_id();

    /* Just some sanity to ensure that the scheduler is set up okay. */
    ASSERT(current->domain == IDLE_DOMAIN_ID);
    (void)wake_up(current);
    schedule();

    /*
     * Declare CPU setup done to the boot processor; the memory
     * barrier ensures our state is visible to it first.
     */
    smp_mb();
    init_idle();

    for ( ; ; )
    {
        irq_stat[cpu].idle_timestamp = jiffies;
        while (!current->hyp_events && !softirq_pending(cpu))
            default_idle();
        do_hyp_events();
        do_softirq();
    }
}
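
/*
 * Note that the wakeup checks are deliberately duplicated: the while
 * loop above polls with interrupts enabled, and default_idle()
 * re-checks under __cli() before halting, so an event posted between
 * the two checks cannot strand the CPU in HLT.
 */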

static long no_idt[2]; /* zero-limit IDT, used below to force a triple fault */
static int reboot_mode;
int reboot_thru_bios = 0;

#ifdef CONFIG_SMP
int reboot_smp = 0;
static int reboot_cpu = -1;
/* shamelessly grabbed from lib/vsprintf.c for readability */
#define is_digit(c) ((c) >= '0' && (c) <= '9')
#endif
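
/*
 * Wait (bounded) for the i8042 keyboard controller to drain its input
 * buffer: bit 1 of status port 0x64 is set while a byte is pending.
 * The 0x10000 iteration cap ensures a wedged controller cannot hang
 * the reboot path.
 */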
static inline void kb_wait(void)
{
    int i;

    for (i=0; i<0x10000; i++)
        if ((inb_p(0x64) & 0x02) == 0)
            break;
}

void machine_restart(char * __unused)
{
#ifdef CONFIG_SMP
    int cpuid;

    cpuid = GET_APIC_ID(apic_read(APIC_ID));

    if (reboot_smp) {

        /* check to see if reboot_cpu is valid;
           if it's not, default to the BSP */
        if ((reboot_cpu == -1) ||
            (reboot_cpu > (NR_CPUS - 1)) ||
            !(phys_cpu_present_map & (1<<cpuid)))
            reboot_cpu = boot_cpu_physical_apicid;

        reboot_smp = 0; /* use this as a flag to only go through this once */
        /* re-run this function on the other CPUs;
           it will fall through this section since we have
           cleared reboot_smp, and do the reboot if it is the
           correct CPU, otherwise it halts. */
        if (reboot_cpu != cpuid)
            smp_call_function((void *)machine_restart, NULL, 1, 0);
    }

    /* if reboot_cpu is still -1, then we want a traditional reboot,
       and if we are not running on the reboot_cpu, halt */
    if ((reboot_cpu != -1) && (cpuid != reboot_cpu)) {
        for (;;)
            __asm__ __volatile__ ("hlt");
    }
    /*
     * Stop all CPUs and turn off local APICs and the IO-APIC, so
     * other OSs see a clean IRQ state.
     */
    smp_send_stop();
    disable_IO_APIC();
#endif

    if (!reboot_thru_bios) {
        /* rebooting needs to touch the page at absolute addr 0 */
        *((unsigned short *)__va(0x472)) = reboot_mode;
        for (;;) {
            int i;
            for (i=0; i<100; i++) {
                kb_wait();
                udelay(50);
                outb(0xfe,0x64); /* pulse reset low */
                udelay(50);
            }
            /* That didn't work - force a triple fault.. */
            __asm__ __volatile__("lidt %0": :"m" (no_idt));
            __asm__ __volatile__("int3");
        }
    }

    panic("Need to reinclude BIOS reboot code\n");
}
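
/*
 * The reset ladder above, in order: (1) pulse the keyboard
 * controller's reset line (command 0xfe to port 0x64); (2) failing
 * that, load a zero-limit IDT and execute int3 -- the undeliverable
 * breakpoint escalates through a double fault to a triple fault,
 * which resets the processor; (3) a BIOS-assisted reboot would be the
 * final fallback, but that code is not currently included (hence the
 * panic above).
 */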

void machine_halt(void)
{
    machine_restart(0);
}

void machine_power_off(void)
{
    machine_restart(0);
}

extern void show_trace(unsigned long* esp);

void show_regs(struct pt_regs * regs)
{
    unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;

    printk("\n");
    printk("EIP: %04x:[<%08lx>] CPU: %d",0xffff & regs->xcs,regs->eip, smp_processor_id());
    if (regs->xcs & 3)
        printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
    printk(" EFLAGS: %08lx\n",regs->eflags);
    printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
           regs->eax,regs->ebx,regs->ecx,regs->edx);
    printk("ESI: %08lx EDI: %08lx EBP: %08lx",
           regs->esi, regs->edi, regs->ebp);
    printk(" DS: %04x ES: %04x FS: %04x GS: %04x\n",
           0xffff & regs->xds, 0xffff & regs->xes,
           0xffff & regs->xfs, 0xffff & regs->xgs);

    __asm__("movl %%cr0, %0": "=r" (cr0));
    __asm__("movl %%cr2, %0": "=r" (cr2));
    __asm__("movl %%cr3, %0": "=r" (cr3));
    /*
     * This could fault if %cr4 does not exist; the __ex_table entry
     * makes the fault handler resume at label 2, leaving cr4 as the
     * preloaded 0.
     */
    __asm__("1: movl %%cr4, %0         \n"
            "2:                        \n"
            ".section __ex_table,\"a\" \n"
            ".long 1b,2b               \n"
            ".previous                 \n"
            : "=r" (cr4): "0" (0));
    printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
    show_trace(&regs->esp);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
    /* nothing to do ... */
}

void flush_thread(void)
{
    struct task_struct *tsk = current;

    memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
    /*
     * Forget coprocessor state..
     */
    clear_fpu(tsk);
    tsk->flags &= ~PF_DONEFPUINIT;
}

void release_thread(struct task_struct *dead_task)
{
}

void new_thread(struct task_struct *p,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info)
{
    struct pt_regs *regs = GET_SYSCALL_REGS(p);
    memset(regs, 0, sizeof(*regs));

    /*
     * Initial register values:
     *  DS,ES,FS,GS = FLAT_RING1_DS
     *       CS:EIP = FLAT_RING1_CS:start_pc
     *       SS:ESP = FLAT_RING1_DS:start_stack
     *          ESI = start_info
     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
     */
    p->thread.fs = p->thread.gs = FLAT_RING1_DS;
    regs->xds = regs->xes = regs->xfs = regs->xgs = regs->xss = FLAT_RING1_DS;
    regs->xcs = FLAT_RING1_CS;
    regs->eip = start_pc;
    regs->esp = start_stack;
    regs->esi = start_info;

    p->thread.esp  = (unsigned long) regs;
    p->thread.esp0 = (unsigned long) (regs+1);

    p->thread.eip = (unsigned long) ret_from_newdomain;

    __save_flags(regs->eflags);
    regs->eflags |= X86_EFLAGS_IF;

    /* No fast trap at start of day. */
    SET_DEFAULT_FAST_TRAP(&p->thread);
}
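
/*
 * Illustrative call only -- the real invocation lives in the domain
 * builder, and these argument names are invented for the example:
 *
 *   new_thread(p,
 *              kernel_entry_addr,  -- becomes EIP (guest entry point)
 *              stack_top_addr,     -- becomes ESP (initial ring-1 stack)
 *              start_info_addr);   -- becomes ESI (start-of-day info page)
 */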

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,register) \
        __asm__("movl %0,%%db" #register \
                : /* no output */ \
                :"r" (thread->debugreg[register]))

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 */
/* NB. prev_p passed in %eax, next_p passed in %edx */
void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
    struct thread_struct *next = &next_p->thread;
    struct tss_struct *tss = init_tss + smp_processor_id();

    unlazy_fpu(prev_p);

    /*
     * Switch the fast-trap handler: remove the outgoing domain's
     * directly-installed IDT entry and install the incoming domain's.
     */
    CLEAR_FAST_TRAP(&prev_p->thread);
    SET_FAST_TRAP(&next_p->thread);

    tss->esp0 = next->esp0;
    tss->esp1 = next->esp1;
    tss->ss1  = next->ss1;

    /* Switch GDT and LDT. (NB. lgdt only reads its operand, so this is
       an input constraint.) */
    __asm__ __volatile__ ("lgdt %0" : : "m" (*next_p->mm.gdt));
    load_LDT();

    /*
     * Now maybe reload the debug registers
     */
    if (next->debugreg[7]) {
        loaddebug(next, 0);
        loaddebug(next, 1);
        loaddebug(next, 2);
        loaddebug(next, 3);
        /* no 4 and 5 */
        loaddebug(next, 6);
        loaddebug(next, 7);
    }
}

/* XXX Currently the 'domain' field is ignored! XXX */
long do_iopl(unsigned int domain, unsigned int new_io_pl)
{
    struct pt_regs *regs = GET_SYSCALL_REGS(current);
    /* EFLAGS bits 13:12 hold the I/O privilege level. */
    regs->eflags = (regs->eflags & 0xffffcfff) | ((new_io_pl&3) << 12);
    return 0;
}
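
/*
 * Worked example (illustrative): new_io_pl = 1.
 *   0xffffcfff == ~0x3000, clearing EFLAGS bits 13:12 (IOPL);
 *   (1 & 3) << 12 == 0x1000, i.e. IOPL = 1.
 * The guest OS kernel runs in ring 1, so with CPL <= IOPL it may then
 * execute IN/OUT instructions directly rather than faulting into Xen.
 */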