ia64/xen-unstable: xen-2.4.16/arch/i386/process.c @ 86:4a10fe9b20ec

bitkeeper revision 1.15 (3e24a984iRiWWcgfKCxu2p5q3YbxXw)

Many files:
  First half of support for per-domain GDTs and LDTs

author    kaf24@labyrinth.cl.cam.ac.uk
date      Wed Jan 15 00:21:24 2003 +0000
parents   1ef2026299c3
children  88c1cf85fc8f 2f78322be16a

/*
 * linux/arch/i386/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#define __KERNEL_SYSCALLS__
#include <stdarg.h>

#include <xeno/config.h>
#include <xeno/lib.h>
#include <xeno/errno.h>
#include <xeno/sched.h>
#include <xeno/smp.h>
#include <asm/ptrace.h>
#include <xeno/delay.h>
#include <asm/mc146818rtc.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/i387.h>

#include <xeno/irq.h>
#include <xeno/event.h>

asmlinkage void ret_from_newdomain(void) __asm__("ret_from_newdomain");
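
/*
 * While hlt_counter is non-zero, default_idle() spins with interrupts
 * enabled instead of executing HLT; disable_hlt()/enable_hlt() adjust
 * the counter.
 */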
int hlt_counter;

void disable_hlt(void)
{
    hlt_counter++;
}

void enable_hlt(void)
{
    hlt_counter--;
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
static void default_idle(void)
{
    if (!hlt_counter) {
        __cli();
        if (!current->hyp_events)
            safe_halt();
        else
            __sti();
    }
}

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
    ASSERT(current->domain == IDLE_DOMAIN_ID);
    current->has_cpu = 1;

    /*
     * Declare CPU setup done to the boot processor.
     * The memory barrier ensures the state is visible to it.
     */
    smp_mb();
    init_idle();

    for ( ; ; )
    {
        while (!current->hyp_events)
            default_idle();
        do_hyp_events();
    }
}

static long no_idt[2];
static int reboot_mode;
int reboot_thru_bios = 0;

#ifdef CONFIG_SMP
int reboot_smp = 0;
static int reboot_cpu = -1;
/* shamelessly grabbed from lib/vsprintf.c for readability */
#define is_digit(c) ((c) >= '0' && (c) <= '9')
#endif
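
/*
 * Poll the keyboard controller's status port (0x64) until its input
 * buffer is empty (bit 0x02 clear), so that a command written next will
 * be accepted. The loop is bounded so we never spin here forever.
 */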
static inline void kb_wait(void)
{
    int i;

    for (i=0; i<0x10000; i++)
        if ((inb_p(0x64) & 0x02) == 0)
            break;
}
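
/*
 * Reboot the machine. Unless reboot_thru_bios is set, this asks the
 * keyboard controller to pulse the CPU reset line (command 0xfe written
 * to port 0x64); if that has no effect, a triple fault is forced by
 * loading an empty IDT and raising an exception.
 */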
void machine_restart(char * __unused)
{
#if CONFIG_SMP
    int cpuid;

    cpuid = GET_APIC_ID(apic_read(APIC_ID));

    if (reboot_smp) {

        /* check to see if reboot_cpu is valid
           if it's not, default to the BSP */
        if ((reboot_cpu == -1) ||
            (reboot_cpu > (NR_CPUS -1)) ||
            !(phys_cpu_present_map & (1<<cpuid)))
            reboot_cpu = boot_cpu_physical_apicid;

        reboot_smp = 0;  /* use this as a flag to only go through this once*/
        /* re-run this function on the other CPUs
           it will fall through this section since we have
           cleared reboot_smp, and do the reboot if it is the
           correct CPU, otherwise it halts. */
        if (reboot_cpu != cpuid)
            smp_call_function((void *)machine_restart , NULL, 1, 0);
    }

    /* if reboot_cpu is still -1, then we want a traditional reboot,
       and if we are not running on the reboot_cpu, halt */
    if ((reboot_cpu != -1) && (cpuid != reboot_cpu)) {
        for (;;)
            __asm__ __volatile__ ("hlt");
    }
    /*
     * Stop all CPUs and turn off local APICs and the IO-APIC, so
     * other OSs see a clean IRQ state.
     */
    smp_send_stop();
    disable_IO_APIC();
#endif

    if(!reboot_thru_bios) {
        /* rebooting needs to touch the page at absolute addr 0 */
        *((unsigned short *)__va(0x472)) = reboot_mode;
        for (;;) {
            int i;
            for (i=0; i<100; i++) {
                kb_wait();
                udelay(50);
                outb(0xfe,0x64);         /* pulse reset low */
                udelay(50);
            }
            /* That didn't work - force a triple fault.. */
            __asm__ __volatile__("lidt %0": :"m" (no_idt));
            __asm__ __volatile__("int3");
        }
    }

    panic("Need to reinclude BIOS reboot code\n");
}

void machine_halt(void)
{
    machine_restart(0);
}

void machine_power_off(void)
{
    machine_restart(0);
}

extern void show_trace(unsigned long* esp);

void show_regs(struct pt_regs * regs)
{
    unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;

    printk("\n");
    printk("EIP: %04x:[<%08lx>] CPU: %d",0xffff & regs->xcs,regs->eip, smp_processor_id());
    if (regs->xcs & 3)
        printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
    printk(" EFLAGS: %08lx\n",regs->eflags);
    printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
           regs->eax,regs->ebx,regs->ecx,regs->edx);
    printk("ESI: %08lx EDI: %08lx EBP: %08lx",
           regs->esi, regs->edi, regs->ebp);
    printk(" DS: %04x ES: %04x\n",
           0xffff & regs->xds,0xffff & regs->xes);

    __asm__("movl %%cr0, %0": "=r" (cr0));
    __asm__("movl %%cr2, %0": "=r" (cr2));
    __asm__("movl %%cr3, %0": "=r" (cr3));
    /* This could fault if %cr4 does not exist */
    __asm__("1: movl %%cr4, %0        \n"
            "2:                       \n"
            ".section __ex_table,\"a\"\n"
            ".long 1b,2b              \n"
            ".previous                \n"
            : "=r" (cr4): "0" (0));
    printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
    show_trace(&regs->esp);
}
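
/*
 * NB: the LDT/segment management routines below are stubbed out (#if 0)
 * while per-domain GDT and LDT support is being introduced (see the
 * changeset description above).
 */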
/*
 * No need to lock the MM as we are the last user
 */
void release_segments(struct mm_struct *mm)
{
#if 0
    void * ldt = mm.context.segments;

    /*
     * free the LDT
     */
    if (ldt) {
        mm.context.segments = NULL;
        clear_LDT();
        vfree(ldt);
    }
#endif
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
    /* nothing to do ... */
}

void flush_thread(void)
{
    struct task_struct *tsk = current;

    memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
    /*
     * Forget coprocessor state..
     */
    clear_fpu(tsk);
    tsk->flags &= ~PF_DONEFPUINIT;
}

void release_thread(struct task_struct *dead_task)
{
#if 0
    if (dead_task->mm) {
        void * ldt = dead_task->mm.context.segments;

        // temporary debugging check
        if (ldt) {
            printk("WARNING: dead process %8s still has LDT? <%p>\n",
                   dead_task->comm, ldt);
            BUG();
        }
    }
#endif
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
#if 0
    struct mm_struct * old_mm;
    void *old_ldt, *ldt;

    ldt = NULL;
    old_mm = current->mm;
    if (old_mm && (old_ldt = old_mm.context.segments) != NULL) {
        /*
         * Completely new LDT, we initialize it from the parent:
         */
        ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
        if (!ldt)
            printk(KERN_WARNING "ldt allocation failed\n");
        else
            memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
    }
    new_mm.context.segments = ldt;
    new_mm.context.cpuvalid = ~0UL; /* valid on all CPU's - they can't have stale data */
#endif
}
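
/*
 * Build the initial register frame for a new domain at the top of its
 * stack. The first context switch into the domain then resumes in
 * ret_from_newdomain with %esp pointing at this frame, so the values
 * set up below become the guest's initial register state.
 */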
void new_thread(struct task_struct *p,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info)
{
    struct pt_regs * regs;

    regs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;
    memset(regs, 0, sizeof(*regs));

    /*
     * Initial register values:
     *  DS,ES,FS,GS = __GUEST_DS
     *  CS:EIP      = __GUEST_CS:start_pc
     *  SS:ESP      = __GUEST_DS:start_stack
     *          ESI = start_info
     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
     */
    p->thread.fs = p->thread.gs = __GUEST_DS;
    regs->xds = regs->xes = regs->xss = __GUEST_DS;
    regs->xcs = __GUEST_CS;
    regs->eip = start_pc;
    regs->esp = start_stack;
    regs->esi = start_info;

    p->thread.esp = (unsigned long) regs;
    p->thread.esp0 = (unsigned long) (regs+1);

    p->thread.eip = (unsigned long) ret_from_newdomain;

    __save_flags(regs->eflags);
    regs->eflags |= X86_EFLAGS_IF;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,register) \
        __asm__("movl %0,%%db" #register \
                : /* no output */ \
                :"r" (thread->debugreg[register]))

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 */
/* NB. prev_p passed in %eax, next_p passed in %edx */
void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
    struct thread_struct *prev = &prev_p->thread,
        *next = &next_p->thread;
    struct tss_struct *tss = init_tss + smp_processor_id();

    unlazy_fpu(prev_p);

    tss->esp0 = next->esp0;
    tss->esp1 = next->esp1;
    tss->ss1 = next->ss1;

    /*
     * Save away %fs and %gs. No need to save %es and %ds, as
     * those are always kernel segments while inside the kernel.
     */
    asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
    asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
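
    /*
     * Each domain now carries its own GDT and LDT (see the changeset
     * description above), so both descriptor tables are reloaded on
     * every context switch.
     */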
    /* Switch GDT and LDT. */
    __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->mm.gdt));
    __load_LDT(next_p->mm.ldt_sel);

    /*
     * Restore %fs and %gs.
     */
    loadsegment(fs, next->fs);
    loadsegment(gs, next->gs);

    /*
     * Now maybe reload the debug registers
     */
    if (next->debugreg[7]){
        loaddebug(next, 0);
        loaddebug(next, 1);
        loaddebug(next, 2);
        loaddebug(next, 3);
        /* no 4 and 5 */
        loaddebug(next, 6);
        loaddebug(next, 7);
    }

}