ia64/xen-unstable

view xen-2.4.16/arch/i386/entry.S @ 86:4a10fe9b20ec

bitkeeper revision 1.15 (3e24a984iRiWWcgfKCxu2p5q3YbxXw)

Many files:
First half of support for per-domain GDTs and LDTs
author kaf24@labyrinth.cl.cam.ac.uk
date Wed Jan 15 00:21:24 2003 +0000 (2003-01-15)
parents c3e6a52cd801
children 336647fd8f40
line source
1 /*
2 * linux/arch/i386/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
7 /*
8 * entry.S contains the system-call and fault low-level handling routines.
9 * This also contains the timer-interrupt handler, as well as all interrupts
10 * and faults that can result in a task-switch.
11 *
12 * Stack layout in 'ret_from_system_call':
13 * 0(%esp) - %ebx
14 * 4(%esp) - %ecx
15 * 8(%esp) - %edx
16 * C(%esp) - %esi
17 * 10(%esp) - %edi
18 * 14(%esp) - %ebp
19 * 18(%esp) - %eax
20 * 1C(%esp) - %ds
21 * 20(%esp) - %es
22 * 24(%esp) - orig_eax
23 * 28(%esp) - %eip
24 * 2C(%esp) - %cs
25 * 30(%esp) - %eflags
26 * 34(%esp) - %oldesp
27 * 38(%esp) - %oldss
28 *
29 * "current" is in register %ebx during any slow entries.
30 */
31 /* The idea for callbacks from monitor -> guest OS.
32 *
33 * First, we require that all callbacks (either via a supplied
34 * interrupt-descriptor-table, or via the special event or failsafe callbacks
35 * in the shared-info-structure) are to ring 1. This just makes life easier,
36 * in that it means we don't have to do messy GDT/LDT lookups to find
37 * out what the privilege level of the return code-selector is. That code
38 * would just be a hassle to write, and would need to account for running
39 * off the end of the GDT/LDT, for example. The event callback has quite
40 * a constrained callback method: the guest OS provides a linear address
41 * which we call back to using the hard-coded __GUEST_CS descriptor (which
42 * is a ring 1 descriptor). For IDT callbacks, we check that the provided
43 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as we
44 * don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
45 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
46 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
47 * than the correct ring) and bad things are bound to ensue -- IRET is
48 * likely to fault, and we may end up killing the domain (no harm can
49 * come to the hypervisor itself, though).
50 *
51 * When doing a callback, we check if the return CS is in ring 0. If so,
52 * callback is delayed until next return to ring != 0.
53 * If return CS is in ring 1, then we create a callback frame
54 * starting at return SS/ESP. The base of the frame does an intra-privilege
55 * interrupt-return.
56 * If return CS is in ring > 1, we create a callback frame starting
57 * at SS/ESP taken from the appropriate section of the current TSS. The base
58 * of the frame does an inter-privilege interrupt-return.
59 *
60 * Note that the "failsafe callback" uses a special stackframe:
61 * { return_DS, return_ES, return_EIP, return_CS, return_EFLAGS, ... }
62 * That is, original values for DS/ES are placed on stack rather than
63 * in DS/ES themselves. Why? It saves us loading them, only to have them
64 * saved/restored in guest OS. Furthermore, if we load them we may cause
65 * a fault if they are invalid, which is a hassle to deal with. We avoid
66 * that problem if we don't load them :-) This property allows us to use
67 * the failsafe callback as a fallback: if we ever fault on loading DS/ES
68 * on return to ring != 0, we can simply package it up as a return via
69 * the failsafe callback, and let the guest OS sort it out (perhaps by
70 * killing an application process). Note that we also do this for any
71 * faulting IRET -- just let the guest OS handle it via the event
72 * callback.
73 *
74 * We terminate a domain in the following cases:
75 * - creating a callback stack frame (due to bad ring-1 stack).
76 * - faulting IRET on entry to failsafe callback handler.
77 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
78 * handler in good order (absolutely no faults allowed!).
79 */
81 #include <xeno/config.h>
82 #include <asm/smp.h>
/* Offsets of the saved registers within the stack frame built by
 * SAVE_ALL / error_code (see the 'Stack layout' comment at the top
 * of this file). */
84 EBX = 0x00
85 ECX = 0x04
86 EDX = 0x08
87 ESI = 0x0C
88 EDI = 0x10
89 EBP = 0x14
90 EAX = 0x18
91 DS = 0x1C
92 ES = 0x20
93 ORIG_EAX = 0x24
94 EIP = 0x28
95 CS = 0x2C
96 EFLAGS = 0x30
97 OLDESP = 0x34
98 OLDSS = 0x38
100 /* Offsets in task_struct */
101 PROCESSOR = 0
102 STATE = 4
103 HYP_EVENTS = 8
104 DOMAIN = 12
105 SHARED_INFO = 16
107 /* Offsets in shared_info_t */
108 EVENTS = 0
109 EVENTS_ENABLE = 4
110 EVENT_ADDR = 8
111 FAILSAFE_ADDR = 12
113 /* Offsets in guest_trap_bounce */
114 GTB_ERROR_CODE = 0
115 GTB_CR2 = 4
116 GTB_FLAGS = 8
117 GTB_CS = 10
118 GTB_EIP = 12
/* Flag bits stored in the GTB_FLAGS byte (tested in
 * process_guest_exception_and_events). */
119 GTBF_TRAP = 1
120 GTBF_TRAP_NOCODE = 2
121 GTBF_TRAP_CR2 = 4
/* EFLAGS bit masks */
123 CF_MASK = 0x00000001
124 IF_MASK = 0x00000200
125 NT_MASK = 0x00004000
/*
 * SAVE_ALL: push the guest register frame (everything from %es down to
 * %ebx, matching the EBX..ES offsets above), then load the hypervisor
 * data segment into %ds/%es so hypervisor C code can run.
 * Clobbers %edx (left holding __HYPERVISOR_DS).
 */
127 #define SAVE_ALL \
128 cld; \
129 pushl %es; \
130 pushl %ds; \
131 pushl %eax; \
132 pushl %ebp; \
133 pushl %edi; \
134 pushl %esi; \
135 pushl %edx; \
136 pushl %ecx; \
137 pushl %ebx; \
138 movl $(__HYPERVISOR_DS),%edx; \
139 movl %edx,%ds; \
140 movl %edx,%es;
/*
 * RESTORE_ALL: unwind the SAVE_ALL frame, skip the orig_eax slot, and
 * iret back to the guest.  Labels 1/2/3 mark the three instructions
 * that can fault on guest-supplied values (popping %ds, popping %es,
 * and the iret itself); the __ex_table entries at the bottom send such
 * faults to fixups 4/5/6.  Each fixup re-pushes whatever has already
 * been popped so a full SAVE_ALL-style frame is rebuilt (with %ds/%es
 * reloaded from %ss, i.e. the hypervisor segment), then jumps to
 * failsafe_callback, which bounces the problem to the guest OS as
 * described in the header comment at the top of this file.
 */
142 #define RESTORE_ALL \
143 popl %ebx; \
144 popl %ecx; \
145 popl %edx; \
146 popl %esi; \
147 popl %edi; \
148 popl %ebp; \
149 popl %eax; \
150 1: popl %ds; \
151 2: popl %es; \
152 addl $4,%esp; \
153 3: iret; \
154 .section .fixup,"ax"; \
155 6: subl $4,%esp; \
156 pushl %es; \
157 5: pushl %ds; \
158 4: pushl %eax; \
159 pushl %ebp; \
160 pushl %edi; \
161 pushl %esi; \
162 pushl %edx; \
163 pushl %ecx; \
164 pushl %ebx; \
165 pushl %ss; \
166 popl %ds; \
167 pushl %ss; \
168 popl %es; \
169 jmp failsafe_callback; \
170 .previous; \
171 .section __ex_table,"a"; \
172 .align 4; \
173 .long 1b,4b; \
174 .long 2b,5b; \
175 .long 3b,6b; \
176 .previous
/*
 * GET_CURRENT(reg): load the current task_struct pointer into 'reg'.
 * The task_struct sits at the base of the 8KB per-task stack, so
 * masking %esp with -8192 (~0x1FFF) yields its address.
 */
178 #define GET_CURRENT(reg) \
179 movl $-8192, reg; \
180 andl %esp, reg
/* First entry to guest context for a newly-created domain: run the
 * standard pending-work checks before dropping into the guest. */
182 ENTRY(ret_from_newdomain)
183 GET_CURRENT(%ebx)
184 jmp test_all_events
186 ALIGN
/* Common exit path back to the guest (see RESTORE_ALL above). */
187 restore_all:
188 RESTORE_ALL
190 ALIGN
/*
 * Hypercall entry point.  %eax carries the hypercall index; the C
 * handlers take their arguments from the saved register frame.  The
 * handler's return value is written back into the saved %eax slot so
 * the guest sees it after restore_all.
 */
191 ENTRY(hypervisor_call)
192 pushl %eax # save orig_eax
193 SAVE_ALL
194 GET_CURRENT(%ebx)
195 andl $255,%eax # index mod 256; assumes NR_syscalls == 256 -- TODO confirm
196 call *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)
197 movl %eax,EAX(%esp) # save the return value
/*
 * test_all_events: check all sources of pending work before returning
 * to the guest.  %ebx == task_struct.  %ecx is loaded with ~0 as an
 * all-ones test mask; %edx points at this CPU's guest_trap_bounce.
 */
199 test_all_events:
200 mov PROCESSOR(%ebx),%eax
201 shl $4,%eax # sizeof(irq_cpustat) == 16
202 lea guest_trap_bounce(%eax),%edx
203 cli # tests must not race interrupts
204 xorl %ecx,%ecx
205 notl %ecx # %ecx = ~0, the test mask used below
206 test_softirqs:
207 mov PROCESSOR(%ebx),%eax
208 shl $4,%eax # sizeof(irq_cpustat) == 16
209 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
210 jnz process_softirqs
211 test_hyp_events:
212 test %ecx, HYP_EVENTS(%ebx)
213 jnz process_hyp_events
214 test_guest_events:
215 movl SHARED_INFO(%ebx),%eax
216 test %ecx,EVENTS(%eax) # any events pending for the guest?
217 jz restore_all
218 test %ecx,EVENTS_ENABLE(%eax) # has the guest masked delivery?
219 jz restore_all
220 /* Prevent unnecessary reentry of event callback (stack overflow!) */
221 xorl %ecx,%ecx
222 movl %ecx,EVENTS_ENABLE(%eax)
223 /* %eax == shared_info, %ebx == task_struct, %edx == guest_trap_bounce */
224 process_guest_events:
225 movl EVENT_ADDR(%eax),%eax # guest's registered event callback
226 movl %eax,GTB_EIP(%edx)
227 movw $__GUEST_CS,GTB_CS(%edx) # callbacks always enter at ring 1
228 call create_bounce_frame
229 jmp restore_all
231 ALIGN
/* Run pending softirqs, preserving %edx (guest_trap_bounce pointer),
 * then re-check the remaining event classes. */
232 process_softirqs:
233 push %edx
234 call SYMBOL_NAME(do_softirq)
235 pop %edx
236 jmp test_hyp_events
238 ALIGN
/* Handle pending hypervisor events with interrupts re-enabled, then
 * restart the full check sequence from the top. */
239 process_hyp_events:
240 sti
241 call SYMBOL_NAME(do_hyp_events)
242 jmp test_all_events
244 /* No special register assumptions */
/*
 * Entered from the RESTORE_ALL fixup path when reloading the guest's
 * %ds/%es (or the final iret) faulted.  Builds a bounce frame to the
 * guest's registered failsafe handler, with the saved DS/ES values
 * pushed onto the guest stack rather than loaded into the segment
 * registers -- see the failsafe stackframe description in the header
 * comment.  After create_bounce_frame, %ds:%esi is the guest stack.
 */
245 failsafe_callback:
246 GET_CURRENT(%ebx)
247 mov PROCESSOR(%ebx),%eax
248 shl $4,%eax # sizeof(irq_cpustat) == 16
249 lea guest_trap_bounce(%eax),%edx
250 movl SHARED_INFO(%ebx),%eax
251 movl FAILSAFE_ADDR(%eax),%eax # guest's failsafe entry point
252 movl %eax,GTB_EIP(%edx)
253 movw $__GUEST_CS,GTB_CS(%edx)
254 call create_bounce_frame
255 subl $8,%esi # add DS/ES to failsafe stack frame
256 movl DS(%esp),%eax
257 FAULT1: movl %eax,(%esi) # may fault on bad ring-1 stack
258 movl ES(%esp),%eax
259 FAULT2: movl %eax,4(%esi) # may fault on bad ring-1 stack
260 movl %esi,OLDESP(%esp)
261 popl %ebx
262 popl %ecx
263 popl %edx
264 popl %esi
265 popl %edi
266 popl %ebp
267 popl %eax
268 addl $12,%esp # skip the DS/ES/orig_eax slots
269 FAULT3: iret # may fault; fixup kills the domain
272 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
273 /* {EIP, CS, EFLAGS, [ESP, SS]} */
274 /* %edx == guest_trap_bounce, %ebx == task_struct */
275 /* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP. */
/*
 * Note: all the '+4' frame offsets below account for the return
 * address our caller's 'call' pushed on top of the saved frame.
 * On exit %ds holds the guest stack selector, while %es still holds
 * the hypervisor segment -- hence callers use %es: overrides to keep
 * reaching hypervisor data afterwards.  Any FAULTn site that traps is
 * routed to the kill_domain fixups via the __ex_table below.
 */
276 create_bounce_frame:
277 mov CS+4(%esp),%cl
278 test $2,%cl # bit 1 of CS RPL: clear for ring 0/1, set for ring 2/3
279 jz 1f /* jump if returning to an existing ring-1 activation */
280 /* obtain ss/esp from TSS -- no current ring-1 activations */
281 movl PROCESSOR(%ebx),%eax
282 shll $8,%eax /* multiply by 256 */
283 addl $init_tss + 12,%eax # &init_tss[cpu].esp1 (12 = offset of esp1)
284 movl (%eax),%esi /* tss->esp1 */
285 FAULT4: movl 4(%eax),%ds /* tss->ss1 */
286 /* base of stack frame must contain ss/esp (inter-priv iret) */
287 subl $8,%esi
288 movl OLDESP+4(%esp),%eax
289 FAULT5: movl %eax,(%esi)
290 movl OLDSS+4(%esp),%eax
291 FAULT6: movl %eax,4(%esi)
292 jmp 2f
293 1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
294 movl OLDESP+4(%esp),%esi
295 FAULT7: movl OLDSS+4(%esp),%ds
296 2: /* Construct a stack frame: EFLAGS, CS/EIP */
297 subl $12,%esi
298 movl EIP+4(%esp),%eax
299 FAULT8: movl %eax,(%esi)
300 movl CS+4(%esp),%eax
301 FAULT9: movl %eax,4(%esi)
302 movl EFLAGS+4(%esp),%eax
303 FAULT10:movl %eax,8(%esi)
304 /* Rewrite our stack frame and return to ring 1. */
305 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
306 andl $0xfffcbeff,%eax # clear TF/NT/RF/VM in the flags we'll iret with
307 movl %eax,EFLAGS+4(%esp)
308 movl %ds,OLDSS+4(%esp)
309 movl %esi,OLDESP+4(%esp)
310 movzwl %es:GTB_CS(%edx),%eax
311 movl %eax,CS+4(%esp)
312 movl %es:GTB_EIP(%edx),%eax
313 movl %eax,EIP+4(%esp)
314 ret
/* Exception table mapping each FAULTn site above to a fixup.  The
 * three fixups differ only in the stack adjustment needed to reach a
 * state from which kill_domain can safely run. */
317 .section __ex_table,"a"
318 .align 4
319 .long FAULT1, kill_domain_fixup3 # Fault writing to ring-1 stack
320 .long FAULT2, kill_domain_fixup3 # Fault writing to ring-1 stack
321 .long FAULT3, kill_domain_fixup1 # Fault executing failsafe iret
322 .long FAULT4, kill_domain_fixup2 # Fault loading ring-1 stack selector
323 .long FAULT5, kill_domain_fixup2 # Fault writing to ring-1 stack
324 .long FAULT6, kill_domain_fixup2 # Fault writing to ring-1 stack
325 .long FAULT7, kill_domain_fixup2 # Fault loading ring-1 stack selector
326 .long FAULT8, kill_domain_fixup2 # Fault writing to ring-1 stack
327 .long FAULT9, kill_domain_fixup2 # Fault writing to ring-1 stack
328 .long FAULT10,kill_domain_fixup2 # Fault writing to ring-1 stack
329 .long FAULT11,kill_domain_fixup3 # Fault writing to ring-1 stack
330 .long FAULT12,kill_domain_fixup3 # Fault writing to ring-1 stack
331 .previous
333 # This handler kills domains which experience unrecoverable faults.
334 .section .fixup,"ax"
# fixup1: the failsafe iret itself faulted -- registers were already
# popped, so rebuild a full frame (dummy orig_eax slot) before killing.
335 kill_domain_fixup1:
336 subl $4,%esp
337 SAVE_ALL
338 jmp kill_domain
# fixup2: faulted inside create_bounce_frame -- discard the return
# address its caller's 'call' pushed, then fall into fixup3.
339 kill_domain_fixup2:
340 addl $4,%esp
# fixup3: %ds may hold a guest selector -- reload it from %ss (the
# hypervisor segment) so C code can run.
341 kill_domain_fixup3:
342 pushl %ss
343 popl %ds
344 jmp kill_domain
345 .previous
347 ALIGN
/*
 * Deliver a trap recorded in guest_trap_bounce (by the C fault
 * handlers) to the guest, then fall back into the normal event checks.
 * %ebx == task_struct.  After create_bounce_frame, %ds holds the guest
 * stack selector, so hypervisor data is reached via %es: overrides
 * until %ds is restored at label 2.
 */
348 process_guest_exception_and_events:
349 mov PROCESSOR(%ebx),%eax
350 shl $4,%eax # sizeof(irq_cpustat) == 16
351 lea guest_trap_bounce(%eax),%edx
352 testb $~0,GTB_FLAGS(%edx) # any trap pending at all?
353 jz test_all_events
354 call create_bounce_frame # just the basic frame
355 mov %es:GTB_FLAGS(%edx),%cl
356 test $GTBF_TRAP_NOCODE,%cl # trap without an error code?
357 jnz 2f
358 subl $4,%esi # push error_code onto guest frame
359 movl %es:GTB_ERROR_CODE(%edx),%eax
360 FAULT11:movl %eax,(%esi) # may fault on bad ring-1 stack
361 test $GTBF_TRAP_CR2,%cl # page fault: also pass %cr2
362 jz 1f
363 subl $4,%esi # push %cr2 onto guest frame
364 movl %es:GTB_CR2(%edx),%eax
365 FAULT12:movl %eax,(%esi) # may fault on bad ring-1 stack
366 1: movl %esi,OLDESP(%esp)
367 2: push %es # unclobber %ds
368 pop %ds
369 movb $0,GTB_FLAGS(%edx) # mark the trap as delivered
370 jmp test_all_events
372 ALIGN
/* Interrupt return path: run the pending-event checks only when the
 * interrupt arrived from guest (non-ring-0) context; interrupted
 * hypervisor frames are restored immediately. */
373 ENTRY(ret_from_intr)
374 GET_CURRENT(%ebx)
375 movb CS(%esp),%al
376 testb $3,%al # return to non-supervisor?
377 jne test_all_events
378 jmp restore_all
380 ALIGN
/* Exception return path: faults taken in guest context go through
 * trap delivery (process_guest_exception_and_events); faults taken in
 * the hypervisor itself are restored directly.  %ebx is already the
 * task_struct, set up by error_code. */
381 ret_from_exception:
382 movb CS(%esp),%al
383 testb $3,%al # return to non-supervisor?
384 jne process_guest_exception_and_events
385 jmp restore_all
387 ALIGN
389 ENTRY(divide_error)
390 pushl $0 # no error code
391 pushl $ SYMBOL_NAME(do_divide_error)
392 ALIGN
/*
 * Common exception bottleneck.  On entry the stack holds the C handler
 * address and the error code in what become the ES and ORIG_EAX slots
 * of the final frame.  This code pushes the remaining registers in
 * SAVE_ALL layout, pulls the handler address and error code out of the
 * frame, stores the original %es and -1 in their places, loads the
 * hypervisor data segments, and calls the handler as
 * handler(pt_regs *regs, long error_code).
 */
393 error_code:
394 pushl %ds
395 pushl %eax
396 xorl %eax,%eax
397 pushl %ebp
398 pushl %edi
399 pushl %esi
400 pushl %edx
401 decl %eax # eax = -1
402 pushl %ecx
403 pushl %ebx
404 cld
405 movl %es,%ecx
406 movl ORIG_EAX(%esp), %esi # get the error code
407 movl ES(%esp), %edi # get the function address
408 movl %eax, ORIG_EAX(%esp) # orig_eax slot := -1 (not a hypercall)
409 movl %ecx, ES(%esp) # ES slot := original %es
410 movl %esp,%edx
411 pushl %esi # push the error code
412 pushl %edx # push the pt_regs pointer
413 movl $(__HYPERVISOR_DS),%edx
414 movl %edx,%ds
415 movl %edx,%es
416 GET_CURRENT(%ebx)
417 call *%edi
418 addl $8,%esp # pop the two handler arguments
419 jmp ret_from_exception
/*
 * Exception entry stubs.  Each pushes a dummy error code ($0) when the
 * CPU does not supply one, then its C handler's address, and joins the
 * common error_code path above.  Stubs without the "pushl $0" are for
 * exceptions where the CPU pushes a genuine error code itself
 * (double_fault, invalid_TSS, segment_not_present, stack_segment,
 * general_protection, alignment_check, page_fault).
 */
421 ENTRY(coprocessor_error)
422 pushl $0
423 pushl $ SYMBOL_NAME(do_coprocessor_error)
424 jmp error_code
426 ENTRY(simd_coprocessor_error)
427 pushl $0
428 pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
429 jmp error_code
431 ENTRY(device_not_available)
432 pushl $0
433 pushl $SYMBOL_NAME(math_state_restore)
434 jmp error_code
436 ENTRY(debug)
437 pushl $0
438 pushl $ SYMBOL_NAME(do_debug)
439 jmp error_code
/* NMI does not go via error_code: it saves a full frame, calls
 * do_nmi(regs, 0) directly, and restores. */
441 ENTRY(nmi)
442 pushl %eax
443 SAVE_ALL
444 movl %esp,%edx # pt_regs pointer
445 pushl $0 # dummy error code
446 pushl %edx
447 call SYMBOL_NAME(do_nmi)
448 addl $8,%esp
449 RESTORE_ALL
451 ENTRY(int3)
452 pushl $0
453 pushl $ SYMBOL_NAME(do_int3)
454 jmp error_code
456 ENTRY(overflow)
457 pushl $0
458 pushl $ SYMBOL_NAME(do_overflow)
459 jmp error_code
461 ENTRY(bounds)
462 pushl $0
463 pushl $ SYMBOL_NAME(do_bounds)
464 jmp error_code
466 ENTRY(invalid_op)
467 pushl $0
468 pushl $ SYMBOL_NAME(do_invalid_op)
469 jmp error_code
471 ENTRY(coprocessor_segment_overrun)
472 pushl $0
473 pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
474 jmp error_code
476 ENTRY(double_fault)
477 pushl $ SYMBOL_NAME(do_double_fault)
478 jmp error_code
480 ENTRY(invalid_TSS)
481 pushl $ SYMBOL_NAME(do_invalid_TSS)
482 jmp error_code
484 ENTRY(segment_not_present)
485 pushl $ SYMBOL_NAME(do_segment_not_present)
486 jmp error_code
488 ENTRY(stack_segment)
489 pushl $ SYMBOL_NAME(do_stack_segment)
490 jmp error_code
492 ENTRY(general_protection)
493 pushl $ SYMBOL_NAME(do_general_protection)
494 jmp error_code
496 ENTRY(alignment_check)
497 pushl $ SYMBOL_NAME(do_alignment_check)
498 jmp error_code
500 ENTRY(page_fault)
501 pushl $ SYMBOL_NAME(do_page_fault)
502 jmp error_code
504 ENTRY(machine_check)
505 pushl $0
506 pushl $ SYMBOL_NAME(do_machine_check)
507 jmp error_code
509 ENTRY(spurious_interrupt_bug)
510 pushl $0
511 pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
512 jmp error_code
514 .data
/* Hypercall dispatch table, indexed by the (masked) hypercall number
 * in hypervisor_call above.  Padded to NR_syscalls entries with
 * sys_ni_syscall so out-of-range indices hit a safe stub. */
515 ENTRY(hypervisor_call_table)
516 .long SYMBOL_NAME(do_set_trap_table)
517 .long SYMBOL_NAME(do_process_page_updates)
518 .long SYMBOL_NAME(do_console_write)
519 .long SYMBOL_NAME(do_set_gdt)
520 .long SYMBOL_NAME(do_stack_and_ldt_switch)
521 .long SYMBOL_NAME(do_net_update)
522 .long SYMBOL_NAME(do_fpu_taskswitch)
523 .long SYMBOL_NAME(do_yield)
524 .long SYMBOL_NAME(kill_domain)
525 .long SYMBOL_NAME(do_dom0_op)
526 .long SYMBOL_NAME(do_network_op)
527 .long SYMBOL_NAME(do_set_debugreg)
528 .long SYMBOL_NAME(do_get_debugreg)
529 .long SYMBOL_NAME(do_update_descriptor)
530 .rept NR_syscalls-(.-hypervisor_call_table)/4
531 .long SYMBOL_NAME(sys_ni_syscall)
532 .endr