
view xen/arch/x86/x86_32/supervisor_mode_kernel.S @ 15412:acb7aa72fac7

i386: remove NMI deferral by instead making sure selector registers
are always stored/restored correctly despite the potential for an NMI
(and also MCE, with a subsequent patch) to kick in.

The idea is to always check the values read from %ds and %es against
__HYPERVISOR_DS, and to store into the current frame (all normal
handlers) or the outermost one (NMI and MCE) only if the value read is
different. That way, an NMI or MCE occurring during frame setup stores,
on behalf of the interrupted handler, whichever selectors have not been
saved so far. The interrupted handler has either already managed to
read the guest selector (in which case it can store it regardless of
whether an NMI/MCE kicked in between the read and the store), or it
finds __HYPERVISOR_DS already in the register and thus knows not to
store it (the nested handler having done the store).
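
To make the store rule concrete, here is a minimal C sketch of the idea
for %ds alone (everything here is illustrative: cpu_ds and struct frame
stand in for the live segment register and the saved frame slot, the
selector values are placeholders, and the real logic is of course
written in assembly):

    #include <stdio.h>

    #define __HYPERVISOR_DS 0x10u  /* placeholder value for this sketch only */

    struct frame { unsigned int ds; };  /* stand-in for the frame's %ds slot */
    static unsigned int cpu_ds;         /* stand-in for the live %ds register */

    /* Store %ds into the frame only if it still holds the guest value.  If a
     * nested NMI/MCE ran first, it has already stored the guest selector on
     * our behalf and left __HYPERVISOR_DS in the register, so do not store. */
    static void save_guest_ds(struct frame *f)
    {
        unsigned int sel = cpu_ds;
        if (sel != __HYPERVISOR_DS)
            f->ds = sel;
        cpu_ds = __HYPERVISOR_DS;
    }

    int main(void)
    {
        struct frame f = { 0 };
        cpu_ds = 0x7b;              /* pretend the guest's %ds is still loaded */
        save_guest_ds(&f);          /* whoever reads the guest value stores it */
        save_guest_ds(&f);          /* a later save sees __HYPERVISOR_DS and
                                       leaves the frame slot alone */
        printf("saved ds = %#x\n", f.ds);   /* 0x7b, stored exactly once */
        return 0;
    }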

For the restore portion, this makes use of the fact that there is
exactly one such code sequence. By moving the selector restore past all
other restores (including all stack pointer adjustments), the NMI/MCE
handlers can safely detect, by range-checking EIP, whether any selector
would already have been restored, and can move EIP back to the
beginning of the selector restore sequence without having to touch the
stack pointer itself or any other GPR.
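
A correspondingly small sketch of the restore-side fixup, again purely
illustrative (the two bounds stand for the start and end addresses of
the single selector-restore sequence, which the real code identifies by
its assembly labels):

    /* If the interrupted EIP lies inside the selector-restore sequence, wind
     * it back to the start of that sequence so that every selector is loaded
     * (again) once the NMI/MCE handler returns.  Nothing else needs to be
     * adjusted: neither the stack pointer nor any GPR. */
    static void rewind_selector_restore(unsigned long *saved_eip,
                                        unsigned long restore_start,
                                        unsigned long restore_end)
    {
        if (*saved_eip >= restore_start && *saved_eip < restore_end)
            *saved_eip = restore_start;
    }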

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Jun 21 12:13:06 2007 +0100 (2007-06-21)
parents 29b02d929b7e
/*
 * Handle stack fixup for guest running in RING 0.
 *
 * Copyright (c) 2006 Ian Campbell
 *
 * When a guest kernel is allowed to run in RING 0 a hypercall,
 * interrupt or exception interrupting the guest kernel will not cause
 * a privilege level change and therefore the stack will not be swapped
 * to the Xen stack.
 *
 * To fix this we look for RING 0 activation frames with a stack
 * pointer below HYPERVISOR_VIRT_START (indicating a guest kernel
 * frame) and fix this up by locating the Xen stack via the TSS
 * and moving the activation frame to the Xen stack. In the process we
 * convert the frame into an inter-privilege frame returning to RING 1
 * so that we can catch and reverse the process on exit.
 */

#include <xen/config.h>
#include <asm/asm_defns.h>
#include <public/xen.h>

#define guestreg(field) ((field)-UREGS_eip+36)
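        # guestreg(field) converts a UREGS_* offset (taken relative to
        # UREGS_eip) into an offset from %esp in restore_ring0_guest below:
        # after the PUSHA there, %esp sits 36 bytes (8 PUSHA words plus the
        # 4-byte return address) below the frame's saved EIP slot.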

        # Upon entry the stack should be the Xen stack and contain:
        #   %ss, %esp, EFLAGS, %cs|1, %eip, RETURN
        # On exit the stack should be %ss:%esp (i.e. the guest stack)
        # and contain:
        #   EFLAGS, %cs, %eip, RETURN
        ALIGN
ENTRY(restore_ring0_guest)
        pusha

        # Point %gs:%esi to guest stack.
RRG0:   movw guestreg(UREGS_ss)(%esp),%gs
        movl guestreg(UREGS_esp)(%esp),%esi

        # Copy EFLAGS, %cs, %eip, RETURN, PUSHA from Xen stack to guest stack.
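        # (12 words: EFLAGS, %cs, %eip and RETURN make 4, plus the 8
        # general-purpose registers saved by PUSHA.)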
        movl $12,%ecx /* 12 32-bit values */

1:      subl $4,%esi
        movl -4(%esp,%ecx,4),%eax
RRG1:   movl %eax,%gs:(%esi)
        loop 1b

RRG2:   andl $~3,%gs:guestreg(UREGS_cs)(%esi)

        movl %gs,%eax

        # We need to do this because these registers are not present
        # on the guest stack so they cannot be restored by the code in
        # restore_all_guest.
RRG3:   mov guestreg(UREGS_ds)(%esp),%ds
RRG4:   mov guestreg(UREGS_es)(%esp),%es
RRG5:   mov guestreg(UREGS_fs)(%esp),%fs
RRG6:   mov guestreg(UREGS_gs)(%esp),%gs

RRG7:   movl %eax,%ss
        movl %esi,%esp

        popa
        ret
.section __ex_table,"a"
        .long RRG0,domain_crash_synchronous
        .long RRG1,domain_crash_synchronous
        .long RRG2,domain_crash_synchronous
        .long RRG3,domain_crash_synchronous
        .long RRG4,domain_crash_synchronous
        .long RRG5,domain_crash_synchronous
        .long RRG6,domain_crash_synchronous
        .long RRG7,domain_crash_synchronous
.previous

        # Upon entry the stack should be a guest stack and contain:
        #   EFLAGS, %cs, %eip, ERROR, RETURN
        # On exit the stack should be the Xen stack and contain:
        #   %ss, %esp, EFLAGS, %cs|1, %eip, ERROR, RETURN
        ALIGN
ENTRY(fixup_ring0_guest_stack)
        pushl %eax
        pushl %ecx
        pushl %ds
        pushl %gs
        pushl %esi

        movw $__HYPERVISOR_DS,%ax
        movw %ax,%ds

        # Point %gs:%esi to guest stack frame.
        movw %ss,%ax
        movw %ax,%gs
        movl %esp,%esi
        # Account for entries on the guest stack:
        # * Pushed by normal exception/interrupt/hypercall mechanisms
        #   * EFLAGS, %cs, %eip, ERROR == 4 words.
        # * Pushed by the fixup routine
        #   * [RETURN], %eax, %ecx, %ds, %gs and %esi == 6 words.
        addl $((6+4)*4),%esi

        # %gs:%esi now points to the guest stack before the
        # interrupt/exception occurred.

        /*
         * Reverse the __TSS macro, giving us the CPU number.
         * The TSS for this cpu is at init_tss + ( cpu * 128 ).
         */
        str %ecx
        shrl $3,%ecx                            # Calculate GDT index for TSS.
        subl $(FIRST_RESERVED_GDT_ENTRY+8),%ecx # %ecx = 2*cpu.
        shll $6,%ecx                            # Each TSS entry is 0x80 bytes
        addl $init_tss,%ecx                     # but we have 2*cpu from above.
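        # (Worked example: on CPU 3 the subtraction leaves %ecx = 6, and
        # 6 << 6 = 384 = 3 * 128, the byte offset of that CPU's TSS within
        # init_tss.)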

        # Load Xen stack from TSS.
        movw TSS_ss0(%ecx),%ax
TRP1:   movw %ax,%ss
        movl TSS_esp0(%ecx),%esp

        pushl %gs
        pushl %esi

        # Move EFLAGS, %cs, %eip, ERROR, RETURN, %eax, %ecx, %ds, %gs, %esi
        # from guest stack to Xen stack.
        movl $10,%ecx
1:      subl $4,%esp
        subl $4,%esi
TRP2:   movl %gs:(%esi),%eax
        movl %eax,(%esp)
        loop 1b

        # CS = CS|1 to simulate RING1 stack frame.
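        # %cs is the ninth of the ten words just copied, hence 32(%esp).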
        orl $1,32(%esp)

        popl %esi
        popl %gs
        popl %ds
        popl %ecx
        popl %eax
        ret
.section __ex_table,"a"
        .long TRP1,domain_crash_synchronous
        .long TRP2,domain_crash_synchronous
.previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from supervisor_mode_kernel.S (%lx)\n"

domain_crash_synchronous:
        pushl $domain_crash_synchronous_string
        call printk
        jmp __domain_crash_synchronous