ia64/xen-unstable

view xen/arch/x86/hvm/vmx/x86_32/exits.S @ 8867:bee659de2a36

Add back code to zero the 'saved eflags' value on vmx vmexit.
vmx mode is unstable without this -- the root cause needs to
be investigated.

From: Jun Nakajima

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Feb 16 16:35:09 2006 +0100 (2006-02-16)
parents 707cb68a391f
children d3b0d74702b5
line source
1 /*
2 * exits.S: VMX architecture-specific exit handling.
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 */
18 #include <xen/config.h>
19 #include <xen/errno.h>
20 #include <xen/softirq.h>
21 #include <asm/asm_defns.h>
22 #include <asm/apicdef.h>
23 #include <asm/page.h>
24 #include <public/xen.h>
/*
 * GET_CURRENT(reg): load the 'current' vcpu pointer into <reg>.
 * The pointer is stored in the last word of this cpu's hypervisor
 * stack: OR %esp with (STACK_SIZE-4) to reach the top of the stack,
 * mask the low 2 bits for 4-byte alignment, then dereference.
 * Clobbers <reg> only; leaves %esp and flags-dependent code alone
 * apart from the flag effects of orl/andl.
 */
26 #define GET_CURRENT(reg) \
27 movl $STACK_SIZE-4, reg; \
28 orl %esp, reg; \
29 andl $~3,reg; \
30 movl (reg),reg;
32 /*
33 * At VMExit time the processor saves the guest selectors, esp, eip,
34 * and eflags. Therefore we don't save them, but simply decrement
35 * the kernel stack pointer to make it consistent with the stack frame
36 * at usual interruption time. The host's eflags are not saved by VMX,
37 * so we set them to a fixed value.
38 *
39 * We also need the room, especially because orig_eax field is used
40 * by do_IRQ(). Compared with cpu_user_regs, we skip pushing the following:
41 * (10) u32 gs;
42 * (9) u32 fs;
43 * (8) u32 ds;
44 * (7) u32 es;
45 * <- get_stack_bottom() (= HOST_ESP)
46 * (6) u32 ss;
47 * (5) u32 esp;
48 * (4) u32 eflags;
49 * (3) u32 cs;
50 * (2) u32 eip;
51 * (2/1) u16 entry_vector;
52 * (1/1) u16 error_code;
53 * However, get_stack_bottom() actually returns 20 bytes before the real
54 * bottom of the stack to allow space for:
55 * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
56 */
/*
 * HVM_SAVE_ALL_NOSEGREGS: build a cpu_user_regs-style frame at VMExit.
 * VMX does not restore the host eflags, so first load the fixed
 * monitor value (IF set). Then skip the NR_SKIPPED_REGS slots that
 * correspond to state the hardware saved in the VMCS (see the layout
 * comment above), zero the saved-eflags slot at 0xc(%esp), and push
 * the general-purpose registers in cpu_user_regs order.
 */
58 #define HVM_MONITOR_EFLAGS 0x202 /* IF on */
59 #define NR_SKIPPED_REGS 6 /* See the above explanation */
60 #define HVM_SAVE_ALL_NOSEGREGS \
61 pushl $HVM_MONITOR_EFLAGS; \
62 popf; \
63 subl $(NR_SKIPPED_REGS*4), %esp; \
64 movl $0, 0xc(%esp); /* XXX why do we need to force eflags==0 ?? */ \
65 pushl %eax; \
66 pushl %ebp; \
67 pushl %edi; \
68 pushl %esi; \
69 pushl %edx; \
70 pushl %ecx; \
71 pushl %ebx;
/*
 * HVM_RESTORE_ALL_NOSEGREGS: exact inverse of HVM_SAVE_ALL_NOSEGREGS.
 * Pop the general-purpose registers in reverse push order, then
 * discard the NR_SKIPPED_REGS slots whose contents the hardware will
 * reload from the VMCS on VM entry. Pop order must stay in lockstep
 * with the save macro's push order.
 */
73 #define HVM_RESTORE_ALL_NOSEGREGS \
74 popl %ebx; \
75 popl %ecx; \
76 popl %edx; \
77 popl %esi; \
78 popl %edi; \
79 popl %ebp; \
80 popl %eax; \
81 addl $(NR_SKIPPED_REGS*4), %esp
83 ALIGN
/*
 * vmx_asm_vmexit_handler: CPU lands here on every VMExit (this is the
 * host RIP programmed in the VMCS — presumably set elsewhere; confirm
 * against the VMCS setup code). Builds the register frame, traces the
 * exit, dispatches to the C handler, then falls into the resume path
 * to re-enter the guest. Does not return.
 */
85 ENTRY(vmx_asm_vmexit_handler)
86 /* selectors are restored/saved by VMX */
87 HVM_SAVE_ALL_NOSEGREGS
88 call vmx_trace_vmexit
89 call vmx_vmexit_handler
90 jmp vmx_asm_do_resume
/*
 * vmx_asm_common <launch> <initialized>: shared tail of every VM entry.
 *   launch=1      -> enter the guest with VMLAUNCH, else VMRESUME.
 *   initialized=1 -> first poll this cpu's softirq bits (with
 *                    interrupts disabled so the test cannot race a new
 *                    softirq) and run do_softirq until none are
 *                    pending, then perform vmx_intr_assist /
 *                    vmx_load_cr2 / vmx_trace_vmentry before entry.
 * On a successful VM entry this code never falls through; if the
 * entry instruction fails it continues to the next instruction, where
 * we push eflags (holding the entry failure status) and call the
 * appropriate C failure handler.
 */
.macro vmx_asm_common launch initialized
93 1:
94 /* vmx_test_all_events */
95 .if \initialized
96 GET_CURRENT(%ebx)
97 /*test_all_events:*/
98 xorl %ecx,%ecx
99 notl %ecx
100 cli # tests must not race interrupts
101 /*test_softirqs:*/
102 movl VCPU_processor(%ebx),%eax
103 shl $IRQSTAT_shift,%eax
104 test %ecx,irq_stat(%eax,1) /* any softirq bit pending on this cpu? */
105 jnz 2f
107 /* vmx_restore_all_guest */
108 call vmx_intr_assist
109 call vmx_load_cr2
110 call vmx_trace_vmentry
111 .endif
112 HVM_RESTORE_ALL_NOSEGREGS
113 /*
114 * Check if we are going back to VMX-based VM
115 * By this time, all the setups in the VMCS must be complete.
116 */
117 .if \launch
118 /* VMLAUNCH */
119 .byte 0x0f,0x01,0xc2 /* hand-encoded opcode; assembler presumably lacks the mnemonic */
120 pushf
121 call vm_launch_fail
122 .else
123 /* VMRESUME */
124 .byte 0x0f,0x01,0xc3 /* hand-encoded opcode */
125 pushf
126 call vm_resume_fail
127 .endif
128 /* Should never reach here */
129 hlt
131 ALIGN
132 .if \initialized
133 2:
134 /* vmx_process_softirqs */
135 sti
136 call do_softirq
137 jmp 1b /* re-run the softirq test from the top */
138 ALIGN
139 .endif
140 .endm
/* First entry on a fresh VMCS: VMLAUNCH, no softirq/event pre-checks. */
142 ENTRY(vmx_asm_do_launch)
143 vmx_asm_common 1 0
/* Normal re-entry path after a VMExit: softirq checks, then VMRESUME. */
145 ENTRY(vmx_asm_do_resume)
146 vmx_asm_common 0 1
/* Re-entry that must VMLAUNCH (e.g. a different VMCS) with full checks. */
148 ENTRY(vmx_asm_do_relaunch)
149 vmx_asm_common 1 1