
xen/arch/x86/hvm/svm/x86_32/exits.S @ 9088:072d51860554

Clean up SVM 32-bit exits.S file (remove all tsc/fpu).

Signed-off-by: Tom Woller <thomas.woller@amd.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Mar 01 23:38:18 2006 +0100 (2006-03-01)
parents 7d89f672aa49
children 415614d3a1ee
/*
 * exits.S: SVM architecture-specific exit handling.
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, AMD Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>
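
/*
 * GET_CURRENT(reg): load the pointer to the 'current' vcpu into reg.
 * The vcpu pointer is kept in the last word of this cpu's stack, so we
 * round %esp up to that word (re-aligning it to 4 bytes) and dereference.
 */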
#define GET_CURRENT(reg)         \
        movl $STACK_SIZE-4, reg; \
        orl  %esp, reg;          \
        andl $~3,reg;            \
        movl (reg),reg;

/*
 * At VMExit time the processor saves the guest selectors, esp, eip,
 * and eflags. Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time. The host eflags are not saved, and we
 * set them to a fixed value.
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ(). Compared with cpu_user_regs, we skip pushing the following:
 *     (10) u32 gs;
 *     (9)  u32 fs;
 *     (8)  u32 ds;
 *     (7)  u32 es;
 *       <- get_stack_bottom() (= HOST_ESP)
 *     (6)  u32 ss;
 *     (5)  u32 esp;
 *     (4)  u32 eflags;
 *     (3)  u32 cs;
 *     (2)  u32 eip;
 *     (2/1) u16 entry_vector;
 *     (1/1) u16 error_code;
 * However, get_stack_bottom() actually returns 20 bytes before the real
 * bottom of the stack to allow space for:
 * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
 */

#define HVM_MONITOR_EFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS    6     /* See the above explanation */
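
/*
 * HVM_SAVE_ALL_NOSEGREGS builds the general-register portion of a
 * cpu_user_regs frame on the stack: it loads the fixed monitor eflags,
 * reserves room for the skipped slots listed above, then pushes the GPRs.
 * HVM_RESTORE_ALL_NOSEGREGS unwinds the same frame in reverse.
 */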
#define HVM_SAVE_ALL_NOSEGREGS \
        pushl $HVM_MONITOR_EFLAGS; \
        popf; \
        subl $(NR_SKIPPED_REGS*4), %esp; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx;

#define HVM_RESTORE_ALL_NOSEGREGS \
        popl %ebx; \
        popl %ecx; \
        popl %edx; \
        popl %esi; \
        popl %edi; \
        popl %ebp; \
        popl %eax; \
        addl $(NR_SKIPPED_REGS*4), %esp

        ALIGN
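
/*
 * SVM instructions encoded as raw opcode bytes, since the assembler in
 * use may not understand the VMRUN/VMLOAD/VMSAVE/STGI/CLGI mnemonics.
 */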
#define VMRUN  .byte 0x0F,0x01,0xD8
#define VMLOAD .byte 0x0F,0x01,0xDA
#define VMSAVE .byte 0x0F,0x01,0xDB
#define STGI   .byte 0x0F,0x01,0xDC
#define CLGI   .byte 0x0F,0x01,0xDD
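
/*
 * svm_asm_do_launch: world switch into the guest. The guest's %eax is
 * propagated from the stack frame into the VMCB, host state is saved to
 * the host save area with VMSAVE, the remaining guest GPRs are popped,
 * and VMLOAD/VMRUN enter the guest. On #VMEXIT the guest state is saved
 * back, host state is reloaded, and svm_vmexit_handler() is called.
 */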
ENTRY(svm_asm_do_launch)
        sti
        CLGI
        GET_CURRENT(%ebx)
        movl VCPU_svm_vmcb(%ebx), %ecx
        movl 24(%esp), %eax             /* guest eax slot of the cpu_user_regs frame */
        movl %eax, VMCB_rax(%ecx)
        movl VCPU_svm_hsa_pa(%ebx), %eax
        VMSAVE                          /* save host state to the host save area */

        movl VCPU_svm_vmcb_pa(%ebx), %eax
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp

        /*
         * Skip %eax, we need to have vmcb address in there.
         * Don't worry, EAX is restored through the VMRUN instruction.
         */
        addl $4, %esp
        addl $(NR_SKIPPED_REGS*4), %esp
        VMLOAD                          /* load guest state from the VMCB */
        VMRUN                           /* enter the guest; execution resumes here on #VMEXIT */
        VMSAVE                          /* save guest state back to the VMCB */
        /* eax is the only register we're allowed to touch here... */

        GET_CURRENT(%eax)

        movl VCPU_svm_hsa_pa(%eax), %eax
        VMLOAD                          /* reload host state from the host save area */

        HVM_SAVE_ALL_NOSEGREGS
        STGI
        call svm_vmexit_handler
        jmp  svm_asm_do_resume

        ALIGN
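
/*
 * svm_asm_do_resume: test for pending softirqs before returning to the
 * guest. If none are pending, prepare interrupt, ASID and CR2 state and
 * jump back to svm_asm_do_launch; otherwise service the softirqs first.
 */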
ENTRY(svm_asm_do_resume)
svm_test_all_events:
        GET_CURRENT(%ebx)
/*test_all_events:*/
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl VCPU_processor(%ebx),%eax
        shl  $IRQSTAT_shift,%eax
        test %ecx,irq_stat(%eax,1)
        jnz  svm_process_softirqs
svm_restore_all_guest:
        call svm_intr_assist
        call svm_asid
        call svm_load_cr2
        sti
        /*
         * We are going back to the SVM-based VM.
         * By this time, all the setup in the VMCB must be complete.
         */
        jmp  svm_asm_do_launch

        ALIGN
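/*
 * Softirq work is pending: re-enable interrupts, process the softirqs,
 * then retest for events before entering the guest.
 */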
svm_process_softirqs:
        sti
        call do_softirq
        jmp  svm_test_all_events