
xen/arch/x86/hvm/svm/x86_64/exits.S @ 11205:b3bfff8b1c10

[SVM] Fix 64-bit VMRUN.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Aug 17 19:24:20 2006 +0100 (2006-08-17)
parents 043a4aa24781
children 88418d7f22f2
/*
 * exits.S: SVM architecture-specific exit handling.
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, AMD Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>
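
/*
 * GET_CURRENT(reg): load the 'current' vcpu pointer into reg. The
 * pointer lives in the top 8-byte word of the STACK_SIZE-aligned
 * per-cpu stack: OR-ing %rsp with STACK_SIZE-8 produces that word's
 * address (the AND forces 8-byte alignment), and the final load
 * fetches the struct vcpu pointer itself.
 */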
#define GET_CURRENT(reg)                \
        movq $STACK_SIZE-8, reg;        \
        orq  %rsp, reg;                 \
        andq $~7,reg;                   \
        movq (reg),reg;

/*
 * At VMExit time the processor saves the guest selectors, rsp, rip and
 * rflags into the VMCB. Therefore we do not save them ourselves, but
 * simply decrement the kernel stack pointer so it stays consistent with
 * the stack frame built at ordinary interruption time. The host's
 * rflags is not saved by the hardware across the world switch, so we
 * set it to a fixed value.
 *
 * We also need the room, in particular because the orig_eax field is
 * used by do_IRQ(). Compared with cpu_user_regs, we skip pushing the
 * following (error_code and entry_vector share a single 64-bit slot,
 * hence NR_SKIPPED_REGS is 6):
 *   (10)  u64 gs;
 *   (9)   u64 fs;
 *   (8)   u64 ds;
 *   (7)   u64 es;
 *         <- get_stack_bottom() (= HOST_ESP)
 *   (6)   u64 ss;
 *   (5)   u64 rsp;
 *   (4)   u64 rflags;
 *   (3)   u64 cs;
 *   (2)   u64 rip;
 *   (2/1) u32 entry_vector;
 *   (1/1) u32 error_code;
 */
#define HVM_MONITOR_RFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS    6     /* See the above explanation */
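
/*
 * Build the general-register half of a cpu_user_regs frame: force the
 * host rflags to a known value (interrupts enabled), leave room for the
 * six slots described above, then push all fifteen GPRs in frame order.
 */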
#define HVM_SAVE_ALL_NOSEGREGS                  \
        pushq $HVM_MONITOR_RFLAGS;              \
        popfq;                                  \
        subq  $(NR_SKIPPED_REGS*8), %rsp;       \
        pushq %rdi;                             \
        pushq %rsi;                             \
        pushq %rdx;                             \
        pushq %rcx;                             \
        pushq %rax;                             \
        pushq %r8;                              \
        pushq %r9;                              \
        pushq %r10;                             \
        pushq %r11;                             \
        pushq %rbx;                             \
        pushq %rbp;                             \
        pushq %r12;                             \
        pushq %r13;                             \
        pushq %r14;                             \
        pushq %r15;

#define HVM_RESTORE_ALL_NOSEGREGS               \
        popq %r15;                              \
        popq %r14;                              \
        popq %r13;                              \
        popq %r12;                              \
        popq %rbp;                              \
        popq %rbx;                              \
        popq %r11;                              \
        popq %r10;                              \
        popq %r9;                               \
        popq %r8;                               \
        popq %rax;                              \
        popq %rcx;                              \
        popq %rdx;                              \
        popq %rsi;                              \
        popq %rdi;                              \
        addq $(NR_SKIPPED_REGS*8), %rsp;
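
/*
 * SVM instructions, emitted as raw opcode bytes so the file assembles
 * even with toolchains that do not yet know the SVM extensions. VMRUN,
 * VMLOAD and VMSAVE take the physical address of a VMCB in %rax; STGI
 * and CLGI set/clear the global interrupt flag and take no operands.
 */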
#define VMRUN  .byte 0x0F,0x01,0xD8
#define VMLOAD .byte 0x0F,0x01,0xDA
#define VMSAVE .byte 0x0F,0x01,0xDB
#define STGI   .byte 0x0F,0x01,0xDC
#define CLGI   .byte 0x0F,0x01,0xDD
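
/*
 * svm_asm_do_launch: world-switch into the guest. With the global
 * interrupt flag cleared (CLGI), the host's extra state is VMSAVEd into
 * this cpu's root VMCB, the guest's GPRs are restored from the
 * cpu_user_regs frame, and VMLOAD/VMRUN enter the guest. Control comes
 * back after VMRUN when the next #VMEXIT occurs.
 */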
ENTRY(svm_asm_do_launch)
        sti
        CLGI
        GET_CURRENT(%rbx)
        movq VCPU_svm_vmcb(%rbx), %rcx
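        /* %rax travels in and out of the guest via the VMCB, not the
         * stack frame, so copy the frame's value into the VMCB here. */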
        movq UREGS_rax(%rsp), %rax
        movq %rax, VMCB_rax(%rcx)
        leaq root_vmcb_pa(%rip), %rax
        movl VCPU_processor(%rbx), %ecx
        movq (%rax,%rcx,8), %rax
        VMSAVE
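
        /* Host state is saved; from here %rax holds the guest VMCB's
         * physical address, and the guest GPRs are popped in frame order. */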
        movq VCPU_svm_vmcb_pa(%rbx), %rax
        popq %r15
        popq %r14
        popq %r13
        popq %r12
        popq %rbp
        popq %rbx
        popq %r11
        popq %r10
        popq %r9
        popq %r8
        /*
         * Skip %rax: we need the VMCB address in there.
         * Don't worry, %rax is restored through the VMRUN instruction.
         */
        addq $8, %rsp
        popq %rcx
        popq %rdx
        popq %rsi
        popq %rdi
        addq $(NR_SKIPPED_REGS*8), %rsp

        VMLOAD
        VMRUN
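        /* The next #VMEXIT resumes execution here: save the guest's
         * extra state back into its VMCB (%rax is restored to its
         * physical address), then rebuild the cpu_user_regs frame. */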
        VMSAVE
        HVM_SAVE_ALL_NOSEGREGS

        GET_CURRENT(%rbx)
        leaq root_vmcb_pa(%rip), %rax
        movl VCPU_processor(%rbx), %ecx
        movq (%rax,%rcx,8), %rax
        VMLOAD

        STGI
        call svm_vmexit_handler
        jmp  svm_asm_do_resume

ENTRY(svm_asm_do_resume)
svm_test_all_events:
        GET_CURRENT(%rbx)
        movq %rbx, %rdi
        call hvm_do_resume
/*test_all_events:*/
        cli                             # tests must not race interrupts
/*test_softirqs:*/
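        /*
         * irq_stat is a per-cpu array; (VCPU_processor << IRQSTAT_shift)
         * is the byte offset of this cpu's entry. Any non-zero content
         * means softirqs are pending and must run before guest entry.
         */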
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift, %rax
        leaq  irq_stat(%rip), %rdx
        testl $~0, (%rdx, %rax, 1)
        jnz   svm_process_softirqs
svm_restore_all_guest:
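        /*
         * Last steps before re-entering the guest: inject any pending
         * interrupt through the VMCB (svm_intr_assist), update the ASID
         * if required (svm_asid), and load the guest's %cr2
         * (svm_load_cr2).
         */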
        call svm_intr_assist
        call svm_asid
        call svm_load_cr2
        sti
        /*
         * We are going back to the SVM-based VM. By this time, all the
         * setup in the VMCB must be complete.
         */
        jmp  svm_asm_do_launch

ALIGN
svm_process_softirqs:
        sti
        call do_softirq
        jmp  svm_test_all_events