
xen/include/asm-x86/hvm/vmx/vmx.h @ 17416:0553004fa328

x86, vmx: Enable VPID (Virtual Processor Identification)

This allows TLB entries to be retained across VM entry and VM exit: Xen now
identifies distinct address spaces through the new virtual-processor ID
(VPID) field of the VMCS, so the hardware need not flush guest translations
on every transition.

Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Xiaohui Xin <Xiaohui.xin@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Wed Apr 09 14:34:49 2008 +0100
Parents:  9b635405ef90
Children: 9153b99a7066
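
VPID in brief: each guest address space is tagged with a 16-bit
virtual-processor ID, so the hardware TLB can hold translations for several
contexts at once and Xen only invalidates the entries that actually became
stale. The two invalidation scopes are wrapped by helpers defined in the
header below (a minimal usage sketch; v is a pointer to the vcpu whose tag
should be flushed):

    vpid_sync_vcpu_all(v);  /* single-context INVVPID: flush one vCPU's tag */
    vpid_sync_all();        /* all-context INVVPID: flush all tagged entries */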

/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#ifndef __ASM_X86_HVM_VMX_VMX_H__
#define __ASM_X86_HVM_VMX_VMX_H__

#include <xen/sched.h>
#include <asm/types.h>
#include <asm/regs.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/hvm/support.h>
#include <asm/hvm/trace.h>
#include <asm/hvm/vmx/vmcs.h>

typedef union {
    struct {
        u64 r        :  1,
            w        :  1,
            x        :  1,
            emt      :  4,
            sp_avail :  1,
            avail1   :  4,
            mfn      : 45,
            rsvd     :  5,
            avail2   :  2;
    };
    u64 epte;
} ept_entry_t;

#define EPT_TABLE_ORDER     9
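
/*
 * Illustrative sketch (not part of the original changeset): composing a
 * leaf EPT entry through the ept_entry_t union above. The helper name is
 * hypothetical.
 */
static inline ept_entry_t ept_entry_example(unsigned long mfn, int emt)
{
    ept_entry_t e;
    e.epte = 0;          /* start with all fields, including rsvd, clear */
    e.r = e.w = e.x = 1; /* permit read, write and instruction fetch */
    e.emt = emt;         /* EPT memory type for this mapping */
    e.mfn = mfn;         /* machine frame backing the guest-physical page */
    return e;
}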

void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_asm_do_vmentry(void);
void vmx_intr_assist(void);
void vmx_do_resume(struct vcpu *);
void set_guest_time(struct vcpu *v, u64 gtime);
void vmx_vlapic_msr_changed(struct vcpu *v);
void vmx_realmode(struct cpu_user_regs *regs);

/*
 * Exit Reasons
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000

#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1
#define EXIT_REASON_TRIPLE_FAULT        2
#define EXIT_REASON_INIT                3
#define EXIT_REASON_SIPI                4
#define EXIT_REASON_IO_SMI              5
#define EXIT_REASON_OTHER_SMI           6
#define EXIT_REASON_PENDING_VIRT_INTR   7
#define EXIT_REASON_PENDING_VIRT_NMI    8
#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVD                13
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_RSM                 17
#define EXIT_REASON_VMCALL              18
#define EXIT_REASON_VMCLEAR             19
#define EXIT_REASON_VMLAUNCH            20
#define EXIT_REASON_VMPTRLD             21
#define EXIT_REASON_VMPTRST             22
#define EXIT_REASON_VMREAD              23
#define EXIT_REASON_VMRESUME            24
#define EXIT_REASON_VMWRITE             25
#define EXIT_REASON_VMXOFF              26
#define EXIT_REASON_VMXON               27
#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32
#define EXIT_REASON_INVALID_GUEST_STATE 33
#define EXIT_REASON_MSR_LOADING         34
#define EXIT_REASON_MWAIT_INSTRUCTION   36
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION   40
#define EXIT_REASON_MACHINE_CHECK       41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS         44
#define EXIT_REASON_EPT_VIOLATION       48
#define EXIT_REASON_EPT_MISCONFIG       49
#define EXIT_REASON_WBINVD              54

/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
#define INTR_INFO_NMI_UNBLOCKED_BY_IRET 0x1000          /* 12 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
#define INTR_INFO_RESVD_BITS_MASK       0x7ffff000

/*
 * Exit Qualifications for MOV for Control Register Access
 */
/* 3:0 - control register number (CRn) */
#define VMX_CONTROL_REG_ACCESS_NUM      0xf
/* 5:4 - access type (CR write, CR read, CLTS, LMSW) */
#define VMX_CONTROL_REG_ACCESS_TYPE     0x30
/* 11:8 - general purpose register operand */
#define VMX_CONTROL_REG_ACCESS_GPR      0xf00
#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR   (0 << 4)
#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR (1 << 4)
#define VMX_CONTROL_REG_ACCESS_TYPE_CLTS        (2 << 4)
#define VMX_CONTROL_REG_ACCESS_TYPE_LMSW        (3 << 4)
#define VMX_CONTROL_REG_ACCESS_GPR_EAX  (0 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_ECX  (1 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_EDX  (2 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_EBX  (3 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_ESP  (4 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_EBP  (5 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_ESI  (6 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_EDI  (7 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_R8   (8 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_R9   (9 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_R10  (10 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_R11  (11 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_R12  (12 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_R13  (13 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_R14  (14 << 8)
#define VMX_CONTROL_REG_ACCESS_GPR_R15  (15 << 8)
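
/*
 * Illustrative sketch (not part of the original changeset): decoding a
 * CR-access exit qualification with the masks above. The value would come
 * from reading the EXIT_QUALIFICATION VMCS field (defined in vmcs.h); the
 * function name is hypothetical.
 */
static inline int example_is_mov_to_cr0(unsigned long exit_qualification)
{
    int cr   = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
    int type = exit_qualification & VMX_CONTROL_REG_ACCESS_TYPE;

    return (cr == 0) && (type == VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR);
}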

/*
 * Access Rights
 */
#define X86_SEG_AR_SEG_TYPE     0xf        /* 3:0, segment type */
#define X86_SEG_AR_DESC_TYPE    (1u << 4)  /* 4, descriptor type */
#define X86_SEG_AR_DPL          0x60       /* 6:5, descriptor privilege level */
#define X86_SEG_AR_SEG_PRESENT  (1u << 7)  /* 7, segment present */
#define X86_SEG_AR_AVL          (1u << 12) /* 12, available for system software */
#define X86_SEG_AR_CS_LM_ACTIVE (1u << 13) /* 13, long mode active (CS only) */
#define X86_SEG_AR_DEF_OP_SIZE  (1u << 14) /* 14, default operation size */
#define X86_SEG_AR_GRANULARITY  (1u << 15) /* 15, granularity */
#define X86_SEG_AR_SEG_UNUSABLE (1u << 16) /* 16, segment unusable */

#define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
#define INVEPT_OPCODE   ".byte 0x66,0x0f,0x38,0x80\n"   /* m128,r64/32 */
#define INVVPID_OPCODE  ".byte 0x66,0x0f,0x38,0x81\n"   /* m128,r64/32 */
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"

#define MODRM_EAX_08    ".byte 0x08\n" /* ECX, [EAX] */
#define MODRM_EAX_06    ".byte 0x30\n" /* [EAX], with reg/opcode: /6 */
#define MODRM_EAX_07    ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX   ".byte 0xc1\n" /* EAX, ECX */

static inline void __vmptrld(u64 addr)
{
    asm volatile ( VMPTRLD_OPCODE
                   MODRM_EAX_06
                   /* CF==1 or ZF==1 --> crash (ud2) */
                   "ja 1f ; ud2 ; 1:\n"
                   :
                   : "a" (&addr)
                   : "memory");
}

static inline void __vmptrst(u64 addr)
{
    asm volatile ( VMPTRST_OPCODE
                   MODRM_EAX_07
                   :
                   : "a" (&addr)
                   : "memory");
}

static inline void __vmpclear(u64 addr)
{
    asm volatile ( VMCLEAR_OPCODE
                   MODRM_EAX_06
                   /* CF==1 or ZF==1 --> crash (ud2) */
                   "ja 1f ; ud2 ; 1:\n"
                   :
                   : "a" (&addr)
                   : "memory");
}

static inline unsigned long __vmread(unsigned long field)
{
    unsigned long ecx;

    asm volatile ( VMREAD_OPCODE
                   MODRM_EAX_ECX
                   /* CF==1 or ZF==1 --> crash (ud2) */
                   "ja 1f ; ud2 ; 1:\n"
                   : "=c" (ecx)
                   : "a" (field)
                   : "memory");

    return ecx;
}

static inline void __vmwrite(unsigned long field, unsigned long value)
{
    asm volatile ( VMWRITE_OPCODE
                   MODRM_EAX_ECX
                   /* CF==1 or ZF==1 --> crash (ud2) */
                   "ja 1f ; ud2 ; 1:\n"
                   :
                   : "a" (field) , "c" (value)
                   : "memory");
}

static inline unsigned long __vmread_safe(unsigned long field, int *error)
{
    unsigned long ecx;

    asm volatile ( VMREAD_OPCODE
                   MODRM_EAX_ECX
                   /* CF==1 or ZF==1 --> rc = -1 */
                   "setna %b0 ; neg %0"
                   : "=q" (*error), "=c" (ecx)
                   : "0" (0), "a" (field)
                   : "memory");

    return ecx;
}
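
/*
 * Illustrative sketch (not part of the original changeset): a checked read
 * built on __vmread_safe() above. The helper name is hypothetical.
 */
static inline unsigned long vmread_checked_example(unsigned long field)
{
    int error;
    unsigned long value = __vmread_safe(field, &error);

    /* error is -1 if VMREAD failed (CF or ZF set), 0 on success. */
    return error ? 0 : value;
}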

static inline void __vm_set_bit(unsigned long field, unsigned int bit)
{
    __vmwrite(field, __vmread(field) | (1UL << bit));
}

static inline void __vm_clear_bit(unsigned long field, unsigned int bit)
{
    __vmwrite(field, __vmread(field) & ~(1UL << bit));
}
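
/*
 * Illustrative sketch (not part of the original changeset): toggling a
 * control bit with the helpers above. CPU_BASED_VM_EXEC_CONTROL is the
 * VMCS field defined in vmcs.h; bit 7 of the primary processor-based
 * controls is "HLT exiting" per the Intel SDM. The function name is
 * hypothetical.
 */
static inline void example_set_hlt_exiting(int on)
{
    if ( on )
        __vm_set_bit(CPU_BASED_VM_EXEC_CONTROL, 7);
    else
        __vm_clear_bit(CPU_BASED_VM_EXEC_CONTROL, 7);
}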

static inline void __invept(int ext, u64 eptp, u64 gpa)
{
    struct {
        u64 eptp, gpa;
    } operand = {eptp, gpa};

    asm volatile ( INVEPT_OPCODE
                   MODRM_EAX_08
                   /* CF==1 or ZF==1 --> crash (ud2) */
                   "ja 1f ; ud2 ; 1:\n"
                   :
                   : "a" (&operand), "c" (ext)
                   : "memory" );
}

static inline void __invvpid(int ext, u16 vpid, u64 gva)
{
    struct {
        u64 vpid:16;
        u64 rsvd:48;
        u64 gva;
    } __attribute__ ((packed)) operand = {vpid, 0, gva};

    asm volatile ( INVVPID_OPCODE
                   MODRM_EAX_08
                   /* CF==1 or ZF==1 --> crash (ud2) */
                   "ja 1f ; ud2 ; 1:\n"
                   :
                   : "a" (&operand), "c" (ext)
                   : "memory" );
}

static inline void ept_sync_all(void)
{
    if ( !current->domain->arch.hvm_domain.hap_enabled )
        return;

    __invept(2, 0, 0);
}

void ept_sync_domain(struct domain *d);

static inline void vpid_sync_vcpu_all(struct vcpu *v)
{
    if ( cpu_has_vmx_vpid )
        __invvpid(1, v->arch.hvm_vmx.vpid, 0);
}

static inline void vpid_sync_all(void)
{
    if ( cpu_has_vmx_vpid )
        __invvpid(2, 0, 0);
}

static inline void __vmxoff(void)
{
    asm volatile (
        VMXOFF_OPCODE
        : : : "memory" );
}

static inline int __vmxon(u64 addr)
{
    int rc;

    asm volatile (
        "1: " VMXON_OPCODE MODRM_EAX_06 "\n"
        "   setna %b0 ; neg %0\n" /* CF==1 or ZF==1 --> rc = -1 */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "3: not %0 ; jmp 2b\n"    /* #UD --> rc = -1 */
        ".previous\n"
        ".section __ex_table,\"a\"\n"
        "   "__FIXUP_ALIGN"\n"
        "   "__FIXUP_WORD" 1b,3b\n"
        ".previous\n"
        : "=q" (rc)
        : "0" (0), "a" (&addr)
        : "memory");

    return rc;
}
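
/*
 * Illustrative sketch (not part of the original changeset): entering VMX
 * operation on a CPU. CR4.VMXE must be set before VMXON executes;
 * set_in_cr4() and X86_CR4_VMXE come from asm/processor.h. The function
 * name is hypothetical, and the real bring-up in vmcs.c does more
 * (capability checks, per-CPU VMXON region setup).
 */
static inline int example_start_vmx(u64 vmxon_region_maddr)
{
    set_in_cr4(X86_CR4_VMXE);
    return __vmxon(vmxon_region_maddr);
}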

void vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code);
void vmx_inject_extint(struct vcpu *v, int trap);
void vmx_inject_nmi(struct vcpu *v);

void ept_p2m_init(struct domain *d);

#endif /* __ASM_X86_HVM_VMX_VMX_H__ */