ia64/xen-unstable

view xen/include/asm-x86/hvm/hvm.h @ 15675:66147ca8f9c4

hvm: Define common (across VMX and SVM) set of event types.
Signed-off-by: Keir Fraser <keir@xensource.com>

author   kfraser@localhost.localdomain
date     Tue Jul 31 10:11:47 2007 +0100
parents  9174a8cfb578
children 66055f773d19

/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <asm/x86_emulate.h>
#include <public/domctl.h>
#include <public/hvm/save.h>

/*
 * Attribute for segment selector. This is a copy of bits 40:47 and 52:55 of
 * the segment descriptor. It happens to match the format of an AMD SVM VMCB.
 */
typedef union segment_attributes {
    u16 bytes;
    struct
    {
        u16 type:4;    /* 0;  Bit 40-43 */
        u16 s:   1;    /* 4;  Bit 44 */
        u16 dpl: 2;    /* 5;  Bit 45-46 */
        u16 p:   1;    /* 7;  Bit 47 */
        u16 avl: 1;    /* 8;  Bit 52 */
        u16 l:   1;    /* 9;  Bit 53 */
        u16 db:  1;    /* 10; Bit 54 */
        u16 g:   1;    /* 11; Bit 55 */
    } fields;
} __attribute__ ((packed)) segment_attributes_t;
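
/*
 * Illustrative sketch (not part of this interface): how the attribute
 * layout above can be pulled out of a raw 64-bit GDT/LDT descriptor.
 * The helper name and its raw-descriptor input are hypothetical.
 */
#if 0
static inline segment_attributes_t attr_from_desc(u64 desc)
{
    segment_attributes_t attr;
    /* Descriptor bits 40-47 -> type/s/dpl/p in the low byte. */
    attr.bytes  = (u16)((desc >> 40) & 0xff);
    /* Descriptor bits 52-55 -> avl/l/db/g in bits 8-11. */
    attr.bytes |= (u16)(((desc >> 52) & 0xf) << 8);
    return attr;
}
#endif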

/*
 * Full state of a segment register (visible and hidden portions).
 * Again, this happens to match the format of an AMD SVM VMCB.
 */
typedef struct segment_register {
    u16        sel;
    segment_attributes_t attr;
    u32        limit;
    u64        base;
} __attribute__ ((packed)) segment_register_t;
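
/*
 * Illustrative sketch: a conventional flat 32-bit ring-0 code segment
 * (GDT entry 0x00cf9b000000ffff) expressed in this format. The values
 * below are an example, not anything this header defines.
 */
#if 0
static const struct segment_register example_flat_cs = {
    .sel   = 0x0008,
    .attr  = { .bytes = 0x0c9b }, /* type=0xb, s=1, dpl=0, p=1, db=1, g=1 */
    .limit = 0xffffffff,          /* g=1: limit 0xfffff in 4kB units */
    .base  = 0,
};
#endif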

/* Interrupt acknowledgement sources. */
enum hvm_intack {
    hvm_intack_none,
    hvm_intack_pic,
    hvm_intack_lapic,
    hvm_intack_nmi
};
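
/*
 * Illustrative sketch of how a dispatcher might pick an acknowledgement
 * source: NMIs are not subject to the IF flag and so beat maskable
 * interrupts. The helper and its pending flags are hypothetical.
 */
#if 0
static enum hvm_intack next_intack(int nmi, int lapic, int pic)
{
    if ( nmi )
        return hvm_intack_nmi;
    if ( lapic )
        return hvm_intack_lapic;
    if ( pic )
        return hvm_intack_pic;
    return hvm_intack_none;
}
#endif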

/*
 * The hardware virtual machine (HVM) interface abstracts away from the
 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    char *name;

    /*
     * Initialise/destroy HVM domain/vcpu resources
     */
    int  (*domain_initialise)(struct domain *d);
    void (*domain_destroy)(struct domain *d);
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /*
     * Store and load guest state:
     * 1) load/store guest register state,
     * 2) modify guest state (e.g., set debug flags).
     */
    void (*store_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
    void (*load_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r);

    /* Save and load HVM guest CPU context for save/restore. */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    /*
     * Examine specifics of the guest state:
     * 1) determine whether paging is enabled,
     * 2) determine whether long mode is enabled,
     * 3) determine whether PAE paging is enabled,
     * 4) determine whether NX is enabled,
     * 5) determine whether interrupts are enabled,
     * 6) determine the mode the guest is running in,
     * 7) return the current guest control-register value,
     * 8) return the current guest segment descriptor base,
     * 9) return the current guest segment descriptor.
     */
    int (*paging_enabled)(struct vcpu *v);
    int (*long_mode_enabled)(struct vcpu *v);
    int (*pae_enabled)(struct vcpu *v);
    int (*nx_enabled)(struct vcpu *v);
    int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
    unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);

    /*
     * Reset the value of CR3 that Xen runs on when handling VM exits.
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform the HVM layer that a guest CR3 has changed.
     */
    void (*update_guest_cr3)(struct vcpu *v);

    /*
     * Called to ensure that all guest-specific mappings in a tagged TLB
     * are flushed; does *not* flush Xen's TLB entries, and on
     * processors without a tagged TLB it will be a no-op.
     */
    void (*flush_guest_tlbs)(void);

    /*
     * Reflect the virtual APIC's value in the guest's V_TPR register.
     */
    void (*update_vtpr)(struct vcpu *v, unsigned long value);

    /*
     * Update specifics of the guest state:
     * 1) TS bit in guest cr0,
     * 2) TSC offset in guest.
     */
    void (*stts)(struct vcpu *v);
    void (*set_tsc_offset)(struct vcpu *v, u64 offset);

    void (*inject_exception)(unsigned int trapnr, int errcode,
                             unsigned long cr2);

    void (*init_ap_context)(struct vcpu_guest_context *ctxt,
                            int vcpuid, int trampoline_vector);

    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);

    int  (*event_injection_faulted)(struct vcpu *v);

    int  (*cpu_up)(void);
    void (*cpu_down)(void);
};

extern struct hvm_function_table hvm_funcs;
extern int hvm_enabled;
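
/*
 * Illustrative sketch: each backend advertises itself by filling in a
 * table like the one below, which then backs hvm_funcs at start of day.
 * The "foo" backend and its functions are invented for exposition; the
 * real tables live in the VMX and SVM code.
 */
#if 0
static struct hvm_function_table foo_function_table = {
    .name              = "foo",
    .domain_initialise = foo_domain_initialise,
    .vcpu_initialise   = foo_vcpu_initialise,
    .paging_enabled    = foo_paging_enabled,
    /* ... remaining hooks ... */
};
#endif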

int hvm_domain_initialise(struct domain *d);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_reset(struct vcpu *vcpu);

void hvm_send_assist_req(struct vcpu *v);

static inline void
hvm_store_cpu_guest_regs(
    struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
{
    hvm_funcs.store_cpu_guest_regs(v, r, crs);
}

static inline void
hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
{
    hvm_funcs.load_cpu_guest_regs(v, r);
}

void hvm_set_guest_time(struct vcpu *v, u64 gtime);
u64 hvm_get_guest_time(struct vcpu *v);

static inline int
hvm_paging_enabled(struct vcpu *v)
{
    return hvm_funcs.paging_enabled(v);
}

#ifdef __x86_64__
static inline int
hvm_long_mode_enabled(struct vcpu *v)
{
    return hvm_funcs.long_mode_enabled(v);
}
#else
/*
 * On 32-bit Xen a guest can never be in long mode: evaluate v (to avoid
 * unused-variable warnings) and yield 0 via the comma operator.
 */
#define hvm_long_mode_enabled(v) (v,0)
#endif

static inline int
hvm_pae_enabled(struct vcpu *v)
{
    return hvm_funcs.pae_enabled(v);
}

static inline int
hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
{
    return hvm_funcs.interrupts_enabled(v, type);
}

static inline int
hvm_nx_enabled(struct vcpu *v)
{
    return hvm_funcs.nx_enabled(v);
}

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    return hvm_funcs.guest_x86_mode(v);
}

int hvm_instruction_length(unsigned long pc, int address_bytes);

static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    hvm_funcs.update_host_cr3(v);
}

static inline void
hvm_update_vtpr(struct vcpu *v, unsigned long value)
{
    hvm_funcs.update_vtpr(v, value);
}

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);

static inline void
hvm_flush_guest_tlbs(void)
{
    if ( hvm_enabled )
        hvm_funcs.flush_guest_tlbs();
}

void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page);

static inline unsigned long
hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
{
    return hvm_funcs.get_guest_ctrl_reg(v, num);
}

static inline unsigned long
hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
    return hvm_funcs.get_segment_base(v, seg);
}

static inline void
hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                         struct segment_register *reg)
{
    hvm_funcs.get_segment_register(v, seg, reg);
}

void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx);
void hvm_stts(struct vcpu *v);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);

static inline void
hvm_init_ap_context(struct vcpu_guest_context *ctxt,
                    int vcpuid, int trampoline_vector)
{
    hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
}

static inline void
hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
{
    hvm_funcs.inject_exception(trapnr, errcode, cr2);
}

int hvm_bringup_ap(int vcpuid, int trampoline_vector);

static inline int hvm_event_injection_faulted(struct vcpu *v)
{
    return hvm_funcs.event_injection_faulted(v);
}

/* These reserved bits in lower 32 remain 0 after any load of CR0. */
#define HVM_CR0_GUEST_RESERVED_BITS             \
    (~((unsigned long)                          \
       (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM |  \
        X86_CR0_TS | X86_CR0_ET | X86_CR0_NE |  \
        X86_CR0_WP | X86_CR0_AM | X86_CR0_NW |  \
        X86_CR0_CD | X86_CR0_PG)))
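
/*
 * Illustrative sketch: how a CR0-write handler might use the mask above.
 * The helper is hypothetical; a real handler would also inject #GP(0).
 */
#if 0
static int cr0_write_valid(unsigned long value)
{
    /* Any attempt to set a reserved bit is a fault. */
    return !(value & HVM_CR0_GUEST_RESERVED_BITS);
}
#endif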

/* These bits in CR4 are owned by the host. */
#define HVM_CR4_HOST_MASK (mmu_cr4_features & \
    (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))

/* These bits in CR4 cannot be set by the guest. */
#define HVM_CR4_GUEST_RESERVED_BITS                     \
    (~((unsigned long)                                  \
       (X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD |       \
        X86_CR4_DE  | X86_CR4_PSE | X86_CR4_PAE |       \
        X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
        X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT)))
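
/*
 * Illustrative sketch: the same reserved-bits test for CR4. Bits covered
 * by HVM_CR4_HOST_MASK stay under Xen's control whatever value the guest
 * believes it has loaded. The helper is hypothetical.
 */
#if 0
static int cr4_write_valid(unsigned long value)
{
    return !(value & HVM_CR4_GUEST_RESERVED_BITS);
}
#endif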

/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK (1U << TRAP_machine_check)

/*
 * x86 event types. This enumeration is valid for:
 *  Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
 *  AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
 */
#define X86_EVENTTYPE_EXT_INTR         0    /* external interrupt */
#define X86_EVENTTYPE_NMI              2    /* NMI                */
#define X86_EVENTTYPE_HW_EXCEPTION     3    /* hardware exception */
#define X86_EVENTTYPE_SW_INTERRUPT     4    /* software interrupt */
#define X86_EVENTTYPE_SW_EXCEPTION     6    /* software exception */
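
/*
 * Illustrative sketch: in both the VMX interruption-information format
 * and the SVM eventinj format the vector sits in bits 7:0, the event
 * type in bits 10:8 and a "valid" flag in bit 31. The helper below is
 * hypothetical and ignores the error-code-valid bit.
 */
#if 0
static u32 make_event_info(u8 vector, unsigned int type)
{
    return (u32)vector | (type << 8) | (1u << 31);
}
/* e.g. make_event_info(2, X86_EVENTTYPE_NMI) describes an NMI. */
#endif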

/* The cpu_up/cpu_down hooks are optional: a NULL hook means nothing to do. */
static inline int hvm_cpu_up(void)
{
    if ( hvm_funcs.cpu_up )
        return hvm_funcs.cpu_up();
    return 1;
}

static inline void hvm_cpu_down(void)
{
    if ( hvm_funcs.cpu_down )
        hvm_funcs.cpu_down();
}

#endif /* __ASM_X86_HVM_HVM_H__ */