
xen/include/asm-x86/hvm/hvm.h @ 16620:966a6d3b7408

SVM: Treat the vlapic's tpr as the master copy and sync the vtpr to it
before every vm entry. This fixes HVM save/restore/migrate, as the
vtpr value was only being synced on guest TPR writes before.

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Dec 14 11:50:24 2007 +0000 (2007-12-14)
parents 98e9485d8fcf
children d9ab9eb2bfee
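For context, the sync described in the changeset message amounts to something
like the following on the SVM VM-entry path (an illustrative sketch only, not
part of hvm.h; the vmcb layout, vcpu_vlapic(), vlapic_get_reg() and
APIC_TASKPRI are assumptions taken from Xen's SVM/vlapic code, and the
function name is hypothetical):

    /* Sketch: treat the vlapic TPR as the master copy by pushing it into the
     * vmcb's virtual-TPR field before every VM entry.  vintr.fields.tpr
     * holds bits 7:4 of the task-priority register. */
    static void svm_sync_vtpr(struct vcpu *v)          /* hypothetical name */
    {
        struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
        vmcb->vintr.fields.tpr =
            vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) >> 4;
    }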
File source:
/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <asm/current.h>
#include <asm/x86_emulate.h>
#include <public/domctl.h>
#include <public/hvm/save.h>

/* Interrupt acknowledgement sources. */
enum hvm_intsrc {
    hvm_intsrc_none,
    hvm_intsrc_pic,
    hvm_intsrc_lapic,
    hvm_intsrc_nmi
};
struct hvm_intack {
    uint8_t source; /* enum hvm_intsrc */
    uint8_t vector;
};
#define hvm_intack_none       ( (struct hvm_intack) { hvm_intsrc_none,  0 } )
#define hvm_intack_pic(vec)   ( (struct hvm_intack) { hvm_intsrc_pic,   vec } )
#define hvm_intack_lapic(vec) ( (struct hvm_intack) { hvm_intsrc_lapic, vec } )
#define hvm_intack_nmi        ( (struct hvm_intack) { hvm_intsrc_nmi,   2 } )
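
/*
 * Usage example (illustrative only, not part of this interface): an
 * interrupt-acknowledge cycle returns one of the values above, e.g.
 *
 *     struct hvm_intack intack = hvm_intack_lapic(0x30);
 *     if ( intack.source != hvm_intsrc_none )
 *         deliver(intack.vector);        // deliver() is hypothetical
 */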
enum hvm_intblk {
    hvm_intblk_none,      /* not blocked (deliverable) */
    hvm_intblk_shadow,    /* MOV-SS or STI shadow */
    hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */
    hvm_intblk_tpr,       /* LAPIC TPR too high */
    hvm_intblk_nmi_iret   /* NMI blocked until IRET */
};

/*
 * The hardware virtual machine (HVM) interface abstracts away from the
 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    char *name;

    /* Support Hardware-Assisted Paging? */
    int hap_supported;

    /*
     * Initialise/destroy HVM domain/vcpu resources
     */
    int  (*domain_initialise)(struct domain *d);
    void (*domain_destroy)(struct domain *d);
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /* save and load hvm guest cpu context for save/restore */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    /*
     * Examine specifics of the guest state:
     * 1) determine whether interrupts are enabled or not
     * 2) determine the mode the guest is running in
     * 3) return the current guest segment descriptor base
     * 4) return the current guest segment descriptor
     */
    enum hvm_intblk (*interrupt_blocked)(struct vcpu *v, struct hvm_intack);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);
    void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits.
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform HVM layer that a guest CRn or EFER has changed.
     */
    void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
    void (*update_guest_efer)(struct vcpu *v);

    /*
     * Called to ensure that all guest-specific mappings in a tagged TLB
     * are flushed; does *not* flush Xen's TLB entries, and on
     * processors without a tagged TLB it will be a no-op.
     */
    void (*flush_guest_tlbs)(void);

    /*
     * Update specifics of the guest state:
     * 1) TS bit in guest cr0
     * 2) TSC offset in guest
     */
    void (*stts)(struct vcpu *v);
    void (*set_tsc_offset)(struct vcpu *v, u64 offset);

    void (*inject_exception)(unsigned int trapnr, int errcode,
                             unsigned long cr2);

    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);

    int  (*event_pending)(struct vcpu *v);

    int  (*cpu_up)(void);
    void (*cpu_down)(void);
};
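
/*
 * Each backend supplies its own instance of this table (illustrative sketch
 * only; the real initialisers live in the VMX/SVM code and the hook names
 * below are assumptions, not declarations from this header):
 *
 *     struct hvm_function_table vmx_function_table = {
 *         .name              = "VMX",
 *         .vcpu_initialise   = vmx_vcpu_initialise,
 *         .guest_x86_mode    = vmx_guest_x86_mode,
 *         ...
 *     };
 *
 * which is then installed as the global hvm_funcs at start-of-day.
 */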

extern struct hvm_function_table hvm_funcs;
extern int hvm_enabled;

int hvm_domain_initialise(struct domain *d);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_reset(struct vcpu *vcpu);

void hvm_send_assist_req(struct vcpu *v);

void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc);
u64 hvm_get_guest_tsc(struct vcpu *v);
#define hvm_set_guest_time(vcpu, gtime) hvm_set_guest_tsc(vcpu, gtime)
#define hvm_get_guest_time(vcpu)        hvm_get_guest_tsc(vcpu)

#define hvm_paging_enabled(v) \
    (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
#define hvm_pae_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
#define hvm_nx_enabled(v) \
    (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))

#ifdef __x86_64__
#define hvm_long_mode_enabled(v) \
    ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
#else
#define hvm_long_mode_enabled(v) (v,0)
#endif

enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    ASSERT(v == current);
    return hvm_funcs.guest_x86_mode(v);
}

int hvm_instruction_fetch(unsigned long pc, int address_bytes,
                          unsigned char *buf);

static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    hvm_funcs.update_host_cr3(v);
}

static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
    hvm_funcs.update_guest_cr(v, cr);
}

static inline void hvm_update_guest_efer(struct vcpu *v)
{
    hvm_funcs.update_guest_efer(v);
}

static inline void
hvm_flush_guest_tlbs(void)
{
    if ( hvm_enabled )
        hvm_funcs.flush_guest_tlbs();
}

void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page);

static inline unsigned long
hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
    return hvm_funcs.get_segment_base(v, seg);
}

static inline void
hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                         struct segment_register *reg)
{
    hvm_funcs.get_segment_register(v, seg, reg);
}

static inline void
hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                         struct segment_register *reg)
{
    hvm_funcs.set_segment_register(v, seg, reg);
}

void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);

static inline void
hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
{
    hvm_funcs.inject_exception(trapnr, errcode, cr2);
}

int hvm_bringup_ap(int vcpuid, int trampoline_vector);

static inline int hvm_event_pending(struct vcpu *v)
{
    return hvm_funcs.event_pending(v);
}

/* These reserved bits in CR0's lower 32 bits remain 0 after any load of CR0. */
#define HVM_CR0_GUEST_RESERVED_BITS             \
    (~((unsigned long)                          \
       (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM |  \
        X86_CR0_TS | X86_CR0_ET | X86_CR0_NE |  \
        X86_CR0_WP | X86_CR0_AM | X86_CR0_NW |  \
        X86_CR0_CD | X86_CR0_PG)))

/* These bits in CR4 are owned by the host. */
#define HVM_CR4_HOST_MASK (mmu_cr4_features & \
    (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))

/* These bits in CR4 cannot be set by the guest. */
#define HVM_CR4_GUEST_RESERVED_BITS                     \
    (~((unsigned long)                                  \
       (X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD |       \
        X86_CR4_DE  | X86_CR4_PSE | X86_CR4_PAE |       \
        X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
        X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT)))
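
/*
 * Typical use (illustrative sketch of a guest CR4 write path, not code from
 * this header): reject any attempt to set a reserved bit with #GP.
 *
 *     if ( value & HVM_CR4_GUEST_RESERVED_BITS )
 *     {
 *         hvm_inject_exception(TRAP_gp_fault, 0, 0);
 *         return 0;                      // fail the CR4 load
 *     }
 */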

/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK (1U << TRAP_machine_check)

/*
 * x86 event types. This enumeration is valid for:
 *  Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
 *  AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
 */
#define X86_EVENTTYPE_EXT_INTR         0 /* external interrupt */
#define X86_EVENTTYPE_NMI              2 /* NMI                */
#define X86_EVENTTYPE_HW_EXCEPTION     3 /* hardware exception */
#define X86_EVENTTYPE_SW_INTERRUPT     4 /* software interrupt */
#define X86_EVENTTYPE_SW_EXCEPTION     6 /* software exception */

/*
 * Need to re-inject a given event? We avoid re-injecting software exceptions
 * and interrupts because the faulting/trapping instruction can simply be
 * re-executed (neither VMX nor SVM update RIP when they VMEXIT during
 * INT3/INTO/INTn).
 */
static inline int hvm_event_needs_reinjection(uint8_t type, uint8_t vector)
{
    switch ( type )
    {
    case X86_EVENTTYPE_EXT_INTR:
    case X86_EVENTTYPE_NMI:
        return 1;
    case X86_EVENTTYPE_HW_EXCEPTION:
        /*
         * SVM uses type 3 ("HW Exception") for #OF and #BP. We explicitly
         * check for these vectors, as they are really SW Exceptions. SVM has
         * not updated RIP to point after the trapping instruction (INT3/INTO).
         */
        return (vector != 3) && (vector != 4);
    default:
        /* Software exceptions/interrupts can be re-executed (e.g., INT n). */
        break;
    }
    return 0;
}
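
/*
 * Illustrative use on a VMEXIT path (a sketch, not code from this header):
 * only re-queue events that cannot simply be regenerated by re-executing
 * the interrupted instruction.
 *
 *     if ( hvm_event_needs_reinjection(event_type, event_vector) )
 *         requeue_event(v, event);       // requeue_event() is hypothetical
 */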

static inline int hvm_cpu_up(void)
{
    if ( hvm_funcs.cpu_up )
        return hvm_funcs.cpu_up();
    return 1;
}

static inline void hvm_cpu_down(void)
{
    if ( hvm_funcs.cpu_down )
        hvm_funcs.cpu_down();
}

enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
void hvm_task_switch(
    uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason,
    int32_t errcode);

enum hvm_access_type {
    hvm_access_insn_fetch, hvm_access_read, hvm_access_write
};
int hvm_virtual_to_linear_addr(
    enum x86_segment seg,
    struct segment_register *reg,
    unsigned long offset,
    unsigned int bytes,
    enum hvm_access_type access_type,
    unsigned int addr_size,
    unsigned long *linear_addr);
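
/*
 * Example (illustrative only): translate a 4-byte data read at DS:offset
 * into a linear address, honouring segmentation checks for the current
 * address size.
 *
 *     unsigned long linear;
 *     struct segment_register ds;
 *     hvm_get_segment_register(current, x86_seg_ds, &ds);
 *     if ( hvm_virtual_to_linear_addr(x86_seg_ds, &ds, offset, 4,
 *                                     hvm_access_read, addr_size, &linear) )
 *         use(linear);                   // use() is hypothetical
 */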

#endif /* __ASM_X86_HVM_HVM_H__ */