ia64/xen-unstable

view xen/include/asm-x86/hvm/hvm.h @ 19825:81edfffb3aff

Scale the guest's TSC when the target machine's TSC frequency differs
from the frequency the guest expects.

For now, each guest rdtsc instruction is trapped and emulated; this may
be optimized later. (A sketch of the scaling arithmetic follows the
changeset metadata below.)

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jun 24 11:05:22 2009 +0100 (2009-06-24)
parents 5b73fa1b9562
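
To make the scaling concrete, the fragment below is a minimal sketch of the arithmetic a trap-and-emulate path can apply: a raw host TSC reading is rescaled by the ratio of the guest's expected TSC frequency to the host's actual one. The helper name and the kHz parameters are assumptions for illustration only; within this header the related entry points are hvm_set_guest_tsc(), hvm_get_guest_tsc(), hvm_enable_rdtsc_exiting() and hvm_gtsc_need_scale().

#include <stdint.h>

/*
 * Illustrative only: rescale a host TSC value to the frequency the guest
 * expects, splitting the division so no 128-bit intermediate is needed.
 *   result ~= host_tsc * gtsc_khz / host_khz
 */
static inline uint64_t example_scale_tsc(uint64_t host_tsc,
                                         uint32_t gtsc_khz, uint32_t host_khz)
{
    uint64_t quot = host_tsc / host_khz;
    uint64_t rem  = host_tsc % host_khz;

    /* rem < host_khz < 2^32, so rem * gtsc_khz cannot overflow 64 bits. */
    return quot * gtsc_khz + (rem * gtsc_khz) / host_khz;
}

On a trapped rdtsc, the emulator would then return something like example_scale_tsc(raw_host_tsc, guest_khz, host_khz), adjusted by the vcpu's TSC offset, before resuming the guest.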
/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <asm/current.h>
#include <asm/x86_emulate.h>
#include <public/domctl.h>
#include <public/hvm/save.h>

/* Interrupt acknowledgement sources. */
enum hvm_intsrc {
    hvm_intsrc_none,
    hvm_intsrc_pic,
    hvm_intsrc_lapic,
    hvm_intsrc_nmi
};
struct hvm_intack {
    uint8_t source; /* enum hvm_intsrc */
    uint8_t vector;
};
#define hvm_intack_none       ( (struct hvm_intack) { hvm_intsrc_none,  0 } )
#define hvm_intack_pic(vec)   ( (struct hvm_intack) { hvm_intsrc_pic,   vec } )
#define hvm_intack_lapic(vec) ( (struct hvm_intack) { hvm_intsrc_lapic, vec } )
#define hvm_intack_nmi        ( (struct hvm_intack) { hvm_intsrc_nmi,   2 } )
enum hvm_intblk {
    hvm_intblk_none,      /* not blocked (deliverable) */
    hvm_intblk_shadow,    /* MOV-SS or STI shadow */
    hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */
    hvm_intblk_tpr,       /* LAPIC TPR too high */
    hvm_intblk_nmi_iret   /* NMI blocked until IRET */
};

/* These happen to be the same as the VMX interrupt shadow definitions. */
#define HVM_INTR_SHADOW_STI    0x00000001
#define HVM_INTR_SHADOW_MOV_SS 0x00000002
#define HVM_INTR_SHADOW_SMI    0x00000004
#define HVM_INTR_SHADOW_NMI    0x00000008

/*
 * The hardware virtual machine (HVM) interface abstracts away from the
 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    char *name;

    /* Support Hardware-Assisted Paging? */
    int hap_supported;

    /*
     * Initialise/destroy HVM domain/vcpu resources
     */
    int  (*domain_initialise)(struct domain *d);
    void (*domain_destroy)(struct domain *d);
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /* save and load hvm guest cpu context for save/restore */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    /* Examine specifics of the guest state. */
    unsigned int (*get_interrupt_shadow)(struct vcpu *v);
    void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
    int (*guest_x86_mode)(struct vcpu *v);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);
    void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits.
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform HVM layer that a guest CRn or EFER has changed.
     */
    void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
    void (*update_guest_efer)(struct vcpu *v);
    /*
     * Called to ensure that all guest-specific mappings in a tagged TLB
     * are flushed; does *not* flush Xen's TLB entries, and on
     * processors without a tagged TLB it will be a no-op.
     */
    void (*flush_guest_tlbs)(void);
    void (*set_tsc_offset)(struct vcpu *v, u64 offset);

    void (*inject_exception)(unsigned int trapnr, int errcode,
                             unsigned long cr2);

    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);

    int  (*event_pending)(struct vcpu *v);
    int  (*do_pmu_interrupt)(struct cpu_user_regs *regs);

    int  (*cpu_up)(void);
    void (*cpu_down)(void);

    /* Instruction intercepts: non-void return values are X86EMUL codes. */
    void (*cpuid_intercept)(
        unsigned int *eax, unsigned int *ebx,
        unsigned int *ecx, unsigned int *edx);
    void (*wbinvd_intercept)(void);
    void (*fpu_dirty_intercept)(void);
    int (*msr_read_intercept)(struct cpu_user_regs *regs);
    int (*msr_write_intercept)(struct cpu_user_regs *regs);
    void (*invlpg_intercept)(unsigned long vaddr);
    void (*set_uc_mode)(struct vcpu *v);
    void (*set_info_guest)(struct vcpu *v);
    void (*enable_rdtsc_exiting)(struct vcpu *v);
};

extern struct hvm_function_table hvm_funcs;
extern int hvm_enabled;
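
/*
 * Illustrative sketch (not part of hvm.h): the static inline wrappers below
 * dispatch through the global hvm_funcs table, so a vendor backend only has
 * to fill in the callbacks it implements and publish the table at start of
 * day. Every name in this fragment is invented for illustration; the real
 * VT-x and SVM tables live in the vendor-specific code, not in this header.
 */
static inline int example_vcpu_initialise(struct vcpu *v)
{
    /* Allocate and set up the per-vcpu hardware state (VMCS/VMCB, ...). */
    return 0;
}

static inline void example_set_tsc_offset(struct vcpu *v, u64 offset)
{
    /* Write the offset into the hardware virtualisation control block. */
}

static struct hvm_function_table example_function_table = {
    .name            = "Example",
    .hap_supported   = 0,
    .vcpu_initialise = example_vcpu_initialise,
    .set_tsc_offset  = example_set_tsc_offset,
    /* ... the remaining callbacks would be filled in likewise ... */
};
/* A start-of-day routine would then publish the table, roughly:
 *     hvm_funcs = example_function_table;  hvm_enabled = 1;        */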
int hvm_domain_initialise(struct domain *d);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_down(struct vcpu *v);
int hvm_vcpu_cacheattr_init(struct vcpu *v);
void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);

void hvm_send_assist_req(struct vcpu *v);

void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc);
u64 hvm_get_guest_tsc(struct vcpu *v);

void hvm_init_guest_time(struct domain *d);
void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
u64 hvm_get_guest_time(struct vcpu *v);

#define hvm_paging_enabled(v) \
    (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
#define hvm_wp_enabled(v) \
    (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_WP))
#define hvm_pae_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
#define hvm_nx_enabled(v) \
    (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))

#ifdef __x86_64__
#define hvm_long_mode_enabled(v) \
    ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
#else
#define hvm_long_mode_enabled(v) (v,0)
#endif

enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);
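
/*
 * Illustrative sketch (not part of hvm.h): a hypothetical interrupt-assist
 * path wraps a pending LAPIC vector in a struct hvm_intack and consults
 * hvm_interrupt_blocked() before injecting it. The pending-vector argument
 * and the function name are invented; the real callers live in the
 * vendor-specific interrupt-assist code.
 */
static inline void example_intr_assist(struct vcpu *v, int pending_vector)
{
    struct hvm_intack intack;

    if ( pending_vector < 0 )
        return;                 /* nothing pending */

    intack = hvm_intack_lapic((uint8_t)pending_vector);

    if ( hvm_interrupt_blocked(v, intack) != hvm_intblk_none )
        return;                 /* STI/MOV-SS shadow, TPR, RFLAGS.IE, ... */

    /* Vendor-specific injection (e.g. through hvm_funcs) would go here. */
}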
static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    ASSERT(v == current);
    return hvm_funcs.guest_x86_mode(v);
}

static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    hvm_funcs.update_host_cr3(v);
}

static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
    hvm_funcs.update_guest_cr(v, cr);
}

static inline void hvm_update_guest_efer(struct vcpu *v)
{
    hvm_funcs.update_guest_efer(v);
}

static inline void
hvm_flush_guest_tlbs(void)
{
    if ( hvm_enabled )
        hvm_funcs.flush_guest_tlbs();
}

void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page);

static inline void
hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                         struct segment_register *reg)
{
    hvm_funcs.get_segment_register(v, seg, reg);
}

static inline void
hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                         struct segment_register *reg)
{
    hvm_funcs.set_segment_register(v, seg, reg);
}

#define is_viridian_domain(_d) \
    (is_hvm_domain(_d) && ((_d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN]))

void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);

static inline void
hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
{
    hvm_funcs.inject_exception(trapnr, errcode, cr2);
}

static inline int hvm_event_pending(struct vcpu *v)
{
    return hvm_funcs.event_pending(v);
}

static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
{
    return hvm_funcs.do_pmu_interrupt(regs);
}
/* These reserved bits in CR0's lower 32 bits remain 0 after any load of CR0. */
#define HVM_CR0_GUEST_RESERVED_BITS             \
    (~((unsigned long)                          \
       (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM |  \
        X86_CR0_TS | X86_CR0_ET | X86_CR0_NE |  \
        X86_CR0_WP | X86_CR0_AM | X86_CR0_NW |  \
        X86_CR0_CD | X86_CR0_PG)))
/* These bits in CR4 are owned by the host. */
#define HVM_CR4_HOST_MASK (mmu_cr4_features & \
    (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))

/* These bits in CR4 cannot be set by the guest. */
#define HVM_CR4_GUEST_RESERVED_BITS                     \
    (~((unsigned long)                                  \
       (X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD |       \
        X86_CR4_DE  | X86_CR4_PSE | X86_CR4_PAE |       \
        X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
        X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT)))
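
/*
 * Illustrative sketch (not part of hvm.h): the reserved-bit masks above are
 * what a control-register write emulation would check a guest-supplied value
 * against; a set reserved bit means the write must fail (in practice by
 * injecting #GP(0)). The helper below is invented for illustration.
 */
static inline int example_cr_write_is_valid(unsigned int cr, unsigned long value)
{
    if ( (cr == 0) && (value & HVM_CR0_GUEST_RESERVED_BITS) )
        return 0;   /* caller would inject #GP(0) */
    if ( (cr == 4) && (value & HVM_CR4_GUEST_RESERVED_BITS) )
        return 0;   /* caller would inject #GP(0) */
    return 1;       /* no reserved bits set */
}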
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK (1U << TRAP_machine_check)

/*
 * x86 event types. This enumeration is valid for:
 *  Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
 *  AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
 */
#define X86_EVENTTYPE_EXT_INTR         0 /* external interrupt */
#define X86_EVENTTYPE_NMI              2 /* NMI */
#define X86_EVENTTYPE_HW_EXCEPTION     3 /* hardware exception */
#define X86_EVENTTYPE_SW_INTERRUPT     4 /* software interrupt */
#define X86_EVENTTYPE_SW_EXCEPTION     6 /* software exception */

int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);

uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);

void hvm_enable_rdtsc_exiting(struct domain *d);
int hvm_gtsc_need_scale(struct domain *d);

static inline int hvm_cpu_up(void)
{
    if ( hvm_funcs.cpu_up )
        return hvm_funcs.cpu_up();
    return 1;
}

static inline void hvm_cpu_down(void)
{
    if ( hvm_funcs.cpu_down )
        hvm_funcs.cpu_down();
}

enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
void hvm_task_switch(
    uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason,
    int32_t errcode);

enum hvm_access_type {
    hvm_access_insn_fetch,
    hvm_access_none,
    hvm_access_read,
    hvm_access_write
};
int hvm_virtual_to_linear_addr(
    enum x86_segment seg,
    struct segment_register *reg,
    unsigned long offset,
    unsigned int bytes,
    enum hvm_access_type access_type,
    unsigned int addr_size,
    unsigned long *linear_addr);
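
/*
 * Illustrative sketch (not part of hvm.h): a typical caller fetches the
 * relevant segment register and then asks for a segmentation-checked linear
 * address before touching guest memory. This assumes a non-zero return from
 * hvm_virtual_to_linear_addr() indicates success; the function name below is
 * invented for illustration.
 */
static inline int example_linearise_ds_read(struct vcpu *v, unsigned long offset,
                                            unsigned int bytes,
                                            unsigned int addr_size,
                                            unsigned long *linear)
{
    struct segment_register ds;

    hvm_get_segment_register(v, x86_seg_ds, &ds);

    if ( !hvm_virtual_to_linear_addr(x86_seg_ds, &ds, offset, bytes,
                                     hvm_access_read, addr_size, linear) )
        return 0;   /* segmentation check failed; caller would inject #GP */

    return 1;       /* *linear now holds the linear address to access */
}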
static inline void hvm_set_info_guest(struct vcpu *v)
{
    if ( hvm_funcs.set_info_guest )
        return hvm_funcs.set_info_guest(v);
}

int hvm_debug_op(struct vcpu *v, int32_t op);

#endif /* __ASM_X86_HVM_HVM_H__ */