xen/include/asm-x86/hvm/hvm.h @ 15388:50358c4b37f4 (direct-io.hg)

hvm: Support injection of virtual NMIs and clean up ExtInt handling in general.
Signed-off-by: Keir Fraser <keir@xensource.com>
author: kfraser@localhost.localdomain
date:   Wed Jun 20 11:50:16 2007 +0100
/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <asm/x86_emulate.h>
#include <public/domctl.h>
#include <public/hvm/save.h>
/*
 * Attributes for a segment selector. This is a copy of bits 40:47 & 52:55 of
 * the segment descriptor. It happens to match the format of an AMD SVM VMCB.
 */
typedef union segment_attributes {
    u16 bytes;
    struct
    {
        u16 type:4;    /* 0;  Bit 40-43 */
        u16 s:   1;    /* 4;  Bit 44 */
        u16 dpl: 2;    /* 5;  Bit 45-46 */
        u16 p:   1;    /* 7;  Bit 47 */
        u16 avl: 1;    /* 8;  Bit 52 */
        u16 l:   1;    /* 9;  Bit 53 */
        u16 db:  1;    /* 10; Bit 54 */
        u16 g:   1;    /* 11; Bit 55 */
    } fields;
} __attribute__ ((packed)) segment_attributes_t;
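
/*
 * Illustrative sketch, not part of the original header: given the layout
 * above, the attribute word can be rebuilt from a raw 64-bit descriptor by
 * collapsing bits 40:47 and 52:55 into adjacent fields. desc_to_attr() is a
 * hypothetical helper, named here only for illustration.
 */
static inline u16 desc_to_attr(u64 desc)
{
    return ((desc >> 40) & 0x00ffU) | ((desc >> 44) & 0x0f00U);
}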
/*
 * Full state of a segment register (visible and hidden portions).
 * Again, this happens to match the format of an AMD SVM VMCB.
 */
typedef struct segment_register {
    u16                  sel;
    segment_attributes_t attr;
    u32                  limit;
    u64                  base;
} __attribute__ ((packed)) segment_register_t;
/* Interrupt acknowledgement sources. */
enum hvm_intack {
    hvm_intack_none,
    hvm_intack_pic,
    hvm_intack_lapic,
    hvm_intack_nmi
};
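
/*
 * Illustrative sketch, not part of the original header: an interrupt
 * delivery path first asks which source (if any) has a pending event, then
 * checks whether the guest can currently accept it. This assumes
 * hvm_vcpu_has_pending_irq() from asm/hvm/irq.h, which this changeset makes
 * return an enum hvm_intack.
 */
#if 0
enum hvm_intack intr_source = hvm_vcpu_has_pending_irq(v);
if ( (intr_source != hvm_intack_none) &&
     hvm_interrupts_enabled(v, intr_source) )
{
    /* ... acknowledge the source and inject the event ... */
}
#endif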
/*
 * The hardware virtual machine (HVM) interface abstracts away from the
 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    char *name;

    /*
     * Disable HVM functionality
     */
    void (*disable)(void);

    /*
     * Initialise/destroy HVM domain/vcpu resources
     */
    int  (*domain_initialise)(struct domain *d);
    void (*domain_destroy)(struct domain *d);
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /*
     * Store and load guest state:
     * 1) load/store guest register state,
     * 2) modify guest state (e.g., set debug flags).
     */
    void (*store_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
    void (*load_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r);

    /* save and load hvm guest cpu context for save/restore */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int  (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    /*
     * Examine specifics of the guest state:
     * 1) determine whether paging is enabled,
     * 2) determine whether long mode is enabled,
     * 3) determine whether PAE paging is enabled,
     * 4) determine whether NX is enabled,
     * 5) determine whether interrupts are enabled or not,
     * 6) determine the mode the guest is running in,
     * 7) return the current guest control-register value,
     * 8) return the current guest segment descriptor base,
     * 9) return the current guest segment descriptor.
     */
    int (*paging_enabled)(struct vcpu *v);
    int (*long_mode_enabled)(struct vcpu *v);
    int (*pae_enabled)(struct vcpu *v);
    int (*nx_enabled)(struct vcpu *v);
    int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
    unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits.
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform HVM layer that a guest cr3 has changed.
     */
    void (*update_guest_cr3)(struct vcpu *v);

    /*
     * Called to ensure that all guest-specific mappings in a tagged TLB
     * are flushed; does *not* flush Xen's TLB entries, and on
     * processors without a tagged TLB it will be a noop.
     */
    void (*flush_guest_tlbs)(void);

    /*
     * Reflect the virtual APIC's value in the guest's V_TPR register.
     */
    void (*update_vtpr)(struct vcpu *v, unsigned long value);

    /*
     * Update specifics of the guest state:
     * 1) TS bit in guest cr0,
     * 2) TSC offset in guest.
     */
    void (*stts)(struct vcpu *v);
    void (*set_tsc_offset)(struct vcpu *v, u64 offset);

    void (*inject_exception)(unsigned int trapnr, int errcode,
                             unsigned long cr2);

    void (*init_ap_context)(struct vcpu_guest_context *ctxt,
                            int vcpuid, int trampoline_vector);

    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);

    int  (*event_injection_faulted)(struct vcpu *v);
};
extern struct hvm_function_table hvm_funcs;
extern int hvm_enabled;
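
/*
 * Illustrative sketch, not part of the original header: each backend
 * (Intel VT-x or AMD SVM) populates a table like this, and Xen then
 * dispatches every HVM operation through hvm_funcs. The vmx_* names below
 * are hypothetical placeholders for a backend's implementations.
 */
#if 0
static struct hvm_function_table vmx_function_table = {
    .name              = "VMX",
    .domain_initialise = vmx_domain_initialise,
    .domain_destroy    = vmx_domain_destroy,
    .vcpu_initialise   = vmx_vcpu_initialise,
    .vcpu_destroy      = vmx_vcpu_destroy,
    .paging_enabled    = vmx_paging_enabled,
    /* ... remaining hooks ... */
};
#endif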
int hvm_domain_initialise(struct domain *d);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_reset(struct vcpu *vcpu);

void hvm_send_assist_req(struct vcpu *v);
static inline void
hvm_store_cpu_guest_regs(
    struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
{
    hvm_funcs.store_cpu_guest_regs(v, r, crs);
}

static inline void
hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
{
    hvm_funcs.load_cpu_guest_regs(v, r);
}
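
/*
 * Illustrative usage sketch, not part of the original header: a caller
 * running in the vcpu's context can snapshot the guest registers, edit the
 * copy, and write it back. Passing NULL for crs to skip the optional
 * control-register snapshot is an assumption based on the backend
 * implementations.
 */
#if 0
struct cpu_user_regs regs;
hvm_store_cpu_guest_regs(v, &regs, NULL);
regs.eax = 0;  /* e.g. fake a return value for an emulated operation */
hvm_load_cpu_guest_regs(v, &regs);
#endif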
void hvm_set_guest_time(struct vcpu *v, u64 gtime);
u64 hvm_get_guest_time(struct vcpu *v);

static inline int
hvm_paging_enabled(struct vcpu *v)
{
    return hvm_funcs.paging_enabled(v);
}
#ifdef __x86_64__
static inline int
hvm_long_mode_enabled(struct vcpu *v)
{
    return hvm_funcs.long_mode_enabled(v);
}
#else
/*
 * A 32-bit hypervisor cannot run a long-mode guest: the comma expression
 * evaluates v (avoiding an unused-argument warning) and always yields 0.
 */
#define hvm_long_mode_enabled(v) (v,0)
#endif
static inline int
hvm_pae_enabled(struct vcpu *v)
{
    return hvm_funcs.pae_enabled(v);
}

static inline int
hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
{
    return hvm_funcs.interrupts_enabled(v, type);
}

static inline int
hvm_nx_enabled(struct vcpu *v)
{
    return hvm_funcs.nx_enabled(v);
}

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    return hvm_funcs.guest_x86_mode(v);
}
int hvm_instruction_length(unsigned long pc, int address_bytes);

static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    hvm_funcs.update_host_cr3(v);
}

static inline void
hvm_update_vtpr(struct vcpu *v, unsigned long value)
{
    hvm_funcs.update_vtpr(v, value);
}

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);

static inline void
hvm_flush_guest_tlbs(void)
{
    if ( hvm_enabled )
        hvm_funcs.flush_guest_tlbs();
}

void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page);
static inline unsigned long
hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
{
    return hvm_funcs.get_guest_ctrl_reg(v, num);
}

static inline unsigned long
hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
    return hvm_funcs.get_segment_base(v, seg);
}

static inline void
hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                         struct segment_register *reg)
{
    hvm_funcs.get_segment_register(v, seg, reg);
}
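
/*
 * Illustrative usage sketch, not part of the original header: fetch the
 * guest's hidden CS state to turn the current instruction pointer into a
 * linear address. `v` and `regs` are assumed to come from the caller;
 * x86_seg_cs is from asm/x86_emulate.h.
 */
#if 0
struct segment_register cs;
unsigned long linear_ip;
hvm_get_segment_register(v, x86_seg_cs, &cs);
linear_ip = cs.base + regs->eip;
#endif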
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx);
void hvm_stts(struct vcpu *v);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);

static inline void
hvm_init_ap_context(struct vcpu_guest_context *ctxt,
                    int vcpuid, int trampoline_vector)
{
    return hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
}
static inline void
hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
{
    hvm_funcs.inject_exception(trapnr, errcode, cr2);
}
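
/*
 * Illustrative usage sketch, not part of the original header: inject a
 * general protection fault into the guest. TRAP_gp_fault is the vector
 * number from asm/processor.h; error code 0 and a don't-care CR2 value are
 * typical for #GP.
 */
#if 0
hvm_inject_exception(TRAP_gp_fault, 0, 0);
#endif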
int hvm_bringup_ap(int vcpuid, int trampoline_vector);

static inline int hvm_event_injection_faulted(struct vcpu *v)
{
    return hvm_funcs.event_injection_faulted(v);
}

#endif /* __ASM_X86_HVM_HVM_H__ */