ia64/xen-unstable

view xen/include/asm-x86/hvm/hvm.h @ 14086:e8470a1a01af

hvm: Rename injection_pending() to event_injection_faulted().
Fix the VMX and SVM handlers to reflect the new semantics (which is
what the one caller, in the shadow fault path, actually requires).
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Feb 23 10:35:16 2007 +0000 (2007-02-23)
parents 3f7e8c763b55
children cdc765772f69
/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <asm/x86_emulate.h>
#include <public/domctl.h>
#include <public/hvm/save.h>

/*
 * Attributes for a segment selector. This is a copy of bits 40:47 & 52:55 of
 * the segment descriptor. It happens to match the format of an AMD SVM VMCB.
 */
typedef union segment_attributes {
    u16 bytes;
    struct
    {
        u16 type:4;    /* 0;  Bit 40-43 */
        u16 s:   1;    /* 4;  Bit 44 */
        u16 dpl: 2;    /* 5;  Bit 45-46 */
        u16 p:   1;    /* 7;  Bit 47 */
        u16 avl: 1;    /* 8;  Bit 52 */
        u16 l:   1;    /* 9;  Bit 53 */
        u16 db:  1;    /* 10; Bit 54 */
        u16 g:   1;    /* 11; Bit 55 */
    } fields;
} __attribute__ ((packed)) segment_attributes_t;

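/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): packing the attribute bits out of a raw 64-bit segment
 * descriptor. Bits 40-47 of the descriptor land in attr bits 0-7, and
 * bits 52-55 in attr bits 8-11, matching the bitfield layout above.
 */
static inline segment_attributes_t
example_attr_from_desc(u64 desc)
{
    segment_attributes_t attr;

    attr.bytes = (u16)(((desc >> 40) & 0xff) | (((desc >> 52) & 0xf) << 8));
    return attr;
}
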
/*
 * Full state of a segment register (visible and hidden portions).
 * Again, this happens to match the format of an AMD SVM VMCB.
 */
typedef struct segment_register {
    u16        sel;
    segment_attributes_t attr;
    u32        limit;
    u64        base;
} __attribute__ ((packed)) segment_register_t;

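/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the architecture defines a 64-bit code segment as one with
 * L=1, in which case the D/B bit must be 0.
 */
static inline int
example_seg_is_64bit_code(const struct segment_register *reg)
{
    return reg->attr.fields.l && !reg->attr.fields.db;
}
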
/*
 * The hardware virtual machine (HVM) interface abstracts away from the
 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    /*
     * Disable HVM functionality
     */
    void (*disable)(void);

    /*
     * Initialise/destroy HVM VCPU resources
     */
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /*
     * Store and load guest state:
     * 1) load/store guest register state,
     * 2) modify guest state (e.g., set debug flags).
     */
    void (*store_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
    void (*load_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r);

    /* Save and load HVM guest CPU context for save/restore. */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    /*
     * Examine specifics of the guest state:
     * 1) determine whether paging is enabled,
     * 2) determine whether long mode is enabled,
     * 3) determine whether PAE paging is enabled,
     * 4) determine the mode the guest is running in,
     * 5) return the current guest control-register value,
     * 6) return the current guest segment descriptor base.
     */
    int (*paging_enabled)(struct vcpu *v);
    int (*long_mode_enabled)(struct vcpu *v);
    int (*pae_enabled)(struct vcpu *v);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
    unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits.
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform the HVM layer that a guest CR3 has changed.
     */
    void (*update_guest_cr3)(struct vcpu *v);

    /*
     * Reflect the virtual APIC's value in the guest's V_TPR register.
     */
    void (*update_vtpr)(struct vcpu *v, unsigned long value);

    /*
     * Update specifics of the guest state:
     * 1) TS bit in guest cr0,
     * 2) TSC offset in guest.
     */
    void (*stts)(struct vcpu *v);
    void (*set_tsc_offset)(struct vcpu *v, u64 offset);

    void (*inject_exception)(unsigned int trapnr, int errcode,
                             unsigned long cr2);

    void (*init_ap_context)(struct vcpu_guest_context *ctxt,
                            int vcpuid, int trampoline_vector);

    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);

    int (*event_injection_faulted)(struct vcpu *v);
};

extern struct hvm_function_table hvm_funcs;

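/*
 * Illustrative sketch (hypothetical names; the real tables live in the
 * VMX and SVM code): a vendor module fills in a static function table
 * with its own handlers and publishes it through hvm_funcs at start of
 * day.
 */
#if 0 /* example only -- these handlers do not exist */
static struct hvm_function_table example_function_table = {
    .disable         = example_disable,
    .vcpu_initialise = example_vcpu_initialise,
    .vcpu_destroy    = example_vcpu_destroy,
    /* ... and so on for the remaining hooks ... */
};

static void example_start(void)
{
    hvm_funcs = example_function_table;
}
#endif
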
/*
 * For convenience, we use shorthands.
 */
static inline void
hvm_disable(void)
{
    if ( hvm_funcs.disable )
        hvm_funcs.disable();
}

int hvm_domain_initialise(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_reset(struct vcpu *vcpu);

void hvm_send_assist_req(struct vcpu *v);

static inline void
hvm_store_cpu_guest_regs(
    struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
{
    hvm_funcs.store_cpu_guest_regs(v, r, crs);
}

static inline void
hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
{
    hvm_funcs.load_cpu_guest_regs(v, r);
}

void hvm_set_guest_time(struct vcpu *v, u64 gtime);
u64 hvm_get_guest_time(struct vcpu *v);

static inline int
hvm_paging_enabled(struct vcpu *v)
{
    return hvm_funcs.paging_enabled(v);
}

#ifdef __x86_64__
static inline int
hvm_long_mode_enabled(struct vcpu *v)
{
    return hvm_funcs.long_mode_enabled(v);
}
#else
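/* A guest can never be in long mode on 32-bit Xen: evaluate 'v' (keeping
 * the macro expression-safe) and always yield 0. */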
#define hvm_long_mode_enabled(v) (v,0)
#endif

static inline int
hvm_pae_enabled(struct vcpu *v)
{
    return hvm_funcs.pae_enabled(v);
}

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    return hvm_funcs.guest_x86_mode(v);
}

int hvm_instruction_length(unsigned long pc, int address_bytes);

static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    hvm_funcs.update_host_cr3(v);
}

static inline void
hvm_update_vtpr(struct vcpu *v, unsigned long value)
{
    hvm_funcs.update_vtpr(v, value);
}

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);

void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page);

static inline unsigned long
hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
{
    if ( hvm_funcs.get_guest_ctrl_reg )
        return hvm_funcs.get_guest_ctrl_reg(v, num);
    return 0;   /* force to fail */
}

static inline unsigned long
hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
    return hvm_funcs.get_segment_base(v, seg);
}

static inline void
hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                         struct segment_register *reg)
{
    hvm_funcs.get_segment_register(v, seg, reg);
}

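/*
 * Illustrative usage sketch (assumed caller context, not part of the
 * original header): reading the guest's current CS state through the
 * wrappers above.
 *
 *     struct segment_register cs;
 *     hvm_get_segment_register(v, x86_seg_cs, &cs);
 *     unsigned long cs_base = hvm_get_segment_base(v, x86_seg_cs);
 */
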
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx);
void hvm_stts(struct vcpu *v);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);

static inline void
hvm_init_ap_context(struct vcpu_guest_context *ctxt,
                    int vcpuid, int trampoline_vector)
{
    hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
}

static inline void
hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
{
    hvm_funcs.inject_exception(trapnr, errcode, cr2);
}

int hvm_bringup_ap(int vcpuid, int trampoline_vector);

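/*
 * Reports whether the last event injected into the guest faulted (the
 * new semantics described in the changeset above); its one caller is
 * the shadow fault path.
 */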
static inline int hvm_event_injection_faulted(struct vcpu *v)
{
    return hvm_funcs.event_injection_faulted(v);
}

#endif /* __ASM_X86_HVM_HVM_H__ */