xen/include/asm-x86/hvm/hvm.h @ 14090:cdc765772f69 (ia64/xen-unstable)

hvm: Clean up initialisation of hvm_funcs.
Signed-off-by: Keir Fraser <keir@xensource.com>

author:   kfraser@localhost.localdomain
date:     Fri Feb 23 11:32:25 2007 +0000
parents:  e8470a1a01af
children: d39dcdb9cca3
/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <asm/x86_emulate.h>
#include <public/domctl.h>
#include <public/hvm/save.h>

/*
 * Attribute for segment selector. This is a copy of bits 40:47 & 52:55 of the
 * segment descriptor. It happens to match the format of an AMD SVM VMCB.
 */
typedef union segment_attributes {
    u16 bytes;
    struct
    {
        u16 type:4;    /* 0;  Bit 40-43 */
        u16 s:   1;    /* 4;  Bit 44 */
        u16 dpl: 2;    /* 5;  Bit 45-46 */
        u16 p:   1;    /* 7;  Bit 47 */
        u16 avl: 1;    /* 8;  Bit 52 */
        u16 l:   1;    /* 9;  Bit 53 */
        u16 db:  1;    /* 10; Bit 54 */
        u16 g:   1;    /* 11; Bit 55 */
    } fields;
} __attribute__ ((packed)) segment_attributes_t;
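
The attribute word is a straight copy of two bit ranges of the architectural
descriptor, so it can be assembled with two shifts and masks. A minimal
illustrative sketch, not part of this header (the helper name is made up):

/* Illustration: fold the attribute bits of a raw 8-byte descriptor into
 * the packed SVM-style word above. Descriptor bits 40-47 land in attr
 * bits 0-7 (type/s/dpl/p); bits 52-55 land in attr bits 8-11 (avl/l/db/g). */
static inline segment_attributes_t attr_from_descriptor(u64 desc)
{
    segment_attributes_t attr;
    attr.bytes = (u16)(((desc >> 40) & 0xffU) | (((desc >> 52) & 0xfU) << 8));
    return attr;
}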
/*
 * Full state of a segment register (visible and hidden portions).
 * Again, this happens to match the format of an AMD SVM VMCB.
 */
typedef struct segment_register {
    u16        sel;
    segment_attributes_t attr;
    u32        limit;
    u64        base;
} __attribute__ ((packed)) segment_register_t;
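
As a concrete data point, a conventional flat 32-bit code segment comes out as
attr.bytes == 0xc9b in this encoding (type=0xb, s=1, dpl=0, p=1, db=1, g=1).
An illustrative initializer; the selector value is an arbitrary example:

/* Illustration only: a flat 4GiB ring-0 code segment in this format.
 * The limit is held byte-granular here; g=1 records the 4KiB granularity
 * of the original descriptor. */
static const segment_register_t flat_code_seg = {
    .sel   = 0x0008,               /* arbitrary example selector */
    .attr  = { .bytes = 0xc9b },   /* type=0xb, s=1, dpl=0, p=1, db=1, g=1 */
    .limit = 0xffffffff,
    .base  = 0,
};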
/*
 * The hardware virtual machine (HVM) interface abstracts away from the
 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    /*
     * Disable HVM functionality
     */
    void (*disable)(void);

    /*
     * Initialise/destroy HVM VCPU resources
     */
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /*
     * Store and load guest state:
     * 1) load/store guest register state,
     * 2) modify guest state (e.g., set debug flags).
     */
    void (*store_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
    void (*load_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r);

    /* save and load hvm guest cpu context for save/restore */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    /*
     * Examine specifics of the guest state:
     * 1) determine whether paging is enabled,
     * 2) determine whether long mode is enabled,
     * 3) determine whether PAE paging is enabled,
     * 4) determine the mode the guest is running in,
     * 5) return the current guest control-register value,
     * 6) return the current guest segment descriptor base.
     */
    int (*paging_enabled)(struct vcpu *v);
    int (*long_mode_enabled)(struct vcpu *v);
    int (*pae_enabled)(struct vcpu *v);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
    unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits.
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform the HVM layer that a guest cr3 has changed.
     */
    void (*update_guest_cr3)(struct vcpu *v);

    /*
     * Reflect the virtual APIC's value in the guest's V_TPR register.
     */
    void (*update_vtpr)(struct vcpu *v, unsigned long value);

    /*
     * Update specifics of the guest state:
     * 1) TS bit in guest cr0,
     * 2) TSC offset in guest.
     */
    void (*stts)(struct vcpu *v);
    void (*set_tsc_offset)(struct vcpu *v, u64 offset);

    void (*inject_exception)(unsigned int trapnr, int errcode,
                             unsigned long cr2);

    void (*init_ap_context)(struct vcpu_guest_context *ctxt,
                            int vcpuid, int trampoline_vector);

    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);

    int (*event_injection_faulted)(struct vcpu *v);
};
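
Per the changeset description ("Clean up initialisation of hvm_funcs"), each
vendor backend fills in one of these tables and hands it to the common HVM
code; the real tables live in the respective VT-x and SVM start-of-day code.
A sketch of the shape, with hypothetical names and stub bodies:

/* Sketch with hypothetical names: a backend implements each hook and
 * collects them in a static table for the common HVM layer. */
static void myvendor_disable(void)
{
    /* tear down vendor-specific virtualization state */
}

static int myvendor_paging_enabled(struct vcpu *v)
{
    return 1; /* stub: report guest paging as enabled */
}

static struct hvm_function_table myvendor_function_table = {
    .disable        = myvendor_disable,
    .paging_enabled = myvendor_paging_enabled,
    /* ...the remaining hooks are wired up the same way... */
};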
extern struct hvm_function_table hvm_funcs;

/*
 * For convenience, we use short hands.
 */
static inline void
hvm_disable(void)
{
    hvm_funcs.disable();
}

int hvm_domain_initialise(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_reset(struct vcpu *vcpu);

void hvm_send_assist_req(struct vcpu *v);

static inline void
hvm_store_cpu_guest_regs(
    struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
{
    hvm_funcs.store_cpu_guest_regs(v, r, crs);
}

static inline void
hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
{
    hvm_funcs.load_cpu_guest_regs(v, r);
}
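
The crs argument of the store hook hands callers the guest control registers
alongside the GPRs. A hedged usage sketch; that the backend indexes crs by CR
number is an assumption about the contract, not something this header states:

/* Sketch: snapshot guest register state for a debug dump. Assumes the
 * backend fills crs[n] with CRn for the control registers it tracks. */
static void dump_guest_state(struct vcpu *v)
{
    struct cpu_user_regs regs;
    unsigned long crs[8];

    hvm_store_cpu_guest_regs(v, &regs, crs);
    /* regs and crs[0..4] can now be printed or inspected. */
}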
void hvm_set_guest_time(struct vcpu *v, u64 gtime);
u64 hvm_get_guest_time(struct vcpu *v);

static inline int
hvm_paging_enabled(struct vcpu *v)
{
    return hvm_funcs.paging_enabled(v);
}

#ifdef __x86_64__
static inline int
hvm_long_mode_enabled(struct vcpu *v)
{
    return hvm_funcs.long_mode_enabled(v);
}
#else
#define hvm_long_mode_enabled(v) (v,0)
#endif

static inline int
hvm_pae_enabled(struct vcpu *v)
{
    return hvm_funcs.pae_enabled(v);
}

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    return hvm_funcs.guest_x86_mode(v);
}

int hvm_instruction_length(unsigned long pc, int address_bytes);
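
The address_bytes parameter suggests the intended pairing with
hvm_guest_x86_mode(). The sketch below assumes that hook reports the guest's
default address size in bytes, which this header does not itself guarantee:

/* Sketch: length of the instruction at the current guest PC, assuming
 * hvm_guest_x86_mode() yields the address size hvm_instruction_length()
 * expects. */
static inline int current_insn_length(struct vcpu *v, unsigned long pc)
{
    return hvm_instruction_length(pc, hvm_guest_x86_mode(v));
}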
static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    hvm_funcs.update_host_cr3(v);
}

static inline void
hvm_update_vtpr(struct vcpu *v, unsigned long value)
{
    hvm_funcs.update_vtpr(v, value);
}

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);

void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page);

static inline unsigned long
hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
{
    return hvm_funcs.get_guest_ctrl_reg(v, num);
}

static inline unsigned long
hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
    return hvm_funcs.get_segment_base(v, seg);
}

static inline void
hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                         struct segment_register *reg)
{
    hvm_funcs.get_segment_register(v, seg, reg);
}
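
A typical consumer of the full segment state is segment:offset to
linear-address translation. A minimal sketch that ignores expand-down
segments, long-mode flattening, and access-rights checks:

/* Sketch: compute base + offset with a naive limit check. Real code
 * must also handle expand-down segments and long-mode semantics. */
static int virtual_to_linear(struct vcpu *v, enum x86_segment seg,
                             unsigned long offset, unsigned long *linear)
{
    struct segment_register sreg;

    hvm_get_segment_register(v, seg, &sreg);
    if ( offset > sreg.limit )
        return 0; /* offset outside the segment: would fault */
    *linear = sreg.base + offset;
    return 1;
}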
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx);
void hvm_stts(struct vcpu *v);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);

static inline void
hvm_init_ap_context(struct vcpu_guest_context *ctxt,
                    int vcpuid, int trampoline_vector)
{
    hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
}

static inline void
hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
{
    hvm_funcs.inject_exception(trapnr, errcode, cr2);
}
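
For instance, a fault-handling path can reflect a guest page fault by
injecting vector 14 with the hardware error code and the faulting address.
A sketch (Xen defines TRAP_page_fault as 14 elsewhere; spelled out literally
here):

/* Sketch: forward a page fault to the guest. Vector 14 is #PF; errcode
 * is the hardware-format page-fault error code, va becomes guest CR2. */
static inline void reflect_page_fault(unsigned long va, int errcode)
{
    hvm_inject_exception(14 /* TRAP_page_fault */, errcode, va);
}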
int hvm_bringup_ap(int vcpuid, int trampoline_vector);

static inline int hvm_event_injection_faulted(struct vcpu *v)
{
    return hvm_funcs.event_injection_faulted(v);
}

#endif /* __ASM_X86_HVM_HVM_H__ */