ia64/xen-unstable: xen/include/asm-x86/hvm/hvm.h @ 14181:d39dcdb9cca3

hvm: Only do hvm_disable() on HVM-enabled systems.

Original patch by Jan Beulich.
Signed-off-by: Keir Fraser <keir@xensource.com>

author   kfraser@localhost.localdomain
date     Wed Feb 28 14:44:52 2007 +0000
parents  cdc765772f69
children 2b715386b4cf

/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <asm/x86_emulate.h>
#include <public/domctl.h>
#include <public/hvm/save.h>

/*
 * Attribute for segment selector. This is a copy of bits 40:47 & 52:55 of
 * the segment descriptor. It happens to match the format of an AMD SVM VMCB.
 */
typedef union segment_attributes {
    u16 bytes;
    struct
    {
        u16 type:4;    /* 0;  Bit 40-43 */
        u16 s:   1;    /* 4;  Bit 44 */
        u16 dpl: 2;    /* 5;  Bit 45-46 */
        u16 p:   1;    /* 7;  Bit 47 */
        u16 avl: 1;    /* 8;  Bit 52 */
        u16 l:   1;    /* 9;  Bit 53 */
        u16 db:  1;    /* 10; Bit 54 */
        u16 g:   1;    /* 11; Bit 55 */
    } fields;
} __attribute__ ((packed)) segment_attributes_t;
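
/*
 * Illustrative sketch, not part of the original header: the attribute word
 * can be derived from a raw 64-bit descriptor by folding descriptor bits
 * 40-47 into bits 0-7 and bits 52-55 into bits 8-11. The helper name is
 * hypothetical.
 */
static inline u16 hvm_attr_from_desc(u64 desc)
{
    /* Bits 40-47 -> 0-7; bits 52-55 -> 8-11. */
    return (u16)(((desc >> 40) & 0x00ffULL) | ((desc >> 44) & 0x0f00ULL));
}
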
/*
 * Full state of a segment register (visible and hidden portions).
 * Again, this happens to match the format of an AMD SVM VMCB.
 */
typedef struct segment_register {
    u16        sel;
    segment_attributes_t attr;
    u32        limit;
    u64        base;
} __attribute__ ((packed)) segment_register_t;
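
/*
 * Illustrative sketch, not part of the original header: filling a
 * segment_register from a raw 32-bit code/data descriptor, reusing the
 * hypothetical hvm_attr_from_desc() helper above. The limit is expanded
 * to bytes when the granularity bit is set.
 */
static inline void hvm_segment_from_desc(u64 desc, u16 sel,
                                         struct segment_register *reg)
{
    reg->sel        = sel;
    reg->attr.bytes = hvm_attr_from_desc(desc);
    /* Limit: descriptor bits 0-15 plus bits 48-51. */
    reg->limit      = (u32)((desc & 0xffffULL) | ((desc >> 32) & 0xf0000ULL));
    if ( reg->attr.fields.g )           /* 4K-page granularity? */
        reg->limit  = (reg->limit << 12) | 0xfff;
    /* Base: descriptor bits 16-39 plus bits 56-63. */
    reg->base       = ((desc >> 16) & 0xffffffULL) |
                      (((desc >> 56) & 0xffULL) << 24);
}
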
/*
 * The hardware virtual machine (HVM) interface abstracts away from the
 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    /*
     * Disable HVM functionality
     */
    void (*disable)(void);

    /*
     * Initialise/destroy HVM VCPU resources
     */
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /*
     * Store and load guest state:
     * 1) load/store guest register state,
     * 2) modify guest state (e.g., set debug flags).
     */
    void (*store_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
    void (*load_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r);

    /* save and load hvm guest cpu context for save/restore */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    /*
     * Examine specifics of the guest state:
     * 1) determine whether paging is enabled,
     * 2) determine whether long mode is enabled,
     * 3) determine whether PAE paging is enabled,
     * 4) determine the mode the guest is running in,
     * 5) return the current guest control-register value,
     * 6) return the current guest segment descriptor base.
     */
    int (*paging_enabled)(struct vcpu *v);
    int (*long_mode_enabled)(struct vcpu *v);
    int (*pae_enabled)(struct vcpu *v);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
    unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform HVM layer that a guest cr3 has changed
     */
    void (*update_guest_cr3)(struct vcpu *v);

    /*
     * Reflect the virtual APIC's value in the guest's V_TPR register
     */
    void (*update_vtpr)(struct vcpu *v, unsigned long value);

    /*
     * Update specifics of the guest state:
     * 1) TS bit in guest cr0,
     * 2) TSC offset in guest.
     */
    void (*stts)(struct vcpu *v);
    void (*set_tsc_offset)(struct vcpu *v, u64 offset);

    void (*inject_exception)(unsigned int trapnr, int errcode,
                             unsigned long cr2);

    void (*init_ap_context)(struct vcpu_guest_context *ctxt,
                            int vcpuid, int trampoline_vector);

    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);

    int  (*event_injection_faulted)(struct vcpu *v);
};

extern struct hvm_function_table hvm_funcs;
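
/*
 * Illustrative sketch, not part of the original header: a vendor module
 * (VT-x or SVM) populates hvm_funcs with its own implementations at start
 * of day. The svm_* names below are hypothetical placeholders.
 */
#if 0 /* example only */
static struct hvm_function_table svm_function_table = {
    .disable         = svm_disable,
    .vcpu_initialise = svm_vcpu_initialise,
    .vcpu_destroy    = svm_vcpu_destroy,
    /* ... remaining hooks assigned likewise ... */
};

static int start_svm(void)
{
    hvm_funcs = svm_function_table;  /* install the vendor hooks */
    return 1;
}
#endif
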
int hvm_domain_initialise(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_reset(struct vcpu *vcpu);

void hvm_send_assist_req(struct vcpu *v);

static inline void
hvm_store_cpu_guest_regs(
    struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
{
    hvm_funcs.store_cpu_guest_regs(v, r, crs);
}

static inline void
hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
{
    hvm_funcs.load_cpu_guest_regs(v, r);
}

void hvm_set_guest_time(struct vcpu *v, u64 gtime);
u64 hvm_get_guest_time(struct vcpu *v);

static inline int
hvm_paging_enabled(struct vcpu *v)
{
    return hvm_funcs.paging_enabled(v);
}

#ifdef __x86_64__
static inline int
hvm_long_mode_enabled(struct vcpu *v)
{
    return hvm_funcs.long_mode_enabled(v);
}
#else
/* Long mode is never enabled on 32-bit builds: consume v, yield 0. */
#define hvm_long_mode_enabled(v) (v,0)
#endif

static inline int
hvm_pae_enabled(struct vcpu *v)
{
    return hvm_funcs.pae_enabled(v);
}

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    return hvm_funcs.guest_x86_mode(v);
}
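
/*
 * Illustrative sketch, not part of the original header: the predicates
 * above combine to classify the guest's paging mode. The function name
 * and the return encoding (number of paging levels) are hypothetical.
 */
static inline int hvm_guest_paging_levels(struct vcpu *v)
{
    if ( !hvm_paging_enabled(v) )
        return 0;   /* paging disabled (e.g. real mode) */
    if ( hvm_long_mode_enabled(v) )
        return 4;   /* long mode: 4-level paging */
    if ( hvm_pae_enabled(v) )
        return 3;   /* PAE: 3-level paging */
    return 2;       /* classic 2-level 32-bit paging */
}
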
int hvm_instruction_length(unsigned long pc, int address_bytes);

static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    hvm_funcs.update_host_cr3(v);
}

static inline void
hvm_update_vtpr(struct vcpu *v, unsigned long value)
{
    hvm_funcs.update_vtpr(v, value);
}

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);

void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page);

static inline unsigned long
hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
{
    return hvm_funcs.get_guest_ctrl_reg(v, num);
}

static inline unsigned long
hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
    return hvm_funcs.get_segment_base(v, seg);
}

static inline void
hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                         struct segment_register *reg)
{
    hvm_funcs.get_segment_register(v, seg, reg);
}

void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx);
void hvm_stts(struct vcpu *v);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);

static inline void
hvm_init_ap_context(struct vcpu_guest_context *ctxt,
                    int vcpuid, int trampoline_vector)
{
    hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
}

static inline void
hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
{
    hvm_funcs.inject_exception(trapnr, errcode, cr2);
}
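
/*
 * Usage example, not part of the original header: an exit handler can
 * inject #GP(0) into the guest with
 *
 *     hvm_inject_exception(TRAP_gp_fault, 0, 0);
 *
 * where TRAP_gp_fault is vector 13 from asm/processor.h; the cr2 argument
 * is only meaningful when injecting a page fault.
 */
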
int hvm_bringup_ap(int vcpuid, int trampoline_vector);

static inline int hvm_event_injection_faulted(struct vcpu *v)
{
    return hvm_funcs.event_injection_faulted(v);
}

#endif /* __ASM_X86_HVM_HVM_H__ */