ia64/xen-unstable

xen/include/asm-x86/hvm/hvm.h @ 12770:3bd721db6db5

[XEN] Fix 32-bit build.
Signed-off-by: Keir Fraser <keir@xensource.com>

author   kaf24@firebug.cl.cam.ac.uk
date     Sun Dec 03 17:44:14 2006 +0000
parents  5c82a274733e
children 6cbed96fedac
/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <asm/x86_emulate.h>
/*
 * Attributes for a segment selector. This is a copy of bits 40:47 & 52:55 of
 * the segment descriptor. It happens to match the format of an AMD SVM VMCB.
 */
typedef union segment_attributes {
    u16 bytes;
    struct
    {
        u16 type:4;    /* 0;  Bit 40-43 */
        u16 s:   1;    /* 4;  Bit 44 */
        u16 dpl: 2;    /* 5;  Bit 45-46 */
        u16 p:   1;    /* 7;  Bit 47 */
        u16 avl: 1;    /* 8;  Bit 52 */
        u16 l:   1;    /* 9;  Bit 53 */
        u16 db:  1;    /* 10; Bit 54 */
        u16 g:   1;    /* 11; Bit 55 */
    } fields;
} __attribute__ ((packed)) segment_attributes_t;
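/*
 * Illustrative sketch (not part of the original interface): filling the
 * union from a raw 8-byte GDT/LDT descriptor. Bits 40:47 land in the low
 * byte of 'bytes' (type/s/dpl/p) and bits 52:55 in bits 8-11 (avl/l/db/g),
 * matching the bitfield layout above. The helper name is hypothetical.
 */
static inline segment_attributes_t
attributes_from_descriptor(u64 desc)
{
    segment_attributes_t attr;
    attr.bytes = (u16)(((desc >> 40) & 0xff) |
                       (((desc >> 52) & 0xf) << 8));
    return attr;
}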
/*
 * Full state of a segment register (visible and hidden portions).
 * Again, this happens to match the format of an AMD SVM VMCB.
 */
typedef struct segment_register {
    u16                  sel;
    segment_attributes_t attr;
    u32                  limit;
    u64                  base;
} __attribute__ ((packed)) segment_register_t;
/*
 * The hardware virtual machine (HVM) interface abstracts away from the
 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    /*
     * Disable HVM functionality
     */
    void (*disable)(void);

    /*
     * Initialise/destroy HVM VCPU resources
     */
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /*
     * Store and load guest state:
     * 1) load/store guest register state,
     * 2) modify guest state (e.g., set debug flags).
     */
    void (*store_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
    void (*load_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r);

    /*
     * Examine specifics of the guest state:
     * 1) determine whether paging is enabled,
     * 2) determine whether long mode is enabled,
     * 3) determine whether PAE paging is enabled,
     * 4) determine the mode the guest is running in,
     * 5) return the current guest control-register value,
     * 6) return the current guest segment descriptor base.
     */
    int (*paging_enabled)(struct vcpu *v);
    int (*long_mode_enabled)(struct vcpu *v);
    int (*pae_enabled)(struct vcpu *v);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
    unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Update specifics of the guest state:
     * 1) TS bit in guest cr0,
     * 2) TSC offset in guest.
     */
    void (*stts)(struct vcpu *v);
    void (*set_tsc_offset)(struct vcpu *v, u64 offset);

    void (*inject_exception)(unsigned int trapnr, int errcode,
                             unsigned long cr2);

    void (*init_ap_context)(struct vcpu_guest_context *ctxt,
                            int vcpuid, int trampoline_vector);

    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
};
extern struct hvm_function_table hvm_funcs;
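/*
 * Sketch (not in the original source): each backend populates hvm_funcs
 * once at start of day; the svm_* names below are stand-ins for whichever
 * backend is initialising:
 *
 *     hvm_funcs.disable         = svm_disable;
 *     hvm_funcs.vcpu_initialise = svm_vcpu_initialise;
 *     hvm_funcs.paging_enabled  = svm_paging_enabled;
 *     ...
 *
 * The inline wrappers below then dispatch indirectly through the table.
 */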
/*
 * For convenience, we use these shorthands.
 */
static inline void
hvm_disable(void)
{
    if ( hvm_funcs.disable )
        hvm_funcs.disable();
}
int hvm_domain_initialise(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);

void hvm_send_assist_req(struct vcpu *v);
static inline void
hvm_store_cpu_guest_regs(
    struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
{
    hvm_funcs.store_cpu_guest_regs(v, r, crs);
}

static inline void
hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
{
    hvm_funcs.load_cpu_guest_regs(v, r);
}
static inline int
hvm_paging_enabled(struct vcpu *v)
{
    return hvm_funcs.paging_enabled(v);
}
#ifdef __x86_64__
static inline int
hvm_long_mode_enabled(struct vcpu *v)
{
    return hvm_funcs.long_mode_enabled(v);
}
#else
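/*
 * On 32-bit builds long mode can never be enabled. The comma expression
 * evaluates 'v' (avoiding unused-argument warnings) and yields 0.
 */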
#define hvm_long_mode_enabled(v) (v,0)
#endif
static inline int
hvm_pae_enabled(struct vcpu *v)
{
    return hvm_funcs.pae_enabled(v);
}

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    return hvm_funcs.guest_x86_mode(v);
}
int hvm_instruction_length(unsigned long pc, int mode);
static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    hvm_funcs.update_host_cr3(v);
}
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page);
static inline unsigned long
hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
{
    if ( hvm_funcs.get_guest_ctrl_reg )
        return hvm_funcs.get_guest_ctrl_reg(v, num);
    return 0; /* unsupported: force the caller's access to fail */
}
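/*
 * Usage sketch (illustrative only): 'num' is the control-register number,
 * so reading the guest's CR0 looks like:
 *
 *     unsigned long cr0 = hvm_get_guest_ctrl_reg(v, 0);
 */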
static inline unsigned long
hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
    return hvm_funcs.get_segment_base(v, seg);
}

static inline void
hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                         struct segment_register *reg)
{
    hvm_funcs.get_segment_register(v, seg, reg);
}
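/*
 * Usage sketch (illustrative only): reading the guest's %cs, then testing
 * the descriptor's L bit to see whether it is a 64-bit code segment.
 * x86_seg_cs comes from enum x86_segment in <asm/x86_emulate.h>.
 *
 *     struct segment_register cs;
 *     hvm_get_segment_register(v, x86_seg_cs, &cs);
 *     if ( cs.attr.fields.l )
 *         ...;  (guest is executing 64-bit code)
 */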
void hvm_stts(struct vcpu *v);
void hvm_set_guest_time(struct vcpu *v, u64 gtime);
void hvm_freeze_time(struct vcpu *v);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
static inline void
hvm_init_ap_context(struct vcpu_guest_context *ctxt,
                    int vcpuid, int trampoline_vector)
{
    hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
}
static inline void
hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
{
    hvm_funcs.inject_exception(trapnr, errcode, cr2);
}
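/*
 * Usage sketch (illustrative only): reflecting a page fault into the guest.
 * TRAP_page_fault (vector 14) is defined elsewhere in Xen's headers.
 *
 *     hvm_inject_exception(TRAP_page_fault, error_code, fault_address);
 */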
int hvm_bringup_ap(int vcpuid, int trampoline_vector);

#endif /* __ASM_X86_HVM_HVM_H__ */