ia64/xen-unstable

view xen/include/asm-x86/hvm/hvm.h @ 12226:45e34f00a78f

[HVM] Clean up VCPU initialisation in Xen. No longer
parse HVM e820 tables in Xen (add some extra HVM parameters as a
cleaner alternative). Lots of code removal.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Nov 02 15:55:51 2006 +0000 (2006-11-02)
parents e0db5a3a2ef6
children 2368e779f89f
line source
1 /*
2 * hvm.h: Hardware virtual machine assist interface definitions.
3 *
4 * Leendert van Doorn, leendert@watson.ibm.com
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
20 #ifndef __ASM_X86_HVM_HVM_H__
21 #define __ASM_X86_HVM_HVM_H__
23 /*
24 * The hardware virtual machine (HVM) interface abstracts away from the
25 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
26 * supports Intel's VT-x and AMD's SVM extensions.
27 */
/*
 * Table of hooks filled in at start of day by the active CPU
 * virtualisation backend (VT-x or SVM; see file comment above).
 * The inline wrappers below dispatch through this table.
 */
struct hvm_function_table {
    /*
     * Disable HVM functionality
     */
    void (*disable)(void);

    /*
     * Initialise/relinquish HVM guest resources
     */
    int (*vcpu_initialise)(struct vcpu *v);
    void (*relinquish_guest_resources)(struct domain *d);

    /*
     * Store and load guest state:
     * 1) load/store guest register state,
     * 2) modify guest state (e.g., set debug flags).
     */
    void (*store_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
    void (*load_cpu_guest_regs)(
        struct vcpu *v, struct cpu_user_regs *r);

    /*
     * Examine specifics of the guest state:
     * 1) determine whether the guest is in real or vm8086 mode,
     * 2) determine whether paging is enabled,
     * 3) return the current guest control-register value
     */
    int (*realmode)(struct vcpu *v);
    int (*paging_enabled)(struct vcpu *v);
    int (*long_mode_enabled)(struct vcpu *v);
    int (*pae_enabled)(struct vcpu *v);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Update specifics of the guest state:
     * 1) TS bit in guest cr0
     * 2) TSC offset in guest
     */
    void (*stts)(struct vcpu *v);
    void (*set_tsc_offset)(struct vcpu *v, u64 offset);

    /* Build the initial register context for a secondary (AP) VCPU. */
    void (*init_ap_context)(struct vcpu_guest_context *ctxt,
                            int vcpuid, int trampoline_vector);

    /* Write the per-backend hypercall stubs into @hypercall_page. */
    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
};
82 extern struct hvm_function_table hvm_funcs;
84 /*
85 * For convenience, we use short hands.
86 */
87 static inline void
88 hvm_disable(void)
89 {
90 if ( hvm_funcs.disable )
91 hvm_funcs.disable();
92 }
94 void hvm_create_event_channel(struct vcpu *v);
96 static inline int
97 hvm_vcpu_initialise(struct vcpu *v)
98 {
99 int rc;
100 if ( (rc = hvm_funcs.vcpu_initialise(v)) == 0 )
101 hvm_create_event_channel(v);
102 return rc;
103 }
105 static inline void
106 hvm_relinquish_guest_resources(struct domain *d)
107 {
108 hvm_funcs.relinquish_guest_resources(d);
109 }
111 static inline void
112 hvm_store_cpu_guest_regs(
113 struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
114 {
115 hvm_funcs.store_cpu_guest_regs(v, r, crs);
116 }
118 static inline void
119 hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
120 {
121 hvm_funcs.load_cpu_guest_regs(v, r);
122 }
124 static inline int
125 hvm_realmode(struct vcpu *v)
126 {
127 return hvm_funcs.realmode(v);
128 }
130 static inline int
131 hvm_paging_enabled(struct vcpu *v)
132 {
133 return hvm_funcs.paging_enabled(v);
134 }
136 static inline int
137 hvm_long_mode_enabled(struct vcpu *v)
138 {
139 return hvm_funcs.long_mode_enabled(v);
140 }
142 static inline int
143 hvm_pae_enabled(struct vcpu *v)
144 {
145 return hvm_funcs.pae_enabled(v);
146 }
148 static inline int
149 hvm_guest_x86_mode(struct vcpu *v)
150 {
151 return hvm_funcs.guest_x86_mode(v);
152 }
154 int hvm_instruction_length(struct cpu_user_regs *regs, int mode);
156 static inline void
157 hvm_update_host_cr3(struct vcpu *v)
158 {
159 hvm_funcs.update_host_cr3(v);
160 }
162 void hvm_hypercall_page_initialise(struct domain *d,
163 void *hypercall_page);
165 static inline unsigned long
166 hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
167 {
168 if ( hvm_funcs.get_guest_ctrl_reg )
169 return hvm_funcs.get_guest_ctrl_reg(v, num);
170 return 0; /* force to fail */
171 }
173 void hvm_stts(struct vcpu *v);
174 void hvm_set_guest_time(struct vcpu *v, u64 gtime);
175 void hvm_do_resume(struct vcpu *v);
177 static inline void
178 hvm_init_ap_context(struct vcpu_guest_context *ctxt,
179 int vcpuid, int trampoline_vector)
180 {
181 return hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
182 }
184 int hvm_bringup_ap(int vcpuid, int trampoline_vector);
186 #endif /* __ASM_X86_HVM_HVM_H__ */