ia64/xen-unstable

view xen/include/asm-x86/hvm/hvm.h @ 9016:cf1c1bb9f6d2

Bring up AP of VMX domain.
1) add INIT-SIPI-SIPI IPI sequence handling code to HVM virtual lapic
code.
2) add an new interface init_ap_context to hvm_funcs, and implement the
VMX side.
3) add a hvm generic function hvm_bringup_ap, which in turn calls
init_ap_context.

Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Feb 24 17:32:58 2006 +0100 (2006-02-24)
parents eaeb26494a39
children 796ac2386a24
line source
1 /*
2 * hvm.h: Hardware virtual machine assist interface definitions.
3 *
4 * Leendert van Doorn, leendert@watson.ibm.com
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
20 #ifndef __ASM_X86_HVM_HVM_H__
21 #define __ASM_X86_HVM_HVM_H__
23 /*
24 * The hardware virtual machine (HVM) interface abstracts away from the
25 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
26 * supports Intel's VT-x and AMD's SVM extensions.
27 */
29 struct hvm_function_table {
30 /*
31 * Disable HVM functionality
32 */
33 void (*disable)(void);
35 /*
36 * Initialize/relinguish HVM guest resources
37 */
38 int (*initialize_guest_resources)(struct vcpu *v);
39 int (*relinquish_guest_resources)(struct vcpu *v);
41 /*
42 * Store and load guest state:
43 * 1) load/store guest register state,
44 * 2) load/store segment state (x86_64 only),
45 * 3) load/store msr register state (x86_64 only),
46 * 4) store guest control register state (used for panic dumps),
47 * 5) modify guest state (e.g., set debug flags).
48 */
49 void (*store_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
50 void (*load_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
51 #ifdef __x86_64__
52 void (*save_segments)(struct vcpu *v);
53 void (*load_msrs)(void);
54 void (*restore_msrs)(struct vcpu *v);
55 #endif
56 void (*store_cpu_guest_ctrl_regs)(struct vcpu *v, unsigned long crs[8]);
57 void (*modify_guest_state)(struct vcpu *v);
59 /*
60 * Examine specifics of the guest state:
61 * 1) determine whether the guest is in real or vm8086 mode,
62 * 2) determine whether paging is enabled,
63 * 3) return the length of the instruction that caused an exit.
64 * 4) return the current guest control-register value
65 */
66 int (*realmode)(struct vcpu *v);
67 int (*paging_enabled)(struct vcpu *v);
68 int (*instruction_length)(struct vcpu *v);
69 unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
71 void (*init_ap_context)(struct vcpu_guest_context *ctxt,
72 int vcpuid, int trampoline_vector);
73 };
75 extern struct hvm_function_table hvm_funcs;
77 /*
78 * For convenience, we use short hands.
79 */
80 static inline void
81 hvm_disable(void)
82 {
83 if (hvm_funcs.disable)
84 hvm_funcs.disable();
85 }
87 static inline int
88 hvm_initialize_guest_resources(struct vcpu *v)
89 {
90 if (hvm_funcs.initialize_guest_resources)
91 return hvm_funcs.initialize_guest_resources(v);
92 return 0;
93 }
95 static inline int
96 hvm_relinquish_guest_resources(struct vcpu *v)
97 {
98 if (hvm_funcs.relinquish_guest_resources)
99 return hvm_funcs.relinquish_guest_resources(v);
100 return 0;
101 }
103 static inline void
104 hvm_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
105 {
106 hvm_funcs.store_cpu_guest_regs(v, r);
107 }
109 static inline void
110 hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
111 {
112 hvm_funcs.load_cpu_guest_regs(v, r);
113 }
115 #ifdef __x86_64__
116 static inline void
117 hvm_save_segments(struct vcpu *v)
118 {
119 if (hvm_funcs.save_segments)
120 hvm_funcs.save_segments(v);
121 }
123 static inline void
124 hvm_load_msrs(void)
125 {
126 if (hvm_funcs.load_msrs)
127 hvm_funcs.load_msrs();
128 }
130 static inline void
131 hvm_restore_msrs(struct vcpu *v)
132 {
133 if (hvm_funcs.restore_msrs)
134 hvm_funcs.restore_msrs(v);
135 }
136 #else
137 #define hvm_save_segments(v) ((void)0)
138 #define hvm_load_msrs(v) ((void)0)
139 #define hvm_restore_msrs(v) ((void)0)
140 #endif /* __x86_64__ */
142 static inline void
143 hvm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
144 {
145 hvm_funcs.store_cpu_guest_ctrl_regs(v, crs);
146 }
148 static inline void
149 hvm_modify_guest_state(struct vcpu *v)
150 {
151 hvm_funcs.modify_guest_state(v);
152 }
154 static inline int
155 hvm_realmode(struct vcpu *v)
156 {
157 return hvm_funcs.realmode(v);
158 }
160 static inline int
161 hvm_paging_enabled(struct vcpu *v)
162 {
163 return hvm_funcs.paging_enabled(v);
164 }
166 static inline int
167 hvm_instruction_length(struct vcpu *v)
168 {
169 return hvm_funcs.instruction_length(v);
170 }
172 static inline unsigned long
173 hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
174 {
175 if ( hvm_funcs.get_guest_ctrl_reg )
176 return hvm_funcs.get_guest_ctrl_reg(v, num);
177 return 0; /* force to fail */
178 }
180 static inline void
181 hvm_init_ap_context(struct vcpu_guest_context *ctxt,
182 int vcpuid, int trampoline_vector)
183 {
184 return hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
185 }
187 extern int hvm_bringup_ap(int vcpuid, int trampoline_vector);
189 #endif /* __ASM_X86_HVM_HVM_H__ */