ia64/xen-unstable

view xen/include/asm-x86/hvm/support.h @ 14181:d39dcdb9cca3

hvm: Only do hvm_disable() on HVM-enabled systems.

Original patch by Jan Beulich.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Feb 28 14:44:52 2007 +0000 (2007-02-28)
parents d2a91b73899a
children c0c5e37b20ae
line source
1 /*
2 * support.h: HVM support routines used by VT-x and SVM.
3 *
4 * Leendert van Doorn, leendert@watson.ibm.com
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #ifndef __ASM_X86_HVM_SUPPORT_H__
22 #define __ASM_X86_HVM_SUPPORT_H__
24 #include <xen/sched.h>
25 #include <asm/types.h>
26 #include <asm/regs.h>
27 #include <asm/processor.h>
/*
 * HVM_DEBUG gates the DBG_LEVEL_* flags and the HVM_DBG_LOG() macro below.
 * Bug fix: both branches previously defined HVM_DEBUG to 1, which made this
 * conditional dead and left debug logging compiled into NDEBUG (release)
 * builds.  Release builds now get an empty HVM_DBG_LOG().
 */
#ifndef NDEBUG
#define HVM_DEBUG 1
#else
#define HVM_DEBUG 0
#endif
35 static inline shared_iopage_t *get_sp(struct domain *d)
36 {
37 return (shared_iopage_t *) d->arch.hvm_domain.shared_page_va;
38 }
40 static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
41 {
42 return &get_sp(d)->vcpu_iodata[cpu];
43 }
/*
 * Access-type encodings for CR/DR intercept exit information (value held
 * in bits 5:4, hence the << 4).  XXX these are really VMX specific.
 */
#define TYPE_MOV_TO_DR      (0 << 4)
#define TYPE_MOV_FROM_DR    (1 << 4)
#define TYPE_MOV_TO_CR      (0 << 4)
#define TYPE_MOV_FROM_CR    (1 << 4)
#define TYPE_CLTS           (2 << 4)
#define TYPE_LMSW           (3 << 4)
/* Identifiers for the bitmap values tracked below. */
enum hval_bitmaps {
    EXCEPTION_BITMAP_TABLE = 0,
};
/*
 * Exception-bitmap bits, one per x86 exception vector (bit N == vector N).
 * Vectors 9 and 15 are reserved and deliberately have no flag.
 */
#define EXCEPTION_BITMAP_DE  (1 << 0)   /* Divide Error */
#define EXCEPTION_BITMAP_DB  (1 << 1)   /* Debug */
#define EXCEPTION_BITMAP_NMI (1 << 2)   /* NMI */
#define EXCEPTION_BITMAP_BP  (1 << 3)   /* Breakpoint */
#define EXCEPTION_BITMAP_OF  (1 << 4)   /* Overflow */
#define EXCEPTION_BITMAP_BR  (1 << 5)   /* BOUND Range Exceeded */
#define EXCEPTION_BITMAP_UD  (1 << 6)   /* Invalid Opcode */
#define EXCEPTION_BITMAP_NM  (1 << 7)   /* Device Not Available */
#define EXCEPTION_BITMAP_DF  (1 << 8)   /* Double Fault */
/* Vector 9 reserved */
#define EXCEPTION_BITMAP_TS  (1 << 10)  /* Invalid TSS */
#define EXCEPTION_BITMAP_NP  (1 << 11)  /* Segment Not Present */
#define EXCEPTION_BITMAP_SS  (1 << 12)  /* Stack-Segment Fault */
#define EXCEPTION_BITMAP_GP  (1 << 13)  /* General Protection */
#define EXCEPTION_BITMAP_PG  (1 << 14)  /* Page Fault */
/* Vector 15 reserved */
#define EXCEPTION_BITMAP_MF  (1 << 16)  /* x87 FPU Floating-Point Error (Math Fault) */
#define EXCEPTION_BITMAP_AC  (1 << 17)  /* Alignment Check */
#define EXCEPTION_BITMAP_MC  (1 << 18)  /* Machine Check */
#define EXCEPTION_BITMAP_XF  (1 << 19)  /* SIMD Floating-Point Exception */

/* Pending Debug exceptions */
#define PENDING_DEBUG_EXC_BP (1 << 12)  /* break point */
#define PENDING_DEBUG_EXC_BS (1 << 14)  /* Single step */
/*
 * Default set of guest exceptions to intercept: always #PF and #BP; a
 * XEN_DEBUGGER build additionally traps #DB and #GP.
 */
#ifdef XEN_DEBUGGER
#define MONITOR_DEFAULT_EXCEPTION_BITMAP \
    ( EXCEPTION_BITMAP_PG |              \
      EXCEPTION_BITMAP_DB |              \
      EXCEPTION_BITMAP_BP |              \
      EXCEPTION_BITMAP_GP )
#else
#define MONITOR_DEFAULT_EXCEPTION_BITMAP \
    ( EXCEPTION_BITMAP_PG |              \
      EXCEPTION_BITMAP_BP )
#endif

/* Error-code argument meaning "event delivers no error code". */
#define VMX_DELIVER_NO_ERROR_CODE -1
#if HVM_DEBUG
/*
 * Per-subsystem debug categories; OR the relevant bits into
 * opt_hvm_debug_level to enable the corresponding HVM_DBG_LOG() messages.
 */
#define DBG_LEVEL_0                 (1 << 0)
#define DBG_LEVEL_1                 (1 << 1)
#define DBG_LEVEL_2                 (1 << 2)
#define DBG_LEVEL_3                 (1 << 3)
#define DBG_LEVEL_IO                (1 << 4)
#define DBG_LEVEL_VMMU              (1 << 5)
#define DBG_LEVEL_VLAPIC            (1 << 6)
#define DBG_LEVEL_VLAPIC_TIMER      (1 << 7)
#define DBG_LEVEL_VLAPIC_INTERRUPT  (1 << 8)
#define DBG_LEVEL_IOAPIC            (1 << 9)

extern unsigned int opt_hvm_debug_level;

/*
 * Log a message tagged with the current domain/vcpu and calling function,
 * but only when one of the message's category bits is enabled.
 */
#define HVM_DBG_LOG(level, _f, _a...)                                         \
    do {                                                                      \
        if ( unlikely((level) & opt_hvm_debug_level) )                        \
            printk("[HVM:%d.%d] <%s> " _f "\n",                               \
                   current->domain->domain_id, current->vcpu_id, __func__,    \
                   ## _a);                                                    \
    } while (0)
#else
/* Debug logging compiled out. */
#define HVM_DBG_LOG(level, _f, _a...)
#endif
/*
 * Save/restore support.
 *
 * Marshalling and unmarshalling operate on a flat byte buffer with an
 * explicit size and a cursor that advances as entries are read/written.
 */
typedef struct hvm_domain_context {
    uint32_t cur;    /* current read/write offset into data[] */
    uint32_t size;   /* total bytes available at data */
    uint8_t *data;   /* marshalling buffer */
} hvm_domain_context_t;
130 /* Marshalling an entry: check space and fill in the header */
131 static inline int _hvm_init_entry(struct hvm_domain_context *h,
132 uint16_t tc, uint16_t inst, uint32_t len)
133 {
134 struct hvm_save_descriptor *d
135 = (struct hvm_save_descriptor *)&h->data[h->cur];
136 if ( h->size - h->cur < len + sizeof (*d) )
137 {
138 gdprintk(XENLOG_WARNING,
139 "HVM save: no room for %"PRIu32" + %u bytes "
140 "for typecode %"PRIu16"\n",
141 len, (unsigned) sizeof (*d), tc);
142 return -1;
143 }
144 d->typecode = tc;
145 d->instance = inst;
146 d->length = len;
147 h->cur += sizeof (*d);
148 return 0;
149 }
/*
 * Marshalling: copy the payload into the buffer in a type-safe way and
 * advance the cursor.  Space must already have been checked by
 * _hvm_init_entry().
 */
#define _hvm_write_entry(_x, _h, _src) do {                      \
    *(HVM_SAVE_TYPE(_x) *)(&(_h)->data[(_h)->cur]) = *(_src);    \
    (_h)->cur += HVM_SAVE_LENGTH(_x);                            \
} while (0)
/*
 * Marshalling: write a descriptor followed by the typed payload.
 * Evaluates to zero on success, non-zero if the buffer is full.
 */
#define hvm_save_entry(_x, _inst, _h, _src) ({                  \
    int _rc = _hvm_init_entry((_h), HVM_SAVE_CODE(_x),          \
                              (_inst), HVM_SAVE_LENGTH(_x));    \
    if ( _rc == 0 )                                             \
        _hvm_write_entry(_x, (_h), (_src));                     \
    _rc; })
166 /* Unmarshalling: test an entry's size and typecode and record the instance */
167 static inline int _hvm_check_entry(struct hvm_domain_context *h,
168 uint16_t type, uint32_t len)
169 {
170 struct hvm_save_descriptor *d
171 = (struct hvm_save_descriptor *)&h->data[h->cur];
172 if ( len + sizeof (*d) > h->size - h->cur)
173 {
174 gdprintk(XENLOG_WARNING,
175 "HVM restore: not enough data left to read %u bytes "
176 "for type %u\n", len, type);
177 return -1;
178 }
179 if ( type != d->typecode || len != d->length )
180 {
181 gdprintk(XENLOG_WARNING,
182 "HVM restore mismatch: expected type %u length %u, "
183 "saw type %u length %u\n", type, len, d->typecode, d->length);
184 return -1;
185 }
186 h->cur += sizeof (*d);
187 return 0;
188 }
/*
 * Unmarshalling: copy the payload out of the buffer in a type-safe way
 * and advance the cursor.  Size/type must already have been validated by
 * _hvm_check_entry().
 */
#define _hvm_read_entry(_x, _h, _dst) do {                       \
    *(_dst) = *(HVM_SAVE_TYPE(_x) *) (&(_h)->data[(_h)->cur]);   \
    (_h)->cur += HVM_SAVE_LENGTH(_x);                            \
} while (0)
/*
 * Unmarshalling: validate the descriptor, then copy the payload.
 * Evaluates to zero on success, non-zero on mismatch or truncation.
 */
#define hvm_load_entry(_x, _h, _dst) ({                                    \
    int _rc = _hvm_check_entry((_h), HVM_SAVE_CODE(_x),                    \
                               HVM_SAVE_LENGTH(_x));                       \
    if ( _rc == 0 )                                                        \
        _hvm_read_entry(_x, (_h), (_dst));                                 \
    _rc; })
204 /* Unmarshalling: what is the instance ID of the next entry? */
205 static inline uint16_t hvm_load_instance(struct hvm_domain_context *h)
206 {
207 struct hvm_save_descriptor *d
208 = (struct hvm_save_descriptor *)&h->data[h->cur];
209 return d->instance;
210 }
212 /* Handler types for different types of save-file entry.
213 * The save handler may save multiple instances of a type into the buffer;
214 * the load handler will be called once for each instance found when
215 * restoring. Both return non-zero on error. */
216 typedef int (*hvm_save_handler) (struct domain *d,
217 hvm_domain_context_t *h);
218 typedef int (*hvm_load_handler) (struct domain *d,
219 hvm_domain_context_t *h);
221 /* Init-time function to declare a pair of handlers for a type,
222 * and the maximum buffer space needed to save this type of state */
223 void hvm_register_savevm(uint16_t typecode,
224 const char *name,
225 hvm_save_handler save_state,
226 hvm_load_handler load_state,
227 size_t size, int kind);
229 /* The space needed for saving can be per-domain or per-vcpu: */
230 #define HVMSR_PER_DOM 0
231 #define HVMSR_PER_VCPU 1
/*
 * Syntactic sugar around hvm_register_savevm(): specify the max number of
 * saves (_num) and this calculates the buffer size needed (payload plus a
 * descriptor per instance), registering the handlers via an initcall.
 */
#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load, _num, _k)             \
static int __hvm_register_##_x##_save_and_restore(void)                   \
{                                                                         \
    hvm_register_savevm(HVM_SAVE_CODE(_x),                                \
                        #_x,                                              \
                        &_save,                                           \
                        &_load,                                           \
                        (_num) * (HVM_SAVE_LENGTH(_x)                     \
                                  + sizeof (struct hvm_save_descriptor)), \
                        _k);                                              \
    return 0;                                                             \
}                                                                         \
__initcall(__hvm_register_##_x##_save_and_restore);
250 /* Entry points for saving and restoring HVM domain state */
251 size_t hvm_save_size(struct domain *d);
252 int hvm_save(struct domain *d, hvm_domain_context_t *h);
253 int hvm_load(struct domain *d, hvm_domain_context_t *h);
255 /* End of save/restore */
257 extern char hvm_io_bitmap[];
258 extern int hvm_enabled;
260 void hvm_enable(struct hvm_function_table *);
261 void hvm_disable(void);
263 int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
264 int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
265 int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
266 int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
268 void hvm_print_line(struct vcpu *v, const char c);
269 void hlt_timer_fn(void *data);
271 void hvm_do_hypercall(struct cpu_user_regs *pregs);
273 void hvm_hlt(unsigned long rflags);
274 void hvm_triple_fault(void);
276 #endif /* __ASM_X86_HVM_SUPPORT_H__ */