ia64/xen-unstable

view xen/include/asm-x86/hvm/support.h @ 14090:cdc765772f69

hvm: Clean up initialisation of hvm_funcs.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Feb 23 11:32:25 2007 +0000 (2007-02-23)
parents 4d7ee9f4336a
children d2a91b73899a
line source
1 /*
2 * support.h: HVM support routines used by VT-x and SVM.
3 *
4 * Leendert van Doorn, leendert@watson.ibm.com
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #ifndef __ASM_X86_HVM_SUPPORT_H__
22 #define __ASM_X86_HVM_SUPPORT_H__
24 #include <xen/sched.h>
25 #include <asm/types.h>
26 #include <asm/regs.h>
27 #include <asm/processor.h>
/*
 * HVM_DEBUG gates the HVM_DBG_LOG() tracing machinery below: debug
 * builds (NDEBUG undefined) compile the log statements in; release
 * builds compile them out.
 * NOTE(review): the original defined HVM_DEBUG as 1 on *both* sides of
 * the #ifndef, making the conditional dead and leaving debug logging
 * enabled even in NDEBUG builds.  The #else branch is corrected to 0
 * here, which is the evident intent of the conditional.
 */
#ifndef NDEBUG
#define HVM_DEBUG 1
#else
#define HVM_DEBUG 0
#endif
/*
 * Return the domain's shared I/O request page.  The page's virtual
 * address is recorded in d->arch.hvm_domain.shared_page_va; presumably
 * only valid once HVM setup has established that mapping -- callers
 * must ensure this.
 */
static inline shared_iopage_t *get_sp(struct domain *d)
{
    return (shared_iopage_t *) d->arch.hvm_domain.shared_page_va;
}
40 static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
41 {
42 return &get_sp(d)->vcpu_iodata[cpu];
43 }
/* XXX these are really VMX specific */
/*
 * Access-type encodings from the VMX exit qualification for CR-access
 * and MOV-DR vmexits; the type field is stored shifted up by 4 bits,
 * hence the << 4 in each value.
 */
#define TYPE_MOV_TO_DR (0 << 4)
#define TYPE_MOV_FROM_DR (1 << 4)
#define TYPE_MOV_TO_CR (0 << 4)
#define TYPE_MOV_FROM_CR (1 << 4)
#define TYPE_CLTS (2 << 4)
#define TYPE_LMSW (3 << 4)

/* Identifiers for per-VMCS bitmaps (currently only the exception bitmap). */
enum hval_bitmaps {
    EXCEPTION_BITMAP_TABLE=0,
};
/*
 * One bit per x86 exception vector (bit N intercepts vector N), matching
 * the layout of the VMX exception-bitmap VMCS field.  Vectors 9 and 15
 * are reserved, hence the gaps at (1 << 9) and (1 << 15).
 */
#define EXCEPTION_BITMAP_DE (1 << 0) /* Divide Error */
#define EXCEPTION_BITMAP_DB (1 << 1) /* Debug */
#define EXCEPTION_BITMAP_NMI (1 << 2) /* NMI */
#define EXCEPTION_BITMAP_BP (1 << 3) /* Breakpoint */
#define EXCEPTION_BITMAP_OF (1 << 4) /* Overflow */
#define EXCEPTION_BITMAP_BR (1 << 5) /* BOUND Range Exceeded */
#define EXCEPTION_BITMAP_UD (1 << 6) /* Invalid Opcode */
#define EXCEPTION_BITMAP_NM (1 << 7) /* Device Not Available */
#define EXCEPTION_BITMAP_DF (1 << 8) /* Double Fault */
/* reserved */
#define EXCEPTION_BITMAP_TS (1 << 10) /* Invalid TSS */
#define EXCEPTION_BITMAP_NP (1 << 11) /* Segment Not Present */
#define EXCEPTION_BITMAP_SS (1 << 12) /* Stack-Segment Fault */
#define EXCEPTION_BITMAP_GP (1 << 13) /* General Protection */
#define EXCEPTION_BITMAP_PG (1 << 14) /* Page Fault */
#define EXCEPTION_BITMAP_MF (1 << 16) /* x87 FPU Floating-Point Error (Math Fault) */
#define EXCEPTION_BITMAP_AC (1 << 17) /* Alignment Check */
#define EXCEPTION_BITMAP_MC (1 << 18) /* Machine Check */
#define EXCEPTION_BITMAP_XF (1 << 19) /* SIMD Floating-Point Exception */

/* Pending Debug exceptions */
/* Bit positions match the VMX pending-debug-exceptions VMCS field. */
#define PENDING_DEBUG_EXC_BP (1 << 12) /* break point */
#define PENDING_DEBUG_EXC_BS (1 << 14) /* Single step */
/*
 * Default set of guest exceptions to intercept.  All builds trap #PF
 * and #BP; builds with XEN_DEBUGGER additionally trap #DB and #GP so
 * the in-hypervisor debugger can field them.
 */
#ifdef XEN_DEBUGGER
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_DB |                     \
      EXCEPTION_BITMAP_BP |                     \
      EXCEPTION_BITMAP_GP )
#else
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_BP )
#endif

/* Sentinel: the injected event carries no hardware error code. */
#define VMX_DELIVER_NO_ERROR_CODE -1
#if HVM_DEBUG
/*
 * Message categories for HVM_DBG_LOG().  Each DBG_LEVEL_* is one bit
 * in the mask opt_hvm_debug_level; a message is printed only when its
 * category bit is set in that mask.
 */
#define DBG_LEVEL_0 (1 << 0)
#define DBG_LEVEL_1 (1 << 1)
#define DBG_LEVEL_2 (1 << 2)
#define DBG_LEVEL_3 (1 << 3)
#define DBG_LEVEL_IO (1 << 4)
#define DBG_LEVEL_VMMU (1 << 5)
#define DBG_LEVEL_VLAPIC (1 << 6)
#define DBG_LEVEL_VLAPIC_TIMER (1 << 7)
#define DBG_LEVEL_VLAPIC_INTERRUPT (1 << 8)
#define DBG_LEVEL_IOAPIC (1 << 9)

/* Runtime mask of enabled DBG_LEVEL_* categories (defined elsewhere). */
extern unsigned int opt_hvm_debug_level;

/*
 * Conditionally printk() a debug message tagged with the current
 * domain id, vcpu id and calling function.  @level is a DBG_LEVEL_*
 * mask; _f/_a are a printf-style format string and its arguments.
 */
#define HVM_DBG_LOG(level, _f, _a...)                                         \
    do {                                                                      \
        if ( unlikely((level) & opt_hvm_debug_level) )                        \
            printk("[HVM:%d.%d] <%s> " _f "\n",                               \
                   current->domain->domain_id, current->vcpu_id, __func__,    \
                   ## _a);                                                    \
    } while (0)
#else
/* Release build: debug logging expands to nothing. */
#define HVM_DBG_LOG(level, _f, _a...)
#endif

/* Record @value in the current vcpu's vmexit trace slot @index. */
#define TRACE_VMEXIT(index, value)                                  \
    current->arch.hvm_vcpu.hvm_trace_values[index] = (value)
/*
 * Save/restore support
 */

/* Marshalling and unmarshalling uses a buffer with size and cursor. */
typedef struct hvm_domain_context {
    uint32_t cur;   /* read/write cursor, in bytes from start of data */
    uint32_t size;  /* total capacity of the buffer, in bytes */
    uint8_t *data;  /* the marshalling buffer itself */
} hvm_domain_context_t;
133 /* Marshalling an entry: check space and fill in the header */
134 static inline int _hvm_init_entry(struct hvm_domain_context *h,
135 uint16_t tc, uint16_t inst, uint32_t len)
136 {
137 struct hvm_save_descriptor *d
138 = (struct hvm_save_descriptor *)&h->data[h->cur];
139 if ( h->size - h->cur < len + sizeof (*d) )
140 {
141 gdprintk(XENLOG_WARNING,
142 "HVM save: no room for %"PRIu32" + %u bytes "
143 "for typecode %"PRIu16"\n",
144 len, (unsigned) sizeof (*d), tc);
145 return -1;
146 }
147 d->typecode = tc;
148 d->instance = inst;
149 d->length = len;
150 h->cur += sizeof (*d);
151 return 0;
152 }
/* Marshalling: copy the contents in a type-safe way */
/*
 * Writes one HVM_SAVE_TYPE(_x) object from *_src at the cursor and
 * advances the cursor.  Space must already have been reserved via
 * _hvm_init_entry().  NB: _h is evaluated more than once.
 */
#define _hvm_write_entry(_x, _h, _src) do {                     \
    *(HVM_SAVE_TYPE(_x) *)(&(_h)->data[(_h)->cur]) = *(_src);   \
    (_h)->cur += HVM_SAVE_LENGTH(_x);                           \
} while (0)

/* Marshalling: init and copy; evaluates to zero on success */
/* (Uses a GCC statement expression to yield _hvm_init_entry's result.) */
#define hvm_save_entry(_x, _inst, _h, _src) ({          \
    int r;                                              \
    r = _hvm_init_entry((_h), HVM_SAVE_CODE(_x),        \
                        (_inst), HVM_SAVE_LENGTH(_x));  \
    if ( r == 0 )                                       \
        _hvm_write_entry(_x, (_h), (_src));             \
    r; })
169 /* Unmarshalling: test an entry's size and typecode and record the instance */
170 static inline int _hvm_check_entry(struct hvm_domain_context *h,
171 uint16_t type, uint32_t len)
172 {
173 struct hvm_save_descriptor *d
174 = (struct hvm_save_descriptor *)&h->data[h->cur];
175 if ( len + sizeof (*d) > h->size - h->cur)
176 {
177 gdprintk(XENLOG_WARNING,
178 "HVM restore: not enough data left to read %u bytes "
179 "for type %u\n", len, type);
180 return -1;
181 }
182 if ( type != d->typecode || len != d->length )
183 {
184 gdprintk(XENLOG_WARNING,
185 "HVM restore mismatch: expected type %u length %u, "
186 "saw type %u length %u\n", type, len, d->typecode, d->length);
187 return -1;
188 }
189 h->cur += sizeof (*d);
190 return 0;
191 }
/* Unmarshalling: copy the contents in a type-safe way */
/*
 * Reads one HVM_SAVE_TYPE(_x) object from the cursor into *_dst and
 * advances the cursor.  The header must already have been accepted by
 * _hvm_check_entry().  NB: _h is evaluated more than once.
 */
#define _hvm_read_entry(_x, _h, _dst) do {                      \
    *(_dst) = *(HVM_SAVE_TYPE(_x) *) (&(_h)->data[(_h)->cur]);  \
    (_h)->cur += HVM_SAVE_LENGTH(_x);                           \
} while (0)

/* Unmarshalling: check, then copy. Evaluates to zero on success. */
/* (Uses a GCC statement expression to yield _hvm_check_entry's result.) */
#define hvm_load_entry(_x, _h, _dst) ({                                 \
    int r;                                                              \
    r = _hvm_check_entry((_h), HVM_SAVE_CODE(_x), HVM_SAVE_LENGTH(_x)); \
    if ( r == 0 )                                                       \
        _hvm_read_entry(_x, (_h), (_dst));                              \
    r; })
207 /* Unmarshalling: what is the instance ID of the next entry? */
208 static inline uint16_t hvm_load_instance(struct hvm_domain_context *h)
209 {
210 struct hvm_save_descriptor *d
211 = (struct hvm_save_descriptor *)&h->data[h->cur];
212 return d->instance;
213 }
/* Handler types for different types of save-file entry.
 * The save handler may save multiple instances of a type into the buffer;
 * the load handler will be called once for each instance found when
 * restoring. Both return non-zero on error. */
typedef int (*hvm_save_handler) (struct domain *d,
                                 hvm_domain_context_t *h);
typedef int (*hvm_load_handler) (struct domain *d,
                                 hvm_domain_context_t *h);

/* Init-time function to declare a pair of handlers for a type,
 * and the maximum buffer space needed to save this type of state */
/* @typecode: HVM_SAVE_CODE of the type; @name: human-readable tag;
 * @size: worst-case marshalled size; @kind: HVMSR_PER_DOM/PER_VCPU. */
void hvm_register_savevm(uint16_t typecode,
                         const char *name,
                         hvm_save_handler save_state,
                         hvm_load_handler load_state,
                         size_t size, int kind);

/* The space needed for saving can be per-domain or per-vcpu: */
#define HVMSR_PER_DOM 0   /* one instance of the state for the domain */
#define HVMSR_PER_VCPU 1  /* one instance per vcpu */
/* Syntactic sugar around that function: specify the max number of
 * saves, and this calculates the size of buffer needed */
/*
 * Expands to a static registration function hooked up via __initcall,
 * so each user of this macro has its save/load handlers registered
 * automatically at boot.  The buffer estimate allows for _num records,
 * each a descriptor header plus the type's payload.
 */
#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load, _num, _k)             \
static int __hvm_register_##_x##_save_and_restore(void)                   \
{                                                                         \
    hvm_register_savevm(HVM_SAVE_CODE(_x),                                \
                        #_x,                                              \
                        &_save,                                           \
                        &_load,                                           \
                        (_num) * (HVM_SAVE_LENGTH(_x)                     \
                                  + sizeof (struct hvm_save_descriptor)), \
                        _k);                                              \
    return 0;                                                             \
}                                                                         \
__initcall(__hvm_register_##_x##_save_and_restore);
/* Entry points for saving and restoring HVM domain state */
size_t hvm_save_size(struct domain *d);  /* bytes needed to save @d */
int hvm_save(struct domain *d, hvm_domain_context_t *h);
int hvm_load(struct domain *d, hvm_domain_context_t *h);

/* End of save/restore */

extern char hvm_io_bitmap[];  /* I/O intercept bitmap -- see definition site */
extern int hvm_enabled;       /* presumably set once hvm_enable() runs -- verify */

/* Install the implementation's (VT-x or SVM) hvm_function_table. */
void hvm_enable(struct hvm_function_table *);

/*
 * Copy between hypervisor buffers and guest memory, addressed either
 * physically (paddr) or virtually (vaddr).  Return-value convention is
 * not visible here -- see the definitions.
 */
int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);

/* Emit one guest-supplied character @c on behalf of vcpu @v. */
void hvm_print_line(struct vcpu *v, const char c);
void hlt_timer_fn(void *data);  /* timer callback used with hvm_hlt() paths */

/* Dispatch a hypercall issued by the current HVM guest. */
void hvm_do_hypercall(struct cpu_user_regs *pregs);

void hvm_hlt(unsigned long rflags);  /* emulate guest HLT */
void hvm_triple_fault(void);         /* handle a guest triple fault */

#endif /* __ASM_X86_HVM_SUPPORT_H__ */