ia64/xen-unstable

view xen/include/asm-x86/hvm/support.h @ 14635:5c52e5ca8459

hvm: Clean up handling of exception intercepts.
Only intercept #DB/#BP if a debugger is attached.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Wed Mar 28 18:47:17 2007 +0100 (2007-03-28)
parents c0c5e37b20ae
children 9a839ead4870
line source
1 /*
2 * support.h: HVM support routines used by VT-x and SVM.
3 *
4 * Leendert van Doorn, leendert@watson.ibm.com
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #ifndef __ASM_X86_HVM_SUPPORT_H__
22 #define __ASM_X86_HVM_SUPPORT_H__
24 #include <xen/sched.h>
25 #include <asm/types.h>
26 #include <asm/regs.h>
27 #include <asm/processor.h>
/*
 * Compile-time switch for HVM debug logging (see HVM_DBG_LOG() below).
 * Follows the hypervisor build type: enabled in debug builds (NDEBUG
 * undefined), compiled out in release builds.
 *
 * Fix: the #else branch previously also defined HVM_DEBUG to 1, making
 * the NDEBUG test a no-op and leaving debug logging in release builds.
 */
#ifndef NDEBUG
#define HVM_DEBUG 1
#else
#define HVM_DEBUG 0
#endif
/*
 * Return domain @d's shared I/O request page.
 * shared_page_va is assumed to hold a valid mapping by the time this is
 * called -- NOTE(review): no NULL/validity check here; callers must not
 * use this before the HVM shared page is set up.
 */
static inline shared_iopage_t *get_sp(struct domain *d)
{
    return (shared_iopage_t *) d->arch.hvm_domain.shared_page_va;
}
40 static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
41 {
42 return &get_sp(d)->vcpu_iodata[cpu];
43 }
/*
 * Operation-type encodings for CR/DR access and related instructions
 * (presumably decoded from the VMX exit qualification -- confirm
 * against the VMX exit handlers).  XXX these are really VMX specific.
 */
#define TYPE_MOV_TO_DR   (0 << 4)
#define TYPE_MOV_FROM_DR (1 << 4)
#define TYPE_MOV_TO_CR   (0 << 4)
#define TYPE_MOV_FROM_CR (1 << 4)
#define TYPE_CLTS        (2 << 4)
#define TYPE_LMSW        (3 << 4)

/* Sentinel meaning "deliver this event with no error code". */
#define VMX_DELIVER_NO_ERROR_CODE  -1
#if HVM_DEBUG
/*
 * Category bits for HVM_DBG_LOG().  A message is emitted only when at
 * least one of its category bits is set in opt_hvm_debug_level.
 */
#define DBG_LEVEL_0                 (1 << 0)
#define DBG_LEVEL_1                 (1 << 1)
#define DBG_LEVEL_2                 (1 << 2)
#define DBG_LEVEL_3                 (1 << 3)
#define DBG_LEVEL_IO                (1 << 4)
#define DBG_LEVEL_VMMU              (1 << 5)
#define DBG_LEVEL_VLAPIC            (1 << 6)
#define DBG_LEVEL_VLAPIC_TIMER      (1 << 7)
#define DBG_LEVEL_VLAPIC_INTERRUPT  (1 << 8)
#define DBG_LEVEL_IOAPIC            (1 << 9)

extern unsigned int opt_hvm_debug_level;
/*
 * printk() the printf-style message @_f, tagged with the current
 * domain/vcpu IDs and the calling function's name, iff one of the
 * @level category bits is enabled in opt_hvm_debug_level.
 * Uses GNU named-variadic-macro syntax (_a...).
 */
#define HVM_DBG_LOG(level, _f, _a...)                                      \
    do {                                                                   \
        if ( unlikely((level) & opt_hvm_debug_level) )                     \
            printk("[HVM:%d.%d] <%s> " _f "\n",                            \
                   current->domain->domain_id, current->vcpu_id, __func__, \
                   ## _a);                                                 \
    } while (0)
#else
/* Release build: logging compiles to nothing. */
#define HVM_DBG_LOG(level, _f, _a...)
#endif
79 /*
80 * Save/restore support
81 */
/* Marshalling and unmarshalling uses a buffer with size and cursor. */
typedef struct hvm_domain_context {
    uint32_t cur;    /* Current read/write offset into data[]. */
    uint32_t size;   /* Total usable size of data[], in bytes. */
    uint8_t *data;   /* Save-record buffer (allocation/ownership is the
                      * caller's -- see hvm_save_size()/hvm_save()). */
} hvm_domain_context_t;
90 /* Marshalling an entry: check space and fill in the header */
91 static inline int _hvm_init_entry(struct hvm_domain_context *h,
92 uint16_t tc, uint16_t inst, uint32_t len)
93 {
94 struct hvm_save_descriptor *d
95 = (struct hvm_save_descriptor *)&h->data[h->cur];
96 if ( h->size - h->cur < len + sizeof (*d) )
97 {
98 gdprintk(XENLOG_WARNING,
99 "HVM save: no room for %"PRIu32" + %u bytes "
100 "for typecode %"PRIu16"\n",
101 len, (unsigned) sizeof (*d), tc);
102 return -1;
103 }
104 d->typecode = tc;
105 d->instance = inst;
106 d->length = len;
107 h->cur += sizeof (*d);
108 return 0;
109 }
/* Marshalling: copy the contents in a type-safe way.
 * NB: performs no bounds check itself -- the caller must already have
 * reserved space via _hvm_init_entry(). */
#define _hvm_write_entry(_x, _h, _src) do {                     \
    *(HVM_SAVE_TYPE(_x) *)(&(_h)->data[(_h)->cur]) = *(_src);   \
    (_h)->cur += HVM_SAVE_LENGTH(_x);                           \
} while (0)

/* Marshalling: init and copy; evaluates to zero on success.
 * (GNU statement-expression: the value of the block is `r`.) */
#define hvm_save_entry(_x, _inst, _h, _src) ({          \
    int r;                                              \
    r = _hvm_init_entry((_h), HVM_SAVE_CODE(_x),        \
                        (_inst), HVM_SAVE_LENGTH(_x));  \
    if ( r == 0 )                                       \
        _hvm_write_entry(_x, (_h), (_src));             \
    r; })
126 /* Unmarshalling: test an entry's size and typecode and record the instance */
127 static inline int _hvm_check_entry(struct hvm_domain_context *h,
128 uint16_t type, uint32_t len)
129 {
130 struct hvm_save_descriptor *d
131 = (struct hvm_save_descriptor *)&h->data[h->cur];
132 if ( len + sizeof (*d) > h->size - h->cur)
133 {
134 gdprintk(XENLOG_WARNING,
135 "HVM restore: not enough data left to read %u bytes "
136 "for type %u\n", len, type);
137 return -1;
138 }
139 if ( type != d->typecode || len != d->length )
140 {
141 gdprintk(XENLOG_WARNING,
142 "HVM restore mismatch: expected type %u length %u, "
143 "saw type %u length %u\n", type, len, d->typecode, d->length);
144 return -1;
145 }
146 h->cur += sizeof (*d);
147 return 0;
148 }
/* Unmarshalling: copy the contents in a type-safe way.
 * NB: no checking here -- the caller must first validate the entry
 * with _hvm_check_entry(). */
#define _hvm_read_entry(_x, _h, _dst) do {                      \
    *(_dst) = *(HVM_SAVE_TYPE(_x) *) (&(_h)->data[(_h)->cur]);  \
    (_h)->cur += HVM_SAVE_LENGTH(_x);                           \
} while (0)

/* Unmarshalling: check, then copy.  Evaluates to zero on success.
 * (GNU statement-expression: the value of the block is `r`.) */
#define hvm_load_entry(_x, _h, _dst) ({                                 \
    int r;                                                              \
    r = _hvm_check_entry((_h), HVM_SAVE_CODE(_x), HVM_SAVE_LENGTH(_x)); \
    if ( r == 0 )                                                       \
        _hvm_read_entry(_x, (_h), (_dst));                              \
    r; })
164 /* Unmarshalling: what is the instance ID of the next entry? */
165 static inline uint16_t hvm_load_instance(struct hvm_domain_context *h)
166 {
167 struct hvm_save_descriptor *d
168 = (struct hvm_save_descriptor *)&h->data[h->cur];
169 return d->instance;
170 }
/* Handler types for different types of save-file entry.
 * The save handler may save multiple instances of a type into the buffer;
 * the load handler will be called once for each instance found when
 * restoring.  Both return non-zero on error. */
typedef int (*hvm_save_handler) (struct domain *d,
                                 hvm_domain_context_t *h);
typedef int (*hvm_load_handler) (struct domain *d,
                                 hvm_domain_context_t *h);

/* Init-time function to declare a pair of handlers for a type,
 * and the maximum buffer space needed to save this type of state. */
void hvm_register_savevm(uint16_t typecode,
                         const char *name,
                         hvm_save_handler save_state,
                         hvm_load_handler load_state,
                         size_t size, int kind);

/* The space needed for saving can be per-domain or per-vcpu: */
#define HVMSR_PER_DOM  0
#define HVMSR_PER_VCPU 1

/* Syntactic sugar around that function: specify the max number of
 * saves (@_num, presumably 1 for per-domain state or the vcpu count
 * for per-vcpu state -- confirm at call sites), and this calculates
 * the size of buffer needed.  Registers via an __initcall so the
 * handlers are in place before any save/restore happens. */
#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load, _num, _k)             \
static int __hvm_register_##_x##_save_and_restore(void)                   \
{                                                                         \
    hvm_register_savevm(HVM_SAVE_CODE(_x),                                \
                        #_x,                                              \
                        &_save,                                           \
                        &_load,                                           \
                        (_num) * (HVM_SAVE_LENGTH(_x)                     \
                                  + sizeof (struct hvm_save_descriptor)), \
                        _k);                                              \
    return 0;                                                             \
}                                                                         \
__initcall(__hvm_register_##_x##_save_and_restore);
/* Entry points for saving and restoring HVM domain state. */
size_t hvm_save_size(struct domain *d);    /* Buffer size needed to save @d. */
int hvm_save(struct domain *d, hvm_domain_context_t *h);
int hvm_load(struct domain *d, hvm_domain_context_t *h);

/* End of save/restore */

extern char hvm_io_bitmap[];
extern int hvm_enabled;

void hvm_enable(struct hvm_function_table *);
void hvm_disable(void);

/* Copy between a local buffer and guest physical/virtual memory.
 * NOTE(review): return-value semantics are defined by the
 * implementations elsewhere -- confirm before relying on them. */
int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);

void hvm_print_line(struct vcpu *v, const char c);
void hlt_timer_fn(void *data);

void hvm_do_hypercall(struct cpu_user_regs *pregs);

void hvm_hlt(unsigned long rflags);
void hvm_triple_fault(void);
236 #endif /* __ASM_X86_HVM_SUPPORT_H__ */