ia64/xen-unstable
xen/arch/x86/x86_64/compat/traps.c @ 16263:23582bcda6e1

x86: Clean up NMI delivery logic. Allow set_trap_table vector 2 to be
specified as not disabling event delivery, just like any other vector.

Signed-off-by: Keir Fraser <keir@xensource.com>

author    Keir Fraser <keir@xensource.com>
date      Mon Oct 29 09:49:39 2007 +0000
parents   185a13c03255
children  14fd83fe71c3
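To illustrate the set_trap_table half of this change: a 32-bit guest may now
install an NMI handler on vector 2 without the "disable event delivery"
flag, just like any other vector. A minimal guest-side sketch, assuming the
public struct trap_info layout from xen/include/public/arch-x86/xen.h;
guest_nmi_handler and the choice of FLAT_KERNEL_CS are illustrative:

    static struct trap_info traps[] = {
        {
            .vector  = 2,              /* NMI */
            .flags   = 0,              /* DPL 0; no "disable events" bit */
            .cs      = FLAT_KERNEL_CS,
            .address = (unsigned long)guest_nmi_handler,
        },
        { 0 },                         /* a zero address ends the table */
    };

    HYPERVISOR_set_trap_table(traps);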
#ifdef CONFIG_COMPAT

#include <xen/event.h>
#include <asm/regs.h>
#include <compat/callback.h>
#include <compat/arch-x86_32.h>

/* Dump a 32-bit guest's stack, eight words per output line. */
void compat_show_guest_stack(struct cpu_user_regs *regs, int debug_stack_lines)
{
    unsigned int i, *stack, addr;

    stack = (unsigned int *)(unsigned long)regs->_esp;
    printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);

    for ( i = 0; i < debug_stack_lines * 8; i++ )
    {
        /* Stop before the next word would cross a stack boundary. */
        if ( (((long)stack + 3) & (STACK_SIZE - 4)) == 0 )
            break;
        if ( get_user(addr, stack) )
        {
            if ( i != 0 )
                printk("\n ");
            printk("Fault while accessing guest memory.");
            i = 1;
            break;
        }
        if ( (i != 0) && ((i % 8) == 0) )
            printk("\n ");
        printk(" %08x", addr);
        stack++;
    }
    if ( i == 0 )
        printk("Stack empty.");
    printk("\n");
}
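/*
 * For orientation: the 32-bit frame that compat_iret() below pops off the
 * guest stack, as implied by the __get_user() word offsets it uses (a
 * reading of the code, not a definition from the public headers):
 *
 *     (u32 *)regs->rsp + 0:  EAX    (clobbered by the hypercall stub)
 *     (u32 *)regs->rsp + 1:  EIP
 *     (u32 *)regs->rsp + 2:  CS
 *     (u32 *)regs->rsp + 3:  EFLAGS
 *     (u32 *)regs->rsp + 4:  ESP    (read only when returning to ring 2/3)
 *     (u32 *)regs->rsp + 5:  SS     (read only when returning to ring 2/3)
 */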
unsigned int compat_iret(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct vcpu *v = current;
    u32 eflags;

    /* Trim stack pointer to 32 bits. */
    regs->rsp = (u32)regs->rsp;

    /* Restore EAX (clobbered by hypercall). */
    if ( unlikely(__get_user(regs->_eax, (u32 *)regs->rsp)) )
        goto exit_and_crash;

    /* Restore CS and EIP. */
    if ( unlikely(__get_user(regs->_eip, (u32 *)regs->rsp + 1)) ||
         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
        goto exit_and_crash;

    /*
     * Fix up and restore EFLAGS. We fix up in a local staging area
     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
     */
    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
        goto exit_and_crash;
    regs->_eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;

    if ( unlikely(eflags & X86_EFLAGS_VM) )
    {
        /*
         * Cannot return to VM86 mode: inject a GP fault instead. Note that
         * the GP fault is reported on the first VM86 mode instruction, not
         * on the IRET (which is why we can simply leave the stack frame
         * as-is (except for perhaps having to copy it), which in turn seems
         * better than teaching create_bounce_frame() to needlessly deal
         * with vm86 mode frames).
         */
        const struct trap_info *ti;
        u32 x, ksp = v->arch.guest_context.kernel_sp - 40;
        unsigned int i;
        int rc = 0;

        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
                 regs->_esp, ksp);
        if ( ksp < regs->_esp )
        {
            /* Moving the frame down: copy the lowest word first. */
            for ( i = 1; i < 10; ++i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        else if ( ksp > regs->_esp )
        {
            /* Moving the frame up: copy the highest word first, so that
             * overlapping source and destination are handled correctly. */
            for ( i = 9; i > 0; --i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        if ( rc )
            goto exit_and_crash;
        regs->_esp = ksp;
        regs->ss = v->arch.guest_context.kernel_ss;

        ti = &v->arch.guest_context.trap_ctxt[13]; /* Vector 13: #GP */
        if ( TI_GET_IF(ti) )
            eflags &= ~X86_EFLAGS_IF;
        regs->_eflags = eflags & ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
                                   X86_EFLAGS_NT|X86_EFLAGS_TF);

        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
            goto exit_and_crash;
        regs->_eip = ti->address;
        regs->cs = ti->cs;
    }
    else if ( unlikely(ring_0(regs)) )
        goto exit_and_crash;
    else if ( !ring_1(regs) )
    {
        /* Return to ring 2/3: restore ESP and SS. */
        if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
             __get_user(regs->_esp, (u32 *)regs->rsp + 4) )
            goto exit_and_crash;
    }
    else
        regs->_esp += 16;

    /* No longer in NMI context. */
    v->nmi_masked = 0;

    /* Restore upcall mask from supplied EFLAGS.IF. */
    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);

    /*
     * The hypercall exit path will overwrite EAX with this return
     * value.
     */
    return regs->_eax;

 exit_and_crash:
    gdprintk(XENLOG_ERR, "Fatal error\n");
    domain_crash(v->domain);
    return 0;
}
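/*
 * Cross-reference: guests reach compat_iret() via the HYPERVISOR_iret stub
 * built by hypercall_page_initialise_ring1_kernel() at the bottom of this
 * file. That stub pushes %eax before issuing int $0x82, which provides the
 * EAX slot restored at the top of compat_iret().
 */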
static long compat_register_guest_callback(
    struct compat_callback_register *reg)
{
    long ret = 0;
    struct vcpu *v = current;

    fixup_guest_code_selector(v->domain, reg->address.cs);

    switch ( reg->type )
    {
    case CALLBACKTYPE_event:
        v->arch.guest_context.event_callback_cs = reg->address.cs;
        v->arch.guest_context.event_callback_eip = reg->address.eip;
        break;

    case CALLBACKTYPE_failsafe:
        v->arch.guest_context.failsafe_callback_cs = reg->address.cs;
        v->arch.guest_context.failsafe_callback_eip = reg->address.eip;
        if ( reg->flags & CALLBACKF_mask_events )
            set_bit(_VGCF_failsafe_disables_events,
                    &v->arch.guest_context.flags);
        else
            clear_bit(_VGCF_failsafe_disables_events,
                      &v->arch.guest_context.flags);
        break;

    case CALLBACKTYPE_syscall32:
        v->arch.syscall32_callback_cs = reg->address.cs;
        v->arch.syscall32_callback_eip = reg->address.eip;
        v->arch.syscall32_disables_events =
            (reg->flags & CALLBACKF_mask_events) != 0;
        break;

    case CALLBACKTYPE_sysenter:
        v->arch.sysenter_callback_cs = reg->address.cs;
        v->arch.sysenter_callback_eip = reg->address.eip;
        v->arch.sysenter_disables_events =
            (reg->flags & CALLBACKF_mask_events) != 0;
        break;

    case CALLBACKTYPE_nmi:
        ret = register_guest_nmi_callback(reg->address.eip);
        break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

static long compat_unregister_guest_callback(
    struct compat_callback_unregister *unreg)
{
    long ret;

    switch ( unreg->type )
    {
    case CALLBACKTYPE_event:
    case CALLBACKTYPE_failsafe:
    case CALLBACKTYPE_syscall32:
    case CALLBACKTYPE_sysenter:
        ret = -EINVAL;
        break;

    case CALLBACKTYPE_nmi:
        ret = unregister_guest_nmi_callback();
        break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
long compat_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long ret;

    switch ( cmd )
    {
    case CALLBACKOP_register:
    {
        struct compat_callback_register reg;

        ret = -EFAULT;
        if ( copy_from_guest(&reg, arg, 1) )
            break;

        ret = compat_register_guest_callback(&reg);
    }
    break;

    case CALLBACKOP_unregister:
    {
        struct compat_callback_unregister unreg;

        ret = -EFAULT;
        if ( copy_from_guest(&unreg, arg, 1) )
            break;

        ret = compat_unregister_guest_callback(&unreg);
    }
    break;

    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}
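/*
 * For orientation (not part of this file): a guest reaches the handler
 * above through HYPERVISOR_callback_op. A minimal guest-side sketch,
 * assuming the public struct callback_register from
 * xen/include/public/callback.h; hyp_event_callback and the choice of
 * FLAT_KERNEL_CS are illustrative:
 *
 *     struct callback_register cb = {
 *         .type    = CALLBACKTYPE_event,
 *         .address = { .cs  = FLAT_KERNEL_CS,
 *                      .eip = (unsigned long)hyp_event_callback },
 *     };
 *     HYPERVISOR_callback_op(CALLBACKOP_register, &cb);
 */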
long compat_set_callbacks(unsigned long event_selector,
                          unsigned long event_address,
                          unsigned long failsafe_selector,
                          unsigned long failsafe_address)
{
    struct compat_callback_register event = {
        .type = CALLBACKTYPE_event,
        .address = {
            .cs = event_selector,
            .eip = event_address
        }
    };
    struct compat_callback_register failsafe = {
        .type = CALLBACKTYPE_failsafe,
        .address = {
            .cs = failsafe_selector,
            .eip = failsafe_address
        }
    };

    /* Registration errors are ignored: this legacy entry point
     * unconditionally reports success. */
    compat_register_guest_callback(&event);
    compat_register_guest_callback(&failsafe);

    return 0;
}
DEFINE_XEN_GUEST_HANDLE(trap_info_compat_t);

int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps)
{
    struct compat_trap_info cur;
    struct trap_info *dst = current->arch.guest_context.trap_ctxt;
    long rc = 0;

    /* If no table is presented then clear the entire virtual IDT. */
    if ( guest_handle_is_null(traps) )
    {
        memset(dst, 0, 256 * sizeof(*dst));
        return 0;
    }

    for ( ; ; )
    {
        if ( hypercall_preempt_check() )
        {
            rc = hypercall_create_continuation(
                __HYPERVISOR_set_trap_table, "h", traps);
            break;
        }

        if ( copy_from_guest(&cur, traps, 1) )
        {
            rc = -EFAULT;
            break;
        }

        /* A zero address marks the end of the table. */
        if ( cur.address == 0 )
            break;

        fixup_guest_code_selector(current->domain, cur.cs);

        XLAT_trap_info(dst + cur.vector, &cur);

        if ( cur.vector == 0x80 )
            init_int80_direct_trap(current);

        guest_handle_add_offset(traps, 1);
    }

    return rc;
}
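/*
 * Guest-side usage sketch (hypothetical guest code): handlers are
 * installed from a table whose end is marked by a zero-address entry,
 * and passing a NULL handle clears all 256 virtual IDT entries:
 *
 *     HYPERVISOR_set_trap_table(traps);   install/update entries
 *     HYPERVISOR_set_trap_table(NULL);    clear the virtual IDT
 */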
#endif /* CONFIG_COMPAT */
static void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
{
    char *p;
    int i;

    /* Fill in all the transfer points with template machine code. */
    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
    {
        p = (char *)(hypercall_page + (i * 32));
        *(u8  *)(p+ 0) = 0xb8;    /* mov  $<i>,%eax */
        *(u32 *)(p+ 1) = i;
        *(u16 *)(p+ 5) = 0x82cd;  /* int  $0x82 */
        *(u8  *)(p+ 7) = 0xc3;    /* ret */
    }

    /*
     * HYPERVISOR_iret is special because it doesn't return and expects a
     * special stack frame. Guests jump at this transfer point instead of
     * calling it.
     */
    p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
    *(u8  *)(p+ 0) = 0x50;    /* push %eax */
    *(u8  *)(p+ 1) = 0xb8;    /* mov  $__HYPERVISOR_iret,%eax */
    *(u32 *)(p+ 2) = __HYPERVISOR_iret;
    *(u16 *)(p+ 6) = 0x82cd;  /* int  $0x82 */
}
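/*
 * Illustration (hypothetical guest code): each 32-byte stub above is
 * reached by calling into the hypercall page at the hypercall's index,
 * e.g. for hypercall number <op>:
 *
 *     call hypercall_page + (op * 32)
 *
 * HYPERVISOR_iret is the exception: the guest jumps to its stub with the
 * iret frame already on the stack, and the stub never returns.
 */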
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */