ia64/xen-unstable

xen/arch/x86/x86_64/compat/traps.c @ 16185:42d8dadb5864

x86: Allow NMI callback CS to be specified via set_trap_table() hypercall.
Based on a patch by Jan Beulich.

Signed-off-by: Keir Fraser <keir@xensource.com>

Author: Keir Fraser <keir@xensource.com>
Date:   Mon Oct 22 13:04:32 2007 +0100

#ifdef CONFIG_COMPAT

#include <xen/event.h>
#include <asm/regs.h>
#include <compat/callback.h>
#include <compat/arch-x86_32.h>

void compat_show_guest_stack(struct cpu_user_regs *regs, int debug_stack_lines)
{
    unsigned int i, *stack, addr;

    stack = (unsigned int *)(unsigned long)regs->_esp;
    printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);

    for ( i = 0; i < debug_stack_lines * 8; i++ )
    {
        if ( (((long)stack + 3) & (STACK_SIZE - 4)) == 0 )
            break;
        if ( get_user(addr, stack) )
        {
            if ( i != 0 )
                printk("\n ");
            printk("Fault while accessing guest memory.");
            i = 1;
            break;
        }
        if ( (i != 0) && ((i % 8) == 0) )
            printk("\n ");
        printk(" %08x", addr);
        stack++;
    }
    if ( i == 0 )
        printk("Stack empty.");
    printk("\n");
}
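
/*
 * For illustration only (not part of the original file): the routine above
 * prints up to debug_stack_lines rows of eight 32-bit words, stopping early
 * at the stack boundary or on a faulting access. Sample output, with
 * hypothetical addresses and values:
 *
 *   Guest stack trace from esp=bfe43f80:
 *      c0102340 00000001 0000007b 00000000 bfe43fa0 c0104560 00000202 c01000b0
 */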

unsigned int compat_iret(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct vcpu *v = current;
    u32 eflags;

    /* Trim stack pointer to 32 bits. */
    regs->rsp = (u32)regs->rsp;

    /* Restore EAX (clobbered by hypercall). */
    if ( unlikely(__get_user(regs->_eax, (u32 *)regs->rsp)) )
        goto exit_and_crash;

    /* Restore CS and EIP. */
    if ( unlikely(__get_user(regs->_eip, (u32 *)regs->rsp + 1)) ||
         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
        goto exit_and_crash;

    /*
     * Fix up and restore EFLAGS. We fix up in a local staging area
     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
     */
    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
        goto exit_and_crash;
    regs->_eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;

    if ( unlikely(eflags & X86_EFLAGS_VM) )
    {
        /*
         * Cannot return to VM86 mode: inject a GP fault instead. Note that
         * the GP fault is reported on the first VM86 mode instruction, not on
         * the IRET (which is why we can simply leave the stack frame as-is
         * (except for perhaps having to copy it), which in turn seems better
         * than teaching create_bounce_frame() to needlessly deal with vm86
         * mode frames).
         */
        const struct trap_info *ti;
        u32 x, ksp = v->arch.guest_context.kernel_sp - 40;
        unsigned int i;
        int rc = 0;

        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
                 regs->_esp, ksp);
        if ( ksp < regs->_esp )
        {
            for ( i = 1; i < 10; ++i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        else if ( ksp > regs->_esp )
        {
            /*
             * The destination overlaps the source from above, so copy the
             * high slot first to avoid clobbering words not yet read.
             */
            for ( i = 9; i > 0; --i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        if ( rc )
            goto exit_and_crash;
        regs->_esp = ksp;
        regs->ss = v->arch.guest_context.kernel_ss;

        ti = &v->arch.guest_context.trap_ctxt[13];
        if ( TI_GET_IF(ti) )
            eflags &= ~X86_EFLAGS_IF;
        regs->_eflags = eflags & ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
                                   X86_EFLAGS_NT|X86_EFLAGS_TF);

        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
            goto exit_and_crash;
        regs->_eip = ti->address;
        regs->cs = ti->cs;
    }
    else if ( unlikely(ring_0(regs)) )
        goto exit_and_crash;
    else if ( !ring_1(regs) )
    {
        /* Return to ring 2/3: restore ESP and SS. */
        if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
             __get_user(regs->_esp, (u32 *)regs->rsp + 4) )
            goto exit_and_crash;
    }
    else
        regs->_esp += 16;

    /* No longer in NMI context. */
    v->nmi_masked = 0;

    /* Restore upcall mask from supplied EFLAGS.IF. */
    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);

    /*
     * The hypercall exit path will overwrite EAX with this return
     * value.
     */
    return regs->_eax;

 exit_and_crash:
    gdprintk(XENLOG_ERR, "Fatal error\n");
    domain_crash(v->domain);
    return 0;
}
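
/*
 * For illustration only (not part of the original file): the guest stack
 * frame compat_iret() consumes, as implied by the word offsets read above.
 * A sketch assuming the 32-bit PV convention of ten 32-bit slots (40 bytes,
 * matching the kernel_sp - 40 adjustment in the VM86 path); the trailing
 * segment slots are only present on frames coming from VM86 mode.
 */
struct compat_iret_frame_sketch {
    u32 eax;            /* +0:  EAX as clobbered by the hypercall      */
    u32 eip;            /* +4:  return EIP                             */
    u32 cs;             /* +8:  return CS                              */
    u32 eflags;         /* +12: EFLAGS (IOPL stripped, IF forced on)   */
    u32 esp;            /* +16: return ESP, used for ring 2/3 returns  */
    u32 ss;             /* +20: return SS, used for ring 2/3 returns   */
    u32 es, ds, fs, gs; /* +24..+36: VM86-mode frames only             */
};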

static long compat_register_guest_callback(
    struct compat_callback_register *reg)
{
    long ret = 0;
    struct vcpu *v = current;

    fixup_guest_code_selector(v->domain, reg->address.cs);

    switch ( reg->type )
    {
    case CALLBACKTYPE_event:
        v->arch.guest_context.event_callback_cs = reg->address.cs;
        v->arch.guest_context.event_callback_eip = reg->address.eip;
        break;

    case CALLBACKTYPE_failsafe:
        v->arch.guest_context.failsafe_callback_cs = reg->address.cs;
        v->arch.guest_context.failsafe_callback_eip = reg->address.eip;
        if ( reg->flags & CALLBACKF_mask_events )
            set_bit(_VGCF_failsafe_disables_events,
                    &v->arch.guest_context.flags);
        else
            clear_bit(_VGCF_failsafe_disables_events,
                      &v->arch.guest_context.flags);
        break;

    case CALLBACKTYPE_nmi:
        ret = register_guest_nmi_callback(reg->address.eip);
        break;

    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}

static long compat_unregister_guest_callback(
    struct compat_callback_unregister *unreg)
{
    long ret;

    switch ( unreg->type )
    {
    case CALLBACKTYPE_nmi:
        ret = unregister_guest_nmi_callback();
        break;

    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}

long compat_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long ret;

    switch ( cmd )
    {
    case CALLBACKOP_register:
    {
        struct compat_callback_register reg;

        ret = -EFAULT;
        if ( copy_from_guest(&reg, arg, 1) )
            break;

        ret = compat_register_guest_callback(&reg);
    }
    break;

    case CALLBACKOP_unregister:
    {
        struct compat_callback_unregister unreg;

        ret = -EFAULT;
        if ( copy_from_guest(&unreg, arg, 1) )
            break;

        ret = compat_unregister_guest_callback(&unreg);
    }
    break;

    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}

long compat_set_callbacks(unsigned long event_selector,
                          unsigned long event_address,
                          unsigned long failsafe_selector,
                          unsigned long failsafe_address)
{
    struct compat_callback_register event = {
        .type = CALLBACKTYPE_event,
        .address = {
            .cs = event_selector,
            .eip = event_address
        }
    };
    struct compat_callback_register failsafe = {
        .type = CALLBACKTYPE_failsafe,
        .address = {
            .cs = failsafe_selector,
            .eip = failsafe_address
        }
    };

    compat_register_guest_callback(&event);
    compat_register_guest_callback(&failsafe);

    return 0;
}
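
/*
 * Illustrative guest-side counterpart (not part of the original file): a
 * 32-bit PV kernel reaches compat_set_callbacks() through the
 * HYPERVISOR_set_callbacks wrapper, along the lines of the sketch below.
 * GUEST_KERNEL_CS, guest_event_callback and guest_failsafe_callback are
 * hypothetical names standing in for the guest's own symbols.
 *
 *   HYPERVISOR_set_callbacks(
 *       GUEST_KERNEL_CS, (unsigned long)guest_event_callback,
 *       GUEST_KERNEL_CS, (unsigned long)guest_failsafe_callback);
 */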

DEFINE_XEN_GUEST_HANDLE(trap_info_compat_t);

int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps)
{
    struct compat_trap_info cur;
    struct trap_info *dst = current->arch.guest_context.trap_ctxt;
    long rc = 0;

    /* If no table is presented then clear the entire virtual IDT. */
    if ( guest_handle_is_null(traps) )
    {
        memset(dst, 0, 256 * sizeof(*dst));
        return 0;
    }

    for ( ; ; )
    {
        if ( hypercall_preempt_check() )
        {
            rc = hypercall_create_continuation(
                __HYPERVISOR_set_trap_table, "h", traps);
            break;
        }

        if ( copy_from_guest(&cur, traps, 1) )
        {
            rc = -EFAULT;
            break;
        }

        if ( cur.address == 0 )
            break;

        if ( (cur.vector == TRAP_nmi) && !TI_GET_IF(&cur) )
        {
            rc = -EINVAL;
            break;
        }

        fixup_guest_code_selector(current->domain, cur.cs);

        XLAT_trap_info(dst + cur.vector, &cur);

        if ( cur.vector == 0x80 )
            init_int80_direct_trap(current);

        guest_handle_add_offset(traps, 1);
    }

    return rc;
}
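
/*
 * Illustrative guest-side counterpart (not part of the original file),
 * exercising what this changeset adds: the NMI callback, including its CS,
 * can now be registered through set_trap_table(). Per the TI_GET_IF() check
 * above, the NMI entry must carry the "disable event delivery" flag (flags
 * bit 2, i.e. the bit TI_SET_IF() sets), and an entry with a zero address
 * terminates the table. GUEST_KERNEL_CS and guest_nmi_handler are
 * hypothetical guest symbols.
 *
 *   struct trap_info nmi_table[] = {
 *       { .vector = TRAP_nmi, .flags = 4, .cs = GUEST_KERNEL_CS,
 *         .address = (unsigned long)guest_nmi_handler },
 *       { .vector = 0 }
 *   };
 *   HYPERVISOR_set_trap_table(nmi_table);
 */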

#endif /* CONFIG_COMPAT */

static void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
{
    char *p;
    int i;

    /* Fill in all the transfer points with template machine code. */

    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
    {
        p = (char *)(hypercall_page + (i * 32));
        *(u8  *)(p+ 0) = 0xb8;    /* mov  $<i>,%eax */
        *(u32 *)(p+ 1) = i;
        *(u16 *)(p+ 5) = 0x82cd;  /* int  $0x82 */
        *(u8  *)(p+ 7) = 0xc3;    /* ret */
    }

    /*
     * HYPERVISOR_iret is special because it doesn't return and expects a
     * special stack frame. Guests jump at this transfer point instead of
     * calling it.
     */
    p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
    *(u8  *)(p+ 0) = 0x50;    /* push %eax */
    *(u8  *)(p+ 1) = 0xb8;    /* mov  $__HYPERVISOR_iret,%eax */
    *(u32 *)(p+ 2) = __HYPERVISOR_iret;
    *(u16 *)(p+ 6) = 0x82cd;  /* int  $0x82 */
}
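
/*
 * Illustrative guest-side view (not part of the original file): each stub
 * built above is 32 bytes, so hypercall <nr> is invoked by calling into
 * hypercall_page + nr * 32, with arguments in %ebx, %ecx, %edx, %esi, %edi
 * (and %ebp), the 32-bit PV convention, and the result coming back in %eax.
 * A hypothetical yield, for example:
 *
 *   mov  $SCHEDOP_yield, %ebx
 *   xor  %ecx, %ecx
 *   call hypercall_page + __HYPERVISOR_sched_op * 32
 */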

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */