ia64/xen-unstable
linux-2.4.27-xen-sparse/arch/xen/kernel/traps.c @ 2621:9402048e2325

bitkeeper revision 1.1159.1.218 (416a8128OiHXHyk_Sy8FsA0YUQcEnA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-26dom0
into freefall.cl.cam.ac.uk:/local/scratch/cl349/xeno.bk-26dom0

author   cl349@freefall.cl.cam.ac.uk
date     Mon Oct 11 12:48:40 2004 +0000
parents  c326283ef029
children 4c4ec1d8c1f1 f0fe276ae088
/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>

#include <asm/smp.h>
#include <asm/pgalloc.h>

#include <asm/hypervisor.h>

#include <linux/irq.h>
#include <linux/module.h>

asmlinkage int system_call(void);
asmlinkage void lcall7(void);
asmlinkage void lcall27(void);

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void safe_page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void fixup_4gb_segment(void);
asmlinkage void machine_check(void);

int kstack_depth_to_print = 24;

/*
 * If the address is either in the .text section of the
 * kernel, or in the vmalloc'ed module regions, it *may*
 * be the address of a calling routine
 */

#ifdef CONFIG_MODULES

extern struct module *module_list;
extern struct module kernel_module;

static inline int kernel_text_address(unsigned long addr)
{
    int retval = 0;
    struct module *mod;

    if (addr >= (unsigned long) &_stext &&
        addr <= (unsigned long) &_etext)
        return 1;

    for (mod = module_list; mod != &kernel_module; mod = mod->next) {
        /* mod_bound tests for addr being inside the vmalloc'ed
         * module area. Of course it'd be better to test only
         * for the .text subset... */
        if (mod_bound(addr, 0, mod)) {
            retval = 1;
            break;
        }
    }

    return retval;
}

#else

static inline int kernel_text_address(unsigned long addr)
{
    return (addr >= (unsigned long) &_stext &&
            addr <= (unsigned long) &_etext);
}

#endif

void show_trace(unsigned long * stack)
{
    int i;
    unsigned long addr;

    if (!stack)
        stack = (unsigned long*)&stack;

    printk("Call Trace: ");
    i = 1;
    while (((long) stack & (THREAD_SIZE-1)) != 0) {
        addr = *stack++;
        if (kernel_text_address(addr)) {
            if (i && ((i % 6) == 0))
                printk("\n ");
            printk("[<%08lx>] ", addr);
            i++;
        }
    }
    printk("\n");
}
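/*
 * NB. In 2.4 the task_struct and its kernel stack share one 8kB
 * (two-page) allocation, so a saved esp belongs to tsk only if it lies
 * within tsk's two pages. The XOR-and-mask test below checks exactly
 * that: any difference in bit 13 or above means esp is outside tsk's
 * stack area (e.g. the task is currently in user space on another CPU),
 * in which case the saved esp is stale and must not be walked.
 */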
void show_trace_task(struct task_struct *tsk)
{
    unsigned long esp = tsk->thread.esp;

    /* User space on another CPU? */
    if ((esp ^ (unsigned long)tsk) & (PAGE_MASK<<1))
        return;
    show_trace((unsigned long *)esp);
}

void show_stack(unsigned long * esp)
{
    unsigned long *stack;
    int i;

    // debugging aid: "show_stack(NULL);" prints the
    // back trace for this cpu.

    if (esp == NULL)
        esp = (unsigned long*)&esp;

    stack = esp;
    for (i = 0; i < kstack_depth_to_print; i++) {
        if (((long) stack & (THREAD_SIZE-1)) == 0)
            break;
        if (i && ((i % 8) == 0))
            printk("\n ");
        printk("%08lx ", *stack++);
    }
    printk("\n");
    show_trace(esp);
}

void show_registers(struct pt_regs *regs)
{
    int in_kernel = 1;
    unsigned long esp;
    unsigned short ss;

    esp = (unsigned long) (&regs->esp);
    ss = __KERNEL_DS;
    if (regs->xcs & 2) {
        in_kernel = 0;
        esp = regs->esp;
        ss = regs->xss & 0xffff;
    }
    printk(KERN_ALERT "CPU: %d\n", smp_processor_id() );
    printk(KERN_ALERT "EIP: %04x:[<%08lx>] %s\n",
           0xffff & regs->xcs, regs->eip, print_tainted());
    printk(KERN_ALERT "EFLAGS: %08lx\n", regs->eflags);
    printk(KERN_ALERT "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
           regs->eax, regs->ebx, regs->ecx, regs->edx);
    printk(KERN_ALERT "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
           regs->esi, regs->edi, regs->ebp, esp);
    printk(KERN_ALERT "ds: %04x es: %04x ss: %04x\n",
           regs->xds & 0xffff, regs->xes & 0xffff, ss);
    printk(KERN_ALERT "Process %s (pid: %d, stackpage=%08lx)",
           current->comm, current->pid, 4096+(unsigned long)current);
    /*
     * When in-kernel, we also print out the stack and code at the
     * time of the fault..
     */
    if (in_kernel) {

        printk(KERN_ALERT "\nStack: ");
        show_stack((unsigned long*)esp);

#if 0
        {
            int i;
            printk(KERN_ALERT "\nCode: ");
            if (regs->eip < PAGE_OFFSET)
                goto bad;

            for (i = 0; i < 20; i++)
            {
                unsigned char c;
                if (__get_user(c, &((unsigned char*)regs->eip)[i])) {
bad:
                    printk(KERN_ALERT " Bad EIP value.");
                    break;
                }
                printk("%02x ", c);
            }
        }
#endif
    }
    printk(KERN_ALERT "\n");
}

spinlock_t die_lock = SPIN_LOCK_UNLOCKED;

void die(const char * str, struct pt_regs * regs, long err)
{
    console_verbose();
    spin_lock_irq(&die_lock);
    bust_spinlocks(1);
    printk("%s: %04lx\n", str, err & 0xffff);
    show_registers(regs);
    bust_spinlocks(0);
    spin_unlock_irq(&die_lock);
    do_exit(SIGSEGV);
}
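/*
 * NB. Under Xen the guest kernel runs in ring 1 and user processes in
 * ring 3, so the RPL field of the saved CS is 1 (binary 01) for a
 * kernel-mode trap and 3 (binary 11) for a user-mode one. Hence the
 * "regs->xcs & 2" tests throughout this file: bit 1 of the saved CS is
 * set only when we trapped from user space.
 */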
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
    if (!(2 & regs->xcs))
        die(str, regs, err);
}


static void inline do_trap(int trapnr, int signr, char *str,
                           struct pt_regs * regs, long error_code,
                           siginfo_t *info)
{
    if (!(regs->xcs & 2))
        goto kernel_trap;

    /*trap_signal:*/ {
        struct task_struct *tsk = current;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;
        if (info)
            force_sig_info(signr, info, tsk);
        else
            force_sig(signr, tsk);
        return;
    }

    kernel_trap: {
        unsigned long fixup = search_exception_table(regs->eip);
        if (fixup)
            regs->eip = fixup;
        else
            die(str, regs, error_code);
        return;
    }
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
    do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
    siginfo_t info; \
    info.si_signo = signr; \
    info.si_errno = 0; \
    info.si_code = sicode; \
    info.si_addr = (void *)siaddr; \
    do_trap(trapnr, signr, str, regs, error_code, &info); \
}
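/*
 * For illustration, DO_ERROR(3, SIGTRAP, "int3", int3) below expands to:
 *
 *     asmlinkage void do_int3(struct pt_regs * regs, long error_code)
 *     {
 *         do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
 *     }
 *
 * i.e. each use of the macros stamps out one trap handler that funnels
 * into do_trap() above, optionally with a pre-filled siginfo_t.
 */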
DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
DO_ERROR( 3, SIGTRAP, "int3", int3)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGBUS, "machine check", machine_check)

asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
    /*
     * If we trapped on an LDT access then ensure that the default_ldt is
     * loaded, if nothing else. We load default_ldt lazily because LDT
     * switching costs time and many applications don't need it.
     */
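    /*
     * The GP error code encodes the faulting selector: bit 0 means an
     * external event, bit 1 means an IDT descriptor, bit 2 means an LDT
     * (rather than GDT) descriptor. So (error_code & 6) == 4 selects
     * "LDT descriptor, not IDT" -- i.e. the fault came from an LDT access.
     */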
    if ( unlikely((error_code & 6) == 4) )
    {
        unsigned long ldt;
        __asm__ __volatile__ ( "sldt %0" : "=r" (ldt) );
        if ( ldt == 0 )
        {
            mmu_update_t u;
            u.ptr  = MMU_EXTENDED_COMMAND;
            u.ptr |= (unsigned long)&default_ldt[0];
            u.val  = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
            if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0) )
            {
                show_trace(NULL);
                panic("Failed to install default LDT");
            }
            return;
        }
    }

    if (!(regs->xcs & 2))
        goto gp_in_kernel;

    current->thread.error_code = error_code;
    current->thread.trap_no = 13;
    force_sig(SIGSEGV, current);
    return;

gp_in_kernel:
    {
        unsigned long fixup;
        fixup = search_exception_table(regs->eip);
        if (fixup) {
            regs->eip = fixup;
            return;
        }
        die("general protection fault", regs, error_code);
    }
}


asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
    unsigned int condition;
    struct task_struct *tsk = current;
    siginfo_t info;

    condition = HYPERVISOR_get_debugreg(6);

    /* Mask out spurious debug traps due to lazy DR7 setting */
    if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
        if (!tsk->thread.debugreg[7])
            goto clear_dr7;
    }

    /* Save debug status register where ptrace can see it */
    tsk->thread.debugreg[6] = condition;

    /* Mask out spurious TF errors due to lazy TF clearing */
    if (condition & DR_STEP) {
        /*
         * The TF error should be masked out only if the current
         * process is not traced and if the TRAP flag has been set
         * previously by a tracing process (condition detected by
         * the PT_DTRACE flag); remember that the i386 TRAP flag
         * can be modified by the process itself in user mode,
         * allowing programs to debug themselves without the ptrace()
         * interface.
         */
        if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
            goto clear_TF;
    }

    /* Ok, finally something we can handle */
    tsk->thread.trap_no = 1;
    tsk->thread.error_code = error_code;
    info.si_signo = SIGTRAP;
    info.si_errno = 0;
    info.si_code = TRAP_BRKPT;

    /* If this is a kernel mode trap, save the user PC on entry to
     * the kernel, that's what the debugger can make sense of.
     */
    info.si_addr = ((regs->xcs & 2) == 0) ? (void *)tsk->thread.eip :
                                            (void *)regs->eip;
    force_sig_info(SIGTRAP, &info, tsk);

    /* Disable additional traps. They'll be re-enabled when
     * the signal is delivered.
     */
clear_dr7:
    HYPERVISOR_set_debugreg(7, 0);
    return;

clear_TF:
    regs->eflags &= ~TF_MASK;
    return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void *eip)
{
    struct task_struct * task;
    siginfo_t info;
    unsigned short cwd, swd;

    /*
     * Save the info for the exception handler and clear the error.
     */
    task = current;
    save_init_fpu(task);
    task->thread.trap_no = 16;
    task->thread.error_code = 0;
    info.si_signo = SIGFPE;
    info.si_errno = 0;
    info.si_code = __SI_FAULT;
    info.si_addr = eip;
    /*
     * (~cwd & swd) will mask out exceptions that are not set to unmasked
     * status. 0x3f is the exception bits in these regs, 0x200 is the
     * C1 reg you need in case of a stack fault, 0x040 is the stack
     * fault bit. We should only be taking one exception at a time,
     * so if this combination doesn't produce any single exception,
     * then we have a bad program that isn't synchronizing its FPU usage
     * and it will suffer the consequences since we won't be able to
     * fully reproduce the context of the exception
     */
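    /*
     * Worked example: a divide-by-zero sets the ZE bit (0x004) in the
     * status word. If the control word leaves ZE unmasked (its 0x004
     * bit clear), then ((~cwd) & swd & 0x3f) == 0x004 and we fall into
     * the FPE_FLTDIV case below.
     */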
    cwd = get_fpu_cwd(task);
    swd = get_fpu_swd(task);
    switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
        case 0x000:
        default:
            break;
        case 0x001: /* Invalid Op */
        case 0x041: /* Stack Fault */
        case 0x241: /* Stack Fault | Direction */
            info.si_code = FPE_FLTINV;
            break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
            info.si_code = FPE_FLTUND;
            break;
        case 0x004: /* Zero Divide */
            info.si_code = FPE_FLTDIV;
            break;
        case 0x008: /* Overflow */
            info.si_code = FPE_FLTOVF;
            break;
        case 0x020: /* Precision */
            info.si_code = FPE_FLTRES;
            break;
    }
    force_sig_info(SIGFPE, &info, task);
}
asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
    ignore_irq13 = 1;
    math_error((void *)regs->eip);
}

void simd_math_error(void *eip)
{
    struct task_struct * task;
    siginfo_t info;
    unsigned short mxcsr;

    /*
     * Save the info for the exception handler and clear the error.
     */
    task = current;
    save_init_fpu(task);
    task->thread.trap_no = 19;
    task->thread.error_code = 0;
    info.si_signo = SIGFPE;
    info.si_errno = 0;
    info.si_code = __SI_FAULT;
    info.si_addr = eip;
    /*
     * The SIMD FPU exceptions are handled a little differently, as there
     * is only a single status/control register. Thus, to determine which
     * unmasked exception was caught we must mask the exception mask bits
     * at 0x1f80, and then use these to mask the exception bits at 0x3f.
     */
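    /*
     * Worked example: an SSE divide-by-zero sets the ZE flag (0x004) in
     * MXCSR; its mask bit ZM lives at 0x200, and (0x200 >> 7) == 0x004.
     * So if ZM is clear the expression below evaluates to 0x004 and we
     * pick FPE_FLTDIV, exactly as in the x87 case above.
     */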
    mxcsr = get_fpu_mxcsr(task);
    switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
            break;
        case 0x001: /* Invalid Op */
            info.si_code = FPE_FLTINV;
            break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
            info.si_code = FPE_FLTUND;
            break;
        case 0x004: /* Zero Divide */
            info.si_code = FPE_FLTDIV;
            break;
        case 0x008: /* Overflow */
            info.si_code = FPE_FLTOVF;
            break;
        case 0x020: /* Precision */
            info.si_code = FPE_FLTRES;
            break;
    }
    force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
                                          long error_code)
{
    if (cpu_has_xmm) {
        /* Handle SIMD FPU exceptions on PIII+ processors. */
        ignore_irq13 = 1;
        simd_math_error((void *)regs->eip);
    } else {
        die_if_kernel("cache flush denied", regs, error_code);
        current->thread.trap_no = 19;
        current->thread.error_code = error_code;
        force_sig(SIGSEGV, current);
    }
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
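/*
 * NB. This is the "device not available" (trap 7) handler. The lazy-FPU
 * protocol works roughly as follows: the TS flag is set on every context
 * switch, so the first FPU instruction the new task executes traps here;
 * we then either restore the task's saved FPU context or initialise a
 * fresh one, and set PF_USEDFPU so that switch_to() knows it must fnsave
 * the state when this task is next descheduled.
 */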
asmlinkage void math_state_restore(struct pt_regs regs)
{
    /*
     * A trap in kernel mode can be ignored. It'll be the fast XOR or
     * copying libraries, which will correctly save/restore state and
     * reset the TS bit in CR0.
     */
    if ( (regs.xcs & 2) == 0 )
        return;

    if (current->used_math) {
        restore_fpu(current);
    } else {
        init_fpu();
    }
    current->flags |= PF_USEDFPU;   /* So we fnsave on switch_to() */
}


#define _set_gate(gate_addr,type,dpl,addr) \
do { \
    int __d0, __d1; \
    __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
        "movw %4,%%dx\n\t" \
        "movl %%eax,%0\n\t" \
        "movl %%edx,%1" \
        :"=m" (*((long *) (gate_addr))), \
         "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
        :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
         "3" ((char *) (addr)),"2" (__KERNEL_CS << 16)); \
} while (0)
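/*
 * For reference, the asm above assembles a standard x86 gate descriptor:
 * the first longword holds __KERNEL_CS in its top half and bits 15..0 of
 * the handler address in its bottom half; the second longword holds bits
 * 31..16 of the address plus the access word 0x8000+(dpl<<13)+(type<<8)
 * (present bit, descriptor privilege level, gate type -- type 12 being
 * a 32-bit call gate).
 */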
static void __init set_call_gate(void *a, void *addr)
{
    _set_gate(a,12,3,addr);
}
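/*
 * Each trap_info_t entry below is { vector, DPL, code selector, handler
 * address }: vector is the trap number, DPL is the most privileged ring
 * allowed to raise it from software (3 for int3/overflow/bounds and the
 * system call, so user space may invoke them), and the selector:address
 * pair names the handler that Xen should bounce the trap to.
 */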
/* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
static trap_info_t trap_table[] = {
    {  0, 0, __KERNEL_CS, (unsigned long)divide_error                },
    {  1, 0, __KERNEL_CS, (unsigned long)debug                       },
    {  3, 3, __KERNEL_CS, (unsigned long)int3                        },
    {  4, 3, __KERNEL_CS, (unsigned long)overflow                    },
    {  5, 3, __KERNEL_CS, (unsigned long)bounds                      },
    {  6, 0, __KERNEL_CS, (unsigned long)invalid_op                  },
    {  7, 0, __KERNEL_CS, (unsigned long)device_not_available        },
    {  8, 0, __KERNEL_CS, (unsigned long)double_fault                },
    {  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
    { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS                 },
    { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present         },
    { 12, 0, __KERNEL_CS, (unsigned long)stack_segment               },
    { 13, 0, __KERNEL_CS, (unsigned long)general_protection          },
    { 14, 0, __KERNEL_CS, (unsigned long)page_fault                  },
    { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment           },
    { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error           },
    { 17, 0, __KERNEL_CS, (unsigned long)alignment_check             },
    { 18, 0, __KERNEL_CS, (unsigned long)machine_check               },
    { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error      },
    { SYSCALL_VECTOR,
          3, __KERNEL_CS, (unsigned long)system_call                 },
    {  0, 0,           0, 0                                          }
};


void __init trap_init(void)
{
    HYPERVISOR_set_trap_table(trap_table);
    HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);
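    /*
     * NB. Rather than writing a hardware IDT, a Xen guest registers its
     * trap handlers via the set_trap_table hypercall; set_fast_trap then
     * nominates the system-call vector for Xen's fast-path dispatch, so
     * that int $0x80 from user space reaches the guest kernel with
     * minimal hypervisor involvement.
     */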
    /*
     * The default LDT is a single-entry callgate to lcall7 for iBCS and a
     * callgate to lcall27 for Solaris/x86 binaries.
     */
    clear_page(&default_ldt[0]);
    set_call_gate(&default_ldt[0],lcall7);
    set_call_gate(&default_ldt[4],lcall27);
    __make_page_readonly(&default_ldt[0]);

    cpu_init();
}


/*
 * install_safe_pf_handler / install_normal_pf_handler:
 *
 * These are used within the failsafe_callback handler in entry.S to avoid
 * taking a full page fault when reloading FS and GS. This is because FS and
 * GS could be invalid at pretty much any point while Xenolinux executes (we
 * don't set them to safe values on entry to the kernel). At *any* point Xen
 * may be entered due to a hardware interrupt --- on exit from Xen an invalid
 * FS/GS will cause our failsafe_callback to be executed. This could occur,
 * for example, while the mmu_update_queue is in an inconsistent state. This
 * is disastrous because the normal page-fault handler touches the update
 * queue!
 *
 * Fortunately, within the failsafe handler it is safe to force DS/ES/FS/GS
 * to zero if they cannot be reloaded -- at this point executing a normal
 * page fault would not change this effect. The safe page-fault handler
 * ensures this end result (blow away the selector value) without the dangers
 * of the normal page-fault handler.
 *
 * NB. Perhaps this can all go away after we have implemented writable
 * page tables. :-)
 */

asmlinkage void do_safe_page_fault(struct pt_regs *regs,
                                   unsigned long error_code,
                                   unsigned long address)
{
    unsigned long fixup;

    if ( (fixup = search_exception_table(regs->eip)) != 0 )
    {
        regs->eip = fixup;
        return;
    }

    die("Unhandleable 'safe' page fault!", regs, error_code);
}
unsigned long install_safe_pf_handler(void)
{
    static trap_info_t safe_pf[] = {
        { 14, 0, __KERNEL_CS, (unsigned long)safe_page_fault },
        {  0, 0,           0, 0                              }
    };
    unsigned long flags;
    local_irq_save(flags);
    HYPERVISOR_set_trap_table(safe_pf);
    return flags; /* This is returned in %%eax */
}

__attribute__((regparm(3))) /* This function takes its arg in %%eax */
void install_normal_pf_handler(unsigned long flags)
{
    static trap_info_t normal_pf[] = {
        { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
        {  0, 0,           0, 0                         }
    };
    HYPERVISOR_set_trap_table(normal_pf);
    local_irq_restore(flags);
}
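/*
 * Illustrative (not literal) usage from the failsafe path in entry.S,
 * reconstructed from the calling convention noted above:
 *
 *     call install_safe_pf_handler    # saved flags come back in %eax
 *     ...reload %ds/%es/%fs/%gs; any fault hits safe_page_fault...
 *     call install_normal_pf_handler  # regparm(3): flags passed in %eax
 *
 * i.e. the safe handler is swapped in only for the window during which
 * the segment registers are reloaded; the normal page-fault handler is
 * then reinstated and interrupts are restored.
 */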