ia64/xen-unstable

view xen/arch/x86/traps.c @ 3650:8c6281ec8860

bitkeeper revision 1.1159.212.87 (4203b25aTB_XSOA2G0yxgrj0ey-vIA)

Defined per-CPU SYSCALL entry point for hypercalls. We enter the DOM0
kernel and can receive hypercalls. Now probably need to fix the
user-access (uaccess.h) functions and macros.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Fri Feb 04 17:35:22 2005 +0000 (2005-02-04)
parents d55d523078f7
children 393483ae9f62 ed6875102ea3 e8ee717f2423
line source
1 /******************************************************************************
2 * arch/x86/traps.c
3 *
4 * Modifications to Linux original are copyright (c) 2002-2004, K A Fraser
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
21 /*
22 * Copyright (C) 1991, 1992 Linus Torvalds
23 *
24 * Pentium III FXSR, SSE support
25 * Gareth Hughes <gareth@valinux.com>, May 2000
26 */
28 #include <xen/config.h>
29 #include <xen/init.h>
30 #include <xen/sched.h>
31 #include <xen/lib.h>
32 #include <xen/errno.h>
33 #include <xen/mm.h>
34 #include <xen/console.h>
35 #include <asm/regs.h>
36 #include <xen/delay.h>
37 #include <xen/event.h>
38 #include <xen/spinlock.h>
39 #include <xen/irq.h>
40 #include <xen/perfc.h>
41 #include <xen/softirq.h>
42 #include <asm/shadow.h>
43 #include <asm/domain_page.h>
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/atomic.h>
47 #include <asm/desc.h>
48 #include <asm/debugreg.h>
49 #include <asm/smp.h>
50 #include <asm/flushtlb.h>
51 #include <asm/uaccess.h>
52 #include <asm/i387.h>
53 #include <asm/debugger.h>
54 #include <asm/msr.h>
/*
 * opt_nmi: one of 'ignore', 'dom0', or 'fatal'.
 * fatal: Xen prints diagnostic message and then hangs.
 * dom0: The NMI is virtualised to DOM0.
 * ignore: The NMI error is cleared and ignored.
 */
/* Release builds forward NMIs to DOM0; debug builds die loudly instead. */
#ifdef NDEBUG
char opt_nmi[10] = "dom0";
#else
char opt_nmi[10] = "fatal";
#endif
/* Parsed from the "nmi=" boot parameter. */
string_param("nmi", opt_nmi);
/* Hypercall entry point (assembly stub; installed into the IDT below). */
asmlinkage int hypercall(void);

/* Master table, and the one used by CPU0. */
idt_entry_t idt_table[IDT_ENTRIES] = { {0, 0}, };
/* All other CPUs have their own copy. */
idt_entry_t *idt_tables[NR_CPUS] = { 0 };
/* Exception entry stubs (assembly); wired into the IDT by trap_init(). */
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
96 /*
97 * This is called for faults at very unexpected times (e.g., when interrupts
98 * are disabled). In such situations we can't do much that is safe. We try to
99 * print out some tracing and then we just spin.
100 */
101 asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs)
102 {
103 int cpu = smp_processor_id();
104 unsigned long cr2;
105 static char *trapstr[] = {
106 "divide error", "debug", "nmi", "bkpt", "overflow", "bounds",
107 "invalid operation", "device not available", "double fault",
108 "coprocessor segment", "invalid tss", "segment not found",
109 "stack error", "general protection fault", "page fault",
110 "spurious interrupt", "coprocessor error", "alignment check",
111 "machine check", "simd error"
112 };
114 show_registers(regs);
116 if ( trapnr == TRAP_page_fault )
117 {
118 __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (cr2) : );
119 printk("Faulting linear address might be %0lx %lx\n", cr2, cr2);
120 }
122 printk("************************************\n");
123 printk("CPU%d FATAL TRAP %d (%s), ERROR_CODE %04x%s.\n",
124 cpu, trapnr, trapstr[trapnr], regs->error_code,
125 (regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
126 printk("System shutting down -- need manual reset.\n");
127 printk("************************************\n");
129 /* Lock up the console to prevent spurious output from other CPUs. */
130 console_force_lock();
132 /* Wait for manual reset. */
133 for ( ; ; )
134 __asm__ __volatile__ ( "hlt" );
135 }
/*
 * Generic exception dispatcher. If the fault occurred in guest context the
 * exception is bounced to the guest's registered handler; if it occurred
 * inside Xen we attempt an exception-table fixup, and panic if none exists.
 * 'use_error_code' selects whether the hardware error code is pushed on the
 * guest's exception frame.
 */
static inline int do_trap(int trapnr, char *str,
                          struct xen_regs *regs,
                          int use_error_code)
{
    struct exec_domain *ed = current;
    struct trap_bounce *tb = &ed->thread.trap_bounce;
    trap_info_t *ti;
    unsigned long fixup;

    DEBUGGER_trap_entry(trapnr, regs);

    if ( !GUEST_FAULT(regs) )
        goto xen_fault;

    /* Queue a bounce frame targeting the guest's handler for this vector. */
    ti = current->thread.traps + trapnr;
    tb->flags = TBF_EXCEPTION;
    tb->cs = ti->cs;
    tb->eip = ti->address;
    if ( use_error_code )
    {
        tb->flags |= TBF_EXCEPTION_ERRCODE;
        tb->error_code = regs->error_code;
    }
    /* An interrupt-gate style guest handler runs with events masked. */
    if ( TI_GET_IF(ti) )
        ed->vcpu_info->evtchn_upcall_mask = 1;
    return 0;

 xen_fault:

    /* Fault in hypervisor context: recover via the exception table if we can. */
    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
    {
        DPRINTK("Trap %d: %p -> %p\n", trapnr, regs->eip, fixup);
        regs->eip = fixup;
        return 0;
    }

    DEBUGGER_trap_fatal(trapnr, regs);

    show_registers(regs);
    panic("CPU%d FATAL TRAP: vector = %d (%s)\n"
          "[error_code=%04x]\n",
          smp_processor_id(), trapnr, str, regs->error_code);
    return 0;
}
/*
 * Boilerplate generators for simple exception handlers: each expands to a
 * do_<name>() entry point that forwards to do_trap(), with (DO_ERROR) or
 * without (DO_ERROR_NOCODE) an error code on the guest exception frame.
 */
#define DO_ERROR_NOCODE(trapnr, str, name) \
asmlinkage int do_##name(struct xen_regs *regs) \
{ \
    return do_trap(trapnr, str, regs, 0); \
}

#define DO_ERROR(trapnr, str, name) \
asmlinkage int do_##name(struct xen_regs *regs) \
{ \
    return do_trap(trapnr, str, regs, 1); \
}

DO_ERROR_NOCODE( 0, "divide error", divide_error)
DO_ERROR_NOCODE( 4, "overflow", overflow)
DO_ERROR_NOCODE( 5, "bounds", bounds)
DO_ERROR_NOCODE( 6, "invalid operand", invalid_op)
DO_ERROR_NOCODE( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, "invalid TSS", invalid_TSS)
DO_ERROR(11, "segment not present", segment_not_present)
DO_ERROR(12, "stack segment", stack_segment)
DO_ERROR_NOCODE(16, "fpu error", coprocessor_error)
DO_ERROR(17, "alignment check", alignment_check)
DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
/*
 * INT3 (breakpoint) handler: always bounced to the guest; an INT3 taken in
 * hypervisor context is fatal.
 */
asmlinkage int do_int3(struct xen_regs *regs)
{
    struct exec_domain *ed = current;
    struct trap_bounce *tb = &ed->thread.trap_bounce;
    trap_info_t *ti;

    DEBUGGER_trap_entry(TRAP_int3, regs);

    if ( !GUEST_FAULT(regs) )
    {
        DEBUGGER_trap_fatal(TRAP_int3, regs);
        show_registers(regs);
        panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n", smp_processor_id());
    }

    /* Bounce to the guest's vector-3 handler. */
    ti = current->thread.traps + 3;
    tb->flags = TBF_EXCEPTION;
    tb->cs = ti->cs;
    tb->eip = ti->address;
    /* Interrupt-gate style handler: mask event delivery on entry. */
    if ( TI_GET_IF(ti) )
        ed->vcpu_info->evtchn_upcall_mask = 1;

    return 0;
}
/* Machine checks are treated as unrecoverable: report and spin. */
asmlinkage void do_machine_check(struct xen_regs *regs)
{
    fatal_trap(TRAP_machine_check, regs);
}
236 void propagate_page_fault(unsigned long addr, u16 error_code)
237 {
238 trap_info_t *ti;
239 struct exec_domain *ed = current;
240 struct trap_bounce *tb = &ed->thread.trap_bounce;
242 ti = ed->thread.traps + 14;
243 tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
244 tb->cr2 = addr;
245 tb->error_code = error_code;
246 tb->cs = ti->cs;
247 tb->eip = ti->address;
248 if ( TI_GET_IF(ti) )
249 ed->vcpu_info->evtchn_upcall_mask = 1;
251 ed->mm.guest_cr2 = addr;
252 }
/*
 * Page-fault handler. Tries, in order: writable-pagetable fixups, shadow-mode
 * fault handling, shadow-LDT mapping; otherwise propagates the fault to the
 * guest (guest context) or attempts an exception-table fixup (Xen context).
 */
asmlinkage int do_page_fault(struct xen_regs *regs)
{
    unsigned long off, addr, fixup;
    struct exec_domain *ed = current;
    struct domain *d = ed->domain;
    extern int map_ldt_shadow_page(unsigned int);
    int cpu = ed->processor;
    int ret;

    /* Read the faulting linear address from %cr2 before anything else. */
    __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (addr) : );

    DEBUGGER_trap_entry(TRAP_page_fault, regs);

    perfc_incrc(page_faults);

    if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
    {
        LOCK_BIGLOCK(d);
        /* Fault within the L2 slot of the currently-unhooked writable PT? */
        if ( unlikely(ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va) &&
             unlikely((addr >> L2_PAGETABLE_SHIFT) ==
                      ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx) )
        {
            ptwr_flush(PTWR_PT_ACTIVE);
            UNLOCK_BIGLOCK(d);
            return EXCRET_fault_fixed;
        }

        if ( (addr < PAGE_OFFSET) &&
             ((regs->error_code & 3) == 3) && /* write-protection fault */
             ptwr_do_page_fault(addr) )
        {
            if ( unlikely(ed->mm.shadow_mode) )
                (void)shadow_fault(addr, regs->error_code);
            UNLOCK_BIGLOCK(d);
            return EXCRET_fault_fixed;
        }
        UNLOCK_BIGLOCK(d);
    }

    /* Shadow page tables may be able to satisfy guest-range faults. */
    if ( unlikely(ed->mm.shadow_mode) &&
         (addr < PAGE_OFFSET) && shadow_fault(addr, regs->error_code) )
        return EXCRET_fault_fixed;

    if ( unlikely(addr >= LDT_VIRT_START(ed)) &&
         (addr < (LDT_VIRT_START(ed) + (ed->mm.ldt_ents*LDT_ENTRY_SIZE))) )
    {
        /*
         * Copy a mapping from the guest's LDT, if it is valid. Otherwise we
         * send the fault up to the guest OS to be handled.
         */
        LOCK_BIGLOCK(d);
        off = addr - LDT_VIRT_START(ed);
        addr = ed->mm.ldt_base + off;
        ret = map_ldt_shadow_page(off >> PAGE_SHIFT);
        UNLOCK_BIGLOCK(d);
        if ( likely(ret) )
            return EXCRET_fault_fixed; /* successfully copied the mapping */
    }

    if ( !GUEST_FAULT(regs) )
        goto xen_fault;

    /* Unhandled guest fault: bounce it to the guest OS. */
    propagate_page_fault(addr, regs->error_code);
    return 0;

 xen_fault:

    /* Fault in hypervisor context, e.g. during copy_to/from_user. */
    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
    {
        perfc_incrc(copy_user_faults);
        if ( !ed->mm.shadow_mode )
            DPRINTK("Page fault: %p -> %p\n", regs->eip, fixup);
        regs->eip = fixup;
        return 0;
    }

    DEBUGGER_trap_fatal(TRAP_page_fault, regs);

    show_registers(regs);
    show_page_walk(addr);
    panic("CPU%d FATAL PAGE FAULT\n"
          "[error_code=%04x]\n"
          "Faulting linear address might be %p\n",
          smp_processor_id(), regs->error_code, addr);
    return 0;
}
341 static int emulate_privileged_op(struct xen_regs *regs)
342 {
343 extern long do_fpu_taskswitch(void);
344 extern void *decode_reg(struct xen_regs *regs, u8 b);
346 struct exec_domain *ed = current;
347 unsigned long *reg, eip = regs->eip;
348 u8 opcode;
350 if ( get_user(opcode, (u8 *)eip) )
351 goto page_fault;
352 eip += 1;
353 if ( (opcode & 0xff) != 0x0f )
354 goto fail;
356 if ( get_user(opcode, (u8 *)eip) )
357 goto page_fault;
358 eip += 1;
360 switch ( opcode )
361 {
362 case 0x06: /* CLTS */
363 (void)do_fpu_taskswitch();
364 break;
366 case 0x09: /* WBINVD */
367 if ( !IS_CAPABLE_PHYSDEV(ed->domain) )
368 {
369 DPRINTK("Non-physdev domain attempted WBINVD.\n");
370 goto fail;
371 }
372 wbinvd();
373 break;
375 case 0x20: /* MOV CR?,<reg> */
376 if ( get_user(opcode, (u8 *)eip) )
377 goto page_fault;
378 eip += 1;
379 if ( (opcode & 0xc0) != 0xc0 )
380 goto fail;
381 reg = decode_reg(regs, opcode & 7);
382 switch ( (opcode >> 3) & 7 )
383 {
384 case 0: /* Read CR0 */
385 *reg =
386 (read_cr0() & ~X86_CR0_TS) |
387 (test_bit(EDF_GUEST_STTS, &ed->ed_flags) ? X86_CR0_TS : 0);
388 break;
390 case 2: /* Read CR2 */
391 *reg = ed->mm.guest_cr2;
392 break;
394 case 3: /* Read CR3 */
395 *reg = pagetable_val(ed->mm.pagetable);
396 break;
398 default:
399 goto fail;
400 }
401 break;
403 case 0x22: /* MOV <reg>,CR? */
404 if ( get_user(opcode, (u8 *)eip) )
405 goto page_fault;
406 eip += 1;
407 if ( (opcode & 0xc0) != 0xc0 )
408 goto fail;
409 reg = decode_reg(regs, opcode & 7);
410 switch ( (opcode >> 3) & 7 )
411 {
412 case 0: /* Write CR0 */
413 if ( *reg & X86_CR0_TS ) /* XXX ignore all but TS bit */
414 (void)do_fpu_taskswitch;
415 break;
417 case 2: /* Write CR2 */
418 ed->mm.guest_cr2 = *reg;
419 break;
421 case 3: /* Write CR3 */
422 LOCK_BIGLOCK(ed->domain);
423 (void)new_guest_cr3(*reg);
424 UNLOCK_BIGLOCK(ed->domain);
425 break;
427 default:
428 goto fail;
429 }
430 break;
432 case 0x30: /* WRMSR */
433 if ( !IS_PRIV(ed->domain) )
434 {
435 DPRINTK("Non-priv domain attempted WRMSR.\n");
436 goto fail;
437 }
438 wrmsr(regs->ecx, regs->eax, regs->edx);
439 break;
441 case 0x32: /* RDMSR */
442 if ( !IS_PRIV(ed->domain) )
443 {
444 DPRINTK("Non-priv domain attempted RDMSR.\n");
445 goto fail;
446 }
447 rdmsr(regs->ecx, regs->eax, regs->edx);
448 break;
450 default:
451 goto fail;
452 }
454 regs->eip = eip;
455 return EXCRET_fault_fixed;
457 fail:
458 return 0;
460 page_fault:
461 propagate_page_fault(eip, 0);
462 return EXCRET_fault_fixed;
463 }
/*
 * General-protection fault handler: handles guest software interrupts
 * (the "cunning trick" below), ring-1 privileged-op emulation, 4GB-segment
 * emulation (i386), and finally bounces or fixes up the fault.
 */
asmlinkage int do_general_protection(struct xen_regs *regs)
{
    struct exec_domain *ed = current;
    struct trap_bounce *tb = &ed->thread.trap_bounce;
    trap_info_t *ti;
    unsigned long fixup;

    DEBUGGER_trap_entry(TRAP_gp_fault, regs);

    /* Bit 0 set => fault caused by an external (hardware) source. */
    if ( regs->error_code & 1 )
        goto hardware_gp;

    if ( !GUEST_FAULT(regs) )
        goto gp_in_kernel;

    /*
     * Cunning trick to allow arbitrary "INT n" handling.
     *
     * We set DPL == 0 on all vectors in the IDT. This prevents any INT <n>
     * instruction from trapping to the appropriate vector, when that might not
     * be expected by Xen or the guest OS. For example, that entry might be for
     * a fault handler (unlike traps, faults don't increment EIP), or might
     * expect an error code on the stack (which a software trap never
     * provides), or might be a hardware interrupt handler that doesn't like
     * being called spuriously.
     *
     * Instead, a GPF occurs with the faulting IDT vector in the error code.
     * Bit 1 is set to indicate that an IDT entry caused the fault. Bit 0 is
     * clear to indicate that it's a software fault, not hardware.
     *
     * NOTE: Vectors 3 and 4 are dealt with from their own handler. This is
     * okay because they can only be triggered by an explicit DPL-checked
     * instruction. The DPL specified by the guest OS for these vectors is NOT
     * CHECKED!!
     */
    if ( (regs->error_code & 3) == 2 )
    {
        /* This fault must be due to <INT n> instruction. */
        ti = current->thread.traps + (regs->error_code>>3);
        /* Honour the guest-specified DPL for the software-interrupt vector. */
        if ( TI_GET_DPL(ti) >= (VM86_MODE(regs) ? 3 : (regs->cs & 3)) )
        {
            tb->flags = TBF_EXCEPTION;
            regs->eip += 2; /* skip the two-byte INT n instruction */
            goto finish_propagation;
        }
    }

    /* Emulate some simple privileged instructions when exec'ed in ring 1. */
    if ( (regs->error_code == 0) &&
         RING_1(regs) &&
         emulate_privileged_op(regs) )
        return 0;

#if defined(__i386__)
    if ( VM_ASSIST(ed->domain, VMASST_TYPE_4gb_segments) &&
         (regs->error_code == 0) &&
         gpf_emulate_4gb(regs) )
        return 0;
#endif

    /* Pass on GPF as is. */
    ti = current->thread.traps + 13;
    tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
    tb->error_code = regs->error_code;
 finish_propagation:
    tb->cs = ti->cs;
    tb->eip = ti->address;
    /* Interrupt-gate style handler: mask event delivery on entry. */
    if ( TI_GET_IF(ti) )
        ed->vcpu_info->evtchn_upcall_mask = 1;
    return 0;

 gp_in_kernel:

    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
    {
        DPRINTK("GPF (%04x): %p -> %p\n",
                regs->error_code, regs->eip, fixup);
        regs->eip = fixup;
        return 0;
    }

    DEBUGGER_trap_fatal(TRAP_gp_fault, regs);

 hardware_gp:
    show_registers(regs);
    panic("CPU%d GENERAL PROTECTION FAULT\n[error_code=%04x]\n",
          smp_processor_id(), regs->error_code);
    return 0;
}
/* NMI signalling a memory parity error: unrecoverable, report and spin. */
asmlinkage void mem_parity_error(struct xen_regs *regs)
{
    /* Grab the console even if another CPU holds it. */
    console_force_unlock();
    printk("\n\nNMI - MEMORY ERROR\n");
    fatal_trap(TRAP_nmi, regs);
}
/* NMI signalling an I/O check error: unrecoverable, report and spin. */
asmlinkage void io_check_error(struct xen_regs *regs)
{
    /* Grab the console even if another CPU holds it. */
    console_force_unlock();

    printk("\n\nNMI - I/O ERROR\n");
    fatal_trap(TRAP_nmi, regs);
}
/*
 * Fallback for NMIs we cannot attribute to a known source. 'reason' is the
 * low byte of the value passed in from do_nmi(). Logs and continues.
 */
static void unknown_nmi_error(unsigned char reason, struct xen_regs * regs)
{
    printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
    printk("Dazed and confused, but trying to continue\n");
    printk("Do you have a strange power saving mode enabled?\n");
}
/*
 * Top-level NMI handler: count it, then hand off to the watchdog (when the
 * local APIC watchdog is configured and enabled) or the unknown-NMI path.
 */
asmlinkage void do_nmi(struct xen_regs * regs, unsigned long reason)
{
    ++nmi_count(smp_processor_id());

#if CONFIG_X86_LOCAL_APIC
    if ( nmi_watchdog )
        nmi_watchdog_tick(regs);
    else
#endif
        unknown_nmi_error((unsigned char)(reason&0xff), regs);
}
589 unsigned long nmi_softirq_reason;
590 static void nmi_softirq(void)
591 {
592 if ( dom0 == NULL )
593 return;
595 if ( test_and_clear_bit(0, &nmi_softirq_reason) )
596 send_guest_virq(dom0->exec_domain[0], VIRQ_PARITY_ERR);
598 if ( test_and_clear_bit(1, &nmi_softirq_reason) )
599 send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR);
600 }
/*
 * Device-not-available (#NM) handler: restore or initialise the FPU state
 * lazily, and bounce a virtual #NM to the guest if it had set its virtual
 * CR0.TS (EDF_GUEST_STTS).
 */
asmlinkage int math_state_restore(struct xen_regs *regs)
{
    /* Prevent recursion. */
    clts();

    if ( !test_bit(EDF_USEDFPU, &current->ed_flags) )
    {
        /* First FPU use since the last context switch. */
        if ( test_bit(EDF_DONEFPUINIT, &current->ed_flags) )
            restore_fpu(current);
        else
            init_fpu();
        set_bit(EDF_USEDFPU, &current->ed_flags); /* so we fnsave on switch_to() */
    }

    if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) )
    {
        /* Guest expects a #NM of its own: bounce to its vector-7 handler. */
        struct trap_bounce *tb = &current->thread.trap_bounce;
        tb->flags = TBF_EXCEPTION;
        tb->cs = current->thread.traps[7].cs;
        tb->eip = current->thread.traps[7].address;
    }

    return EXCRET_fault_fixed;
}
/*
 * Debug (#DB) handler: filters spurious traps caused by lazily-cleared DR7,
 * ignores watchpoints that fire inside Xen, and otherwise bounces the trap
 * (with DR6 saved for the guest) to the guest's vector-1 handler.
 */
asmlinkage int do_debug(struct xen_regs *regs)
{
    unsigned long condition;
    struct exec_domain *d = current;
    struct trap_bounce *tb = &d->thread.trap_bounce;

    DEBUGGER_trap_entry(TRAP_debug, regs);

    /* DR6 holds the debug-status bits for this trap. */
    __asm__ __volatile__("mov %%db6,%0" : "=r" (condition));

    /* Mask out spurious debug traps due to lazy DR7 setting */
    if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
         (d->thread.debugreg[7] == 0) )
    {
        __asm__("mov %0,%%db7" : : "r" (0UL));
        goto out;
    }

    if ( !GUEST_FAULT(regs) )
    {
        /* Clear TF just for absolute sanity. */
        regs->eflags &= ~EF_TF;
        /*
         * We ignore watchpoints when they trigger within Xen. This may happen
         * when a buffer is passed to us which previously had a watchpoint set
         * on it. No need to bump EIP; the only faulting trap is an instruction
         * breakpoint, which can't happen to us.
         */
        goto out;
    }

    /* Save debug status register where guest OS can peek at it */
    d->thread.debugreg[6] = condition;

    /* Bounce to the guest's vector-1 handler. */
    tb->flags = TBF_EXCEPTION;
    tb->cs = d->thread.traps[1].cs;
    tb->eip = d->thread.traps[1].address;

 out:
    return EXCRET_not_a_fault;
}
/* Spurious interrupt (vector 15): nothing to do; not a real fault. */
asmlinkage int do_spurious_interrupt_bug(struct xen_regs *regs)
{
    return EXCRET_not_a_fault;
}
/* Install an interrupt gate (type 14, DPL 0) for vector n. */
void set_intr_gate(unsigned int n, void *addr)
{
    _set_gate(idt_table+n,14,0,addr);
}
/* Install an interrupt gate with DPL 3 so any ring may invoke vector n. */
void set_system_gate(unsigned int n, void *addr)
{
    _set_gate(idt_table+n,14,3,addr);
}
/* Install a task gate for vector n referencing TSS selector 'sel'. */
void set_task_gate(unsigned int n, unsigned int sel)
{
    idt_table[n].a = sel << 16;
    idt_table[n].b = 0x8500; /* present, DPL 0, type 5 (task gate) */
}
/* Write the GDT descriptor for CPU n's TSS (type 9: available 32-bit TSS). */
void set_tss_desc(unsigned int n, void *addr)
{
    _set_tssldt_desc(
        gdt_table + __TSS(n),
        (unsigned long)addr,
        offsetof(struct tss_struct, __cacheline_filler) - 1,
        9);
}
/*
 * Boot-time trap initialisation: set up the double-fault handler, populate
 * the master IDT, install the hypercall gate, and initialise CPU0 state.
 */
void __init trap_init(void)
{
    extern void doublefault_init(void);
    doublefault_init();

    /*
     * Note that interrupt gates are always used, rather than trap gates. We
     * must have interrupts disabled until DS/ES/FS/GS are saved because the
     * first activation must have the "bad" value(s) for these registers and
     * we may lose them if another activation is installed before they are
     * saved. The page-fault handler also needs interrupts disabled until %cr2
     * has been read and saved on the stack.
     */
    set_intr_gate(TRAP_divide_error,&divide_error);
    set_intr_gate(TRAP_debug,&debug);
    set_intr_gate(TRAP_nmi,&nmi);
    set_system_gate(TRAP_int3,&int3); /* usable from all privileges */
    set_system_gate(TRAP_overflow,&overflow); /* usable from all privileges */
    set_intr_gate(TRAP_bounds,&bounds);
    set_intr_gate(TRAP_invalid_op,&invalid_op);
    set_intr_gate(TRAP_no_device,&device_not_available);
    set_intr_gate(TRAP_copro_seg,&coprocessor_segment_overrun);
    set_intr_gate(TRAP_invalid_tss,&invalid_TSS);
    set_intr_gate(TRAP_no_segment,&segment_not_present);
    set_intr_gate(TRAP_stack_error,&stack_segment);
    set_intr_gate(TRAP_gp_fault,&general_protection);
    set_intr_gate(TRAP_page_fault,&page_fault);
    set_intr_gate(TRAP_spurious_int,&spurious_interrupt_bug);
    set_intr_gate(TRAP_copro_error,&coprocessor_error);
    set_intr_gate(TRAP_alignment_check,&alignment_check);
    set_intr_gate(TRAP_machine_check,&machine_check);
    set_intr_gate(TRAP_simd_error,&simd_coprocessor_error);
    set_intr_gate(TRAP_deferred_nmi,&nmi);

#if defined(__i386__)
    /* Hypercall gate: DPL 1 so only the guest kernel ring may invoke it. */
    _set_gate(idt_table+HYPERCALL_VECTOR, 14, 1, &hypercall);
#endif

    /* CPU0 uses the master IDT. */
    idt_tables[0] = idt_table;

    /*
     * Should be a barrier for any external CPU state.
     */
    {
        extern void cpu_init(void);
        cpu_init();
    }

    open_softirq(NMI_SOFTIRQ, nmi_softirq);
}
752 long do_set_trap_table(trap_info_t *traps)
753 {
754 trap_info_t cur;
755 trap_info_t *dst = current->thread.traps;
757 LOCK_BIGLOCK(current->domain);
759 for ( ; ; )
760 {
761 if ( hypercall_preempt_check() )
762 {
763 UNLOCK_BIGLOCK(current->domain);
764 return hypercall_create_continuation(
765 __HYPERVISOR_set_trap_table, 1, traps);
766 }
768 if ( copy_from_user(&cur, traps, sizeof(cur)) ) return -EFAULT;
770 if ( cur.address == 0 ) break;
772 if ( !VALID_CODESEL(cur.cs) ) return -EPERM;
774 memcpy(dst+cur.vector, &cur, sizeof(cur));
775 traps++;
776 }
778 UNLOCK_BIGLOCK(current->domain);
780 return 0;
781 }
784 long do_set_callbacks(unsigned long event_selector,
785 unsigned long event_address,
786 unsigned long failsafe_selector,
787 unsigned long failsafe_address)
788 {
789 struct exec_domain *d = current;
791 if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
792 return -EPERM;
794 d->thread.event_selector = event_selector;
795 d->thread.event_address = event_address;
796 d->thread.failsafe_selector = failsafe_selector;
797 d->thread.failsafe_address = failsafe_address;
799 return 0;
800 }
/*
 * Hypercall: emulate the guest setting CR0.TS. Records the virtual TS bit
 * and sets the real one so the next FPU use traps to math_state_restore().
 */
long do_fpu_taskswitch(void)
{
    set_bit(EDF_GUEST_STTS, &current->ed_flags);
    stts();
    return 0;
}
/*
 * Validate and set a (virtual) debug register for exec_domain p, loading
 * the hardware register too when p is the currently-running domain.
 * Returns 0, -EPERM for disallowed values, or -EINVAL for a bad index.
 */
long set_debugreg(struct exec_domain *p, int reg, unsigned long value)
{
    int i;

    switch ( reg )
    {
    case 0:
        /* Breakpoint address must lie below the Xen area (PAGE_OFFSET). */
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db0" : : "r" (value) );
        break;
    case 1:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db1" : : "r" (value) );
        break;
    case 2:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db2" : : "r" (value) );
        break;
    case 3:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db3" : : "r" (value) );
        break;
    case 6:
        /*
         * DR6: Bits 4-11,16-31 reserved (set to 1).
         * Bit 12 reserved (set to 0).
         */
        value &= 0xffffefff; /* reserved bits => 0 */
        value |= 0xffff0ff0; /* reserved bits => 1 */
        if ( p == current )
            __asm__ ( "mov %0, %%db6" : : "r" (value) );
        break;
    case 7:
        /*
         * DR7: Bit 10 reserved (set to 1).
         * Bits 11-12,14-15 reserved (set to 0).
         * Privileged bits:
         * GD (bit 13): must be 0.
         * R/Wn (bits 16-17,20-21,24-25,28-29): mustn't be 10.
         * LENn (bits 18-19,22-23,26-27,30-31): mustn't be 10.
         */
        /* DR7 == 0 => debugging disabled for this domain. */
        if ( value != 0 )
        {
            value &= 0xffff27ff; /* reserved bits => 0 */
            value |= 0x00000400; /* reserved bits => 1 */
            /* GD must be clear: the guest must not see debug exceptions. */
            if ( (value & (1<<13)) != 0 ) return -EPERM;
            /* Reject the undefined '10' encoding in every R/W and LEN field. */
            for ( i = 0; i < 16; i += 2 )
                if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
        }
        if ( p == current )
            __asm__ ( "mov %0, %%db7" : : "r" (value) );
        break;
    default:
        return -EINVAL;
    }

    /* Record the (sanitised) value for later context switches / reads. */
    p->thread.debugreg[reg] = value;
    return 0;
}
/* Hypercall wrapper: set a debug register for the calling domain. */
long do_set_debugreg(int reg, unsigned long value)
{
    return set_debugreg(current, reg, value);
}
881 unsigned long do_get_debugreg(int reg)
882 {
883 if ( (reg < 0) || (reg > 7) ) return -EINVAL;
884 return current->thread.debugreg[reg];
885 }