ia64/xen-unstable

view xen-2.4.16/arch/i386/traps.c @ 174:e98e082295e9

bitkeeper revision 1.40 (3e4d3d1cT1UtXot3hq1e1w8H9jqLFw)

traps.c:
Fix embarrassing do_set_trap_table() behaviour.
author kaf24@labyrinth.cl.cam.ac.uk
date Fri Feb 14 19:01:48 2003 +0000 (2003-02-14)
parents 9d397996348f
children fa8759fd4b27
line source
1 /*
2 * linux/arch/i386/traps.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * Pentium III FXSR, SSE support
7 * Gareth Hughes <gareth@valinux.com>, May 2000
8 */
10 #include <xeno/config.h>
11 #include <xeno/init.h>
12 #include <xeno/interrupt.h>
13 #include <xeno/sched.h>
14 #include <xeno/lib.h>
15 #include <xeno/errno.h>
16 #include <asm/ptrace.h>
17 #include <xeno/delay.h>
18 #include <xeno/spinlock.h>
19 #include <xeno/irq.h>
21 #include <asm/system.h>
22 #include <asm/io.h>
23 #include <asm/atomic.h>
24 #include <asm/desc.h>
25 #include <asm/debugreg.h>
26 #include <asm/smp.h>
27 #include <asm/pgalloc.h>
28 #include <asm/uaccess.h>
29 #include <asm/i387.h>
/* Flags for guest_trap_bounce.flags, consumed by the assembly exit path. */
#define GTBF_TRAP        1  /* bounce trap to guest, with error code        */
#define GTBF_TRAP_NOCODE 2  /* bounce trap to guest, no error code          */
#define GTBF_TRAP_CR2    4  /* bounce page fault: %cr2 value is also passed */
/*
 * Per-CPU record of a pending trap to be reflected into the currently
 * running guest OS. NB. the numeric comments are byte offsets that are
 * presumably relied upon by assembly code -- do not reorder fields
 * without checking the asm (TODO confirm against entry code).
 */
struct guest_trap_bounce {
    unsigned long  error_code; /*  0 */
    unsigned long  cr2;        /*  4 */
    unsigned short flags;      /*  8 */
    unsigned short cs;         /* 10 */
    unsigned long  eip;        /* 12 */
} guest_trap_bounce[NR_CPUS] = { { 0 } };
/* Entry points implemented in assembly (asmlinkage: C-visible asm stubs). */
asmlinkage int hypervisor_call(void);
asmlinkage void lcall7(void);
asmlinkage void lcall27(void);

/* Master table, and the one used by CPU0. */
struct desc_struct idt_table[256] = { {0, 0}, };
/* All other CPUs have their own copy. */
struct desc_struct *idt_tables[NR_CPUS] = { 0 };

/* Exception entry stubs, also defined in assembly; wired up in trap_init(). */
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

/* Maximum number of stack words dumped by show_stack(). */
int kstack_depth_to_print = 24;
/*
 * Stub predicate: treat every address as hypervisor text.
 * Consequence: show_trace() prints every word found on the stack as a
 * potential return address, with no filtering.
 */
static inline int kernel_text_address(unsigned long addr)
{
    return 1;
}
79 void show_trace(unsigned long * stack)
80 {
81 int i;
82 unsigned long addr;
84 if (!stack)
85 stack = (unsigned long*)&stack;
87 printk("Call Trace: ");
88 i = 1;
89 while (((long) stack & (THREAD_SIZE-1)) != 0) {
90 addr = *stack++;
91 if (kernel_text_address(addr)) {
92 if (i && ((i % 6) == 0))
93 printk("\n ");
94 printk("[<%08lx>] ", addr);
95 i++;
96 }
97 }
98 printk("\n");
99 }
101 void show_trace_task(struct task_struct *tsk)
102 {
103 unsigned long esp = tsk->thread.esp;
105 /* User space on another CPU? */
106 if ((esp ^ (unsigned long)tsk) & (PAGE_MASK<<1))
107 return;
108 show_trace((unsigned long *)esp);
109 }
111 void show_stack(unsigned long * esp)
112 {
113 unsigned long *stack;
114 int i;
116 // debugging aid: "show_stack(NULL);" prints the
117 // back trace for this cpu.
119 if(esp==NULL)
120 esp=(unsigned long*)&esp;
122 stack = esp;
123 for(i=0; i < kstack_depth_to_print; i++) {
124 if (((long) stack & (THREAD_SIZE-1)) == 0)
125 break;
126 if (i && ((i % 8) == 0))
127 printk("\n ");
128 printk("%08lx ", *stack++);
129 }
130 printk("\n");
131 show_trace(esp);
132 }
134 void show_registers(struct pt_regs *regs)
135 {
136 unsigned long esp;
137 unsigned short ss;
139 esp = (unsigned long) (&regs->esp);
140 ss = __HYPERVISOR_DS;
141 if ( regs->xcs & 3 )
142 {
143 esp = regs->esp;
144 ss = regs->xss & 0xffff;
145 }
147 printk("CPU: %d\nEIP: %04x:[<%08lx>] \nEFLAGS: %08lx\n",
148 smp_processor_id(), 0xffff & regs->xcs, regs->eip, regs->eflags);
149 printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
150 regs->eax, regs->ebx, regs->ecx, regs->edx);
151 printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
152 regs->esi, regs->edi, regs->ebp, esp);
153 printk("ds: %04x es: %04x ss: %04x\n",
154 regs->xds & 0xffff, regs->xes & 0xffff, ss);
155 }
158 spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
160 void die(const char * str, struct pt_regs * regs, long err)
161 {
162 spin_lock_irq(&die_lock);
163 printk("%s: %04lx,%04lx\n", str, err >> 16, err & 0xffff);
164 show_registers(regs);
165 spin_unlock_irq(&die_lock);
166 panic("HYPERVISOR DEATH!!\n");
167 }
169 static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
170 {
171 if (!(3 & regs->xcs)) die(str, regs, err);
172 }
/*
 * Common exception handler.
 *
 * Guest-context faults (CS RPL != 0) are "bounced" to the guest OS: the
 * per-CPU guest_trap_bounce record is filled in with the guest handler's
 * CS:EIP (from current->thread.traps[trapnr]) and, depending on the vector,
 * an error code and/or %cr2, for delivery on the way back out to the guest.
 *
 * Hypervisor-context faults first consult the exception-fixup table (for
 * faults in uaccess-style copy routines); anything else is fatal.
 */
static void inline do_trap(int trapnr, char *str,
                           struct pt_regs * regs,
                           long error_code, int use_error_code)
{
    struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
    trap_info_t *ti;
    unsigned long addr, fixup;

    /* CS RPL == 0 => the fault is our own. */
    if (!(regs->xcs & 3))
        goto fault_in_hypervisor;

    ti = current->thread.traps + trapnr;
    if ( trapnr == 14 )
    {
        /* page fault pushes %cr2 */
        gtb->flags = GTBF_TRAP_CR2;
        __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (gtb->cr2) : );
    }
    else
    {
        gtb->flags = use_error_code ? GTBF_TRAP : GTBF_TRAP_NOCODE;
    }
    gtb->error_code = error_code;
    gtb->cs         = ti->cs;
    gtb->eip        = ti->address;
    return;

 fault_in_hypervisor:

    /* Fault in a whitelisted (uaccess) region: resume at its fixup point. */
    if ( (fixup = search_exception_table(regs->eip)) != 0 )
    {
        regs->eip = fixup;
        return;
    }

    /* NB. %cr2 is read unconditionally; it is only meaningful for trap 14,
     * hence "might be" in the panic message below. */
    __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );

    /* For page faults on hypervisor addresses, dump the PDE/PTE involved. */
    if ( (trapnr == 14) && (addr >= PAGE_OFFSET) )
    {
        unsigned long page;
        unsigned long *pde;
        pde = (unsigned long *)idle_pg_table[smp_processor_id()];
        page = pde[addr >> L2_PAGETABLE_SHIFT];
        printk("*pde = %08lx\n", page);
        if ( page & _PAGE_PRESENT )
        {
            page &= PAGE_MASK;
            page = ((unsigned long *) __va(page))[(addr&0x3ff000)>>PAGE_SHIFT];
            printk(" *pte = %08lx\n", page);
        }
    }

    show_registers(regs);
    panic("CPU%d FATAL TRAP: vector = %d (%s)\n"
          "[error_code=%08x]\n"
          "Faulting linear address might be %08lx\n",
          smp_processor_id(), trapnr, str,
          error_code, addr);
}
/*
 * Generate a trivial C entry point, do_<name>(), that funnels exception
 * <trapnr> into do_trap(). The _NOCODE variant is for vectors where the
 * CPU does not push an error code (last do_trap() argument == 0).
 */
#define DO_ERROR_NOCODE(trapnr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
    do_trap(trapnr, str, regs, error_code, 0); \
}

#define DO_ERROR(trapnr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
    do_trap(trapnr, str, regs, error_code, 1); \
}

DO_ERROR_NOCODE( 0, "divide error", divide_error)
DO_ERROR_NOCODE( 3, "int3", int3)
DO_ERROR_NOCODE( 4, "overflow", overflow)
DO_ERROR_NOCODE( 5, "bounds", bounds)
DO_ERROR_NOCODE( 6, "invalid operand", invalid_op)
DO_ERROR_NOCODE( 7, "device not available", device_not_available)
DO_ERROR( 8, "double fault", double_fault)
DO_ERROR_NOCODE( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, "invalid TSS", invalid_TSS)
DO_ERROR(11, "segment not present", segment_not_present)
DO_ERROR(12, "stack segment", stack_segment)
DO_ERROR(14, "page fault", page_fault)
/* Vector 15 reserved by Intel */
DO_ERROR_NOCODE(16, "fpu error", coprocessor_error)
DO_ERROR(17, "alignment check", alignment_check)
DO_ERROR_NOCODE(18, "machine check", machine_check)
DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
/*
 * #GP handler (vector 13).
 *
 * Guest-context GPFs with error-code bits [1:0] == 2 are software INT <n>
 * instructions that hit a DPL-0 IDT entry; if the guest registered that
 * vector with sufficient DPL we emulate the trap (see long comment below).
 * Other guest-context GPFs are bounced to the guest's vector-13 handler.
 * Hypervisor-context GPFs try the exception-fixup table, then die().
 */
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
    struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
    trap_info_t *ti;
    unsigned long fixup;

    /* Bad shit if error in ring 0, or result of an interrupt. */
    if (!(regs->xcs & 3) || (error_code & 1))
        goto gp_in_kernel;

    /*
     * Cunning trick to allow arbitrary "INT n" handling.
     *
     * We set DPL == 0 on all vectors in the IDT. This prevents any INT <n>
     * instruction from trapping to the appropriate vector, when that might not
     * be expected by Xen or the guest OS. For example, that entry might be for
     * a fault handler (unlike traps, faults don't increment EIP), or might
     * expect an error code on the stack (which a software trap never
     * provides), or might be a hardware interrupt handler that doesn't like
     * being called spuriously.
     *
     * Instead, a GPF occurs with the faulting IDT vector in the error code.
     * Bit 1 is set to indicate that an IDT entry caused the fault. Bit 0 is
     * clear to indicate that it's a software fault, not hardware.
     *
     * NOTE: Vectors 3 and 4 are dealt with from their own handler. This is
     * okay because they can only be triggered by an explicit DPL-checked
     * instruction. The DPL specified by the guest OS for these vectors is NOT
     * CHECKED!!
     */
    if ( (error_code & 3) == 2 )
    {
        /* This fault must be due to <INT n> instruction. */
        /* error_code >> 3 extracts the faulting IDT vector number. */
        ti = current->thread.traps + (error_code>>3);
        if ( ti->dpl >= (regs->xcs & 3) )
        {
            /* XXX Kill next conditional soon :-) XXX */
            if ( (error_code>>3)==0x80 )
            {
                printk("DIDN'T USE FAST-TRAP HANDLER FOR 0x80!!! :-(\n");
                BUG();
            }
            /* Bounce the virtual trap; skip the 2-byte "INT imm8" insn. */
            gtb->flags = GTBF_TRAP_NOCODE;
            gtb->cs = ti->cs;
            gtb->eip = ti->address;
            regs->eip += 2;
            return;
        }
    }

    /* Pass on GPF as is. */
    ti = current->thread.traps + 13;
    gtb->flags = GTBF_TRAP;
    gtb->error_code = error_code;
    gtb->cs = ti->cs;
    gtb->eip = ti->address;
    return;

 gp_in_kernel:

    /* Fault in a whitelisted (uaccess) region: resume at its fixup point. */
    if ( (fixup = search_exception_table(regs->eip)) != 0 )
    {
        regs->eip = fixup;
        return;
    }

    die("general protection fault", regs, error_code);
}
/* NMI cause: memory parity error (bit 7 of port 0x61). */
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
    printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
    printk("You probably have a hardware problem with your RAM chips\n");

    /* Clear and disable the memory parity error line. */
    outb((reason & 0xf) | 4, 0x61);
}
/* NMI cause: I/O channel check (bit 6 of port 0x61). */
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
    unsigned long countdown;

    printk("NMI: IOCK error (debug interrupt?)\n");
    show_registers(regs);

    /* Re-enable the IOCK line, wait for a few seconds */
    reason = (reason & 0xf) | 8;
    outb(reason, 0x61);

    for ( countdown = 2000; --countdown != 0; )
        udelay(1000);

    outb(reason & ~8, 0x61);
}
/* NMI with neither parity nor IOCK bits set: log and carry on. */
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
    printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
    printk("Dazed and confused, but trying to continue\n");
    printk("Do you have a strange power saving mode enabled?\n");
}
365 asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
366 {
367 unsigned char reason = inb(0x61);
369 if (!(reason & 0xc0)) {
370 unknown_nmi_error(reason, regs);
371 return;
372 }
373 if (reason & 0x80)
374 mem_parity_error(reason, regs);
375 if (reason & 0x40)
376 io_check_error(reason, regs);
377 /*
378 * Reassert NMI in case it became active meanwhile
379 * as it's edge-triggered.
380 */
381 outb(0x8f, 0x70);
382 inb(0x71); /* dummy */
383 outb(0x0f, 0x70);
384 inb(0x71); /* dummy */
385 }
/*
 * Device-not-available handler (lazy FPU switching).
 * Clears TS, then restores (or first-time initialises) the FPU state for
 * the current task. If the guest itself had requested STTS (see
 * do_fpu_taskswitch()), the trap is also bounced to the guest's vector-7
 * handler so it can do its own lazy FPU bookkeeping.
 */
asmlinkage void math_state_restore(struct pt_regs *regs, long error_code)
{
    /* Prevent recursion. */
    clts();

    if ( !(current->flags & PF_USEDFPU) )
    {
        if ( current->flags & PF_DONEFPUINIT )
            restore_fpu(current);
        else
            init_fpu();
        current->flags |= PF_USEDFPU;   /* So we fnsave on switch_to() */
    }

    if ( current->flags & PF_GUEST_STTS )
    {
        /* Guest asked for TS: reflect trap 7 into the guest as well. */
        struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
        gtb->flags = GTBF_TRAP_NOCODE;
        gtb->cs    = current->thread.traps[7].cs;
        gtb->eip   = current->thread.traps[7].address;
        current->flags &= ~PF_GUEST_STTS;
    }
}
/*
 * Debug exception handler (vector 1).
 * Hypervisor-context hits are silently ignored (they can occur when we
 * touch a guest buffer that has a guest watchpoint on it); guest-context
 * hits are bounced to the guest's vector-1 handler, with the DR6 status
 * saved where the guest can read it back via do_get_debugreg().
 */
asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
    unsigned int condition;
    struct task_struct *tsk = current;
    struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();

    /* Read the debug status register (DR6). */
    __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));

    /* Mask out spurious debug traps due to lazy DR7 setting */
    if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
         (tsk->thread.debugreg[7] == 0) )
    {
        /* This task has debugging disabled: kill the stale hardware DR7. */
        __asm__("movl %0,%%db7" : : "r" (0));
        return;
    }

    if ( (regs->xcs & 3) == 0 )
    {
        /* Clear TF just for absolute sanity. */
        regs->eflags &= ~EF_TF;
        /*
         * Basically, we ignore watchpoints when they trigger in
         * the hypervisor. This may happen when a buffer is passed
         * to us which previously had a watchpoint set on it.
         * No need to bump EIP; the only faulting trap is an
         * instruction breakpoint, which can't happen to us.
         */
        return;
    }

    /* Save debug status register where guest OS can peek at it */
    tsk->thread.debugreg[6] = condition;

    /* Bounce the debug trap to the guest's registered vector-1 handler. */
    gtb->flags = GTBF_TRAP_NOCODE;
    gtb->cs    = tsk->thread.traps[1].cs;
    gtb->eip   = tsk->thread.traps[1].address;
}
451 asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
452 long error_code)
453 { /* nothing */ }
/*
 * Build an IDT gate descriptor in place at 'gate_addr':
 *   low  dword = __HYPERVISOR_CS selector (31..16) | handler offset 15..0
 *   high dword = handler offset 31..16 | 0x8000 (P=1) | dpl<<13 | type<<8
 * The asm starts with %edx = addr and %eax = selector<<16, swaps the
 * halves around, and writes both dwords of the descriptor.
 */
#define _set_gate(gate_addr,type,dpl,addr) \
do { \
  int __d0, __d1; \
  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
    "movw %4,%%dx\n\t" \
    "movl %%eax,%0\n\t" \
    "movl %%edx,%1" \
    :"=m" (*((long *) (gate_addr))), \
     "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
    :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
     "3" ((char *) (addr)),"2" (__HYPERVISOR_CS << 16)); \
} while (0)
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
/* Interrupt gate (type 14): interrupts disabled on entry; DPL 0. */
void set_intr_gate(unsigned int n, void *addr)
{
    _set_gate(idt_table+n,14,0,addr);
}
/* Trap gate (type 15): interrupts stay enabled on entry; DPL 0. */
static void __init set_trap_gate(unsigned int n, void *addr)
{
    _set_gate(idt_table+n,15,0,addr);
}
/* Trap gate with DPL 3: usable via INT from any privilege level. */
static void __init set_system_gate(unsigned int n, void *addr)
{
    _set_gate(idt_table+n,15,3,addr);
}
/* Call gate (type 12), DPL 3, at descriptor address 'a'. */
static void __init set_call_gate(void *a, void *addr)
{
    _set_gate(a,12,3,addr);
}
/*
 * Build a segment descriptor in place: high dword packs base 31..24,
 * base 23..16, limit 19..16, DPL, P/S bits (0x00408000 => present,
 * 32-bit) and the type; low dword packs base 15..0 and limit 15..0.
 * NOTE(review): appears unused in this file -- kept for other callers.
 */
#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
    *((gate_addr)+1) = ((base) & 0xff000000) | \
        (((base) & 0x00ff0000)>>16) | \
        ((limit) & 0xf0000) | \
        ((dpl)<<13) | \
        (0x00408000) | \
        ((type)<<8); \
    *(gate_addr) = (((base) & 0x0000ffff)<<16) | \
        ((limit) & 0x0ffff); }
/*
 * Write a TSS/LDT system descriptor at GDT slot 'n': bytes are filled as
 * limit 15..0, base 15..0, base 23..16, type/access byte, 0 (limit 19..16
 * and flags), base 31..24. %eax holds the base and is rotated to reach
 * its high half, then rotated back.
 */
#define _set_tssldt_desc(n,addr,limit,type) \
__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
    "movw %%ax,2(%2)\n\t" \
    "rorl $16,%%eax\n\t" \
    "movb %%al,4(%2)\n\t" \
    "movb %4,5(%2)\n\t" \
    "movb $0,6(%2)\n\t" \
    "movb %%ah,7(%2)\n\t" \
    "rorl $16,%%eax" \
    : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
/*
 * Install CPU n's TSS descriptor in the GDT.
 * Type 0x89 = present, DPL 0, available 32-bit TSS; limit 235 is
 * presumably sizeof the TSS minus one -- TODO confirm against the
 * tss_struct definition.
 */
void set_tss_desc(unsigned int n, void *addr)
{
    _set_tssldt_desc(gdt_table+__TSS(n), (int)addr, 235, 0x89);
}
/*
 * Populate the master IDT with all exception handlers, install the
 * hypercall gate, and initialise this (boot) CPU's state.
 * Note all vectors get DPL 0 except 3/4 (see do_general_protection()'s
 * INT-n emulation comment) and the hypercall vector (DPL 1).
 */
void __init trap_init(void)
{
    set_trap_gate(0,&divide_error);
    set_trap_gate(1,&debug);
    set_intr_gate(2,&nmi);
    set_system_gate(3,&int3); /* usable from all privilege levels */
    set_system_gate(4,&overflow); /* usable from all privilege levels */
    set_trap_gate(5,&bounds);
    set_trap_gate(6,&invalid_op);
    set_trap_gate(7,&device_not_available);
    set_trap_gate(8,&double_fault);
    set_trap_gate(9,&coprocessor_segment_overrun);
    set_trap_gate(10,&invalid_TSS);
    set_trap_gate(11,&segment_not_present);
    set_trap_gate(12,&stack_segment);
    set_trap_gate(13,&general_protection);
    set_intr_gate(14,&page_fault);
    set_trap_gate(15,&spurious_interrupt_bug);
    set_trap_gate(16,&coprocessor_error);
    set_trap_gate(17,&alignment_check);
    set_trap_gate(18,&machine_check);
    set_trap_gate(19,&simd_coprocessor_error);

    /* Only ring 1 can access monitor services. */
    _set_gate(idt_table+HYPERVISOR_CALL_VECTOR,15,1,&hypervisor_call);

    /* CPU0 uses the master IDT. */
    idt_tables[0] = idt_table;

    /*
     * Should be a barrier for any external CPU state.
     */
    {
        extern void cpu_init(void);
        cpu_init();
    }
}
561 long do_set_trap_table(trap_info_t *traps)
562 {
563 trap_info_t cur;
564 trap_info_t *dst = current->thread.traps;
566 /*
567 * I'm removing the next line, since it seems more intuitive to use this
568 * as an interface to incrementally update a domain's trap table. Clearing
569 * out old entries automatically is rather antisocial!
570 */
571 /*memset(dst, 0, sizeof(*dst) * 256);*/
573 for ( ; ; )
574 {
575 if ( copy_from_user(&cur, traps, sizeof(cur)) ) return -EFAULT;
576 if ( (cur.cs & 3) == 0 ) return -EPERM;
577 if ( cur.address == 0 ) break;
578 memcpy(dst+cur.vector, &cur, sizeof(cur));
579 traps++;
580 }
582 return(0);
583 }
/*
 * Hypercall: install a "fast trap" -- a direct IDT trap gate for one
 * software-interrupt vector, so the guest's handler runs without first
 * bouncing through Xen. idx == 0 disables the fast trap and restores
 * the default. Returns 0 on success, -1 for a disallowed vector.
 */
long do_set_fast_trap(int idx)
{
    trap_info_t *ti;

    /* Index 0 is special: it disables fast traps. */
    if ( idx == 0 )
    {
        CLEAR_FAST_TRAP(&current->thread);
        SET_DEFAULT_FAST_TRAP(&current->thread);
        return 0;
    }

    /*
     * We only fast-trap vectors 0x20-0x2f, and vector 0x80.
     * The former range is used by Windows and MS-DOS.
     * Vector 0x80 is used by Linux and the BSD variants.
     */
    if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) return -1;

    ti = current->thread.traps + idx;

    /* Remove the old fast-trap descriptor before building a new one. */
    CLEAR_FAST_TRAP(&current->thread);

    /*
     * Build the descriptor by hand: .a = guest CS | handler offset 15..0,
     * .b = offset 31..16 | 0x8f00 (present, 32-bit trap gate) | guest DPL.
     */
    current->thread.fast_trap_idx    = idx;
    current->thread.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
    current->thread.fast_trap_desc.b =
        (ti->address & 0xffff0000) | 0x8f00 | (ti->dpl&3)<<13;

    SET_FAST_TRAP(&current->thread);

    return 0;
}
/*
 * Hypercall: the guest wants TS set (virtual 'stts'). Set the real TS bit
 * and remember the request so math_state_restore() also bounces the next
 * device-not-available trap back to the guest.
 */
long do_fpu_taskswitch(void)
{
    current->flags |= PF_GUEST_STTS;
    stts();
    return 0;
}
/*
 * Hypercall: set a guest debug register.
 * Breakpoint address registers (DR0-DR3) must point below the hypervisor
 * area; DR6/DR7 have their reserved bits forced to architectural values,
 * and DR7 is checked for privileged settings. The (possibly masked) value
 * is mirrored into thread.debugreg[] for do_get_debugreg().
 * Returns 0, -EPERM for a privileged value, -EINVAL for a bad register.
 */
long do_set_debugreg(int reg, unsigned long value)
{
    int i;

    switch ( reg )
    {
    case 0:
        /* Breakpoint must not lie in the hypervisor address range. */
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        __asm__ ( "movl %0, %%db0" : : "r" (value) );
        break;
    case 1:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        __asm__ ( "movl %0, %%db1" : : "r" (value) );
        break;
    case 2:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        __asm__ ( "movl %0, %%db2" : : "r" (value) );
        break;
    case 3:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        __asm__ ( "movl %0, %%db3" : : "r" (value) );
        break;
    case 6:
        /*
         * DR6: Bits 4-11,16-31 reserved (set to 1).
         *      Bit 12 reserved (set to 0).
         */
        value &= 0xffffefff; /* reserved bits => 0 */
        value |= 0xffff0ff0; /* reserved bits => 1 */
        __asm__ ( "movl %0, %%db6" : : "r" (value) );
        break;
    case 7:
        /*
         * DR7: Bit 10 reserved (set to 1).
         *      Bits 11-12,14-15 reserved (set to 0).
         * Privileged bits:
         *      GD (bit 13): must be 0.
         *      R/Wn (bits 16-17,20-21,24-25,28-29): mustn't be 10.
         *      LENn (bits 18-19,22-23,26-27,30-31): mustn't be 10.
         */
        /* DR7 == 0 => debugging disabled for this domain. */
        if ( value != 0 )
        {
            value &= 0xffff27ff; /* reserved bits => 0 */
            value |= 0x00000400; /* reserved bits => 1 */
            if ( (value & (1<<13)) != 0 ) return -EPERM;
            /* Scan each 2-bit R/W and LEN field for the forbidden '10'. */
            for ( i = 0; i < 16; i += 2 )
                if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
        }
        __asm__ ( "movl %0, %%db7" : : "r" (value) );
        break;
    default:
        return -EINVAL;
    }

    /* Record the value (after masking) for later do_get_debugreg() reads. */
    current->thread.debugreg[reg] = value;
    return 0;
}
687 unsigned long do_get_debugreg(int reg)
688 {
689 if ( (reg < 0) || (reg > 7) ) return -EINVAL;
690 return current->thread.debugreg[reg];
691 }