ia64/xen-unstable

view xen/arch/x86/x86_32/traps.c @ 3781:1d13ed9582e0

bitkeeper revision 1.1172.1.1 (420ba344y-TLJ8cFOVA_8bN7wd3dMw)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author kaf24@scramble.cl.cam.ac.uk
date Thu Feb 10 18:09:08 2005 +0000 (2005-02-10)
parents 09ef8bf5a916 f368e743fc2e
children 0cd8803a1553
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
3 #include <xen/config.h>
4 #include <xen/init.h>
5 #include <xen/sched.h>
6 #include <xen/lib.h>
7 #include <xen/console.h>
8 #include <xen/mm.h>
9 #include <xen/irq.h>
10 #include <asm/flushtlb.h>
/* All CPUs have their own IDT to allow set_fast_trap(). */
idt_entry_t *idt_tables[NR_CPUS] = { 0 };

/* Number of stack words dumped by show_stack()/show_guest_stack():
 * 20 rows of 8 words each. */
static int kstack_depth_to_print = 8*20;
17 static inline int kernel_text_address(unsigned long addr)
18 {
19 if (addr >= (unsigned long) &_stext &&
20 addr <= (unsigned long) &_etext)
21 return 1;
22 return 0;
24 }
26 void show_guest_stack(void)
27 {
28 int i;
29 execution_context_t *ec = get_execution_context();
30 unsigned long *stack = (unsigned long *)ec->esp;
31 printk("Guest EIP is %lx\n ",ec->eip);
33 for ( i = 0; i < kstack_depth_to_print; i++ )
34 {
35 if ( ((long)stack & (STACK_SIZE-1)) == 0 )
36 break;
37 if ( i && ((i % 8) == 0) )
38 printk("\n ");
39 printk("%p ", *stack++);
40 }
41 printk("\n");
43 }
45 void show_trace(unsigned long *esp)
46 {
47 unsigned long *stack, addr;
48 int i;
50 printk("Call Trace from ESP=%p:\n ", esp);
51 stack = esp;
52 i = 0;
53 while (((long) stack & (STACK_SIZE-1)) != 0) {
54 addr = *stack++;
55 if (kernel_text_address(addr)) {
56 if (i && ((i % 6) == 0))
57 printk("\n ");
58 printk("[<%p>] ", addr);
59 i++;
60 }
61 }
62 printk("\n");
63 }
65 void show_stack(unsigned long *esp)
66 {
67 unsigned long *stack;
68 int i;
70 printk("Stack trace from ESP=%p:\n ", esp);
72 stack = esp;
73 for ( i = 0; i < kstack_depth_to_print; i++ )
74 {
75 if ( ((long)stack & (STACK_SIZE-1)) == 0 )
76 break;
77 if ( i && ((i % 8) == 0) )
78 printk("\n ");
79 if ( kernel_text_address(*stack) )
80 printk("[%p] ", *stack++);
81 else
82 printk("%p ", *stack++);
83 }
84 printk("\n");
86 show_trace( esp );
87 }
/*
 * Dump a register frame to the console, then the stack beneath it.
 * For faults taken in guest context the CPU pushed SS:ESP and the
 * frame's data selectors are the guest's; for faults in hypervisor
 * context no stack switch occurred, so ESP is reconstructed from the
 * frame address and the selectors are the fixed hypervisor ones.
 */
void show_registers(struct xen_regs *regs)
{
    unsigned long esp;
    unsigned short ss, ds, es, fs, gs;

    if ( GUEST_MODE(regs) )
    {
        /* Guest frame: SS:ESP and data selectors come from the frame. */
        esp = regs->esp;
        ss = regs->ss & 0xffff;
        ds = regs->ds & 0xffff;
        es = regs->es & 0xffff;
        fs = regs->fs & 0xffff;
        gs = regs->gs & 0xffff;
    }
    else
    {
        /* Hypervisor frame: ESP at fault time is where the frame's
         * esp slot sits (no SS:ESP was pushed on a same-ring fault). */
        esp = (unsigned long)(&regs->esp);
        ss = __HYPERVISOR_DS;
        ds = __HYPERVISOR_DS;
        es = __HYPERVISOR_DS;
        fs = __HYPERVISOR_DS;
        gs = __HYPERVISOR_DS;
    }

    printk("CPU: %d\nEIP: %04lx:[<%p>] \nEFLAGS: %p\n",
           smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags);
    printk("eax: %p ebx: %p ecx: %p edx: %p\n",
           regs->eax, regs->ebx, regs->ecx, regs->edx);
    printk("esi: %p edi: %p ebp: %p esp: %p\n",
           regs->esi, regs->edi, regs->ebp, esp);
    printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
           ds, es, fs, gs, ss);
    printk("cr3: %08lx\n", read_cr3());

    show_stack((unsigned long *)&regs->esp);
}
/*
 * Walk the idle page table for hypervisor-space address @addr,
 * printing the L2 and (if present and not a superpage) L1 entries.
 * Addresses below PAGE_OFFSET are guest space and are not walked,
 * since this routine only consults idle_pg_table.
 */
void show_page_walk(unsigned long addr)
{
    unsigned long page;

    if ( addr < PAGE_OFFSET )
        return;

    printk("Pagetable walk from %p:\n", addr);

    page = l2_pgentry_val(idle_pg_table[l2_table_offset(addr)]);
    printk(" L2 = %p %s\n", page, (page & _PAGE_PSE) ? "(4MB)" : "");
    /* Stop if not present, or if a 4MB superpage (no L1 to walk). */
    if ( !(page & _PAGE_PRESENT) || (page & _PAGE_PSE) )
        return;

    page &= PAGE_MASK;
    page = ((unsigned long *) __va(page))[l1_table_offset(addr)];
    printk(" L1 = %p\n", page);
}
#define DOUBLEFAULT_STACK_SIZE 1024
/* Dedicated hardware task and private stack for the double-fault
 * handler, so diagnostics still work after the main stack is blown. */
static struct tss_struct doublefault_tss;
static unsigned char doublefault_stack[DOUBLEFAULT_STACK_SIZE];
/*
 * Double-fault handler, entered via a hardware task gate on its own
 * TSS/stack. Recovers the faulting CPU's id from the TSS back-link,
 * dumps the register state the CPU saved in that CPU's main TSS, and
 * then halts forever -- a double fault is unrecoverable here.
 */
asmlinkage void do_double_fault(void)
{
    struct tss_struct *tss = &doublefault_tss;
    /* back_link holds the previous task's TSS selector; recover the
     * CPU number from its position among the per-CPU TSS GDT slots. */
    unsigned int cpu = ((tss->back_link>>3)-__FIRST_TSS_ENTRY)>>1;

    /* Disable the NMI watchdog. It's useless now. */
    watchdog_on = 0;

    /* We may hold the console lock from the faulting context. */
    console_force_unlock();

    /* Find information saved during fault and dump it to the console. */
    tss = &init_tss[cpu];
    printk("CPU: %d\nEIP: %04x:[<%08x>] \nEFLAGS: %08x\n",
           cpu, tss->cs, tss->eip, tss->eflags);
    printk("CR3: %08x\n", tss->__cr3);
    printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n",
           tss->eax, tss->ebx, tss->ecx, tss->edx);
    printk("esi: %08x edi: %08x ebp: %08x esp: %08x\n",
           tss->esi, tss->edi, tss->ebp, tss->esp);
    printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
           tss->ds, tss->es, tss->fs, tss->gs, tss->ss);
    printk("************************************\n");
    printk("CPU%d DOUBLE FAULT -- system shutdown\n", cpu);
    printk("System needs manual reset.\n");
    printk("************************************\n");

    /* Lock up the console to prevent spurious output from other CPUs. */
    console_force_lock();

    /* Wait for manual reset. */
    for ( ; ; )
        __asm__ __volatile__ ( "hlt" );
}
/* Assembly entry stub that funnels the deferred-NMI vector here. */
BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
asmlinkage void smp_deferred_nmi(struct xen_regs regs)
{
    /* Forward declaration: do_nmi() is defined in the common trap code. */
    asmlinkage void do_nmi(struct xen_regs *, unsigned long);
    /* Acknowledge the APIC before handling, as for any IPI. */
    ack_APIC_irq();
    do_nmi(&regs, 0);
}
191 void __init percpu_traps_init(void)
192 {
193 asmlinkage int hypercall(void);
195 if ( smp_processor_id() != 0 )
196 return;
198 /* CPU0 uses the master IDT. */
199 idt_tables[0] = idt_table;
201 /* The hypercall entry vector is only accessible from ring 1. */
202 _set_gate(idt_table+HYPERCALL_VECTOR, 14, 1, &hypercall);
204 set_intr_gate(TRAP_deferred_nmi, &deferred_nmi);
206 /*
207 * Make a separate task for double faults. This will get us debug output if
208 * we blow the kernel stack.
209 */
210 struct tss_struct *tss = &doublefault_tss;
211 memset(tss, 0, sizeof(*tss));
212 tss->ds = __HYPERVISOR_DS;
213 tss->es = __HYPERVISOR_DS;
214 tss->ss = __HYPERVISOR_DS;
215 tss->esp = (unsigned long)
216 &doublefault_stack[DOUBLEFAULT_STACK_SIZE];
217 tss->__cr3 = __pa(idle_pg_table);
218 tss->cs = __HYPERVISOR_CS;
219 tss->eip = (unsigned long)do_double_fault;
220 tss->eflags = 2;
221 tss->bitmap = IOBMP_INVALID_OFFSET;
222 _set_tssldt_desc(gdt_table+__DOUBLEFAULT_TSS_ENTRY,
223 (unsigned long)tss, 235, 9);
225 set_task_gate(TRAP_double_fault, __DOUBLEFAULT_TSS_ENTRY<<3);
226 }
/*
 * Install (idx != 0) or disable (idx == 0) the "fast trap" for domain
 * @p: a guest trap vector that is reflected directly into the guest via
 * a per-domain IDT descriptor, bypassing the hypervisor. Returns 0 on
 * success, -1 if the vector is out of range or uses an interrupt gate.
 */
long set_fast_trap(struct exec_domain *p, int idx)
{
    trap_info_t *ti;

    /* Index 0 is special: it disables fast traps. */
    if ( idx == 0 )
    {
        if ( p == current )
            CLEAR_FAST_TRAP(&p->arch);
        SET_DEFAULT_FAST_TRAP(&p->arch);
        return 0;
    }

    /*
     * We only fast-trap vectors 0x20-0x2f, and vector 0x80.
     * The former range is used by Windows and MS-DOS.
     * Vector 0x80 is used by Linux and the BSD variants.
     */
    if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) )
        return -1;

    ti = p->arch.traps + idx;

    /*
     * We can't virtualise interrupt gates, as there's no way to get
     * the CPU to automatically clear the events_mask variable.
     */
    if ( TI_GET_IF(ti) )
        return -1;

    /* Remove the old descriptor from the live IDT before rewriting it. */
    if ( p == current )
        CLEAR_FAST_TRAP(&p->arch);

    p->arch.fast_trap_idx = idx;
    /* Descriptor word 0: selector in bits 31:16, handler offset 15:0. */
    p->arch.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
    /* Descriptor word 1: offset 31:16, present 32-bit trap gate (0x8f00),
     * with the guest-requested DPL in bits 14:13. */
    p->arch.fast_trap_desc.b =
        (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13;

    if ( p == current )
        SET_FAST_TRAP(&p->arch);

    return 0;
}
273 long do_set_fast_trap(int idx)
274 {
275 return set_fast_trap(current, idx);
276 }
278 long do_set_callbacks(unsigned long event_selector,
279 unsigned long event_address,
280 unsigned long failsafe_selector,
281 unsigned long failsafe_address)
282 {
283 struct exec_domain *d = current;
285 if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
286 return -EPERM;
288 d->arch.event_selector = event_selector;
289 d->arch.event_address = event_address;
290 d->arch.failsafe_selector = failsafe_selector;
291 d->arch.failsafe_address = failsafe_address;
293 return 0;
294 }