ia64/xen-unstable

view xen/arch/x86/x86_32/traps.c @ 3650:8c6281ec8860

bitkeeper revision 1.1159.212.87 (4203b25aTB_XSOA2G0yxgrj0ey-vIA)

Defined per-CPU SYSCALL entry point for hypercalls. We enter the DOM0
kernel and can receive hypercalls. Now probably need to fix the
user-access (uaccess.h) functions and macros.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Fri Feb 04 17:35:22 2005 +0000 (2005-02-04)
parents d55d523078f7
children 393483ae9f62 d93748c50893
line source
2 #include <xen/config.h>
3 #include <xen/init.h>
4 #include <xen/sched.h>
5 #include <xen/lib.h>
6 #include <xen/console.h>
7 #include <xen/mm.h>
8 #include <xen/irq.h>
/* Maximum number of stack words printed by the stack dump routines. */
static int kstack_depth_to_print = 8 * 20;
12 static inline int kernel_text_address(unsigned long addr)
13 {
14 if (addr >= (unsigned long) &_stext &&
15 addr <= (unsigned long) &_etext)
16 return 1;
17 return 0;
19 }
21 void show_guest_stack(void)
22 {
23 int i;
24 execution_context_t *ec = get_execution_context();
25 unsigned long *stack = (unsigned long *)ec->esp;
26 printk("Guest EIP is %lx\n ",ec->eip);
28 for ( i = 0; i < kstack_depth_to_print; i++ )
29 {
30 if ( ((long)stack & (STACK_SIZE-1)) == 0 )
31 break;
32 if ( i && ((i % 8) == 0) )
33 printk("\n ");
34 printk("%p ", *stack++);
35 }
36 printk("\n");
38 }
40 void show_trace(unsigned long *esp)
41 {
42 unsigned long *stack, addr;
43 int i;
45 printk("Call Trace from ESP=%p:\n ", esp);
46 stack = esp;
47 i = 0;
48 while (((long) stack & (STACK_SIZE-1)) != 0) {
49 addr = *stack++;
50 if (kernel_text_address(addr)) {
51 if (i && ((i % 6) == 0))
52 printk("\n ");
53 printk("[<%p>] ", addr);
54 i++;
55 }
56 }
57 printk("\n");
58 }
60 void show_stack(unsigned long *esp)
61 {
62 unsigned long *stack;
63 int i;
65 printk("Stack trace from ESP=%p:\n ", esp);
67 stack = esp;
68 for ( i = 0; i < kstack_depth_to_print; i++ )
69 {
70 if ( ((long)stack & (STACK_SIZE-1)) == 0 )
71 break;
72 if ( i && ((i % 8) == 0) )
73 printk("\n ");
74 if ( kernel_text_address(*stack) )
75 printk("[%p] ", *stack++);
76 else
77 printk("%p ", *stack++);
78 }
79 printk("\n");
81 show_trace( esp );
82 }
84 void show_registers(struct xen_regs *regs)
85 {
86 unsigned long esp;
87 unsigned short ss, ds, es, fs, gs;
89 if ( GUEST_FAULT(regs) )
90 {
91 esp = regs->esp;
92 ss = regs->ss & 0xffff;
93 ds = regs->ds & 0xffff;
94 es = regs->es & 0xffff;
95 fs = regs->fs & 0xffff;
96 gs = regs->gs & 0xffff;
97 }
98 else
99 {
100 esp = (unsigned long)(&regs->esp);
101 ss = __HYPERVISOR_DS;
102 ds = __HYPERVISOR_DS;
103 es = __HYPERVISOR_DS;
104 fs = __HYPERVISOR_DS;
105 gs = __HYPERVISOR_DS;
106 }
108 printk("CPU: %d\nEIP: %04lx:[<%p>] \nEFLAGS: %p\n",
109 smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags);
110 printk("eax: %p ebx: %p ecx: %p edx: %p\n",
111 regs->eax, regs->ebx, regs->ecx, regs->edx);
112 printk("esi: %p edi: %p ebp: %p esp: %p\n",
113 regs->esi, regs->edi, regs->ebp, esp);
114 printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
115 ds, es, fs, gs, ss);
117 show_stack((unsigned long *)&regs->esp);
118 }
120 void show_page_walk(unsigned long addr)
121 {
122 unsigned long page;
124 if ( addr < PAGE_OFFSET )
125 return;
127 printk("Pagetable walk from %p:\n", addr);
129 page = l2_pgentry_val(idle_pg_table[l2_table_offset(addr)]);
130 printk(" L2 = %p %s\n", page, (page & _PAGE_PSE) ? "(4MB)" : "");
131 if ( !(page & _PAGE_PRESENT) || (page & _PAGE_PSE) )
132 return;
134 page &= PAGE_MASK;
135 page = ((unsigned long *) __va(page))[l1_table_offset(addr)];
136 printk(" L1 = %p\n", page);
137 }
/* Private TSS and stack for the double-fault handler task, so it can run
 * even when the normal hypervisor stack has been blown. */
#define DOUBLEFAULT_STACK_SIZE 1024
static struct tss_struct doublefault_tss;
static unsigned char doublefault_stack[DOUBLEFAULT_STACK_SIZE];
/*
 * Double-fault handler. Dumps the faulting CPU's context (recorded in
 * that CPU's init_tss entry) to the console, then locks the console and
 * spins forever: a double fault is unrecoverable and needs a manual reset.
 */
asmlinkage void do_double_fault(void)
{
    struct tss_struct *tss = &doublefault_tss;
    /* NOTE(review): assumes back_link holds the selector of the TSS we
     * switched away from (hardware task switch); the CPU number is
     * recovered from that selector's GDT slot — confirm against the
     * __FIRST_TSS_ENTRY layout. */
    unsigned int cpu = ((tss->back_link>>3)-__FIRST_TSS_ENTRY)>>1;

    /* Disable the NMI watchdog. It's useless now. */
    watchdog_on = 0;

    /* Find information saved during fault and dump it to the console. */
    tss = &init_tss[cpu];
    printk("CPU: %d\nEIP: %04x:[<%08x>] \nEFLAGS: %08x\n",
           cpu, tss->cs, tss->eip, tss->eflags);
    printk("CR3: %08x\n", tss->__cr3);
    printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n",
           tss->eax, tss->ebx, tss->ecx, tss->edx);
    printk("esi: %08x edi: %08x ebp: %08x esp: %08x\n",
           tss->esi, tss->edi, tss->ebp, tss->esp);
    printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
           tss->ds, tss->es, tss->fs, tss->gs, tss->ss);
    printk("************************************\n");
    printk("CPU%d DOUBLE FAULT -- system shutdown\n", cpu);
    printk("System needs manual reset.\n");
    printk("************************************\n");

    /* Lock up the console to prevent spurious output from other CPUs. */
    console_force_lock();

    /* Wait for manual reset. */
    for ( ; ; )
        __asm__ __volatile__ ( "hlt" );
}
175 void __init doublefault_init(void)
176 {
177 /*
178 * Make a separate task for double faults. This will get us debug output if
179 * we blow the kernel stack.
180 */
181 struct tss_struct *tss = &doublefault_tss;
182 memset(tss, 0, sizeof(*tss));
183 tss->ds = __HYPERVISOR_DS;
184 tss->es = __HYPERVISOR_DS;
185 tss->ss = __HYPERVISOR_DS;
186 tss->esp = (unsigned long)
187 &doublefault_stack[DOUBLEFAULT_STACK_SIZE];
188 tss->__cr3 = __pa(idle_pg_table);
189 tss->cs = __HYPERVISOR_CS;
190 tss->eip = (unsigned long)do_double_fault;
191 tss->eflags = 2;
192 tss->bitmap = IOBMP_INVALID_OFFSET;
193 _set_tssldt_desc(gdt_table+__DOUBLEFAULT_TSS_ENTRY,
194 (unsigned long)tss, 235, 9);
196 set_task_gate(TRAP_double_fault, __DOUBLEFAULT_TSS_ENTRY<<3);
197 }
199 void __init percpu_traps_init(void)
200 {
201 }
/*
 * Install (or, for idx == 0, disable) a "fast trap" for domain @p: a
 * per-domain trap descriptor built from the guest's registered handler
 * for vector @idx, loaded immediately if @p is the running domain.
 * Returns 0 on success, -1 on a disallowed vector or an interrupt gate.
 */
long set_fast_trap(struct exec_domain *p, int idx)
{
    trap_info_t *ti;

    /* Index 0 is special: it disables fast traps. */
    if ( idx == 0 )
    {
        if ( p == current )
            CLEAR_FAST_TRAP(&p->thread);
        SET_DEFAULT_FAST_TRAP(&p->thread);
        return 0;
    }

    /*
     * We only fast-trap vectors 0x20-0x2f, and vector 0x80.
     * The former range is used by Windows and MS-DOS.
     * Vector 0x80 is used by Linux and the BSD variants.
     */
    if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) )
        return -1;

    ti = p->thread.traps + idx;

    /*
     * We can't virtualise interrupt gates, as there's no way to get
     * the CPU to automatically clear the events_mask variable.
     */
    if ( TI_GET_IF(ti) )
        return -1;

    /* Drop the old descriptor before rewriting it if it is live. */
    if ( p == current )
        CLEAR_FAST_TRAP(&p->thread);

    /* Build the descriptor: handler address split across the two words,
     * 0x8f00 = present 32-bit trap gate, DPL taken from the guest's
     * registered trap info. */
    p->thread.fast_trap_idx = idx;
    p->thread.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
    p->thread.fast_trap_desc.b =
        (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13;

    if ( p == current )
        SET_FAST_TRAP(&p->thread);

    return 0;
}
248 long do_set_fast_trap(int idx)
249 {
250 return set_fast_trap(current, idx);
251 }