direct-io.hg

view extras/mini-os/traps.c @ 10734:9b7e1ea4c4d2

[HVM] Sync p2m table across all vcpus on x86_32p xen.
We found that VGA acceleration cannot work on SMP VMX guests on x86_32p
xen. This is caused by the way we construct the p2m table today: only the 1st
l2 page table slot that maps p2m table pages is copied to the non-vcpu0 vcpu
monitor page tables when the VMX guest is created. But VGA acceleration will
create some p2m table entries beyond the 1st l2 page table slot after the HVM
guest is created, so only vcpu0 can see these p2m entries, and the other
vcpus cannot do VGA acceleration.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kfraser@localhost.localdomain
date Wed Jul 26 11:34:12 2006 +0100 (2006-07-26)
parents 852a1fd80f42
children
line source
2 #include <os.h>
3 #include <traps.h>
4 #include <hypervisor.h>
5 #include <mm.h>
6 #include <lib.h>
7 #include <sched.h>
9 /*
10 * These are assembler stubs in entry.S.
11 * They are the actual entry points for virtual exceptions.
12 */
13 void divide_error(void);
14 void debug(void);
15 void int3(void);
16 void overflow(void);
17 void bounds(void);
18 void invalid_op(void);
19 void device_not_available(void);
20 void coprocessor_segment_overrun(void);
21 void invalid_TSS(void);
22 void segment_not_present(void);
23 void stack_segment(void);
24 void general_protection(void);
25 void page_fault(void);
26 void coprocessor_error(void);
27 void simd_coprocessor_error(void);
28 void alignment_check(void);
29 void spurious_interrupt_bug(void);
30 void machine_check(void);
33 void dump_regs(struct pt_regs *regs)
34 {
35 printk("Thread: %s\n", current->name);
36 #ifdef __i386__
37 printk("EIP: %x, EFLAGS %x.\n", regs->eip, regs->eflags);
38 printk("EBX: %08x ECX: %08x EDX: %08x\n",
39 regs->ebx, regs->ecx, regs->edx);
40 printk("ESI: %08x EDI: %08x EBP: %08x EAX: %08x\n",
41 regs->esi, regs->edi, regs->ebp, regs->eax);
42 printk("DS: %04x ES: %04x orig_eax: %08x, eip: %08x\n",
43 regs->xds, regs->xes, regs->orig_eax, regs->eip);
44 printk("CS: %04x EFLAGS: %08x esp: %08x ss: %04x\n",
45 regs->xcs, regs->eflags, regs->esp, regs->xss);
46 #else
47 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
48 printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n",
49 regs->ss, regs->rsp, regs->eflags);
50 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
51 regs->rax, regs->rbx, regs->rcx);
52 printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
53 regs->rdx, regs->rsi, regs->rdi);
54 printk("RBP: %016lx R08: %016lx R09: %016lx\n",
55 regs->rbp, regs->r8, regs->r9);
56 printk("R10: %016lx R11: %016lx R12: %016lx\n",
57 regs->r10, regs->r11, regs->r12);
58 printk("R13: %016lx R14: %016lx R15: %016lx\n",
59 regs->r13, regs->r14, regs->r15);
60 #endif
61 }
63 static void do_trap(int trapnr, char *str, struct pt_regs * regs, unsigned long error_code)
64 {
65 printk("FATAL: Unhandled Trap %d (%s), error code=0x%lx\n", trapnr, str, error_code);
66 printk("Regs address %p\n", regs);
67 dump_regs(regs);
68 do_exit();
69 }
71 #define DO_ERROR(trapnr, str, name) \
72 void do_##name(struct pt_regs * regs, unsigned long error_code) \
73 { \
74 do_trap(trapnr, str, regs, error_code); \
75 }
77 #define DO_ERROR_INFO(trapnr, str, name, sicode, siaddr) \
78 void do_##name(struct pt_regs * regs, unsigned long error_code) \
79 { \
80 do_trap(trapnr, str, regs, error_code); \
81 }
83 DO_ERROR_INFO( 0, "divide error", divide_error, FPE_INTDIV, regs->eip)
84 DO_ERROR( 3, "int3", int3)
85 DO_ERROR( 4, "overflow", overflow)
86 DO_ERROR( 5, "bounds", bounds)
87 DO_ERROR_INFO( 6, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
88 DO_ERROR( 7, "device not available", device_not_available)
89 DO_ERROR( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
90 DO_ERROR(10, "invalid TSS", invalid_TSS)
91 DO_ERROR(11, "segment not present", segment_not_present)
92 DO_ERROR(12, "stack segment", stack_segment)
93 DO_ERROR_INFO(17, "alignment check", alignment_check, BUS_ADRALN, 0)
94 DO_ERROR(18, "machine check", machine_check)
96 void page_walk(unsigned long virt_address)
97 {
98 pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
99 unsigned long addr = virt_address;
100 printk("Pagetable walk from virt %lx, base %lx:\n", virt_address, start_info.pt_base);
102 #if defined(__x86_64__)
103 page = tab[l4_table_offset(addr)];
104 tab = pte_to_virt(page);
105 printk(" L4 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l4_table_offset(addr));
106 #endif
107 #if defined(__x86_64__) || defined(CONFIG_X86_PAE)
108 page = tab[l3_table_offset(addr)];
109 tab = pte_to_virt(page);
110 printk(" L3 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l3_table_offset(addr));
111 #endif
112 page = tab[l2_table_offset(addr)];
113 tab = pte_to_virt(page);
114 printk(" L2 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l2_table_offset(addr));
116 page = tab[l1_table_offset(addr)];
117 printk(" L1 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l1_table_offset(addr));
119 }
121 #define read_cr2() \
122 (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
124 static int handling_pg_fault = 0;
126 void do_page_fault(struct pt_regs *regs, unsigned long error_code)
127 {
128 unsigned long addr = read_cr2();
129 /* If we are already handling a page fault, and got another one
130 that means we faulted in pagetable walk. Continuing here would cause
131 a recursive fault */
132 if(handling_pg_fault)
133 {
134 printk("Page fault in pagetable walk (access to invalid memory?).\n");
135 do_exit();
136 }
137 handling_pg_fault = 1;
139 #if defined(__x86_64__)
140 printk("Page fault at linear address %p, rip %p, code %lx\n",
141 addr, regs->rip, error_code);
142 #else
143 printk("Page fault at linear address %p, eip %p, code %lx\n",
144 addr, regs->eip, error_code);
145 #endif
147 dump_regs(regs);
148 page_walk(addr);
149 do_exit();
150 /* We should never get here ... but still */
151 handling_pg_fault = 0;
152 }
154 void do_general_protection(struct pt_regs *regs, long error_code)
155 {
156 #ifdef __i386__
157 printk("GPF eip: %p, error_code=%lx\n", regs->eip, error_code);
158 #else
159 printk("GPF rip: %p, error_code=%lx\n", regs->rip, error_code);
160 #endif
161 dump_regs(regs);
162 do_exit();
163 }
166 void do_debug(struct pt_regs * regs)
167 {
168 printk("Debug exception\n");
169 #define TF_MASK 0x100
170 regs->eflags &= ~TF_MASK;
171 dump_regs(regs);
172 do_exit();
173 }
175 void do_coprocessor_error(struct pt_regs * regs)
176 {
177 printk("Copro error\n");
178 dump_regs(regs);
179 do_exit();
180 }
182 void simd_math_error(void *eip)
183 {
184 printk("SIMD error\n");
185 }
187 void do_simd_coprocessor_error(struct pt_regs * regs)
188 {
189 printk("SIMD copro error\n");
190 }
192 void do_spurious_interrupt_bug(struct pt_regs * regs)
193 {
194 }
196 /*
197 * Submit a virtual IDT to teh hypervisor. This consists of tuples
198 * (interrupt vector, privilege ring, CS:EIP of handler).
199 * The 'privilege ring' field specifies the least-privileged ring that
200 * can trap to that vector using a software-interrupt instruction (INT).
201 */
202 static trap_info_t trap_table[] = {
203 { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
204 { 1, 0, __KERNEL_CS, (unsigned long)debug },
205 { 3, 3, __KERNEL_CS, (unsigned long)int3 },
206 { 4, 3, __KERNEL_CS, (unsigned long)overflow },
207 { 5, 3, __KERNEL_CS, (unsigned long)bounds },
208 { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
209 { 7, 0, __KERNEL_CS, (unsigned long)device_not_available },
210 { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
211 { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
212 { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
213 { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
214 { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
215 { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
216 { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug },
217 { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
218 { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
219 { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
220 { 0, 0, 0, 0 }
221 };
225 void trap_init(void)
226 {
227 HYPERVISOR_set_trap_table(trap_table);
228 }