direct-io.hg
changeset 3090:f068615fc588
bitkeeper revision 1.1159.183.7 (41a3be3bz-X6A7J-BlFnGqN1AnNswA)
Merge scramble.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-2.0-testing.bk
into scramble.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
author | kaf24@scramble.cl.cam.ac.uk |
---|---|
date | Tue Nov 23 22:48:27 2004 +0000 (2004-11-23) |
parents | 7ef582b6c9c4 50eaaedc0d3c |
children | 42bdac6c8985 2754a2ed61c3 |
files | xen/arch/x86/extable.c xen/arch/x86/irq.c xen/arch/x86/nmi.c xen/arch/x86/pdb-stub.c xen/arch/x86/smp.c xen/arch/x86/traps.c xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_32/seg_fixup.c xen/arch/x86/x86_32/xen.lds xen/include/asm-x86/debugger.h xen/include/asm-x86/irq.h xen/include/asm-x86/processor.h xen/include/asm-x86/x86_32/regs.h |
line diff
1.1 --- a/xen/arch/x86/extable.c Mon Nov 22 23:08:21 2004 +0000 1.2 +++ b/xen/arch/x86/extable.c Tue Nov 23 22:48:27 2004 +0000 1.3 @@ -3,9 +3,6 @@ 1.4 #include <xen/spinlock.h> 1.5 #include <asm/uaccess.h> 1.6 1.7 -extern const struct exception_table_entry __start___ex_table[]; 1.8 -extern const struct exception_table_entry __stop___ex_table[]; 1.9 - 1.10 static inline unsigned long 1.11 search_one_table(const struct exception_table_entry *first, 1.12 const struct exception_table_entry *last, 1.13 @@ -31,5 +28,21 @@ search_one_table(const struct exception_ 1.14 unsigned long 1.15 search_exception_table(unsigned long addr) 1.16 { 1.17 - return search_one_table(__start___ex_table, __stop___ex_table-1, addr); 1.18 + extern const struct exception_table_entry __start___ex_table[]; 1.19 + extern const struct exception_table_entry __stop___ex_table[]; 1.20 + return search_one_table( 1.21 + __start___ex_table, __stop___ex_table-1, addr); 1.22 } 1.23 + 1.24 +#ifdef __i386__ 1.25 +unsigned long 1.26 +search_pre_exception_table(unsigned long addr) 1.27 +{ 1.28 + extern const struct exception_table_entry __start___pre_ex_table[]; 1.29 + extern const struct exception_table_entry __stop___pre_ex_table[]; 1.30 + unsigned long fixup = search_one_table( 1.31 + __start___pre_ex_table, __stop___pre_ex_table-1, addr); 1.32 + DPRINTK("Pre-exception: %08lx -> %08lx\n", addr, fixup); 1.33 + return fixup; 1.34 +} 1.35 +#endif
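The hunk above narrows the `__ex_table` extern declarations to function scope and adds `search_pre_exception_table()` for the new `__pre_ex_table` section (emitted by the xen.lds hunk further down). Both searches follow the same pattern: walk a table of (faulting address, fixup address) pairs and return the fixup, or 0 if none is registered. Below is a minimal, self-contained sketch of that pattern; the `insn`/`fixup` field names follow the usual Linux/Xen convention and are assumptions, not copied from this changeset.

```c
#include <stdio.h>

/* Hypothetical stand-in for the real exception_table_entry layout. */
struct exception_table_entry {
    unsigned long insn;   /* address of the faulting instruction */
    unsigned long fixup;  /* address of the recovery code        */
};

/* Return the fixup address registered for 'addr', or 0 if there is none. */
static unsigned long
search_one_table(const struct exception_table_entry *first,
                 const struct exception_table_entry *last,
                 unsigned long addr)
{
    const struct exception_table_entry *e;
    for ( e = first; e <= last; e++ )
        if ( e->insn == addr )
            return e->fixup;
    return 0;
}

int main(void)
{
    static const struct exception_table_entry table[] = {
        { 0x1000, 0x2000 },
        { 0x1004, 0x2040 },
    };
    printf("%#lx\n", search_one_table(table, table + 1, 0x1004)); /* prints 0x2040 */
    return 0;
}
```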
2.1 --- a/xen/arch/x86/irq.c Mon Nov 22 23:08:21 2004 +0000 2.2 +++ b/xen/arch/x86/irq.c Tue Nov 23 22:48:27 2004 +0000 2.3 @@ -90,7 +90,7 @@ void enable_irq(unsigned int irq) 2.4 asmlinkage void do_IRQ(struct xen_regs regs) 2.5 { 2.6 #if defined(__i386__) 2.7 - unsigned int irq = regs.orig_eax; 2.8 + unsigned int irq = regs.entry_vector; 2.9 #else 2.10 unsigned int irq = 0; /* XXX */ 2.11 #endif
3.1 --- a/xen/arch/x86/nmi.c Mon Nov 22 23:08:21 2004 +0000 3.2 +++ b/xen/arch/x86/nmi.c Tue Nov 23 22:48:27 2004 +0000 3.3 @@ -286,7 +286,7 @@ void nmi_watchdog_tick (struct xen_regs 3.4 if ( alert_counter[cpu] == 5*nmi_hz ) 3.5 { 3.6 console_force_unlock(); 3.7 - fatal_trap(TRAP_nmi, regs, 0); 3.8 + fatal_trap(TRAP_nmi, regs); 3.9 } 3.10 } 3.11 else
4.1 --- a/xen/arch/x86/pdb-stub.c Mon Nov 22 23:08:21 2004 +0000 4.2 +++ b/xen/arch/x86/pdb-stub.c Tue Nov 23 22:48:27 2004 +0000 4.3 @@ -1091,7 +1091,7 @@ int pdb_handle_exception(int exceptionVe 4.4 (xen_regs->cs & 3) == 3 && 4.5 xen_regs->eip != pdb_system_call_next_addr + 1) 4.6 { 4.7 - TRC(printf("pdb: user bkpt (0x%x) at 0x%x:0x%lx:0x%lx\n", 4.8 + TRC(printf("pdb: user bkpt (0x%x) at 0x%x:0x%lx:0x%x\n", 4.9 exceptionVector, xen_regs->cs & 3, cr3, xen_regs->eip)); 4.10 return 1; 4.11 } 4.12 @@ -1110,12 +1110,12 @@ int pdb_handle_exception(int exceptionVe 4.13 (exceptionVector != KEYPRESS_EXCEPTION) && 4.14 xen_regs->eip < 0xc0000000) /* Linux-specific for now! */ 4.15 { 4.16 - TRC(printf("pdb: user bkpt (0x%x) at 0x%lx:0x%lx\n", 4.17 + TRC(printf("pdb: user bkpt (0x%x) at 0x%lx:0x%x\n", 4.18 exceptionVector, cr3, xen_regs->eip)); 4.19 return 1; 4.20 } 4.21 4.22 - printk("pdb_handle_exception [0x%x][0x%lx:0x%lx]\n", 4.23 + printk("pdb_handle_exception [0x%x][0x%lx:0x%x]\n", 4.24 exceptionVector, cr3, xen_regs->eip); 4.25 4.26 if ( pdb_stepping ) 4.27 @@ -1229,7 +1229,7 @@ void pdb_handle_debug_trap(struct xen_re 4.28 { 4.29 d->thread.debugreg[6] = condition; 4.30 4.31 - tb->flags = TBF_TRAP_NOCODE; 4.32 + tb->flags = TBF_EXCEPTION; 4.33 tb->cs = d->thread.traps[1].cs; 4.34 tb->eip = d->thread.traps[1].address; 4.35 }
5.1 --- a/xen/arch/x86/smp.c Mon Nov 22 23:08:21 2004 +0000 5.2 +++ b/xen/arch/x86/smp.c Tue Nov 23 22:48:27 2004 +0000 5.3 @@ -224,7 +224,7 @@ asmlinkage void smp_invalidate_interrupt 5.4 5.5 void flush_tlb_mask(unsigned long mask) 5.6 { 5.7 - ASSERT(!in_irq()); 5.8 + ASSERT(local_irq_is_enabled()); 5.9 5.10 if ( mask & (1 << smp_processor_id()) ) 5.11 { 5.12 @@ -234,20 +234,7 @@ void flush_tlb_mask(unsigned long mask) 5.13 5.14 if ( mask != 0 ) 5.15 { 5.16 - /* 5.17 - * We are certainly not reentering a flush_lock region on this CPU 5.18 - * because we are not in an IRQ context. We can therefore wait for the 5.19 - * other guy to release the lock. This is harder than it sounds because 5.20 - * local interrupts might be disabled, and he may be waiting for us to 5.21 - * execute smp_invalidate_interrupt(). We deal with this possibility by 5.22 - * inlining the meat of that function here. 5.23 - */ 5.24 - while ( unlikely(!spin_trylock(&flush_lock)) ) 5.25 - { 5.26 - if ( test_and_clear_bit(smp_processor_id(), &flush_cpumask) ) 5.27 - local_flush_tlb(); 5.28 - rep_nop(); 5.29 - } 5.30 + spin_lock(&flush_lock); 5.31 5.32 flush_cpumask = mask; 5.33 send_IPI_mask(mask, INVALIDATE_TLB_VECTOR); 5.34 @@ -264,6 +251,8 @@ void flush_tlb_mask(unsigned long mask) 5.35 /* Call with no locks held and interrupts enabled (e.g., softirq context). */ 5.36 void new_tlbflush_clock_period(void) 5.37 { 5.38 + ASSERT(local_irq_is_enabled()); 5.39 + 5.40 /* Flush everyone else. We definitely flushed just before entry. */ 5.41 if ( smp_num_cpus > 1 ) 5.42 {
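With the new precondition (local interrupts enabled, rather than merely not being in IRQ context), the caller can always service the INVALIDATE_TLB_VECTOR IPI while it spins, so the old trylock/inline-flush workaround collapses into a plain spin_lock(). A hedged sketch of the resulting control flow is below; `flush_cpumask`, `flush_lock`, `send_IPI_mask()` and `INVALIDATE_TLB_VECTOR` are taken from the hunk above, while the final wait loop is an assumption about context the diff elides, not part of this changeset.

```c
/* Sketch only; not standalone -- reuses the symbols visible in the hunk above. */
void flush_tlb_mask_sketch(unsigned long mask)
{
    ASSERT(local_irq_is_enabled());     /* IPIs can interrupt us while we wait */

    if ( mask & (1UL << smp_processor_id()) )
    {
        local_flush_tlb();
        mask &= ~(1UL << smp_processor_id());
    }

    if ( mask != 0 )
    {
        spin_lock(&flush_lock);         /* safe: the holder's IPI can still land here */
        flush_cpumask = mask;
        send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
        while ( flush_cpumask != 0 )    /* assumed wait: remote CPUs clear their bit */
            rep_nop();
        spin_unlock(&flush_lock);
    }
}
```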
6.1 --- a/xen/arch/x86/traps.c Mon Nov 22 23:08:21 2004 +0000 6.2 +++ b/xen/arch/x86/traps.c Tue Nov 23 22:48:27 2004 +0000 6.3 @@ -183,16 +183,16 @@ void show_registers(struct xen_regs *reg 6.4 gs = __HYPERVISOR_DS; 6.5 } 6.6 6.7 - printk("CPU: %d\nEIP: %04x:[<%08lx>] \nEFLAGS: %08lx\n", 6.8 + printk("CPU: %d\nEIP: %04x:[<%08x>] \nEFLAGS: %08x\n", 6.9 smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags); 6.10 - printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", 6.11 + printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n", 6.12 regs->eax, regs->ebx, regs->ecx, regs->edx); 6.13 - printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", 6.14 + printk("esi: %08x edi: %08x ebp: %08x esp: %08lx\n", 6.15 regs->esi, regs->edi, regs->ebp, esp); 6.16 printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n", 6.17 ds, es, fs, gs, ss); 6.18 6.19 - show_stack(&regs->esp); 6.20 + show_stack((unsigned long *)&regs->esp); 6.21 } 6.22 6.23 /* 6.24 @@ -200,9 +200,10 @@ void show_registers(struct xen_regs *reg 6.25 * are disabled). In such situations we can't do much that is safe. We try to 6.26 * print out some tracing and then we just spin. 6.27 */ 6.28 -asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs, long error_code) 6.29 +asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs) 6.30 { 6.31 int cpu = smp_processor_id(); 6.32 + unsigned long cr2; 6.33 static char *trapstr[] = { 6.34 "divide error", "debug", "nmi", "bkpt", "overflow", "bounds", 6.35 "invalid operation", "device not available", "double fault", 6.36 @@ -213,9 +214,16 @@ asmlinkage void fatal_trap(int trapnr, s 6.37 }; 6.38 6.39 show_registers(regs); 6.40 + 6.41 + if ( trapnr == TRAP_page_fault ) 6.42 + { 6.43 + __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (cr2) : ); 6.44 + printk("Faulting linear address might be %08lx\n", cr2); 6.45 + } 6.46 + 6.47 printk("************************************\n"); 6.48 - printk("CPU%d FATAL TRAP %d (%s), ERROR_CODE %lx%s.\n", 6.49 - cpu, trapnr, trapstr[trapnr], error_code, 6.50 + printk("CPU%d FATAL TRAP %d (%s), ERROR_CODE %04x%s.\n", 6.51 + cpu, trapnr, trapstr[trapnr], regs->error_code, 6.52 (regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT"); 6.53 printk("System shutting down -- need manual reset.\n"); 6.54 printk("************************************\n"); 6.55 @@ -229,24 +237,28 @@ asmlinkage void fatal_trap(int trapnr, s 6.56 } 6.57 6.58 static inline int do_trap(int trapnr, char *str, 6.59 - struct xen_regs *regs, 6.60 - long error_code, int use_error_code) 6.61 + struct xen_regs *regs, 6.62 + int use_error_code) 6.63 { 6.64 struct domain *d = current; 6.65 struct trap_bounce *tb = &d->thread.trap_bounce; 6.66 trap_info_t *ti; 6.67 unsigned long fixup; 6.68 6.69 - DEBUGGER_trap_entry(trapnr, regs, error_code); 6.70 + DEBUGGER_trap_entry(trapnr, regs); 6.71 6.72 if ( !(regs->cs & 3) ) 6.73 goto xen_fault; 6.74 6.75 ti = current->thread.traps + trapnr; 6.76 - tb->flags = use_error_code ? 
TBF_TRAP : TBF_TRAP_NOCODE; 6.77 - tb->error_code = error_code; 6.78 - tb->cs = ti->cs; 6.79 - tb->eip = ti->address; 6.80 + tb->flags = TBF_EXCEPTION; 6.81 + tb->cs = ti->cs; 6.82 + tb->eip = ti->address; 6.83 + if ( use_error_code ) 6.84 + { 6.85 + tb->flags |= TBF_EXCEPTION_ERRCODE; 6.86 + tb->error_code = regs->error_code; 6.87 + } 6.88 if ( TI_GET_IF(ti) ) 6.89 d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1; 6.90 return 0; 6.91 @@ -255,30 +267,30 @@ static inline int do_trap(int trapnr, ch 6.92 6.93 if ( likely((fixup = search_exception_table(regs->eip)) != 0) ) 6.94 { 6.95 - DPRINTK("Trap %d: %08lx -> %08lx\n", trapnr, regs->eip, fixup); 6.96 + DPRINTK("Trap %d: %08x -> %08lx\n", trapnr, regs->eip, fixup); 6.97 regs->eip = fixup; 6.98 return 0; 6.99 } 6.100 6.101 - DEBUGGER_trap_fatal(trapnr, regs, error_code); 6.102 + DEBUGGER_trap_fatal(trapnr, regs); 6.103 6.104 show_registers(regs); 6.105 panic("CPU%d FATAL TRAP: vector = %d (%s)\n" 6.106 - "[error_code=%08x]\n", 6.107 - smp_processor_id(), trapnr, str, error_code); 6.108 + "[error_code=%04x]\n", 6.109 + smp_processor_id(), trapnr, str, regs->error_code); 6.110 return 0; 6.111 } 6.112 6.113 #define DO_ERROR_NOCODE(trapnr, str, name) \ 6.114 -asmlinkage int do_##name(struct xen_regs * regs, long error_code) \ 6.115 +asmlinkage int do_##name(struct xen_regs *regs) \ 6.116 { \ 6.117 - return do_trap(trapnr, str, regs, error_code, 0); \ 6.118 + return do_trap(trapnr, str, regs, 0); \ 6.119 } 6.120 6.121 #define DO_ERROR(trapnr, str, name) \ 6.122 -asmlinkage int do_##name(struct xen_regs * regs, long error_code) \ 6.123 +asmlinkage int do_##name(struct xen_regs *regs) \ 6.124 { \ 6.125 - return do_trap(trapnr, str, regs, error_code, 1); \ 6.126 + return do_trap(trapnr, str, regs, 1); \ 6.127 } 6.128 6.129 DO_ERROR_NOCODE( 0, "divide error", divide_error) 6.130 @@ -293,28 +305,25 @@ DO_ERROR_NOCODE(16, "fpu error", coproce 6.131 DO_ERROR(17, "alignment check", alignment_check) 6.132 DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error) 6.133 6.134 -asmlinkage int do_int3(struct xen_regs *regs, long error_code) 6.135 +asmlinkage int do_int3(struct xen_regs *regs) 6.136 { 6.137 struct domain *d = current; 6.138 struct trap_bounce *tb = &d->thread.trap_bounce; 6.139 trap_info_t *ti; 6.140 6.141 - DEBUGGER_trap_entry(TRAP_int3, regs, error_code); 6.142 + DEBUGGER_trap_entry(TRAP_int3, regs); 6.143 6.144 if ( unlikely((regs->cs & 3) == 0) ) 6.145 { 6.146 - DEBUGGER_trap_fatal(TRAP_int3, regs, error_code); 6.147 + DEBUGGER_trap_fatal(TRAP_int3, regs); 6.148 show_registers(regs); 6.149 - panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n" 6.150 - "[error_code=%08x]\n", 6.151 - smp_processor_id(), error_code); 6.152 + panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n", smp_processor_id()); 6.153 } 6.154 6.155 ti = current->thread.traps + 3; 6.156 - tb->flags = TBF_TRAP_NOCODE; 6.157 - tb->error_code = error_code; 6.158 - tb->cs = ti->cs; 6.159 - tb->eip = ti->address; 6.160 + tb->flags = TBF_EXCEPTION; 6.161 + tb->cs = ti->cs; 6.162 + tb->eip = ti->address; 6.163 if ( TI_GET_IF(ti) ) 6.164 d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1; 6.165 6.166 @@ -353,12 +362,12 @@ asmlinkage void do_double_fault(void) 6.167 __asm__ __volatile__ ( "hlt" ); 6.168 } 6.169 6.170 -asmlinkage void do_machine_check(struct xen_regs *regs, long error_code) 6.171 +asmlinkage void do_machine_check(struct xen_regs *regs) 6.172 { 6.173 - fatal_trap(TRAP_machine_check, regs, error_code); 6.174 + fatal_trap(TRAP_machine_check, regs); 6.175 } 6.176 6.177 -asmlinkage 
int do_page_fault(struct xen_regs *regs, long error_code) 6.178 +asmlinkage int do_page_fault(struct xen_regs *regs) 6.179 { 6.180 trap_info_t *ti; 6.181 unsigned long off, addr, fixup; 6.182 @@ -369,7 +378,7 @@ asmlinkage int do_page_fault(struct xen_ 6.183 6.184 __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : ); 6.185 6.186 - DEBUGGER_trap_entry(TRAP_page_fault, regs, error_code); 6.187 + DEBUGGER_trap_entry(TRAP_page_fault, regs); 6.188 6.189 perfc_incrc(page_faults); 6.190 6.191 @@ -384,17 +393,17 @@ asmlinkage int do_page_fault(struct xen_ 6.192 } 6.193 6.194 if ( (addr < PAGE_OFFSET) && 6.195 - ((error_code & 3) == 3) && /* write-protection fault */ 6.196 + ((regs->error_code & 3) == 3) && /* write-protection fault */ 6.197 ptwr_do_page_fault(addr) ) 6.198 { 6.199 if ( unlikely(d->mm.shadow_mode) ) 6.200 - (void)shadow_fault(addr, error_code); 6.201 + (void)shadow_fault(addr, regs->error_code); 6.202 return EXCRET_fault_fixed; 6.203 } 6.204 } 6.205 6.206 if ( unlikely(d->mm.shadow_mode) && 6.207 - (addr < PAGE_OFFSET) && shadow_fault(addr, error_code) ) 6.208 + (addr < PAGE_OFFSET) && shadow_fault(addr, regs->error_code) ) 6.209 return EXCRET_fault_fixed; 6.210 6.211 if ( unlikely(addr >= LDT_VIRT_START) && 6.212 @@ -414,9 +423,9 @@ asmlinkage int do_page_fault(struct xen_ 6.213 goto xen_fault; 6.214 6.215 ti = d->thread.traps + 14; 6.216 - tb->flags = TBF_TRAP_CR2; /* page fault pushes %cr2 */ 6.217 + tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2; 6.218 tb->cr2 = addr; 6.219 - tb->error_code = error_code; 6.220 + tb->error_code = regs->error_code; 6.221 tb->cs = ti->cs; 6.222 tb->eip = ti->address; 6.223 if ( TI_GET_IF(ti) ) 6.224 @@ -429,12 +438,12 @@ asmlinkage int do_page_fault(struct xen_ 6.225 { 6.226 perfc_incrc(copy_user_faults); 6.227 if ( !d->mm.shadow_mode ) 6.228 - DPRINTK("Page fault: %08lx -> %08lx\n", regs->eip, fixup); 6.229 + DPRINTK("Page fault: %08x -> %08lx\n", regs->eip, fixup); 6.230 regs->eip = fixup; 6.231 return 0; 6.232 } 6.233 6.234 - DEBUGGER_trap_fatal(TRAP_page_fault, regs, error_code); 6.235 + DEBUGGER_trap_fatal(TRAP_page_fault, regs); 6.236 6.237 if ( addr >= PAGE_OFFSET ) 6.238 { 6.239 @@ -448,30 +457,30 @@ asmlinkage int do_page_fault(struct xen_ 6.240 printk(" *pte = %08lx\n", page); 6.241 } 6.242 #ifdef MEMORY_GUARD 6.243 - if ( !(error_code & 1) ) 6.244 + if ( !(regs->error_code & 1) ) 6.245 printk(" -- POSSIBLY AN ACCESS TO FREED MEMORY? --\n"); 6.246 #endif 6.247 } 6.248 6.249 show_registers(regs); 6.250 panic("CPU%d FATAL PAGE FAULT\n" 6.251 - "[error_code=%08x]\n" 6.252 + "[error_code=%04x]\n" 6.253 "Faulting linear address might be %08lx\n", 6.254 - smp_processor_id(), error_code, addr); 6.255 + smp_processor_id(), regs->error_code, addr); 6.256 return 0; 6.257 } 6.258 6.259 -asmlinkage int do_general_protection(struct xen_regs *regs, long error_code) 6.260 +asmlinkage int do_general_protection(struct xen_regs *regs) 6.261 { 6.262 struct domain *d = current; 6.263 struct trap_bounce *tb = &d->thread.trap_bounce; 6.264 trap_info_t *ti; 6.265 unsigned long fixup; 6.266 6.267 - DEBUGGER_trap_entry(TRAP_gp_fault, regs, error_code); 6.268 + DEBUGGER_trap_entry(TRAP_gp_fault, regs); 6.269 6.270 /* Badness if error in ring 0, or result of an interrupt. */ 6.271 - if ( !(regs->cs & 3) || (error_code & 1) ) 6.272 + if ( !(regs->cs & 3) || (regs->error_code & 1) ) 6.273 goto gp_in_kernel; 6.274 6.275 /* 6.276 @@ -494,13 +503,13 @@ asmlinkage int do_general_protection(str 6.277 * instruction. 
The DPL specified by the guest OS for these vectors is NOT 6.278 * CHECKED!! 6.279 */ 6.280 - if ( (error_code & 3) == 2 ) 6.281 + if ( (regs->error_code & 3) == 2 ) 6.282 { 6.283 /* This fault must be due to <INT n> instruction. */ 6.284 - ti = current->thread.traps + (error_code>>3); 6.285 + ti = current->thread.traps + (regs->error_code>>3); 6.286 if ( TI_GET_DPL(ti) >= (regs->cs & 3) ) 6.287 { 6.288 - tb->flags = TBF_TRAP_NOCODE; 6.289 + tb->flags = TBF_EXCEPTION; 6.290 regs->eip += 2; 6.291 goto finish_propagation; 6.292 } 6.293 @@ -508,15 +517,15 @@ asmlinkage int do_general_protection(str 6.294 6.295 #if defined(__i386__) 6.296 if ( VM_ASSIST(d, VMASST_TYPE_4gb_segments) && 6.297 - (error_code == 0) && 6.298 + (regs->error_code == 0) && 6.299 gpf_emulate_4gb(regs) ) 6.300 return 0; 6.301 #endif 6.302 6.303 /* Pass on GPF as is. */ 6.304 ti = current->thread.traps + 13; 6.305 - tb->flags = TBF_TRAP; 6.306 - tb->error_code = error_code; 6.307 + tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE; 6.308 + tb->error_code = regs->error_code; 6.309 finish_propagation: 6.310 tb->cs = ti->cs; 6.311 tb->eip = ti->address; 6.312 @@ -528,16 +537,17 @@ asmlinkage int do_general_protection(str 6.313 6.314 if ( likely((fixup = search_exception_table(regs->eip)) != 0) ) 6.315 { 6.316 - DPRINTK("GPF (%04lx): %08lx -> %08lx\n", error_code, regs->eip, fixup); 6.317 + DPRINTK("GPF (%04x): %08x -> %08lx\n", 6.318 + regs->error_code, regs->eip, fixup); 6.319 regs->eip = fixup; 6.320 return 0; 6.321 } 6.322 6.323 - DEBUGGER_trap_fatal(TRAP_gp_fault, regs, error_code); 6.324 + DEBUGGER_trap_fatal(TRAP_gp_fault, regs); 6.325 6.326 show_registers(regs); 6.327 - panic("CPU%d GENERAL PROTECTION FAULT\n" 6.328 - "[error_code=%08x]\n", smp_processor_id(), error_code); 6.329 + panic("CPU%d GENERAL PROTECTION FAULT\n[error_code=%04x]\n", 6.330 + smp_processor_id(), regs->error_code); 6.331 return 0; 6.332 } 6.333 6.334 @@ -545,7 +555,7 @@ asmlinkage void mem_parity_error(struct 6.335 { 6.336 console_force_unlock(); 6.337 printk("\n\nNMI - MEMORY ERROR\n"); 6.338 - fatal_trap(TRAP_nmi, regs, 0); 6.339 + fatal_trap(TRAP_nmi, regs); 6.340 } 6.341 6.342 asmlinkage void io_check_error(struct xen_regs *regs) 6.343 @@ -553,7 +563,7 @@ asmlinkage void io_check_error(struct xe 6.344 console_force_unlock(); 6.345 6.346 printk("\n\nNMI - I/O ERROR\n"); 6.347 - fatal_trap(TRAP_nmi, regs, 0); 6.348 + fatal_trap(TRAP_nmi, regs); 6.349 } 6.350 6.351 static void unknown_nmi_error(unsigned char reason, struct xen_regs * regs) 6.352 @@ -588,7 +598,7 @@ static void nmi_softirq(void) 6.353 send_guest_virq(dom0, VIRQ_IO_ERR); 6.354 } 6.355 6.356 -asmlinkage int math_state_restore(struct xen_regs *regs, long error_code) 6.357 +asmlinkage int math_state_restore(struct xen_regs *regs) 6.358 { 6.359 /* Prevent recursion. 
*/ 6.360 clts(); 6.361 @@ -605,7 +615,7 @@ asmlinkage int math_state_restore(struct 6.362 if ( test_and_clear_bit(DF_GUEST_STTS, &current->flags) ) 6.363 { 6.364 struct trap_bounce *tb = &current->thread.trap_bounce; 6.365 - tb->flags = TBF_TRAP_NOCODE; 6.366 + tb->flags = TBF_EXCEPTION; 6.367 tb->cs = current->thread.traps[7].cs; 6.368 tb->eip = current->thread.traps[7].address; 6.369 } 6.370 @@ -613,13 +623,13 @@ asmlinkage int math_state_restore(struct 6.371 return EXCRET_fault_fixed; 6.372 } 6.373 6.374 -asmlinkage int do_debug(struct xen_regs *regs, long error_code) 6.375 +asmlinkage int do_debug(struct xen_regs *regs) 6.376 { 6.377 unsigned int condition; 6.378 struct domain *d = current; 6.379 struct trap_bounce *tb = &d->thread.trap_bounce; 6.380 6.381 - DEBUGGER_trap_entry(TRAP_debug, regs, error_code); 6.382 + DEBUGGER_trap_entry(TRAP_debug, regs); 6.383 6.384 __asm__ __volatile__("movl %%db6,%0" : "=r" (condition)); 6.385 6.386 @@ -647,7 +657,7 @@ asmlinkage int do_debug(struct xen_regs 6.387 /* Save debug status register where guest OS can peek at it */ 6.388 d->thread.debugreg[6] = condition; 6.389 6.390 - tb->flags = TBF_TRAP_NOCODE; 6.391 + tb->flags = TBF_EXCEPTION; 6.392 tb->cs = d->thread.traps[1].cs; 6.393 tb->eip = d->thread.traps[1].address; 6.394 6.395 @@ -655,8 +665,7 @@ asmlinkage int do_debug(struct xen_regs 6.396 return EXCRET_not_a_fault; 6.397 } 6.398 6.399 -asmlinkage int do_spurious_interrupt_bug( 6.400 - struct xen_regs * regs, long error_code) 6.401 +asmlinkage int do_spurious_interrupt_bug(struct xen_regs *regs) 6.402 { 6.403 return EXCRET_not_a_fault; 6.404 }
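Across these handlers the old TBF_TRAP / TBF_TRAP_NOCODE / TBF_TRAP_CR2 values are replaced by composable flags: TBF_EXCEPTION is always set, and TBF_EXCEPTION_ERRCODE / TBF_EXCEPTION_CR2 are OR-ed in only when an error code or %cr2 value must be pushed onto the guest frame. A hedged sketch of the common pattern is below; `bounce_exception` is a hypothetical helper name (the real handlers inline this logic), and the structures come from the hunks in this changeset.

```c
/* Sketch only; mirrors the pattern visible in the traps.c hunk above. */
static void bounce_exception(struct domain *d, int trapnr,
                             int has_error_code, unsigned long error_code)
{
    trap_info_t *ti = d->thread.traps + trapnr;
    struct trap_bounce *tb = &d->thread.trap_bounce;

    tb->flags = TBF_EXCEPTION;              /* always delivered as an exception */
    tb->cs    = ti->cs;
    tb->eip   = ti->address;
    if ( has_error_code )
    {
        tb->flags |= TBF_EXCEPTION_ERRCODE; /* error code goes on the guest frame */
        tb->error_code = error_code;
    }
    if ( TI_GET_IF(ti) )                    /* guest asked for upcalls to be masked */
        d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
}
```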
7.1 --- a/xen/arch/x86/x86_32/asm-offsets.c Mon Nov 22 23:08:21 2004 +0000 7.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c Tue Nov 23 22:48:27 2004 +0000 7.3 @@ -7,7 +7,7 @@ 7.4 #include <xen/sched.h> 7.5 7.6 #define DEFINE(_sym, _val) \ 7.7 - __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" _val ) 7.8 + __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) ) 7.9 #define BLANK() \ 7.10 __asm__ __volatile__ ( "\n->" : : ) 7.11 #define OFFSET(_sym, _str, _mem) \ 7.12 @@ -31,7 +31,10 @@ void __dummy__(void) 7.13 OFFSET(XREGS_gs, struct xen_regs, gs); 7.14 OFFSET(XREGS_ss, struct xen_regs, ss); 7.15 OFFSET(XREGS_eflags, struct xen_regs, eflags); 7.16 - OFFSET(XREGS_orig_eax, struct xen_regs, orig_eax); 7.17 + OFFSET(XREGS_error_code, struct xen_regs, error_code); 7.18 + OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector); 7.19 + OFFSET(XREGS_kernel_sizeof, struct xen_regs, esp); 7.20 + DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs)); 7.21 BLANK(); 7.22 7.23 OFFSET(DOMAIN_processor, struct domain, processor); 7.24 @@ -41,6 +44,7 @@ void __dummy__(void) 7.25 OFFSET(DOMAIN_failsafe_sel, struct domain, thread.failsafe_selector); 7.26 OFFSET(DOMAIN_failsafe_addr, struct domain, thread.failsafe_address); 7.27 OFFSET(DOMAIN_trap_bounce, struct domain, thread.trap_bounce); 7.28 + OFFSET(DOMAIN_thread_flags, struct domain, thread.flags); 7.29 BLANK(); 7.30 7.31 OFFSET(SHINFO_upcall_pending, shared_info_t,
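The only functional fix in the DEFINE macro is wrapping the argument as `"i" (_val)`: a GCC inline-asm input operand must be a parenthesised expression, so an unparenthesised value such as `sizeof(struct xen_regs)` would not parse reliably. The mechanism itself emits `->NAME value` markers into the generated assembly, which a build script turns into `#define` lines. A standalone sketch of that trick, assuming a GCC-compatible compiler (compile with `-S` to see the markers):

```c
#include <stddef.h>

/* Emit "->SYM value" into the .s output; a post-processing script (not shown
 * here) converts these markers into "#define SYM value". */
#define DEFINE(sym, val) \
    __asm__ __volatile__ ( "\n->" #sym " %0 " #val : : "i" (val) )
#define OFFSET(sym, type, member) DEFINE(sym, offsetof(type, member))

struct example_regs { unsigned int eax, ebx; };

void __dummy__(void)
{
    OFFSET(EXAMPLE_ebx, struct example_regs, ebx);       /* -> 4 on i386          */
    DEFINE(EXAMPLE_sizeof, sizeof(struct example_regs)); /* -> 8, needs (val) wrap */
}
```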
8.1 --- a/xen/arch/x86/x86_32/entry.S Mon Nov 22 23:08:21 2004 +0000 8.2 +++ b/xen/arch/x86/x86_32/entry.S Tue Nov 23 22:48:27 2004 +0000 8.3 @@ -3,12 +3,10 @@ 8.4 * 8.5 * Copyright (c) 2002-2004, K A Fraser 8.6 * Copyright (c) 1991, 1992 Linus Torvalds 8.7 - */ 8.8 - 8.9 -/* 8.10 - * The idea for callbacks to guest OSes 8.11 - * ==================================== 8.12 - * 8.13 + * 8.14 + * Calling back to a guest OS: 8.15 + * =========================== 8.16 + * 8.17 * First, we require that all callbacks (either via a supplied 8.18 * interrupt-descriptor-table, or via the special event or failsafe callbacks 8.19 * in the shared-info-structure) are to ring 1. This just makes life easier, 8.20 @@ -16,12 +14,11 @@ 8.21 * out which the privilege-level of the return code-selector. That code 8.22 * would just be a hassle to write, and would need to account for running 8.23 * off the end of the GDT/LDT, for example. For all callbacks we check 8.24 - * that the provided 8.25 - * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as 8.26 - * don't allow a guest OS to install ring-0 privileges into the GDT/LDT. 8.27 - * It's up to the guest OS to ensure all returns via the IDT are to ring 1. 8.28 - * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather 8.29 - * than the correct ring) and bad things are bound to ensue -- IRET is 8.30 + * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that 8.31 + * we're safe as don't allow a guest OS to install ring-0 privileges into the 8.32 + * GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are to 8.33 + * ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1 8.34 + * rather than the correct ring) and bad things are bound to ensue -- IRET is 8.35 * likely to fault, and we may end up killing the domain (no harm can 8.36 * come to Xen, though). 
8.37 * 8.38 @@ -159,59 +156,12 @@ multicall_fixup1: 8.39 8.40 ALIGN 8.41 restore_all_guest: 8.42 -1: movl XREGS_ds(%esp),%ds 8.43 -2: movl XREGS_es(%esp),%es 8.44 -3: movl XREGS_fs(%esp),%fs 8.45 -4: movl XREGS_gs(%esp),%gs 8.46 - popl %ebx 8.47 - popl %ecx 8.48 - popl %edx 8.49 - popl %esi 8.50 - popl %edi 8.51 - popl %ebp 8.52 - popl %eax 8.53 - addl $4,%esp 8.54 -5: iret 8.55 -.section .fixup,"ax" 8.56 -6: subl $4,%esp 8.57 - pushl %eax 8.58 - pushl %ebp 8.59 - pushl %edi 8.60 - pushl %esi 8.61 - pushl %edx 8.62 - pushl %ecx 8.63 - pushl %ebx 8.64 -7: SET_XEN_SEGMENTS(a) 8.65 - jmp failsafe_callback 8.66 -.previous 8.67 -.section __ex_table,"a" 8.68 - .align 4 8.69 - .long 1b,7b 8.70 - .long 2b,7b 8.71 - .long 3b,7b 8.72 - .long 4b,7b 8.73 - .long 5b,6b 8.74 -.previous 8.75 - 8.76 -/* No special register assumptions */ 8.77 -failsafe_callback: 8.78 - GET_CURRENT(%ebx) 8.79 - leal DOMAIN_trap_bounce(%ebx),%edx 8.80 - movl DOMAIN_failsafe_addr(%ebx),%eax 8.81 - movl %eax,TRAPBOUNCE_eip(%edx) 8.82 - movl DOMAIN_failsafe_sel(%ebx),%eax 8.83 - movw %ax,TRAPBOUNCE_cs(%edx) 8.84 - call create_bounce_frame 8.85 - subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame 8.86 - movl XREGS_ds(%esp),%eax 8.87 -FAULT1: movl %eax,%gs:(%esi) 8.88 - movl XREGS_es(%esp),%eax 8.89 -FAULT2: movl %eax,%gs:4(%esi) 8.90 - movl XREGS_fs(%esp),%eax 8.91 -FAULT3: movl %eax,%gs:8(%esi) 8.92 - movl XREGS_gs(%esp),%eax 8.93 -FAULT4: movl %eax,%gs:12(%esi) 8.94 - movl %esi,XREGS_esp(%esp) 8.95 + testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx) 8.96 + jnz failsafe_callback 8.97 +FLT1: movl XREGS_ds(%esp),%ds 8.98 +FLT2: movl XREGS_es(%esp),%es 8.99 +FLT3: movl XREGS_fs(%esp),%fs 8.100 +FLT4: movl XREGS_gs(%esp),%gs 8.101 popl %ebx 8.102 popl %ecx 8.103 popl %edx 8.104 @@ -220,7 +170,77 @@ FAULT4: movl %eax,%gs:12(%esi) 8.105 popl %ebp 8.106 popl %eax 8.107 addl $4,%esp 8.108 -FAULT5: iret 8.109 +FLT5: iret 8.110 +.section .fixup,"ax" 8.111 +FIX5: subl $28,%esp 8.112 + pushl 28(%esp) # error_code/entry_vector 8.113 + movl %eax,XREGS_eax+4(%esp) 8.114 + movl %ebp,XREGS_ebp+4(%esp) 8.115 + movl %edi,XREGS_edi+4(%esp) 8.116 + movl %esi,XREGS_esi+4(%esp) 8.117 + movl %edx,XREGS_edx+4(%esp) 8.118 + movl %ecx,XREGS_ecx+4(%esp) 8.119 + movl %ebx,XREGS_ebx+4(%esp) 8.120 +FIX1: SET_XEN_SEGMENTS(a) 8.121 + movl %eax,%fs 8.122 + movl %eax,%gs 8.123 + sti 8.124 + popl %esi 8.125 + pushfl # EFLAGS 8.126 + movl $__HYPERVISOR_CS,%eax 8.127 + pushl %eax # CS 8.128 + movl $DBLFLT1,%eax 8.129 + pushl %eax # EIP 8.130 + pushl %esi # error_code/entry_vector 8.131 + jmp error_code 8.132 +DBLFLT1:GET_CURRENT(%ebx) 8.133 + jmp test_all_events 8.134 +DBLFIX1:GET_CURRENT(%ebx) 8.135 + testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx) 8.136 + jnz domain_crash # cannot reenter failsafe code 8.137 + orb $TF_failsafe_return,DOMAIN_thread_flags(%ebx) 8.138 + jmp test_all_events # will return via failsafe code 8.139 +.previous 8.140 +.section __pre_ex_table,"a" 8.141 + .long FLT1,FIX1 8.142 + .long FLT2,FIX1 8.143 + .long FLT3,FIX1 8.144 + .long FLT4,FIX1 8.145 + .long FLT5,FIX5 8.146 +.previous 8.147 +.section __ex_table,"a" 8.148 + .long DBLFLT1,DBLFIX1 8.149 +.previous 8.150 + 8.151 +/* No special register assumptions */ 8.152 +failsafe_callback: 8.153 + GET_CURRENT(%ebx) 8.154 + andb $~TF_failsafe_return,DOMAIN_thread_flags(%ebx) 8.155 + leal DOMAIN_trap_bounce(%ebx),%edx 8.156 + movl DOMAIN_failsafe_addr(%ebx),%eax 8.157 + movl %eax,TRAPBOUNCE_eip(%edx) 8.158 + movl DOMAIN_failsafe_sel(%ebx),%eax 8.159 + movw %ax,TRAPBOUNCE_cs(%edx) 8.160 
+ movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx) 8.161 + call create_bounce_frame 8.162 + popl %ebx 8.163 + popl %ecx 8.164 + popl %edx 8.165 + popl %esi 8.166 + popl %edi 8.167 + popl %ebp 8.168 + popl %eax 8.169 + addl $4,%esp 8.170 +FLT6: iret 8.171 +.section .fixup,"ax" 8.172 +FIX6: pushl %ebx 8.173 + GET_CURRENT(%ebx) 8.174 + orb $TF_failsafe_return,DOMAIN_thread_flags(%ebx) 8.175 + pop %ebx 8.176 + jmp FIX5 8.177 +.section __pre_ex_table,"a" 8.178 + .long FLT6,FIX6 8.179 +.previous 8.180 8.181 ALIGN 8.182 restore_all_xen: 8.183 @@ -236,7 +256,7 @@ restore_all_xen: 8.184 8.185 ALIGN 8.186 ENTRY(hypercall) 8.187 - pushl %eax # save orig_eax 8.188 + subl $4,%esp 8.189 SAVE_ALL(b) 8.190 sti 8.191 GET_CURRENT(%ebx) 8.192 @@ -244,7 +264,7 @@ ENTRY(hypercall) 8.193 call *SYMBOL_NAME(hypercall_table)(,%eax,4) 8.194 8.195 ret_from_hypercall: 8.196 - movl %eax,XREGS_eax(%esp) # save the return value 8.197 + movl %eax,XREGS_eax(%esp) # save the return value 8.198 8.199 test_all_events: 8.200 xorl %ecx,%ecx 8.201 @@ -261,14 +281,16 @@ test_all_events: 8.202 jnz restore_all_guest 8.203 testb $0xFF,SHINFO_upcall_pending(%eax) 8.204 jz restore_all_guest 8.205 - movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery 8.206 /*process_guest_events:*/ 8.207 leal DOMAIN_trap_bounce(%ebx),%edx 8.208 movl DOMAIN_event_addr(%ebx),%eax 8.209 movl %eax,TRAPBOUNCE_eip(%edx) 8.210 movl DOMAIN_event_sel(%ebx),%eax 8.211 movw %ax,TRAPBOUNCE_cs(%edx) 8.212 + movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx) 8.213 call create_bounce_frame 8.214 + movl DOMAIN_shared_info(%ebx),%eax 8.215 + movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery 8.216 jmp restore_all_guest 8.217 8.218 ALIGN 8.219 @@ -282,8 +304,8 @@ process_softirqs: 8.220 /* %edx == trap_bounce, %ebx == task_struct */ 8.221 /* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. 
*/ 8.222 create_bounce_frame: 8.223 - mov XREGS_cs+4(%esp),%cl 8.224 - test $2,%cl 8.225 + movb XREGS_cs+4(%esp),%cl 8.226 + testb $2,%cl 8.227 jz 1f /* jump if returning to an existing ring-1 activation */ 8.228 /* obtain ss/esp from TSS -- no current ring-1 activations */ 8.229 movl DOMAIN_processor(%ebx),%eax 8.230 @@ -294,29 +316,51 @@ create_bounce_frame: 8.231 addl %ecx,%eax 8.232 addl $init_tss + 12,%eax 8.233 movl (%eax),%esi /* tss->esp1 */ 8.234 -FAULT6: movl 4(%eax),%gs /* tss->ss1 */ 8.235 +FLT7: movl 4(%eax),%gs /* tss->ss1 */ 8.236 /* base of stack frame must contain ss/esp (inter-priv iret) */ 8.237 subl $8,%esi 8.238 movl XREGS_esp+4(%esp),%eax 8.239 -FAULT7: movl %eax,%gs:(%esi) 8.240 +FLT8: movl %eax,%gs:(%esi) 8.241 movl XREGS_ss+4(%esp),%eax 8.242 -FAULT8: movl %eax,%gs:4(%esi) 8.243 +FLT9: movl %eax,%gs:4(%esi) 8.244 jmp 2f 8.245 1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */ 8.246 movl XREGS_esp+4(%esp),%esi 8.247 -FAULT9: movl XREGS_ss+4(%esp),%gs 8.248 +FLT10: movl XREGS_ss+4(%esp),%gs 8.249 2: /* Construct a stack frame: EFLAGS, CS/EIP */ 8.250 subl $12,%esi 8.251 movl XREGS_eip+4(%esp),%eax 8.252 -FAULT10:movl %eax,%gs:(%esi) 8.253 +FLT11: movl %eax,%gs:(%esi) 8.254 movl XREGS_cs+4(%esp),%eax 8.255 -FAULT11:movl %eax,%gs:4(%esi) 8.256 +FLT12: movl %eax,%gs:4(%esi) 8.257 movl XREGS_eflags+4(%esp),%eax 8.258 -FAULT12:movl %eax,%gs:8(%esi) 8.259 +FLT13: movl %eax,%gs:8(%esi) 8.260 + movb TRAPBOUNCE_flags(%edx),%cl 8.261 + test $TBF_EXCEPTION_ERRCODE,%cl 8.262 + jz 1f 8.263 + subl $4,%esi # push error_code onto guest frame 8.264 + movl TRAPBOUNCE_error_code(%edx),%eax 8.265 +FLT14: movl %eax,%gs:(%esi) 8.266 + testb $TBF_EXCEPTION_CR2,%cl 8.267 + jz 2f 8.268 + subl $4,%esi # push %cr2 onto guest frame 8.269 + movl TRAPBOUNCE_cr2(%edx),%eax 8.270 +FLT15: movl %eax,%gs:(%esi) 8.271 +1: testb $TBF_FAILSAFE,%cl 8.272 + jz 2f 8.273 + subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame 8.274 + movl XREGS_ds+4(%esp),%eax 8.275 +FLT16: movl %eax,%gs:(%esi) 8.276 + movl XREGS_es+4(%esp),%eax 8.277 +FLT17: movl %eax,%gs:4(%esi) 8.278 + movl XREGS_fs+4(%esp),%eax 8.279 +FLT18: movl %eax,%gs:8(%esi) 8.280 + movl XREGS_gs+4(%esp),%eax 8.281 +FLT19: movl %eax,%gs:12(%esi) 8.282 +2: movb $0,TRAPBOUNCE_flags(%edx) 8.283 /* Rewrite our stack frame and return to ring 1. */ 8.284 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. 
*/ 8.285 - andl $0xfffcbeff,%eax 8.286 - movl %eax,XREGS_eflags+4(%esp) 8.287 + andl $0xfffcbeff,XREGS_eflags+4(%esp) 8.288 movl %gs,XREGS_ss+4(%esp) 8.289 movl %esi,XREGS_esp+4(%esp) 8.290 movzwl TRAPBOUNCE_cs(%edx),%eax 8.291 @@ -324,57 +368,44 @@ FAULT12:movl %eax,%gs:8(%esi) 8.292 movl TRAPBOUNCE_eip(%edx),%eax 8.293 movl %eax,XREGS_eip+4(%esp) 8.294 ret 8.295 - 8.296 +.section .fixup,"ax" 8.297 +FIX7: sti 8.298 + popl %esi 8.299 + addl $4,%esp # Discard create_b_frame return address 8.300 + pushfl # EFLAGS 8.301 + movl $__HYPERVISOR_CS,%eax 8.302 + pushl %eax # CS 8.303 + movl $DBLFLT2,%eax 8.304 + pushl %eax # EIP 8.305 + pushl %esi # error_code/entry_vector 8.306 + jmp error_code 8.307 +DBLFLT2:jmp process_guest_exception_and_events 8.308 +.previous 8.309 +.section __pre_ex_table,"a" 8.310 + .long FLT7,FIX7 8.311 + .long FLT8,FIX7 8.312 + .long FLT9,FIX7 8.313 + .long FLT10,FIX7 8.314 + .long FLT11,FIX7 8.315 + .long FLT12,FIX7 8.316 + .long FLT13,FIX7 8.317 + .long FLT14,FIX7 8.318 + .long FLT15,FIX7 8.319 + .long FLT16,FIX7 8.320 + .long FLT17,FIX7 8.321 + .long FLT18,FIX7 8.322 + .long FLT19,FIX7 8.323 +.previous 8.324 .section __ex_table,"a" 8.325 - .align 4 8.326 - .long FAULT1, crash_domain_fixup3 # Fault writing to ring-1 stack 8.327 - .long FAULT2, crash_domain_fixup3 # Fault writing to ring-1 stack 8.328 - .long FAULT3, crash_domain_fixup3 # Fault writing to ring-1 stack 8.329 - .long FAULT4, crash_domain_fixup3 # Fault writing to ring-1 stack 8.330 - .long FAULT5, crash_domain_fixup1 # Fault executing failsafe iret 8.331 - .long FAULT6, crash_domain_fixup2 # Fault loading ring-1 stack selector 8.332 - .long FAULT7, crash_domain_fixup2 # Fault writing to ring-1 stack 8.333 - .long FAULT8, crash_domain_fixup2 # Fault writing to ring-1 stack 8.334 - .long FAULT9, crash_domain_fixup2 # Fault loading ring-1 stack selector 8.335 - .long FAULT10,crash_domain_fixup2 # Fault writing to ring-1 stack 8.336 - .long FAULT11,crash_domain_fixup2 # Fault writing to ring-1 stack 8.337 - .long FAULT12,crash_domain_fixup2 # Fault writing to ring-1 stack 8.338 - .long FAULT13,crash_domain_fixup3 # Fault writing to ring-1 stack 8.339 - .long FAULT14,crash_domain_fixup3 # Fault writing to ring-1 stack 8.340 -.previous 8.341 - 8.342 -# This handler kills domains which experience unrecoverable faults. 
8.343 -.section .fixup,"ax" 8.344 -crash_domain_fixup1: 8.345 - subl $4,%esp 8.346 - SAVE_ALL(a) 8.347 - sti 8.348 - jmp domain_crash 8.349 -crash_domain_fixup2: 8.350 - addl $4,%esp 8.351 -crash_domain_fixup3: 8.352 - jmp domain_crash 8.353 + .long DBLFLT2,domain_crash 8.354 .previous 8.355 8.356 ALIGN 8.357 -process_guest_exception_and_events: 8.358 +process_guest_exception_and_events: 8.359 leal DOMAIN_trap_bounce(%ebx),%edx 8.360 - testb $~0,TRAPBOUNCE_flags(%edx) 8.361 + testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx) 8.362 jz test_all_events 8.363 - call create_bounce_frame # just the basic frame 8.364 - mov TRAPBOUNCE_flags(%edx),%cl 8.365 - test $TBF_TRAP_NOCODE,%cl 8.366 - jnz 2f 8.367 - subl $4,%esi # push error_code onto guest frame 8.368 - movl TRAPBOUNCE_error_code(%edx),%eax 8.369 -FAULT13:movl %eax,%gs:(%esi) 8.370 - test $TBF_TRAP_CR2,%cl 8.371 - jz 1f 8.372 - subl $4,%esi # push %cr2 onto guest frame 8.373 - movl TRAPBOUNCE_cr2(%edx),%eax 8.374 -FAULT14:movl %eax,%gs:(%esi) 8.375 -1: movl %esi,XREGS_esp(%esp) 8.376 -2: movb $0,TRAPBOUNCE_flags(%edx) 8.377 + call create_bounce_frame 8.378 jmp test_all_events 8.379 8.380 ALIGN 8.381 @@ -382,120 +413,124 @@ ENTRY(ret_from_intr) 8.382 GET_CURRENT(%ebx) 8.383 movb XREGS_cs(%esp),%al 8.384 testb $3,%al # return to non-supervisor? 8.385 - jne test_all_events 8.386 + jnz test_all_events 8.387 jmp restore_all_xen 8.388 8.389 ENTRY(divide_error) 8.390 - pushl $0 # no error code 8.391 - pushl $ SYMBOL_NAME(do_divide_error) 8.392 + pushl $TRAP_divide_error<<16 8.393 ALIGN 8.394 error_code: 8.395 - cld 8.396 - pushl %ebp 8.397 - pushl %edi 8.398 - pushl %esi 8.399 - pushl %edx 8.400 - pushl %ecx 8.401 - pushl %ebx 8.402 - movb XREGS_cs(%esp),%bl 8.403 - testb $3,%bl 8.404 - je 1f 8.405 - movl %ds,XREGS_ds(%esp) 8.406 - movl %es,XREGS_es(%esp) 8.407 - movl %fs,XREGS_fs(%esp) 8.408 - movl %gs,XREGS_gs(%esp) 8.409 -1: SET_XEN_SEGMENTS(b) 8.410 - movl XREGS_orig_eax(%esp),%esi # get the error code 8.411 - movl XREGS_eax(%esp),%edi # get the function address 8.412 - movl %eax,XREGS_eax(%esp) 8.413 - movl %esp,%edx 8.414 - pushl %esi # push the error code 8.415 + SAVE_ALL_NOSEGREGS(a) 8.416 + SET_XEN_SEGMENTS(a) 8.417 + testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp) 8.418 + jz exception_with_ints_disabled 8.419 + sti # re-enable interrupts 8.420 + xorl %eax,%eax 8.421 + movw XREGS_entry_vector(%esp),%ax 8.422 + movl %esp,%edx 8.423 pushl %edx # push the xen_regs pointer 8.424 GET_CURRENT(%ebx) 8.425 - call *%edi 8.426 - addl $8,%esp 8.427 + call *SYMBOL_NAME(exception_table)(,%eax,4) 8.428 + addl $4,%esp 8.429 movb XREGS_cs(%esp),%al 8.430 testb $3,%al 8.431 - je restore_all_xen 8.432 + jz restore_all_xen 8.433 jmp process_guest_exception_and_events 8.434 8.435 +exception_with_ints_disabled: 8.436 + movb XREGS_cs(%esp),%al 8.437 + testb $3,%al # interrupts disabled outside Xen? 8.438 + jnz FATAL_exception_with_ints_disabled 8.439 + pushl XREGS_eip(%esp) 8.440 + call search_pre_exception_table 8.441 + addl $4,%esp 8.442 + testl %eax,%eax # no fixup code for faulting EIP? 
8.443 + jz FATAL_exception_with_ints_disabled 8.444 + movl %eax,XREGS_eip(%esp) 8.445 + movl %esp,%esi 8.446 + subl $4,%esp 8.447 + movl %esp,%edi 8.448 + movl $XREGS_kernel_sizeof/4,%ecx 8.449 + rep; movsl # make room for error_code/entry_vector 8.450 + movl XREGS_error_code(%esp),%eax # error_code/entry_vector 8.451 + movl %eax,XREGS_kernel_sizeof(%esp) 8.452 + jmp restore_all_xen # return to fixup code 8.453 + 8.454 +FATAL_exception_with_ints_disabled: 8.455 + xorl %esi,%esi 8.456 + movw XREGS_entry_vector(%esp),%si 8.457 + movl %esp,%edx 8.458 + pushl %edx # push the xen_regs pointer 8.459 + pushl %esi # push the trapnr (entry vector) 8.460 + call SYMBOL_NAME(fatal_trap) 8.461 + ud2 8.462 + 8.463 ENTRY(coprocessor_error) 8.464 - pushl $0 8.465 - pushl $ SYMBOL_NAME(do_coprocessor_error) 8.466 + pushl $TRAP_copro_error<<16 8.467 jmp error_code 8.468 8.469 ENTRY(simd_coprocessor_error) 8.470 - pushl $0 8.471 - pushl $ SYMBOL_NAME(do_simd_coprocessor_error) 8.472 + pushl $TRAP_simd_error<<16 8.473 jmp error_code 8.474 8.475 ENTRY(device_not_available) 8.476 - pushl $0 8.477 - pushl $SYMBOL_NAME(math_state_restore) 8.478 + pushl $TRAP_no_device<<16 8.479 jmp error_code 8.480 8.481 ENTRY(debug) 8.482 - pushl $0 8.483 - pushl $ SYMBOL_NAME(do_debug) 8.484 + pushl $TRAP_debug<<16 8.485 jmp error_code 8.486 8.487 ENTRY(int3) 8.488 - pushl $0 8.489 - pushl $ SYMBOL_NAME(do_int3) 8.490 + pushl $TRAP_int3<<16 8.491 jmp error_code 8.492 8.493 ENTRY(overflow) 8.494 - pushl $0 8.495 - pushl $ SYMBOL_NAME(do_overflow) 8.496 + pushl $TRAP_overflow<<16 8.497 jmp error_code 8.498 8.499 ENTRY(bounds) 8.500 - pushl $0 8.501 - pushl $ SYMBOL_NAME(do_bounds) 8.502 + pushl $TRAP_bounds<<16 8.503 jmp error_code 8.504 8.505 ENTRY(invalid_op) 8.506 - pushl $0 8.507 - pushl $ SYMBOL_NAME(do_invalid_op) 8.508 + pushl $TRAP_invalid_op<<16 8.509 jmp error_code 8.510 8.511 ENTRY(coprocessor_segment_overrun) 8.512 - pushl $0 8.513 - pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun) 8.514 + pushl $TRAP_copro_seg<<16 8.515 jmp error_code 8.516 8.517 ENTRY(invalid_TSS) 8.518 - pushl $ SYMBOL_NAME(do_invalid_TSS) 8.519 + movw $TRAP_invalid_tss,2(%esp) 8.520 jmp error_code 8.521 8.522 ENTRY(segment_not_present) 8.523 - pushl $ SYMBOL_NAME(do_segment_not_present) 8.524 + movw $TRAP_no_segment,2(%esp) 8.525 jmp error_code 8.526 8.527 ENTRY(stack_segment) 8.528 - pushl $ SYMBOL_NAME(do_stack_segment) 8.529 + movw $TRAP_stack_error,2(%esp) 8.530 jmp error_code 8.531 8.532 ENTRY(general_protection) 8.533 - pushl $ SYMBOL_NAME(do_general_protection) 8.534 + movw $TRAP_gp_fault,2(%esp) 8.535 jmp error_code 8.536 8.537 ENTRY(alignment_check) 8.538 - pushl $ SYMBOL_NAME(do_alignment_check) 8.539 + movw $TRAP_alignment_check,2(%esp) 8.540 jmp error_code 8.541 8.542 ENTRY(page_fault) 8.543 - pushl $ SYMBOL_NAME(do_page_fault) 8.544 + movw $TRAP_page_fault,2(%esp) 8.545 jmp error_code 8.546 8.547 ENTRY(machine_check) 8.548 - pushl $0 8.549 - pushl $ SYMBOL_NAME(do_machine_check) 8.550 + pushl $TRAP_machine_check<<16 8.551 jmp error_code 8.552 8.553 ENTRY(spurious_interrupt_bug) 8.554 - pushl $0 8.555 - pushl $ SYMBOL_NAME(do_spurious_interrupt_bug) 8.556 + pushl $TRAP_spurious_int<<16 8.557 jmp error_code 8.558 8.559 ENTRY(nmi) 8.560 @@ -521,7 +556,7 @@ ENTRY(nmi) 8.561 # epilogue code. 
8.562 movb XREGS_cs(%esp),%al 8.563 testb $3,%al 8.564 - jne do_watchdog_tick 8.565 + jnz do_watchdog_tick 8.566 movl XREGS_ds(%esp),%eax 8.567 cmpw $(__HYPERVISOR_DS),%ax 8.568 jne restore_all_xen 8.569 @@ -546,7 +581,7 @@ do_watchdog_tick: 8.570 addl $8,%esp 8.571 movb XREGS_cs(%esp),%al 8.572 testb $3,%al 8.573 - je restore_all_xen 8.574 + jz restore_all_xen 8.575 GET_CURRENT(%ebx) 8.576 jmp restore_all_guest 8.577 8.578 @@ -591,6 +626,29 @@ nmi_io_err: 8.579 jmp ret_from_intr 8.580 8.581 .data 8.582 + 8.583 +ENTRY(exception_table) 8.584 + .long SYMBOL_NAME(do_divide_error) 8.585 + .long SYMBOL_NAME(do_debug) 8.586 + .long 0 # nmi 8.587 + .long SYMBOL_NAME(do_int3) 8.588 + .long SYMBOL_NAME(do_overflow) 8.589 + .long SYMBOL_NAME(do_bounds) 8.590 + .long SYMBOL_NAME(do_invalid_op) 8.591 + .long SYMBOL_NAME(math_state_restore) 8.592 + .long 0 # double fault 8.593 + .long SYMBOL_NAME(do_coprocessor_segment_overrun) 8.594 + .long SYMBOL_NAME(do_invalid_TSS) 8.595 + .long SYMBOL_NAME(do_segment_not_present) 8.596 + .long SYMBOL_NAME(do_stack_segment) 8.597 + .long SYMBOL_NAME(do_general_protection) 8.598 + .long SYMBOL_NAME(do_page_fault) 8.599 + .long SYMBOL_NAME(do_spurious_interrupt_bug) 8.600 + .long SYMBOL_NAME(do_coprocessor_error) 8.601 + .long SYMBOL_NAME(do_alignment_check) 8.602 + .long SYMBOL_NAME(do_machine_check) 8.603 + .long SYMBOL_NAME(do_simd_coprocessor_error) 8.604 + 8.605 ENTRY(hypercall_table) 8.606 .long SYMBOL_NAME(do_set_trap_table) /* 0 */ 8.607 .long SYMBOL_NAME(do_mmu_update)
9.1 --- a/xen/arch/x86/x86_32/seg_fixup.c Mon Nov 22 23:08:21 2004 +0000 9.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c Tue Nov 23 22:48:27 2004 +0000 9.3 @@ -467,7 +467,7 @@ int gpf_emulate_4gb(struct xen_regs *reg 9.4 { 9.5 ti = &d->thread.traps[15]; 9.6 tb = &d->thread.trap_bounce; 9.7 - tb->flags = TBF_TRAP; 9.8 + tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE; 9.9 tb->error_code = pb - eip; 9.10 tb->cs = ti->cs; 9.11 tb->eip = ti->address;
10.1 --- a/xen/arch/x86/x86_32/xen.lds Mon Nov 22 23:08:21 2004 +0000 10.2 +++ b/xen/arch/x86/x86_32/xen.lds Tue Nov 23 22:48:27 2004 +0000 10.3 @@ -1,6 +1,6 @@ 10.4 /* ld script to make i386 Linux kernel 10.5 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> 10.6 - * Modifified for i386 Xen by Keir Fraser 10.7 + * Modified for i386 Xen by Keir Fraser 10.8 */ 10.9 OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") 10.10 OUTPUT_ARCH(i386) 10.11 @@ -30,6 +30,11 @@ SECTIONS 10.12 __ex_table : { *(__ex_table) } :text 10.13 __stop___ex_table = .; 10.14 10.15 + . = ALIGN(16); /* Pre-exception table */ 10.16 + __start___pre_ex_table = .; 10.17 + __pre_ex_table : { *(__pre_ex_table) } :text 10.18 + __stop___pre_ex_table = .; 10.19 + 10.20 __start___ksymtab = .; /* Kernel symbol table */ 10.21 __ksymtab : { *(__ksymtab) } :text 10.22 __stop___ksymtab = .;
11.1 --- a/xen/include/asm-x86/debugger.h Mon Nov 22 23:08:21 2004 +0000 11.2 +++ b/xen/include/asm-x86/debugger.h Tue Nov 23 22:48:27 2004 +0000 11.3 @@ -25,17 +25,17 @@ 11.4 #include <asm/processor.h> 11.5 11.6 /* The main trap handlers use these helper macros which include early bail. */ 11.7 -#define DEBUGGER_trap_entry(_v, _r, _e) \ 11.8 - if ( debugger_trap_entry(_v, _r, _e) ) return EXCRET_fault_fixed; 11.9 -#define DEBUGGER_trap_fatal(_v, _r, _e) \ 11.10 - if ( debugger_trap_fatal(_v, _r, _e) ) return EXCRET_fault_fixed; 11.11 +#define DEBUGGER_trap_entry(_v, _r) \ 11.12 + if ( debugger_trap_entry(_v, _r) ) return EXCRET_fault_fixed; 11.13 +#define DEBUGGER_trap_fatal(_v, _r) \ 11.14 + if ( debugger_trap_fatal(_v, _r) ) return EXCRET_fault_fixed; 11.15 11.16 #ifdef XEN_DEBUGGER 11.17 11.18 #include <asm/pdb.h> 11.19 11.20 static inline int debugger_trap_entry( 11.21 - unsigned int vector, struct xen_regs *regs, unsigned int error_code) 11.22 + unsigned int vector, struct xen_regs *regs) 11.23 { 11.24 int ret = 0; 11.25 11.26 @@ -44,7 +44,7 @@ static inline int debugger_trap_entry( 11.27 case TRAP_debug: 11.28 if ( pdb_initialized ) 11.29 { 11.30 - pdb_handle_debug_trap(regs, (long)error_code); 11.31 + pdb_handle_debug_trap(regs, regs->error_code); 11.32 ret = 1; /* early exit */ 11.33 } 11.34 break; 11.35 @@ -55,13 +55,14 @@ static inline int debugger_trap_entry( 11.36 break; 11.37 11.38 case TRAP_gp_fault: 11.39 - if ( ((regs->cs & 3) != 0) && ((error_code & 3) == 2) && 11.40 + if ( ((regs->cs & 3) != 0) && ((regs->error_code & 3) == 2) && 11.41 pdb_initialized && (pdb_ctx.system_call != 0) ) 11.42 { 11.43 unsigned long cr3 = read_cr3(); 11.44 if ( cr3 == pdb_ctx.ptbr ) 11.45 pdb_linux_syscall_enter_bkpt( 11.46 - regs, error_code, current->thread.traps + (error_code>>3)); 11.47 + regs, regs->error_code, 11.48 + current->thread.traps + (regs->error_code>>3)); 11.49 } 11.50 break; 11.51 } 11.52 @@ -70,7 +71,7 @@ static inline int debugger_trap_entry( 11.53 } 11.54 11.55 static inline int debugger_trap_fatal( 11.56 - unsigned int vector, struct xen_regs *regs, unsigned int error_code) 11.57 + unsigned int vector, struct xen_regs *regs) 11.58 { 11.59 int ret = 0; 11.60 11.61 @@ -96,21 +97,21 @@ static inline int debugger_trap_fatal( 11.62 extern int kdb_trap(int, int, struct xen_regs *); 11.63 11.64 static inline int debugger_trap_entry( 11.65 - unsigned int vector, struct xen_regs *regs, unsigned int error_code) 11.66 + unsigned int vector, struct xen_regs *regs) 11.67 { 11.68 return 0; 11.69 } 11.70 11.71 static inline int debugger_trap_fatal( 11.72 - unsigned int vector, struct xen_regs *regs, unsigned int error_code) 11.73 + unsigned int vector, struct xen_regs *regs) 11.74 { 11.75 return kdb_trap(vector, 0, regs); 11.76 } 11.77 11.78 #else 11.79 11.80 -#define debugger_trap_entry(_v, _r, _e) (0) 11.81 -#define debugger_trap_fatal(_v, _r, _e) (0) 11.82 +#define debugger_trap_entry(_v, _r) (0) 11.83 +#define debugger_trap_fatal(_v, _r) (0) 11.84 11.85 #endif 11.86
12.1 --- a/xen/include/asm-x86/irq.h Mon Nov 22 23:08:21 2004 +0000 12.2 +++ b/xen/include/asm-x86/irq.h Tue Nov 23 22:48:27 2004 +0000 12.3 @@ -91,7 +91,7 @@ asmlinkage void call_##x(void); \ 12.4 __asm__( \ 12.5 "\n"__ALIGN_STR"\n" \ 12.6 SYMBOL_NAME_STR(x) ":\n\t" \ 12.7 - "push"__OS" $"#v"\n\t" \ 12.8 + "push"__OS" $"#v"<<16\n\t" \ 12.9 SAVE_ALL(a) \ 12.10 SYMBOL_NAME_STR(call_##x)":\n\t" \ 12.11 "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \ 12.12 @@ -104,7 +104,7 @@ asmlinkage void call_##x(void); \ 12.13 __asm__( \ 12.14 "\n"__ALIGN_STR"\n" \ 12.15 SYMBOL_NAME_STR(x) ":\n\t" \ 12.16 - "push"__OS" $"#v"\n\t" \ 12.17 + "push"__OS" $"#v"<<16\n\t" \ 12.18 SAVE_ALL(a) \ 12.19 "mov %"__OP"sp,%"__OP"ax\n\t" \ 12.20 "push %"__OP"ax\n\t" \ 12.21 @@ -131,7 +131,7 @@ asmlinkage void IRQ_NAME(nr); \ 12.22 __asm__( \ 12.23 "\n"__ALIGN_STR"\n" \ 12.24 SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ 12.25 - "push"__OS" $"#nr"\n\t" \ 12.26 + "push"__OS" $"#nr"<<16\n\t" \ 12.27 "jmp common_interrupt"); 12.28 12.29 extern unsigned long prof_cpu_mask;
13.1 --- a/xen/include/asm-x86/processor.h Mon Nov 22 23:08:21 2004 +0000 13.2 +++ b/xen/include/asm-x86/processor.h Tue Nov 23 22:48:27 2004 +0000 13.3 @@ -120,9 +120,16 @@ 13.4 /* 13.5 * 'trap_bounce' flags values. 13.6 */ 13.7 -#define TBF_TRAP 1 13.8 -#define TBF_TRAP_NOCODE 2 13.9 -#define TBF_TRAP_CR2 4 13.10 +#define TBF_EXCEPTION 1 13.11 +#define TBF_EXCEPTION_ERRCODE 2 13.12 +#define TBF_EXCEPTION_CR2 4 13.13 +#define TBF_INTERRUPT 8 13.14 +#define TBF_FAILSAFE 16 13.15 + 13.16 +/* 13.17 + * thread.flags values. 13.18 + */ 13.19 +#define TF_failsafe_return 1 13.20 13.21 #ifndef __ASSEMBLY__ 13.22 13.23 @@ -339,6 +346,8 @@ struct thread_struct { 13.24 unsigned long guestos_sp; 13.25 unsigned long guestos_ss; 13.26 13.27 + unsigned long flags; /* TF_ */ 13.28 + 13.29 /* Hardware debugging registers */ 13.30 unsigned long debugreg[8]; /* %%db0-7 debug registers */ 13.31 13.32 @@ -538,7 +547,7 @@ void show_guest_stack(); 13.33 void show_trace(unsigned long *esp); 13.34 void show_stack(unsigned long *esp); 13.35 void show_registers(struct xen_regs *regs); 13.36 -asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs, long error_code); 13.37 +asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs); 13.38 13.39 #endif /* !__ASSEMBLY__ */ 13.40
14.1 --- a/xen/include/asm-x86/x86_32/regs.h Mon Nov 22 23:08:21 2004 +0000 14.2 +++ b/xen/include/asm-x86/x86_32/regs.h Tue Nov 23 22:48:27 2004 +0000 14.3 @@ -1,29 +1,32 @@ 14.4 #ifndef _I386_REGS_H 14.5 #define _I386_REGS_H 14.6 14.7 +#include <xen/types.h> 14.8 + 14.9 struct xen_regs 14.10 { 14.11 /* All saved activations contain the following fields. */ 14.12 - long ebx; 14.13 - long ecx; 14.14 - long edx; 14.15 - long esi; 14.16 - long edi; 14.17 - long ebp; 14.18 - long eax; 14.19 - long orig_eax; 14.20 - long eip; 14.21 - int cs; 14.22 - long eflags; 14.23 + u32 ebx; 14.24 + u32 ecx; 14.25 + u32 edx; 14.26 + u32 esi; 14.27 + u32 edi; 14.28 + u32 ebp; 14.29 + u32 eax; 14.30 + u16 error_code; 14.31 + u16 entry_vector; 14.32 + u32 eip; 14.33 + u32 cs; 14.34 + u32 eflags; 14.35 14.36 /* Only saved guest activations contain the following fields. */ 14.37 - long esp; 14.38 - int ss; 14.39 - int es; 14.40 - int ds; 14.41 - int fs; 14.42 - int gs; 14.43 -}; 14.44 + u32 esp; 14.45 + u32 ss; 14.46 + u32 es; 14.47 + u32 ds; 14.48 + u32 fs; 14.49 + u32 gs; 14.50 +} __attribute__ ((packed)); 14.51 14.52 enum EFLAGS { 14.53 EF_CF = 0x00000001,
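The frame now uses fixed-width, packed fields, and the old `orig_eax` slot is split into two 16-bit halves. On little-endian i386 that is exactly why the stubs can write the vector with a single `pushl $TRAP_xxx<<16` (error_code becomes 0) or patch it in with `movw $TRAP_xxx,2(%esp)` (preserving a CPU-pushed error code in the low half). A small standalone sketch of that layout assumption, using vector 18 (machine check, no CPU error code) and vector 14 (page fault, CPU error code) as examples:

```c
#include <assert.h>
#include <stdint.h>

/* Little-endian view of the 32-bit slot that replaces orig_eax. */
union trap_word {
    uint32_t raw;
    struct {
        uint16_t error_code;    /* low 16 bits  */
        uint16_t entry_vector;  /* high 16 bits */
    } f;
};

int main(void)
{
    union trap_word w = { .raw = 18u << 16 };   /* e.g. "pushl $TRAP_machine_check<<16" */
    assert(w.f.entry_vector == 18 && w.f.error_code == 0);

    w.raw = 0x0006;                             /* CPU-pushed page-fault error code     */
    w.f.entry_vector = 14;                      /* "movw $TRAP_page_fault,2(%esp)"      */
    assert(w.raw == ((14u << 16) | 0x0006));
    return 0;
}
```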