ia64/xen-unstable
changeset 3167:00328a1830fb
bitkeeper revision 1.1159.1.465 (41a87e70oLTK39bsiBf2aLAqPrQCVg)
sync w/ head.
author | cl349@arcadians.cl.cam.ac.uk |
---|---|
date | Sat Nov 27 13:17:36 2004 +0000 (2004-11-27) |
parents | ead1a6850b90 20290eb62e95 |
children | 8f9298dbe845 |
files | linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c xen/arch/x86/x86_32/entry.S |
line diff
1.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S Fri Nov 26 18:24:37 2004 +0000 1.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S Sat Nov 27 13:17:36 2004 +0000 1.3 @@ -64,6 +64,7 @@ ES = 0x20 1.4 ORIG_EAX = 0x24 1.5 EIP = 0x28 1.6 CS = 0x2C 1.7 +EVENT_MASK = 0x2E 1.8 EFLAGS = 0x30 1.9 OLDESP = 0x34 1.10 OLDSS = 0x38 1.11 @@ -91,23 +92,16 @@ VM_MASK = 0x00020000 1.12 1.13 #define XEN_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg) 1.14 #define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg) 1.15 -#define XEN_TEST_PENDING(reg) testb $0x1,evtchn_upcall_pending(reg) 1.16 +#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg) 1.17 1.18 #ifdef CONFIG_PREEMPT 1.19 -#ifdef CONFIG_SMP 1.20 -#define preempt_stop GET_THREAD_INFO(%ebp) ; \ 1.21 - XEN_GET_VCPU_INFO(%esi) ; \ 1.22 - XEN_BLOCK_EVENTS(%esi) 1.23 -#else 1.24 -#define preempt_stop XEN_GET_VCPU_INFO(%esi) ; \ 1.25 - XEN_BLOCK_EVENTS(%esi) 1.26 -#endif 1.27 +#define preempt_stop XEN_BLOCK_EVENTS(%esi) 1.28 #else 1.29 #define preempt_stop 1.30 #define resume_kernel restore_all 1.31 #endif 1.32 1.33 -#define SAVE_ALL \ 1.34 +#define SAVE_ALL_NO_EVENTMASK \ 1.35 cld; \ 1.36 pushl %es; \ 1.37 pushl %ds; \ 1.38 @@ -120,7 +114,13 @@ VM_MASK = 0x00020000 1.39 pushl %ebx; \ 1.40 movl $(__USER_DS), %edx; \ 1.41 movl %edx, %ds; \ 1.42 - movl %edx, %es; 1.43 + movl %edx, %es 1.44 + 1.45 +#define SAVE_ALL \ 1.46 + SAVE_ALL_NO_EVENTMASK; \ 1.47 + XEN_GET_VCPU_INFO(%esi); \ 1.48 + movb evtchn_upcall_mask(%esi), %dl; \ 1.49 + movb %dl, EVENT_MASK(%esp) 1.50 1.51 #define RESTORE_INT_REGS \ 1.52 popl %ebx; \ 1.53 @@ -224,35 +224,30 @@ ret_from_intr: 1.54 testl $(VM_MASK | 2), %eax 1.55 jz resume_kernel # returning to kernel or vm86-space 1.56 ENTRY(resume_userspace) 1.57 - XEN_GET_VCPU_INFO(%esi) 1.58 - XEN_BLOCK_EVENTS(%esi) # make tests atomic 1.59 - # make sure we don't miss an interrupt 1.60 + XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt 1.61 # setting 
need_resched or sigpending 1.62 # between sampling and the iret 1.63 -ret_syscall_tests: 1.64 movl TI_flags(%ebp), %ecx 1.65 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on 1.66 # int/exception return? 1.67 jne work_pending 1.68 - jmp restore_all_enable_events 1.69 + jmp restore_all 1.70 1.71 #ifdef CONFIG_PREEMPT 1.72 ENTRY(resume_kernel) 1.73 - XEN_GET_VCPU_INFO(%esi) 1.74 cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? 1.75 - jnz restore_all_enable_events 1.76 + jnz restore_all 1.77 need_resched: 1.78 movl TI_flags(%ebp), %ecx # need_resched set ? 1.79 testb $_TIF_NEED_RESCHED, %cl 1.80 - jz restore_all_enable_events 1.81 + jz restore_all 1.82 testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ? 1.83 - jz restore_all_enable_events 1.84 + jz restore_all 1.85 movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp) 1.86 - XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks 1.87 + XEN_UNBLOCK_EVENTS(%esi) 1.88 call schedule 1.89 movl $0,TI_preempt_count(%ebp) 1.90 - XEN_GET_VCPU_INFO(%esi) 1.91 - XEN_BLOCK_EVENTS(%esi) # make tests atomic 1.92 + XEN_BLOCK_EVENTS(%esi) 1.93 jmp need_resched 1.94 #endif 1.95 1.96 @@ -285,11 +280,11 @@ 1: movl (%ebp),%ebp 1.97 pushl %eax 1.98 SAVE_ALL 1.99 GET_THREAD_INFO(%ebp) 1.100 - cmpl $(nr_syscalls), %eax 1.101 - jae syscall_badsys 1.102 1.103 testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp) 1.104 jnz syscall_trace_entry 1.105 + cmpl $(nr_syscalls), %eax 1.106 + jae syscall_badsys 1.107 call *sys_call_table(,%eax,4) 1.108 movl %eax,EAX(%esp) 1.109 cli 1.110 @@ -308,48 +303,43 @@ ENTRY(system_call) 1.111 pushl %eax # save orig_eax 1.112 SAVE_ALL 1.113 GET_THREAD_INFO(%ebp) 1.114 - cmpl $(nr_syscalls), %eax 1.115 - jae syscall_badsys 1.116 # system call tracing in operation 1.117 testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp) 1.118 jnz syscall_trace_entry 1.119 + cmpl $(nr_syscalls), %eax 1.120 + jae syscall_badsys 1.121 syscall_call: 1.122 call *sys_call_table(,%eax,4) 1.123 movl 
%eax,EAX(%esp) # store the return value 1.124 syscall_exit: 1.125 - XEN_GET_VCPU_INFO(%esi) 1.126 - XEN_BLOCK_EVENTS(%esi) # make tests atomic 1.127 - # make sure we don't miss an interrupt 1.128 + XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt 1.129 # setting need_resched or sigpending 1.130 # between sampling and the iret 1.131 movl TI_flags(%ebp), %ecx 1.132 testw $_TIF_ALLWORK_MASK, %cx # current->work 1.133 jne syscall_exit_work 1.134 - jmp restore_all_enable_events 1.135 - 1.136 - ALIGN 1.137 restore_all: 1.138 + movb EVENT_MASK(%esp), %al 1.139 + notb %al # %al == ~saved_mask 1.140 + andb evtchn_upcall_mask(%esi),%al 1.141 + andb $1,%al # %al == mask & ~saved_mask 1.142 + jnz restore_all_enable_events # != 0 => reenable event delivery 1.143 RESTORE_ALL 1.144 1.145 # perform work that needs to be done immediately before resumption 1.146 ALIGN 1.147 work_pending: 1.148 - XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks 1.149 testb $_TIF_NEED_RESCHED, %cl 1.150 jz work_notifysig 1.151 work_resched: 1.152 call schedule 1.153 - XEN_GET_VCPU_INFO(%esi) 1.154 - XEN_BLOCK_EVENTS(%esi) # make tests atomic 1.155 - # make sure we don't miss an interrupt 1.156 + XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt 1.157 # setting need_resched or sigpending 1.158 # between sampling and the iret 1.159 movl TI_flags(%ebp), %ecx 1.160 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other 1.161 # than syscall tracing? 1.162 - jz restore_all_enable_events 1.163 - # XXXcl sti missing??? 
1.164 - XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks 1.165 + jz restore_all 1.166 testb $_TIF_NEED_RESCHED, %cl 1.167 jnz work_resched 1.168 1.169 @@ -361,8 +351,7 @@ work_notifysig: # deal with pending s 1.170 # vm86-space 1.171 xorl %edx, %edx 1.172 call do_notify_resume 1.173 - XEN_GET_VCPU_INFO(%esi) 1.174 - jmp restore_all_enable_events 1.175 + jmp restore_all 1.176 1.177 ALIGN 1.178 work_notifysig_v86: 1.179 @@ -372,8 +361,7 @@ work_notifysig_v86: 1.180 movl %eax, %esp 1.181 xorl %edx, %edx 1.182 call do_notify_resume 1.183 - XEN_GET_VCPU_INFO(%esi) 1.184 - jmp restore_all_enable_events 1.185 + jmp restore_all 1.186 1.187 # perform syscall exit tracing 1.188 ALIGN 1.189 @@ -390,11 +378,9 @@ syscall_trace_entry: 1.190 # perform syscall exit tracing 1.191 ALIGN 1.192 syscall_exit_work: 1.193 - XEN_GET_VCPU_INFO(%esi) 1.194 - testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT), %cl 1.195 + testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl 1.196 jz work_pending 1.197 - XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks 1.198 - # could let do_syscall_trace() call 1.199 + XEN_UNBLOCK_EVENTS(%esi) # could let do_syscall_trace() call 1.200 # schedule() instead 1.201 movl %esp, %eax 1.202 movl $1, %edx 1.203 @@ -414,6 +400,44 @@ syscall_badsys: 1.204 movl $-ENOSYS,EAX(%esp) 1.205 jmp resume_userspace 1.206 1.207 +#if 0 /* XEN */ 1.208 +/* 1.209 + * Build the entry stubs and pointer table with 1.210 + * some assembler magic. 
1.211 + */ 1.212 +.data 1.213 +ENTRY(interrupt) 1.214 +.text 1.215 + 1.216 +vector=0 1.217 +ENTRY(irq_entries_start) 1.218 +.rept NR_IRQS 1.219 + ALIGN 1.220 +1: pushl $vector-256 1.221 + jmp common_interrupt 1.222 +.data 1.223 + .long 1b 1.224 +.text 1.225 +vector=vector+1 1.226 +.endr 1.227 + 1.228 + ALIGN 1.229 +common_interrupt: 1.230 + SAVE_ALL 1.231 + call do_IRQ 1.232 + jmp ret_from_intr 1.233 + 1.234 +#define BUILD_INTERRUPT(name, nr) \ 1.235 +ENTRY(name) \ 1.236 + pushl $nr-256; \ 1.237 + SAVE_ALL \ 1.238 + call smp_/**/name; \ 1.239 + jmp ret_from_intr; 1.240 + 1.241 +/* The include is where all of the SMP etc. interrupts come from */ 1.242 +#include "entry_arch.h" 1.243 +#endif /* XEN */ 1.244 + 1.245 ENTRY(divide_error) 1.246 pushl $0 # no error code 1.247 pushl $do_divide_error 1.248 @@ -438,9 +462,12 @@ error_code: 1.249 movl %esp, %edx 1.250 pushl %esi # push the error code 1.251 pushl %edx # push the pt_regs pointer 1.252 - movl $(__KERNEL_DS), %edx # XXXcl USER? 1.253 + movl $(__USER_DS), %edx 1.254 movl %edx, %ds 1.255 movl %edx, %es 1.256 + XEN_GET_VCPU_INFO(%esi) 1.257 + movb evtchn_upcall_mask(%esi), %dl 1.258 + movb %dl, EVENT_MASK+8(%esp) 1.259 call *%edi 1.260 addl $8, %esp 1.261 jmp ret_from_exception 1.262 @@ -458,22 +485,22 @@ error_code: 1.263 # activation and restart the handler using the previous one. 
1.264 ENTRY(hypervisor_callback) 1.265 pushl %eax 1.266 - SAVE_ALL 1.267 - GET_THREAD_INFO(%ebp) 1.268 + SAVE_ALL_NO_EVENTMASK 1.269 movl EIP(%esp),%eax 1.270 cmpl $scrit,%eax 1.271 jb 11f 1.272 cmpl $ecrit,%eax 1.273 jb critical_region_fixup 1.274 -11: push %esp 1.275 +11: XEN_GET_VCPU_INFO(%esi) 1.276 + movb $0, EVENT_MASK(%esp) 1.277 + push %esp 1.278 call evtchn_do_upcall 1.279 add $4,%esp 1.280 - XEN_GET_VCPU_INFO(%esi) 1.281 - movb CS(%esp),%cl 1.282 - test $2,%cl # slow return to ring 2 or 3 1.283 - jne ret_syscall_tests 1.284 + jmp ret_from_intr 1.285 + 1.286 + ALIGN 1.287 restore_all_enable_events: 1.288 -safesti:XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks 1.289 + XEN_UNBLOCK_EVENTS(%esi) 1.290 scrit: /**** START OF CRITICAL REGION ****/ 1.291 XEN_TEST_PENDING(%esi) 1.292 jnz 14f # process more events if necessary... 1.293 @@ -599,11 +626,18 @@ ENTRY(debug) 1.294 jne debug_stack_correct 1.295 FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn) 1.296 debug_stack_correct: 1.297 - pushl $0 1.298 - pushl $do_debug 1.299 - jmp error_code 1.300 + pushl $-1 # mark this as an int 1.301 + SAVE_ALL 1.302 + movl %esp,%edx 1.303 + pushl $0 1.304 + pushl %edx 1.305 + call do_debug 1.306 + addl $8,%esp 1.307 + testl %eax,%eax 1.308 + jnz restore_all 1.309 + jmp ret_from_exception 1.310 1.311 -#if 0 1.312 +#if 0 /* XEN */ 1.313 /* 1.314 * NMI is doubly nasty. 
It can happen _while_ we're handling 1.315 * a debug fault, and the debug fault hasn't yet been able to 1.316 @@ -649,12 +683,19 @@ nmi_debug_stack_check: 1.317 nmi_debug_stack_fixup: 1.318 FIX_STACK(24,nmi_stack_correct, 1) 1.319 jmp nmi_stack_correct 1.320 -#endif 1.321 +#endif /* XEN */ 1.322 1.323 ENTRY(int3) 1.324 + pushl $-1 # mark this as an int 1.325 + SAVE_ALL 1.326 + movl %esp,%edx 1.327 pushl $0 1.328 - pushl $do_int3 1.329 - jmp error_code 1.330 + pushl %edx 1.331 + call do_int3 1.332 + addl $8,%esp 1.333 + testl %eax,%eax 1.334 + jnz restore_all 1.335 + jmp ret_from_exception 1.336 1.337 ENTRY(overflow) 1.338 pushl $0 1.339 @@ -676,10 +717,6 @@ ENTRY(coprocessor_segment_overrun) 1.340 pushl $do_coprocessor_segment_overrun 1.341 jmp error_code 1.342 1.343 -ENTRY(double_fault) 1.344 - pushl $do_double_fault 1.345 - jmp error_code 1.346 - 1.347 ENTRY(invalid_TSS) 1.348 pushl $do_invalid_TSS 1.349 jmp error_code 1.350 @@ -702,36 +739,37 @@ ENTRY(alignment_check) 1.351 1.352 # This handler is special, because it gets an extra value on its stack, 1.353 # which is the linear faulting address. 
1.354 -#define PAGE_FAULT_STUB(_name1, _name2) \ 1.355 -ENTRY(_name1) \ 1.356 - pushl %ds ; \ 1.357 - pushl %eax ; \ 1.358 - xorl %eax,%eax ; \ 1.359 - pushl %ebp ; \ 1.360 - pushl %edi ; \ 1.361 - pushl %esi ; \ 1.362 - pushl %edx ; \ 1.363 - decl %eax /* eax = -1 */ ; \ 1.364 - pushl %ecx ; \ 1.365 - pushl %ebx ; \ 1.366 - GET_THREAD_INFO(%ebp) ; \ 1.367 - cld ; \ 1.368 - movl %es,%ecx ; \ 1.369 - movl ORIG_EAX(%esp), %esi /* get the error code */ ; \ 1.370 - movl ES(%esp), %edi /* get the faulting address */ ; \ 1.371 - movl %eax, ORIG_EAX(%esp) ; \ 1.372 - movl %ecx, ES(%esp) ; \ 1.373 - movl %esp,%edx ; \ 1.374 - pushl %edi /* push the faulting address */ ; \ 1.375 - pushl %esi /* push the error code */ ; \ 1.376 - pushl %edx /* push the pt_regs pointer */ ; \ 1.377 - movl $(__KERNEL_DS),%edx ; \ 1.378 - movl %edx,%ds ; \ 1.379 - movl %edx,%es ; \ 1.380 - call _name2 ; \ 1.381 - addl $12,%esp ; \ 1.382 - jmp ret_from_exception ; 1.383 -PAGE_FAULT_STUB(page_fault, do_page_fault) 1.384 +ENTRY(page_fault) 1.385 + pushl %ds 1.386 + pushl %eax 1.387 + xorl %eax,%eax 1.388 + pushl %ebp 1.389 + pushl %edi 1.390 + pushl %esi 1.391 + pushl %edx 1.392 + decl %eax /* eax = -1 */ 1.393 + pushl %ecx 1.394 + pushl %ebx 1.395 + GET_THREAD_INFO(%ebp) 1.396 + cld 1.397 + movl %es,%ecx 1.398 + movl ORIG_EAX(%esp), %esi /* get the error code */ 1.399 + movl ES(%esp), %edi /* get the faulting address */ 1.400 + movl %eax, ORIG_EAX(%esp) 1.401 + movl %ecx, ES(%esp) 1.402 + movl %esp,%edx 1.403 + pushl %edi /* push the faulting address */ 1.404 + pushl %esi /* push the error code */ 1.405 + pushl %edx /* push the pt_regs pointer */ 1.406 + movl $(__KERNEL_DS),%edx 1.407 + movl %edx,%ds 1.408 + movl %edx,%es 1.409 + XEN_GET_VCPU_INFO(%esi) 1.410 + movb evtchn_upcall_mask(%esi), %dl 1.411 + movb %dl, EVENT_MASK+12(%esp) 1.412 + call do_page_fault 1.413 + addl $12,%esp 1.414 + jmp ret_from_exception 1.415 1.416 #ifdef CONFIG_X86_MCE 1.417 ENTRY(machine_check) 1.418 @@ -1030,5 +1068,6 
@@ ENTRY(sys_call_table) 1.419 .long sys_mq_notify 1.420 .long sys_mq_getsetattr 1.421 .long sys_ni_syscall /* reserved for kexec */ 1.422 + .long sys_waitid 1.423 1.424 syscall_table_size=(.-sys_call_table)
2.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c Fri Nov 26 18:24:37 2004 +0000 2.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c Sat Nov 27 13:17:36 2004 +0000 2.3 @@ -78,7 +78,6 @@ asmlinkage void overflow(void); 2.4 asmlinkage void bounds(void); 2.5 asmlinkage void invalid_op(void); 2.6 asmlinkage void device_not_available(void); 2.7 -asmlinkage void double_fault(void); 2.8 asmlinkage void coprocessor_segment_overrun(void); 2.9 asmlinkage void invalid_TSS(void); 2.10 asmlinkage void segment_not_present(void); 2.11 @@ -470,7 +469,6 @@ DO_VM86_ERROR( 4, SIGSEGV, "overflow", o 2.12 DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds) 2.13 DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip) 2.14 DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available) 2.15 -DO_ERROR( 8, SIGSEGV, "double fault", double_fault) 2.16 DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) 2.17 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 2.18 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 2.19 @@ -1032,7 +1030,6 @@ static trap_info_t trap_table[] = { 2.20 { 5, 3, __KERNEL_CS, (unsigned long)bounds }, 2.21 { 6, 0, __KERNEL_CS, (unsigned long)invalid_op }, 2.22 { 7, 0, __KERNEL_CS, (unsigned long)device_not_available }, 2.23 - { 8, 0, __KERNEL_CS, (unsigned long)double_fault }, 2.24 { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun }, 2.25 { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS }, 2.26 { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
3.1 --- a/xen/arch/x86/x86_32/entry.S Fri Nov 26 18:24:37 2004 +0000 3.2 +++ b/xen/arch/x86/x86_32/entry.S Sat Nov 27 13:17:36 2004 +0000 3.3 @@ -339,7 +339,7 @@ error_code: 3.4 SET_XEN_SEGMENTS(a) 3.5 testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp) 3.6 jz exception_with_ints_disabled 3.7 - sti # re-enable interrupts 3.8 +1: sti # re-enable interrupts 3.9 xorl %eax,%eax 3.10 movw XREGS_entry_vector(%esp),%ax 3.11 movl %esp,%edx 3.12 @@ -354,8 +354,8 @@ error_code: 3.13 3.14 exception_with_ints_disabled: 3.15 movb XREGS_cs(%esp),%al 3.16 - testb $3,%al # interrupts disabled outside Xen? 3.17 - jnz FATAL_exception_with_ints_disabled 3.18 + testb $3,%al # interrupts disabled outside Xen? 3.19 + jnz 1b # it really does happen! (e.g., DOM0 X server) 3.20 pushl XREGS_eip(%esp) 3.21 call search_pre_exception_table 3.22 addl $4,%esp