ia64/xen-unstable

changeset 3165:44189f1f4320

bitkeeper revision 1.1159.187.21 (41a85113PA1gYVqMumQ4r2S3y6POGw)

Clean up and fix the 2.6 entry.S:
1. Save and restore the event_mask in SAVE_ALL / restore_all (a C sketch of
the new restore_all test follows this list).
2. No need to keep reloading %esi all over the place as we can load it
once on entry (e.g., SAVE_ALL) and thereafter it is callee-saved.
3. No need for extra XEN_UNBLOCK_EVENTS() where native isn't doing a 'sti',
even though the code looks broken without it -- it is okay to call schedule()
with interrupts off, in which case it will reenable them itself.
4. Fixed another KERNEL_DS -> USER_DS.
5. Unmacroed the page fault handler.
6. A bunch of other tiny fixes....
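
As a sketch of change 1: the new restore_all compares the event mask saved
on entry against the current mask, and takes the slow path only when the
handler itself blocked events.  The C below is illustrative only -- the
authoritative code is the entry.S hunk further down, and the function and
parameter names here are invented for exposition:

    #include <stdbool.h>

    /* Illustrative sketch of the restore_all test.  'saved_mask' is the
     * evtchn_upcall_mask byte that SAVE_ALL stores in the EVENT_MASK
     * stack slot on entry; 'current_mask' is the live value in the
     * shared-info page just before the final return. */
    static bool must_reenable_events(unsigned char saved_mask,
                                     unsigned char current_mask)
    {
        /* Non-zero iff events are masked now but were unmasked on
         * entry: we blocked them ourselves, so take the
         * restore_all_enable_events path (unblock and handle any
         * pending event) instead of returning directly. */
        return (current_mask & ~saved_mask & 1) != 0;
    }
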
author kaf24@scramble.cl.cam.ac.uk
date Sat Nov 27 10:04:03 2004 +0000 (2004-11-27)
parents a46548db5e52
children 20290eb62e95 861d3cdc1dc5
files linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c
line diff
     1.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S	Fri Nov 26 18:21:39 2004 +0000
     1.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S	Sat Nov 27 10:04:03 2004 +0000
     1.3 @@ -64,6 +64,7 @@ ES		= 0x20
     1.4  ORIG_EAX	= 0x24
     1.5  EIP		= 0x28
     1.6  CS		= 0x2C
     1.7 +EVENT_MASK	= 0x2E
     1.8  EFLAGS		= 0x30
     1.9  OLDESP		= 0x34
    1.10  OLDSS		= 0x38
    1.11 @@ -81,17 +82,16 @@ VM_MASK		= 0x00020000
    1.12  
    1.13  #define XEN_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
    1.14  #define XEN_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
    1.15 -#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(%reg)
    1.16 +#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
    1.17  
    1.18  #ifdef CONFIG_PREEMPT
    1.19 -#define preempt_stop		movl HYPERVISOR_shared_info,%esi	; \
    1.20 -				XEN_BLOCK_EVENTS(%esi)
    1.21 +#define preempt_stop		XEN_BLOCK_EVENTS(%esi)
    1.22  #else
    1.23  #define preempt_stop
    1.24  #define resume_kernel		restore_all
    1.25  #endif
    1.26  
    1.27 -#define SAVE_ALL \
    1.28 +#define SAVE_ALL_NO_EVENTMASK \
    1.29  	cld; \
    1.30  	pushl %es; \
    1.31  	pushl %ds; \
    1.32 @@ -104,7 +104,13 @@ VM_MASK		= 0x00020000
    1.33  	pushl %ebx; \
    1.34  	movl $(__USER_DS), %edx; \
    1.35  	movl %edx, %ds; \
    1.36 -	movl %edx, %es;
    1.37 +	movl %edx, %es
    1.38 +
    1.39 +#define SAVE_ALL \
    1.40 +	SAVE_ALL_NO_EVENTMASK; \
    1.41 +	movl HYPERVISOR_shared_info, %esi; \
    1.42 +	movb evtchn_upcall_mask(%esi), %dl; \
    1.43 +	movb %dl, EVENT_MASK(%esp)
    1.44  
    1.45  #define RESTORE_INT_REGS \
    1.46  	popl %ebx;	\
    1.47 @@ -208,35 +214,30 @@ ret_from_intr:
    1.48  	testl $(VM_MASK | 2), %eax
    1.49  	jz resume_kernel		# returning to kernel or vm86-space
    1.50  ENTRY(resume_userspace)
    1.51 -	movl HYPERVISOR_shared_info,%esi
    1.52 -	XEN_BLOCK_EVENTS(%esi)		# make tests atomic
    1.53 -					# make sure we don't miss an interrupt
    1.54 +	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
    1.55  					# setting need_resched or sigpending
    1.56  					# between sampling and the iret
    1.57 -ret_syscall_tests:
    1.58  	movl TI_flags(%ebp), %ecx
    1.59  	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
    1.60  					# int/exception return?
    1.61  	jne work_pending
    1.62 -	jmp restore_all_enable_events
    1.63 +	jmp restore_all
    1.64  
    1.65  #ifdef CONFIG_PREEMPT
    1.66  ENTRY(resume_kernel)
    1.67 -	movl HYPERVISOR_shared_info,%esi
    1.68  	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
    1.69 -	jnz restore_all_enable_events
    1.70 +	jnz restore_all
    1.71  need_resched:
    1.72  	movl TI_flags(%ebp), %ecx	# need_resched set ?
    1.73  	testb $_TIF_NEED_RESCHED, %cl
    1.74 -	jz restore_all_enable_events
    1.75 +	jz restore_all
    1.76  	testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
    1.77 -	jz restore_all_enable_events
    1.78 +	jz restore_all
    1.79  	movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp)
    1.80 -	XEN_UNBLOCK_EVENTS(%esi)	# reenable event callbacks
    1.81 +	XEN_UNBLOCK_EVENTS(%esi)
    1.82  	call schedule
    1.83  	movl $0,TI_preempt_count(%ebp)
    1.84 -	movl HYPERVISOR_shared_info,%esi
    1.85 -	XEN_BLOCK_EVENTS(%esi)		# make tests atomic
    1.86 +	XEN_BLOCK_EVENTS(%esi)
    1.87  	jmp need_resched
    1.88  #endif
    1.89  
    1.90 @@ -269,11 +270,11 @@ 1:	movl (%ebp),%ebp
    1.91  	pushl %eax
    1.92  	SAVE_ALL
    1.93  	GET_THREAD_INFO(%ebp)
    1.94 -	cmpl $(nr_syscalls), %eax
    1.95 -	jae syscall_badsys
    1.96  
    1.97  	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
    1.98  	jnz syscall_trace_entry
    1.99 +	cmpl $(nr_syscalls), %eax
   1.100 +	jae syscall_badsys
   1.101  	call *sys_call_table(,%eax,4)
   1.102  	movl %eax,EAX(%esp)
   1.103  	cli
   1.104 @@ -292,48 +293,43 @@ ENTRY(system_call)
   1.105  	pushl %eax			# save orig_eax
   1.106  	SAVE_ALL
   1.107  	GET_THREAD_INFO(%ebp)
   1.108 -	cmpl $(nr_syscalls), %eax
   1.109 -	jae syscall_badsys
   1.110  					# system call tracing in operation
   1.111  	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
   1.112  	jnz syscall_trace_entry
   1.113 +	cmpl $(nr_syscalls), %eax
   1.114 +	jae syscall_badsys
   1.115  syscall_call:
   1.116  	call *sys_call_table(,%eax,4)
   1.117  	movl %eax,EAX(%esp)		# store the return value
   1.118  syscall_exit:
   1.119 -	movl HYPERVISOR_shared_info,%esi
   1.120 -	XEN_BLOCK_EVENTS(%esi)		# make tests atomic
   1.121 -					# make sure we don't miss an interrupt
   1.122 +	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
   1.123  					# setting need_resched or sigpending
   1.124  					# between sampling and the iret
   1.125  	movl TI_flags(%ebp), %ecx
   1.126  	testw $_TIF_ALLWORK_MASK, %cx	# current->work
   1.127  	jne syscall_exit_work
   1.128 -	jmp restore_all_enable_events
   1.129 -
   1.130 -	ALIGN
   1.131  restore_all:
   1.132 +	movb EVENT_MASK(%esp), %al
   1.133 +	notb %al			# %al == ~saved_mask
   1.134 +	andb evtchn_upcall_mask(%esi),%al
   1.135 +	andb $1,%al			# %al == mask & ~saved_mask
   1.136 +	jnz restore_all_enable_events	#     != 0 => reenable event delivery
   1.137  	RESTORE_ALL
   1.138  
   1.139  	# perform work that needs to be done immediately before resumption
   1.140  	ALIGN
   1.141  work_pending:
   1.142 -	XEN_UNBLOCK_EVENTS(%esi)	# reenable event callbacks
   1.143  	testb $_TIF_NEED_RESCHED, %cl
   1.144  	jz work_notifysig
   1.145  work_resched:
   1.146  	call schedule
   1.147 -	movl HYPERVISOR_shared_info,%esi
   1.148 -	XEN_BLOCK_EVENTS(%esi)		# make tests atomic
   1.149 -					# make sure we don't miss an interrupt
   1.150 +	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
   1.151  					# setting need_resched or sigpending
   1.152  					# between sampling and the iret
   1.153  	movl TI_flags(%ebp), %ecx
   1.154  	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
   1.155  					# than syscall tracing?
   1.156 -	jz restore_all_enable_events
   1.157 -	# XXXcl sti missing???
   1.158 -	XEN_UNBLOCK_EVENTS(%esi)	# reenable event callbacks
   1.159 +	jz restore_all
   1.160  	testb $_TIF_NEED_RESCHED, %cl
   1.161  	jnz work_resched
   1.162  
   1.163 @@ -345,8 +341,7 @@ work_notifysig:				# deal with pending s
   1.164  					# vm86-space
   1.165  	xorl %edx, %edx
   1.166  	call do_notify_resume
   1.167 -	movl HYPERVISOR_shared_info,%esi
   1.168 -	jmp restore_all_enable_events
   1.169 +	jmp restore_all
   1.170  
   1.171  	ALIGN
   1.172  work_notifysig_v86:
   1.173 @@ -356,8 +351,7 @@ work_notifysig_v86:
   1.174  	movl %eax, %esp
   1.175  	xorl %edx, %edx
   1.176  	call do_notify_resume
   1.177 -	movl HYPERVISOR_shared_info,%esi
   1.178 -	jmp restore_all_enable_events
   1.179 +	jmp restore_all
   1.180  
   1.181  	# perform syscall exit tracing
   1.182  	ALIGN
   1.183 @@ -374,11 +368,9 @@ syscall_trace_entry:
   1.184  	# perform syscall exit tracing
   1.185  	ALIGN
   1.186  syscall_exit_work:
   1.187 -	movl HYPERVISOR_shared_info,%esi
   1.188 -	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT), %cl
   1.189 +	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
   1.190  	jz work_pending
   1.191 -	XEN_UNBLOCK_EVENTS(%esi)	# reenable event callbacks
   1.192 -					# could let do_syscall_trace() call
   1.193 +	XEN_UNBLOCK_EVENTS(%esi)	# could let do_syscall_trace() call
   1.194  					# schedule() instead
   1.195  	movl %esp, %eax
   1.196  	movl $1, %edx
   1.197 @@ -398,6 +390,44 @@ syscall_badsys:
   1.198  	movl $-ENOSYS,EAX(%esp)
   1.199  	jmp resume_userspace
   1.200  
   1.201 +#if 0 /* XEN */
   1.202 +/*
   1.203 + * Build the entry stubs and pointer table with
   1.204 + * some assembler magic.
   1.205 + */
   1.206 +.data
   1.207 +ENTRY(interrupt)
   1.208 +.text
   1.209 +
   1.210 +vector=0
   1.211 +ENTRY(irq_entries_start)
   1.212 +.rept NR_IRQS
   1.213 +	ALIGN
   1.214 +1:	pushl $vector-256
   1.215 +	jmp common_interrupt
   1.216 +.data
   1.217 +	.long 1b
   1.218 +.text
   1.219 +vector=vector+1
   1.220 +.endr
   1.221 +
   1.222 +	ALIGN
   1.223 +common_interrupt:
   1.224 +	SAVE_ALL
   1.225 +	call do_IRQ
   1.226 +	jmp ret_from_intr
   1.227 +
   1.228 +#define BUILD_INTERRUPT(name, nr)	\
   1.229 +ENTRY(name)				\
   1.230 +	pushl $nr-256;			\
   1.231 +	SAVE_ALL			\
   1.232 +	call smp_/**/name;	\
   1.233 +	jmp ret_from_intr;
   1.234 +
   1.235 +/* The include is where all of the SMP etc. interrupts come from */
   1.236 +#include "entry_arch.h"
   1.237 +#endif /* XEN */
   1.238 +
   1.239  ENTRY(divide_error)
   1.240  	pushl $0			# no error code
   1.241  	pushl $do_divide_error
   1.242 @@ -422,9 +452,12 @@ error_code:
   1.243  	movl %esp, %edx
   1.244  	pushl %esi			# push the error code
   1.245  	pushl %edx			# push the pt_regs pointer
   1.246 -	movl $(__KERNEL_DS), %edx	# XXXcl USER?
   1.247 +	movl $(__USER_DS), %edx
   1.248  	movl %edx, %ds
   1.249  	movl %edx, %es
   1.250 +	movl HYPERVISOR_shared_info, %esi
   1.251 +	movb evtchn_upcall_mask(%esi), %dl
   1.252 +	movb %dl, EVENT_MASK+8(%esp)
   1.253  	call *%edi
   1.254  	addl $8, %esp
   1.255  	jmp ret_from_exception
   1.256 @@ -442,24 +475,24 @@ error_code:
   1.257  # activation and restart the handler using the previous one.
   1.258  ENTRY(hypervisor_callback)
   1.259  	pushl %eax
   1.260 -	SAVE_ALL
   1.261 -	GET_THREAD_INFO(%ebp)
   1.262 +	SAVE_ALL_NO_EVENTMASK
   1.263  	movl EIP(%esp),%eax
   1.264  	cmpl $scrit,%eax
   1.265  	jb   11f
   1.266  	cmpl $ecrit,%eax
   1.267  	jb   critical_region_fixup
   1.268 -11:	push %esp
   1.269 +11:	movl HYPERVISOR_shared_info, %esi
   1.270 +	movb $0, EVENT_MASK(%esp)
   1.271 +	push %esp
   1.272  	call evtchn_do_upcall
   1.273  	add  $4,%esp
   1.274 -	movl HYPERVISOR_shared_info,%esi
   1.275 -	movb CS(%esp),%cl
   1.276 -	test $2,%cl			# slow return to ring 2 or 3
   1.277 -	jne  ret_syscall_tests
   1.278 +	jmp  ret_from_intr
   1.279 +
   1.280 +        ALIGN
   1.281  restore_all_enable_events:  
   1.282 -safesti:XEN_UNBLOCK_EVENTS(%esi)	# reenable event callbacks
   1.283 +	XEN_UNBLOCK_EVENTS(%esi)
   1.284  scrit:	/**** START OF CRITICAL REGION ****/
   1.285 -	testb $1,evtchn_upcall_pending(%esi)
   1.286 +	XEN_TEST_PENDING(%esi)
   1.287  	jnz  14f			# process more events if necessary...
   1.288  	RESTORE_ALL
   1.289  14:	XEN_BLOCK_EVENTS(%esi)
   1.290 @@ -583,11 +616,18 @@ ENTRY(debug)
   1.291  	jne debug_stack_correct
   1.292  	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
   1.293  debug_stack_correct:
   1.294 -	pushl $0
   1.295 -	pushl $do_debug
   1.296 -	jmp error_code
   1.297 +	pushl $-1			# mark this as an int
   1.298 +	SAVE_ALL
   1.299 +	movl %esp,%edx
   1.300 +  	pushl $0
   1.301 +	pushl %edx
   1.302 +	call do_debug
   1.303 +	addl $8,%esp
   1.304 +	testl %eax,%eax
   1.305 +	jnz restore_all
   1.306 +	jmp ret_from_exception
   1.307  
   1.308 -#if 0
   1.309 +#if 0 /* XEN */
   1.310  /*
   1.311   * NMI is doubly nasty. It can happen _while_ we're handling
   1.312   * a debug fault, and the debug fault hasn't yet been able to
   1.313 @@ -633,12 +673,19 @@ nmi_debug_stack_check:
   1.314  nmi_debug_stack_fixup:
   1.315  	FIX_STACK(24,nmi_stack_correct, 1)
   1.316  	jmp nmi_stack_correct
   1.317 -#endif
   1.318 +#endif /* XEN */
   1.319  
   1.320  ENTRY(int3)
   1.321 +	pushl $-1			# mark this as an int
   1.322 +	SAVE_ALL
   1.323 +	movl %esp,%edx
   1.324  	pushl $0
   1.325 -	pushl $do_int3
   1.326 -	jmp error_code
   1.327 +	pushl %edx
   1.328 +	call do_int3
   1.329 +	addl $8,%esp
   1.330 +	testl %eax,%eax
   1.331 +	jnz restore_all
   1.332 +	jmp ret_from_exception
   1.333  
   1.334  ENTRY(overflow)
   1.335  	pushl $0
   1.336 @@ -660,10 +707,6 @@ ENTRY(coprocessor_segment_overrun)
   1.337  	pushl $do_coprocessor_segment_overrun
   1.338  	jmp error_code
   1.339  
   1.340 -ENTRY(double_fault)
   1.341 -	pushl $do_double_fault
   1.342 -	jmp error_code
   1.343 -
   1.344  ENTRY(invalid_TSS)
   1.345  	pushl $do_invalid_TSS
   1.346  	jmp error_code
   1.347 @@ -686,36 +729,37 @@ ENTRY(alignment_check)
   1.348  
   1.349  # This handler is special, because it gets an extra value on its stack,
   1.350  # which is the linear faulting address.
   1.351 -#define PAGE_FAULT_STUB(_name1, _name2)					  \
   1.352 -ENTRY(_name1)								  \
   1.353 -	pushl %ds							; \
   1.354 -	pushl %eax							; \
   1.355 -	xorl %eax,%eax							; \
   1.356 -	pushl %ebp							; \
   1.357 -	pushl %edi							; \
   1.358 -	pushl %esi							; \
   1.359 -	pushl %edx							; \
   1.360 -	decl %eax			/* eax = -1 */			; \
   1.361 -	pushl %ecx							; \
   1.362 -	pushl %ebx							; \
   1.363 -	GET_THREAD_INFO(%ebp)						; \
   1.364 -	cld								; \
   1.365 -	movl %es,%ecx							; \
   1.366 -	movl ORIG_EAX(%esp), %esi	/* get the error code */	; \
   1.367 -	movl ES(%esp), %edi		/* get the faulting address */	; \
   1.368 -	movl %eax, ORIG_EAX(%esp)					; \
   1.369 -	movl %ecx, ES(%esp)						; \
   1.370 -	movl %esp,%edx							; \
   1.371 -	pushl %edi			/* push the faulting address */	; \
   1.372 -	pushl %esi			/* push the error code */	; \
   1.373 -	pushl %edx			/* push the pt_regs pointer */	; \
   1.374 -	movl $(__KERNEL_DS),%edx					; \
   1.375 -	movl %edx,%ds							; \
   1.376 -	movl %edx,%es							; \
   1.377 -	call _name2							; \
   1.378 -	addl $12,%esp							; \
   1.379 -	jmp ret_from_exception						;
   1.380 -PAGE_FAULT_STUB(page_fault, do_page_fault)
   1.381 +ENTRY(page_fault)
   1.382 +	pushl %ds
   1.383 +	pushl %eax
   1.384 +	xorl %eax,%eax
   1.385 +	pushl %ebp
   1.386 +	pushl %edi
   1.387 +	pushl %esi
   1.388 +	pushl %edx
   1.389 +	decl %eax			/* eax = -1 */
   1.390 +	pushl %ecx
   1.391 +	pushl %ebx
   1.392 +	GET_THREAD_INFO(%ebp)
   1.393 +	cld
   1.394 +	movl %es,%ecx
   1.395 +	movl ORIG_EAX(%esp), %esi	/* get the error code */
   1.396 +	movl ES(%esp), %edi		/* get the faulting address */
   1.397 +	movl %eax, ORIG_EAX(%esp)
   1.398 +	movl %ecx, ES(%esp)
   1.399 +	movl %esp,%edx
   1.400 +	pushl %edi			/* push the faulting address */
   1.401 +	pushl %esi			/* push the error code */
   1.402 +	pushl %edx			/* push the pt_regs pointer */
   1.403 +	movl $(__KERNEL_DS),%edx
   1.404 +	movl %edx,%ds
   1.405 +	movl %edx,%es
   1.406 +	movl HYPERVISOR_shared_info, %esi
   1.407 +	movb evtchn_upcall_mask(%esi), %dl
   1.408 +	movb %dl, EVENT_MASK+12(%esp)
   1.409 +	call do_page_fault
   1.410 +	addl $12,%esp
   1.411 +	jmp ret_from_exception
   1.412  
   1.413  #ifdef CONFIG_X86_MCE
   1.414  ENTRY(machine_check)
   1.415 @@ -1014,5 +1058,6 @@ ENTRY(sys_call_table)
   1.416  	.long sys_mq_notify
   1.417  	.long sys_mq_getsetattr
   1.418  	.long sys_ni_syscall		/* reserved for kexec */
   1.419 +	.long sys_waitid
   1.420  
   1.421  syscall_table_size=(.-sys_call_table)
     2.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c	Fri Nov 26 18:21:39 2004 +0000
     2.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c	Sat Nov 27 10:04:03 2004 +0000
     2.3 @@ -78,7 +78,6 @@ asmlinkage void overflow(void);
     2.4  asmlinkage void bounds(void);
     2.5  asmlinkage void invalid_op(void);
     2.6  asmlinkage void device_not_available(void);
     2.7 -asmlinkage void double_fault(void);
     2.8  asmlinkage void coprocessor_segment_overrun(void);
     2.9  asmlinkage void invalid_TSS(void);
    2.10  asmlinkage void segment_not_present(void);
    2.11 @@ -470,7 +469,6 @@ DO_VM86_ERROR( 4, SIGSEGV, "overflow", o
    2.12  DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
    2.13  DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
    2.14  DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
    2.15 -DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
    2.16  DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
    2.17  DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
    2.18  DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
    2.19 @@ -1032,7 +1030,6 @@ static trap_info_t trap_table[] = {
    2.20  	{  5, 3, __KERNEL_CS, (unsigned long)bounds			},
    2.21  	{  6, 0, __KERNEL_CS, (unsigned long)invalid_op			},
    2.22  	{  7, 0, __KERNEL_CS, (unsigned long)device_not_available	},
    2.23 -	{  8, 0, __KERNEL_CS, (unsigned long)double_fault		},
    2.24  	{  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
    2.25  	{ 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS		},
    2.26  	{ 11, 0, __KERNEL_CS, (unsigned long)segment_not_present	},