ia64/xen-unstable

changeset 9182:b41ad96f1242

Cleanup i386 entry.S.
Many of the changes to entry.S can be removed because we no longer support
CONFIG_PREEMPT.

Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Tue Mar 07 15:48:36 2006 +0000 (2006-03-07)
parents 51c59d5d76b0
children 4293d6760cef
files linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S	Tue Mar 07 14:40:23 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S	Tue Mar 07 15:48:36 2006 +0000
     1.3 @@ -79,6 +79,10 @@ VM_MASK		= 0x00020000
     1.4  /* Pseudo-eflags. */
     1.5  NMI_MASK	= 0x80000000
     1.6  
     1.7 +#ifndef CONFIG_XEN
     1.8 +#define DISABLE_INTERRUPTS	cli
     1.9 +#define ENABLE_INTERRUPTS	sti
    1.10 +#else
    1.11  /* Offsets into shared_info_t. */
    1.12  #define evtchn_upcall_pending		/* 0 */
    1.13  #define evtchn_upcall_mask		1
    1.14 @@ -86,33 +90,24 @@ NMI_MASK	= 0x80000000
    1.15  #define sizeof_vcpu_shift		6
    1.16  
    1.17  #ifdef CONFIG_SMP
    1.18 -#define preempt_disable(reg)	incl TI_preempt_count(reg)
    1.19 -#define preempt_enable(reg)	decl TI_preempt_count(reg)
    1.20 -#define XEN_GET_VCPU_INFO(reg)	preempt_disable(%ebp)			; \
    1.21 -				movl TI_cpu(%ebp),reg			; \
    1.22 -				shl  $sizeof_vcpu_shift,reg		; \
    1.23 -				addl HYPERVISOR_shared_info,reg
    1.24 -#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%ebp)
    1.25 -#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
    1.26 +#define GET_VCPU_INFO		movl TI_cpu(%ebp),%esi			; \
    1.27 +				shl  $sizeof_vcpu_shift,%esi		; \
    1.28 +				addl HYPERVISOR_shared_info,%esi
    1.29  #else
    1.30 -#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
    1.31 -#define XEN_PUT_VCPU_INFO(reg)
    1.32 -#define XEN_PUT_VCPU_INFO_fixup
    1.33 +#define GET_VCPU_INFO		movl HYPERVISOR_shared_info,%esi
    1.34  #endif
    1.35  
    1.36 -#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
    1.37 -#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
    1.38 -#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
    1.39 -				XEN_LOCKED_BLOCK_EVENTS(reg)		; \
    1.40 -    				XEN_PUT_VCPU_INFO(reg)
    1.41 -#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
    1.42 -				XEN_LOCKED_UNBLOCK_EVENTS(reg)		; \
    1.43 -    				XEN_PUT_VCPU_INFO(reg)
    1.44 -#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
    1.45 +#define __DISABLE_INTERRUPTS	movb $1,evtchn_upcall_mask(%esi)
    1.46 +#define __ENABLE_INTERRUPTS	movb $0,evtchn_upcall_mask(%esi)
    1.47 +#define DISABLE_INTERRUPTS	GET_VCPU_INFO				; \
    1.48 +				__DISABLE_INTERRUPTS
    1.49 +#define ENABLE_INTERRUPTS	GET_VCPU_INFO				; \
    1.50 +				__ENABLE_INTERRUPTS
    1.51 +#define __TEST_PENDING		testb $0xFF,evtchn_upcall_pending(%esi)
    1.52 +#endif
    1.53  
    1.54  #ifdef CONFIG_PREEMPT
    1.55 -#define preempt_stop		GET_THREAD_INFO(%ebp)			; \
    1.56 -				XEN_BLOCK_EVENTS(%esi)
    1.57 +#define preempt_stop		cli
    1.58  #else
    1.59  #define preempt_stop
    1.60  #define resume_kernel		restore_nocheck
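
To make the macro consolidation in the two hunks above easier to follow: the
Xen-specific XEN_BLOCK_EVENTS/XEN_UNBLOCK_EVENTS helpers are folded into the
generic DISABLE_INTERRUPTS/ENABLE_INTERRUPTS names, selected by CONFIG_XEN,
and they now implicitly clobber %esi instead of taking a register argument.
A condensed sketch of the resulting layer (reconstructed from the hunks
above, not a verbatim copy of the file):

    #ifndef CONFIG_XEN
    #define DISABLE_INTERRUPTS	cli	/* native: clear EFLAGS.IF */
    #define ENABLE_INTERRUPTS	sti	/* native: set EFLAGS.IF */
    #else
    /* Xen: point %esi at this vcpu's vcpu_info, then toggle its event mask. */
    #define DISABLE_INTERRUPTS	GET_VCPU_INFO				; \
    				movb $1,evtchn_upcall_mask(%esi)
    #define ENABLE_INTERRUPTS	GET_VCPU_INFO				; \
    				movb $0,evtchn_upcall_mask(%esi)
    #endif

Paths that already hold the vcpu_info pointer in %esi (for example
restore_all_enable_events further down) keep using the double-underscore
variants __ENABLE_INTERRUPTS/__DISABLE_INTERRUPTS directly.
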
    1.61 @@ -159,21 +154,6 @@ 4:	movl $0,(%esp);	\
    1.62  .previous
    1.63  
    1.64  
    1.65 -#define RESTORE_ALL	\
    1.66 -	RESTORE_REGS	\
    1.67 -	addl $4, %esp;	\
    1.68 -1:	iret;		\
    1.69 -.section .fixup,"ax";   \
    1.70 -2:	pushl $0;	\
    1.71 -	pushl $do_iret_error;	\
    1.72 -	jmp error_code;	\
    1.73 -.previous;		\
    1.74 -.section __ex_table,"a";\
    1.75 -	.align 4;	\
    1.76 -	.long 1b,2b;	\
    1.77 -.previous
    1.78 -
    1.79 -
    1.80  ENTRY(ret_from_fork)
    1.81  	pushl %eax
    1.82  	call schedule_tail
    1.83 @@ -199,7 +179,7 @@ ret_from_intr:
    1.84  	testl $(VM_MASK | 2), %eax
    1.85  	jz resume_kernel
    1.86  ENTRY(resume_userspace)
    1.87 -	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
    1.88 +	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
    1.89  					# setting need_resched or sigpending
    1.90  					# between sampling and the iret
    1.91  	movl TI_flags(%ebp), %ecx
    1.92 @@ -210,15 +190,15 @@ ENTRY(resume_userspace)
    1.93  
    1.94  #ifdef CONFIG_PREEMPT
    1.95  ENTRY(resume_kernel)
    1.96 -	XEN_BLOCK_EVENTS(%esi)
    1.97 +	cli
    1.98  	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
    1.99  	jnz restore_nocheck
   1.100  need_resched:
   1.101  	movl TI_flags(%ebp), %ecx	# need_resched set ?
   1.102  	testb $_TIF_NEED_RESCHED, %cl
   1.103  	jz restore_all
   1.104 -	testb $0xFF,EVENT_MASK(%esp)	# interrupts off (exception path) ?
   1.105 -	jnz restore_all
   1.106 +	testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
   1.107 +	jz restore_all
   1.108  	call preempt_schedule_irq
   1.109  	jmp need_resched
   1.110  #endif
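
The resume_kernel change above is native-only now, so the "were interrupts
off on the exception path?" test reads the IF bit of the saved hardware
EFLAGS rather than the Xen event-mask byte. Roughly (annotated sketch;
IF_MASK is assumed to be the usual 0x00000200 EFLAGS.IF bit defined earlier
in the file):

    need_resched:
    	movl TI_flags(%ebp), %ecx
    	testb $_TIF_NEED_RESCHED, %cl
    	jz restore_all
    	testl $IF_MASK,EFLAGS(%esp)	# IF clear in the saved frame =>
    	jz restore_all			# interrupts were off, don't preempt
    	call preempt_schedule_irq	# otherwise it is safe to preempt here
    	jmp need_resched
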
   1.111 @@ -289,7 +269,7 @@ syscall_call:
   1.112  	call *sys_call_table(,%eax,4)
   1.113  	movl %eax,EAX(%esp)		# store the return value
   1.114  syscall_exit:
   1.115 -	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
   1.116 +	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
   1.117  					# setting need_resched or sigpending
   1.118  					# between sampling and the iret
   1.119  	movl TI_flags(%ebp), %ecx
   1.120 @@ -297,7 +277,7 @@ syscall_exit:
   1.121  	jne syscall_exit_work
   1.122  
   1.123  restore_all:
   1.124 -#if 0 /* XEN */
   1.125 +#ifndef CONFIG_XEN
   1.126  	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
   1.127  	# Warning: OLDSS(%esp) contains the wrong/random values if we
   1.128  	# are returning to the kernel.
   1.129 @@ -307,22 +287,26 @@ restore_all:
   1.130  	andl $(VM_MASK | (4 << 8) | 3), %eax
   1.131  	cmpl $((4 << 8) | 3), %eax
   1.132  	je ldt_ss			# returning to user-space with LDT SS
   1.133 -#endif /* XEN */
   1.134 +restore_nocheck:
   1.135 +#else
   1.136  restore_nocheck:
   1.137  	testl $(VM_MASK|NMI_MASK), EFLAGS(%esp)
   1.138  	jnz hypervisor_iret
   1.139  	movb EVENT_MASK(%esp), %al
   1.140  	notb %al			# %al == ~saved_mask
   1.141 -	XEN_GET_VCPU_INFO(%esi)
   1.142 +	GET_VCPU_INFO
   1.143  	andb evtchn_upcall_mask(%esi),%al
   1.144  	andb $1,%al			# %al == mask & ~saved_mask
   1.145  	jnz restore_all_enable_events	#     != 0 => reenable event delivery
   1.146 -	XEN_PUT_VCPU_INFO(%esi)
   1.147 +#endif
   1.148  	RESTORE_REGS
   1.149  	addl $4, %esp
   1.150  1:	iret
   1.151  .section .fixup,"ax"
   1.152  iret_exc:
   1.153 +#ifndef CONFIG_XEN
   1.154 +	sti
   1.155 +#endif
   1.156  	pushl $0			# no error code
   1.157  	pushl $do_iret_error
   1.158  	jmp error_code
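
The new restore_nocheck path in the hunk above decides whether event delivery
has to be re-enabled before the final iret by comparing the mask saved in the
entry frame with the current vcpu mask. The same instructions with expanded
comments (a sketch, not a replacement for the code above):

    restore_nocheck:
    	testl $(VM_MASK|NMI_MASK), EFLAGS(%esp)
    	jnz hypervisor_iret		# VM86 or NMI return must go via Xen
    	movb EVENT_MASK(%esp), %al	# event mask saved at kernel entry
    	notb %al			# %al = ~saved_mask
    	GET_VCPU_INFO			# %esi -> this vcpu's vcpu_info
    	andb evtchn_upcall_mask(%esi),%al
    	andb $1,%al			# %al = current_mask & ~saved_mask
    	jnz restore_all_enable_events	# masked now but unmasked in the frame
    					# being restored: re-enable delivery and
    					# drain any pending events before iret
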
   1.159 @@ -332,13 +316,7 @@ iret_exc:
   1.160  	.long 1b,iret_exc
   1.161  .previous
   1.162  
   1.163 -hypervisor_iret:
   1.164 -	andl $~NMI_MASK, EFLAGS(%esp)
   1.165 -	RESTORE_REGS
   1.166 -	addl $4, %esp
   1.167 -	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
   1.168 -
   1.169 -#if 0 /* XEN */
   1.170 +#ifndef CONFIG_XEN
   1.171  ldt_ss:
   1.172  	larl OLDSS(%esp), %eax
   1.173  	jnz restore_nocheck
   1.174 @@ -363,7 +341,13 @@ 1:	iret
   1.175  	.align 4
   1.176  	.long 1b,iret_exc
   1.177  .previous
   1.178 -#endif /* XEN */
   1.179 +#else
   1.180 +hypervisor_iret:
   1.181 +	andl $~NMI_MASK, EFLAGS(%esp)
   1.182 +	RESTORE_REGS
   1.183 +	addl $4, %esp
   1.184 +	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
   1.185 +#endif
   1.186  
   1.187  	# perform work that needs to be done immediately before resumption
   1.188  	ALIGN
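
hypervisor_iret (moved into the CONFIG_XEN branch in the previous hunk)
handles the returns that cannot use a plain iret: it strips the NMI_MASK
pseudo-flag from the saved EFLAGS and enters the hypervisor's iret stub, each
hypercall occupying a 32-byte slot in hypercall_page. An annotated sketch of
that path:

    hypervisor_iret:
    	andl $~NMI_MASK, EFLAGS(%esp)	# NMI_MASK is a pseudo-flag, not a real
    					# EFLAGS bit: clear it before handing
    					# the frame to the hypervisor
    	RESTORE_REGS
    	addl $4, %esp			# skip the orig_eax/error-code slot
    	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
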
   1.189 @@ -372,7 +356,7 @@ work_pending:
   1.190  	jz work_notifysig
   1.191  work_resched:
   1.192  	call schedule
   1.193 -	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
   1.194 +	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
   1.195  					# setting need_resched or sigpending
   1.196  					# between sampling and the iret
   1.197  	movl TI_flags(%ebp), %ecx
   1.198 @@ -424,7 +408,7 @@ syscall_trace_entry:
   1.199  syscall_exit_work:
   1.200  	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
   1.201  	jz work_pending
   1.202 -	XEN_UNBLOCK_EVENTS(%esi)	# could let do_syscall_trace() call
   1.203 +	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
   1.204  					# schedule() instead
   1.205  	movl %esp, %eax
   1.206  	movl $1, %edx
   1.207 @@ -444,7 +428,7 @@ syscall_badsys:
   1.208  	movl $-ENOSYS,EAX(%esp)
   1.209  	jmp resume_userspace
   1.210  
   1.211 -#if 0 /* XEN */
   1.212 +#ifndef CONFIG_XEN
   1.213  #define FIXUP_ESPFIX_STACK \
   1.214  	movl %esp, %eax; \
   1.215  	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
   1.216 @@ -503,7 +487,9 @@ ENTRY(name)				\
   1.217  
   1.218  /* The include is where all of the SMP etc. interrupts come from */
   1.219  #include "entry_arch.h"
   1.220 -#endif /* XEN */
   1.221 +#else
   1.222 +#define UNWIND_ESPFIX_STACK
   1.223 +#endif
   1.224  
   1.225  ENTRY(divide_error)
   1.226  	pushl $0			# no error code
   1.227 @@ -522,7 +508,7 @@ error_code:
   1.228  	pushl %ebx
   1.229  	cld
   1.230  	pushl %es
   1.231 -#	UNWIND_ESPFIX_STACK
   1.232 +	UNWIND_ESPFIX_STACK
   1.233  	popl %ecx
   1.234  	movl ES(%esp), %edi		# get the function address
   1.235  	movl ORIG_EAX(%esp), %edx	# get the error code
   1.236 @@ -535,6 +521,7 @@ error_code:
   1.237  	call *%edi
   1.238  	jmp ret_from_exception
   1.239  
   1.240 +#ifdef CONFIG_XEN
   1.241  # A note on the "critical region" in our callback handler.
   1.242  # We want to avoid stacking callback handlers due to events occurring
   1.243  # during handling of the last event. To do this, we keep events disabled
   1.244 @@ -561,14 +548,23 @@ 11:	push %esp
   1.245  
   1.246          ALIGN
   1.247  restore_all_enable_events:
   1.248 -	XEN_LOCKED_UNBLOCK_EVENTS(%esi)
   1.249 +	__ENABLE_INTERRUPTS
   1.250  scrit:	/**** START OF CRITICAL REGION ****/
   1.251 -	XEN_TEST_PENDING(%esi)
   1.252 +	__TEST_PENDING
   1.253  	jnz  14f			# process more events if necessary...
   1.254 -	XEN_PUT_VCPU_INFO(%esi)
   1.255 -	RESTORE_ALL
   1.256 -14:	XEN_LOCKED_BLOCK_EVENTS(%esi)
   1.257 -	XEN_PUT_VCPU_INFO(%esi)
   1.258 +	RESTORE_REGS
   1.259 +	addl $4, %esp
   1.260 +1:	iret
   1.261 +.section .fixup,"ax"
   1.262 +2:	pushl $0
   1.263 +	pushl $do_iret_error
   1.264 +	jmp error_code
   1.265 +.previous
   1.266 +.section __ex_table,"a"
   1.267 +	.align 4
   1.268 +	.long 1b,2b
   1.269 +.previous
   1.270 +14:	__DISABLE_INTERRUPTS
   1.271  	jmp  11b
   1.272  ecrit:  /**** END OF CRITICAL REGION ****/
   1.273  # [How we do the fixup]. We want to merge the current stack frame with the
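
The "[How we do the fixup]" note above is truncated by the hunk boundary; the
mechanism, pieced together from the two hunks that follow, works like this:
if an event upcall interrupts the code between scrit and ecrit, the callback
lands in critical_region_fixup, which indexes critical_fixup_table by how far
the interrupted %eip had progressed. Each table byte records how many bytes
of the outgoing frame had already been popped (0xff marks the initial
vcpu_info probe, where nothing has been popped yet), so the two stack frames
can be merged and execution restarted at 11:. A condensed, annotated sketch
of that lookup:

    	# table byte for the interrupted %eip is in %al/%eax at this point
    	cmpb $0xff,%al			# 0xff => still inside the __TEST_PENDING
    	jne  15f			#         probe, nothing popped yet
    	GET_THREAD_INFO(%ebp)		# reload the thread_info pointer
    	xorl %eax,%eax			# treat as zero frame bytes consumed
    15:	mov  %esp,%esi
    	add  %eax,%esi			# %esi = end of the partially-popped frame
    	# critical_fixup_table pairs every instruction in [scrit,ecrit) with
    	# that pop count, e.g. 0x00 for "pop %ebx", 0x04 for "pop %ecx",
    	# 0x28 (40 bytes) for the final iret.
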
   1.274 @@ -584,7 +580,6 @@ critical_region_fixup:
   1.275  	cmpb $0xff,%al                  # 0xff => vcpu_info critical region
   1.276  	jne  15f
   1.277  	GET_THREAD_INFO(%ebp)
   1.278 -	XEN_PUT_VCPU_INFO(%esi)         # abort vcpu_info critical region
   1.279          xorl %eax,%eax
   1.280  15:	mov  %esp,%esi
   1.281  	add  %eax,%esi			# %esi points at end of src region
   1.282 @@ -602,9 +597,8 @@ 17:	movl %edi,%esp			# final %edi is top
   1.283  	jmp  11b
   1.284  
   1.285  critical_fixup_table:
   1.286 -	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = XEN_TEST_PENDING
   1.287 +	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = __TEST_PENDING
   1.288  	.byte 0xff,0xff			# jnz  14f
   1.289 -	XEN_PUT_VCPU_INFO_fixup
   1.290  	.byte 0x00			# pop  %ebx
   1.291  	.byte 0x04			# pop  %ecx
   1.292  	.byte 0x08			# pop  %edx
   1.293 @@ -617,7 +611,6 @@ critical_fixup_table:
   1.294  	.byte 0x24,0x24,0x24		# add  $4,%esp
   1.295  	.byte 0x28			# iret
   1.296  	.byte 0xff,0xff,0xff,0xff	# movb $1,1(%esi)
   1.297 -	XEN_PUT_VCPU_INFO_fixup
   1.298  	.byte 0x00,0x00			# jmp  11b
   1.299  
   1.300  # Hypervisor uses this for application faults while it executes.
   1.301 @@ -646,6 +639,7 @@ 9:	movl $0,(%esp);	\
   1.302  	.long 3b,8b;	\
   1.303  	.long 4b,9b;	\
   1.304  .previous
   1.305 +#endif
   1.306  
   1.307  ENTRY(coprocessor_error)
   1.308  	pushl $0
   1.309 @@ -660,7 +654,17 @@ ENTRY(simd_coprocessor_error)
   1.310  ENTRY(device_not_available)
   1.311  	pushl $-1			# mark this as an int
   1.312  	SAVE_ALL
   1.313 -	#preempt_stop /* This is already an interrupt gate on Xen. */
   1.314 +#ifndef CONFIG_XEN
   1.315 +	movl %cr0, %eax
   1.316 +	testl $0x4, %eax		# EM (math emulation bit)
   1.317 +	je device_available_emulate
   1.318 +	pushl $0			# temporary storage for ORIG_EIP
   1.319 +	call math_emulate
   1.320 +	addl $4, %esp
   1.321 +	jmp ret_from_exception
   1.322 +device_available_emulate:
   1.323 +#endif
   1.324 +	preempt_stop
   1.325  	call math_state_restore
   1.326  	jmp ret_from_exception
   1.327  
   1.328 @@ -703,16 +707,7 @@ debug_stack_correct:
   1.329  	jmp ret_from_exception
   1.330  	.previous .text
   1.331  
   1.332 -ENTRY(nmi)
   1.333 -	pushl %eax
   1.334 -	SAVE_ALL
   1.335 -	xorl %edx,%edx		# zero error code
   1.336 -	movl %esp,%eax		# pt_regs pointer
   1.337 -	call do_nmi
   1.338 -	orl  $NMI_MASK, EFLAGS(%esp)
   1.339 -	jmp restore_all
   1.340 -
   1.341 -#if 0 /* XEN */
   1.342 +#ifndef CONFIG_XEN
   1.343  /*
   1.344   * NMI is doubly nasty. It can happen _while_ we're handling
   1.345   * a debug fault, and the debug fault hasn't yet been able to
   1.346 @@ -783,7 +778,16 @@ 1:	iret
   1.347  	.align 4
   1.348  	.long 1b,iret_exc
   1.349  .previous
   1.350 -#endif /* XEN */
   1.351 +#else
   1.352 +ENTRY(nmi)
   1.353 +	pushl %eax
   1.354 +	SAVE_ALL
   1.355 +	xorl %edx,%edx		# zero error code
   1.356 +	movl %esp,%eax		# pt_regs pointer
   1.357 +	call do_nmi
   1.358 +	orl  $NMI_MASK, EFLAGS(%esp)
   1.359 +	jmp restore_all
   1.360 +#endif
   1.361  
   1.362  KPROBE_ENTRY(int3)
   1.363  	pushl $-1			# mark this as an int
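
Finally, the NMI entry point: the native variant (with its debug-stack
handling) is compiled out under CONFIG_XEN, and the Xen variant instead tags
the saved pseudo-EFLAGS with NMI_MASK so that restore_nocheck sends the
return through hypervisor_iret rather than a plain iret. The Xen path from
the hunk above, annotated:

    ENTRY(nmi)
    	pushl %eax
    	SAVE_ALL
    	xorl %edx,%edx			# zero error code
    	movl %esp,%eax			# pt_regs pointer
    	call do_nmi
    	orl  $NMI_MASK, EFLAGS(%esp)	# pseudo-flag only Xen code looks at:
    	jmp restore_all			# restore_nocheck tests it and takes the
    					# hypervisor_iret path, which strips it
    					# again before entering the hypervisor
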