ia64/xen-unstable

changeset 3202:ddbbd48e3254

bitkeeper revision 1.1159.1.471 (41ab4844yEdQ7zMWVGeDP2F7X0FEQQ)

system.h:
Make disabling/enabling interrupts preemption safe.
Also g/c (garbage-collect) the now-unused __save_and_sti.
entry.S:
Make disabling/enabling interrupts preemption safe.
author cl349@arcadians.cl.cam.ac.uk
date Mon Nov 29 16:03:16 2004 +0000 (2004-11-29)
parents 3e0c45a8c812
children c23dd7ec1f54
files linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/system.h
line diff
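
For context on the fix: with CONFIG_PREEMPT, the old __cli() looked up the current VCPU's slot in the shared info page via smp_processor_id() and then wrote evtchn_upcall_mask, with nothing to stop a preemption in between. If the task is migrated to another CPU between the lookup and the store, the mask is written to the wrong VCPU. A minimal sketch of the hazard and of the fix, simplified from the system.h hunk below:

	/* Pre-patch pattern -- racy under CONFIG_PREEMPT: */
	vcpu_info_t *_vcpu =
		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];
	/* ... preempted and migrated to another CPU here? ... */
	_vcpu->evtchn_upcall_mask = 1;	/* masks events on the old CPU */

	/* Post-patch pattern -- lookup and store happen with preemption
	 * disabled, so both hit the VCPU we are actually running on: */
	vcpu_info_t *_vcpu;
	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];
	_vcpu->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();	/* events are now masked, so do
					 * not enter the scheduler here */

The same reasoning applies to __sti(), __restore_flags() and __save_and_cli() in the system.h diff below.
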
     1.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S	Mon Nov 29 11:08:49 2004 +0000
     1.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S	Mon Nov 29 16:03:16 2004 +0000
     1.3 @@ -83,19 +83,38 @@ VM_MASK		= 0x00020000
     1.4  #define sizeof_vcpu_shift		3
     1.5  
     1.6  #ifdef CONFIG_SMP
     1.7 -#define XEN_GET_VCPU_INFO(reg)	movl TI_cpu(%ebp),reg			; \
     1.8 +#define XEN_GET_VCPU_INFO(reg)
     1.9 +#define preempt_disable(reg)	incl TI_preempt_count(reg)
    1.10 +#define preempt_enable(reg)	decl TI_preempt_count(reg)
    1.11 +#define XEN_LOCK_VCPU_INFO_SMP(reg) preempt_disable(%ebp)		; \
    1.12 +				movl TI_cpu(%ebp),reg			; \
    1.13  				shl  $sizeof_vcpu_shift,reg		; \
    1.14  				addl HYPERVISOR_shared_info,reg
    1.15 -#define XEN_GET_VCPU_INFO_IF_SMP(reg) XEN_GET_VCPU_INFO(reg)
    1.16 -#define GET_THREAD_INFO_IF_SMP(reg) GET_THREAD_INFO(reg)
    1.17 +#define XEN_UNLOCK_VCPU_INFO_SMP(reg) preempt_enable(%ebp)
    1.18 +#define XEN_UNLOCK_VCPU_INFO_SMP_fixup .byte 0x00,0x00,0x00
    1.19 +#define XEN_BLOCK_EVENTS(reg)	XEN_LOCK_VCPU_INFO_SMP(reg)		; \
    1.20 +				movb $1,evtchn_upcall_mask(reg)		; \
    1.21 +    				XEN_UNLOCK_VCPU_INFO_SMP(reg)
    1.22 +#define XEN_UNBLOCK_EVENTS(reg)	XEN_LOCK_VCPU_INFO_SMP(reg)		; \
    1.23 +				movb $0,evtchn_upcall_mask(reg)		; \
    1.24 +    				XEN_UNLOCK_VCPU_INFO_SMP(reg)
    1.25 +#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) GET_THREAD_INFO(%ebp)		; \
    1.26 +				XEN_LOCK_VCPU_INFO_SMP(reg)		; \
    1.27 +				movb evtchn_upcall_mask(reg), tmp	; \
    1.28 +				movb tmp, off(%esp)			; \
    1.29 +    				XEN_UNLOCK_VCPU_INFO_SMP(reg)
    1.30  #else
    1.31  #define XEN_GET_VCPU_INFO(reg)	movl HYPERVISOR_shared_info,reg
    1.32 -#define XEN_GET_VCPU_INFO_IF_SMP(reg)
    1.33 -#define GET_THREAD_INFO_IF_SMP(reg)
    1.34 +#define XEN_LOCK_VCPU_INFO_SMP(reg)
    1.35 +#define XEN_UNLOCK_VCPU_INFO_SMP(reg)
    1.36 +#define XEN_UNLOCK_VCPU_INFO_SMP_fixup
    1.37 +#define XEN_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
    1.38 +#define XEN_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
    1.39 +#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
    1.40 +	movb evtchn_upcall_mask(reg), tmp; \
    1.41 +	movb tmp, off(%esp)
    1.42  #endif
    1.43  
    1.44 -#define XEN_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
    1.45 -#define XEN_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
    1.46  #define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
    1.47  
    1.48  #ifdef CONFIG_PREEMPT
    1.49 @@ -118,14 +137,12 @@ VM_MASK		= 0x00020000
    1.50  	pushl %ebx; \
    1.51  	movl $(__USER_DS), %edx; \
    1.52  	movl %edx, %ds; \
    1.53 -	movl %edx, %es; \
    1.54 -	GET_THREAD_INFO_IF_SMP(%ebp); 
    1.55 +	movl %edx, %es;
    1.56  
    1.57  #define SAVE_ALL \
    1.58  	SAVE_ALL_NO_EVENTMASK; \
    1.59  	XEN_GET_VCPU_INFO(%esi); \
    1.60 -	movb evtchn_upcall_mask(%esi), %dl; \
    1.61 -	movb %dl, EVENT_MASK(%esp)
    1.62 +	XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK)
    1.63  
    1.64  #define RESTORE_INT_REGS \
    1.65  	popl %ebx;	\
    1.66 @@ -222,7 +239,6 @@ ENTRY(ret_from_fork)
    1.67  	# userspace resumption stub bypassing syscall exit tracing
    1.68  	ALIGN
    1.69  ret_from_exception:
    1.70 -	XEN_GET_VCPU_INFO_IF_SMP(%esi)
    1.71  	preempt_stop
    1.72  ret_from_intr:
    1.73  	GET_THREAD_INFO(%ebp)
    1.74 @@ -256,7 +272,6 @@ need_resched:
    1.75  	XEN_UNBLOCK_EVENTS(%esi)
    1.76  	call schedule
    1.77  	movl $0,TI_preempt_count(%ebp)
    1.78 -	XEN_GET_VCPU_INFO_IF_SMP(%esi)
    1.79  	XEN_BLOCK_EVENTS(%esi)
    1.80  	jmp need_resched
    1.81  #endif
    1.82 @@ -322,7 +337,6 @@ syscall_call:
    1.83  	call *sys_call_table(,%eax,4)
    1.84  	movl %eax,EAX(%esp)		# store the return value
    1.85  syscall_exit:
    1.86 -	XEN_GET_VCPU_INFO_IF_SMP(%esi)
    1.87  	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
    1.88  					# setting need_resched or sigpending
    1.89  					# between sampling and the iret
    1.90 @@ -334,9 +348,11 @@ restore_all:
    1.91  	jnz resume_vm86
    1.92  	movb EVENT_MASK(%esp), %al
    1.93  	notb %al			# %al == ~saved_mask
    1.94 +	XEN_LOCK_VCPU_INFO_SMP(%esi)
    1.95  	andb evtchn_upcall_mask(%esi),%al
    1.96  	andb $1,%al			# %al == mask & ~saved_mask
    1.97  	jnz restore_all_enable_events	#     != 0 => reenable event delivery
    1.98 +	XEN_UNLOCK_VCPU_INFO_SMP(%esi)
    1.99  	RESTORE_ALL
   1.100  
   1.101  resume_vm86:
   1.102 @@ -354,7 +370,6 @@ work_pending:
   1.103  	jz work_notifysig
   1.104  work_resched:
   1.105  	call schedule
   1.106 -	XEN_GET_VCPU_INFO_IF_SMP(%esi)
   1.107  	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
   1.108  					# setting need_resched or sigpending
   1.109  					# between sampling and the iret
   1.110 @@ -400,7 +415,6 @@ syscall_trace_entry:
   1.111  	# perform syscall exit tracing
   1.112  	ALIGN
   1.113  syscall_exit_work:
   1.114 -	XEN_GET_VCPU_INFO_IF_SMP(%esi)
   1.115  	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
   1.116  	jz work_pending
   1.117  	XEN_UNBLOCK_EVENTS(%esi)	# could let do_syscall_trace() call
   1.118 @@ -488,10 +502,8 @@ error_code:
   1.119  	movl $(__USER_DS), %edx
   1.120  	movl %edx, %ds
   1.121  	movl %edx, %es
   1.122 -	GET_THREAD_INFO_IF_SMP(%ebp) 
   1.123  	XEN_GET_VCPU_INFO(%esi)
   1.124 -	movb evtchn_upcall_mask(%esi), %dl
   1.125 -	movb %dl, EVENT_MASK+8(%esp)
   1.126 +	XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK+8)
   1.127  	call *%edi
   1.128  	addl $8, %esp
   1.129  	jmp ret_from_exception
   1.130 @@ -528,8 +540,10 @@ restore_all_enable_events:
   1.131  scrit:	/**** START OF CRITICAL REGION ****/
   1.132  	XEN_TEST_PENDING(%esi)
   1.133  	jnz  14f			# process more events if necessary...
   1.134 +	XEN_UNLOCK_VCPU_INFO_SMP(%esi)
   1.135  	RESTORE_ALL
   1.136  14:	XEN_BLOCK_EVENTS(%esi)
   1.137 +	XEN_UNLOCK_VCPU_INFO_SMP(%esi)
   1.138  	jmp  11b
   1.139  ecrit:  /**** END OF CRITICAL REGION ****/
   1.140  # [How we do the fixup]. We want to merge the current stack frame with the
   1.141 @@ -560,6 +574,7 @@ 16:	movl %edi,%esp			# final %edi is top
   1.142  critical_fixup_table:
   1.143  	.byte 0x00,0x00,0x00		# testb $0x1,(%esi) = XEN_TEST_PENDING
   1.144  	.byte 0x00,0x00			# jnz  14f
   1.145 +	XEN_UNLOCK_VCPU_INFO_SMP_fixup
   1.146  	.byte 0x00			# pop  %ebx
   1.147  	.byte 0x04			# pop  %ecx
   1.148  	.byte 0x08			# pop  %edx
   1.149 @@ -572,6 +587,7 @@ critical_fixup_table:
   1.150  	.byte 0x24,0x24,0x24		# add  $4,%esp
   1.151  	.byte 0x28			# iret
   1.152  	.byte 0x00,0x00,0x00,0x00	# movb $1,1(%esi)
   1.153 +	XEN_UNLOCK_VCPU_INFO_SMP_fixup
   1.154  	.byte 0x00,0x00			# jmp  11b
   1.155  
   1.156  # Hypervisor uses this for application faults while it executes.
   1.157 @@ -712,7 +728,6 @@ ENTRY(int3)
   1.158  	pushl %edx
   1.159  	call do_int3
   1.160  	addl $8,%esp
   1.161 -	XEN_GET_VCPU_INFO_IF_SMP(%esi)
   1.162  	testl %eax,%eax
   1.163  	jnz restore_all
   1.164  	jmp ret_from_exception
   1.165 @@ -783,10 +798,8 @@ ENTRY(page_fault)
   1.166  	movl $(__KERNEL_DS),%edx
   1.167  	movl %edx,%ds
   1.168  	movl %edx,%es
   1.169 -	GET_THREAD_INFO_IF_SMP(%ebp) 
   1.170  	XEN_GET_VCPU_INFO(%esi)
   1.171 -	movb evtchn_upcall_mask(%esi), %dl
   1.172 -	movb %dl, EVENT_MASK+12(%esp)
   1.173 +	XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK+12)
   1.174  	call do_page_fault
   1.175  	addl $12,%esp
   1.176  	jmp ret_from_exception
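
The entry.S side applies the same discipline by hand. On SMP, XEN_LOCK_VCPU_INFO_SMP increments TI_preempt_count through %ebp (the thread_info pointer) before reading TI_cpu to compute the VCPU address, and XEN_UNLOCK_VCPU_INFO_SMP decrements it afterwards; the bare decl has preempt_enable_no_resched() semantics (no need_resched recheck). Because that decl encodes to 3 bytes and sits inside the scrit/ecrit critical region, whose fixup table must account for every instruction, the table gains matching XEN_UNLOCK_VCPU_INFO_SMP_fixup entries (.byte 0x00,0x00,0x00 on SMP, empty on UP). Roughly, XEN_SAVE_UPCALL_MASK corresponds to the following C (a sketch; the helper name is illustrative, not part of the patch):

	/* Sample the current VCPU's upcall mask for the saved frame,
	 * without letting preemption move us to another CPU meanwhile. */
	static inline u8 xen_save_upcall_mask(void)
	{
		u8 mask;
		preempt_disable();	/* incl TI_preempt_count(%ebp) */
		mask = HYPERVISOR_shared_info->
			vcpu_data[smp_processor_id()].evtchn_upcall_mask;
		preempt_enable_no_resched();	/* decl TI_preempt_count(%ebp):
						 * no need_resched recheck */
		return mask;		/* asm stores this at EVENT_MASK(%esp) */
	}
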
     2.1 --- a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/system.h	Mon Nov 29 11:08:49 2004 +0000
     2.2 +++ b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/system.h	Mon Nov 29 16:03:16 2004 +0000
     2.3 @@ -450,65 +450,62 @@ struct alt_instr {
     2.4  
     2.5  #define __cli()								\
     2.6  do {									\
     2.7 -	vcpu_info_t *_vcpu =						\
     2.8 -		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
     2.9 +	vcpu_info_t *_vcpu;						\
    2.10 +	preempt_disable();						\
    2.11 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
    2.12  	_vcpu->evtchn_upcall_mask = 1;					\
    2.13 +	preempt_enable_no_resched();					\
    2.14  	barrier();							\
    2.15  } while (0)
    2.16  
    2.17  #define __sti()								\
    2.18  do {									\
    2.19 -	vcpu_info_t *_vcpu =						\
    2.20 -		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
    2.21 +	vcpu_info_t *_vcpu;						\
    2.22  	barrier();							\
    2.23 +	preempt_disable();						\
    2.24 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
    2.25  	_vcpu->evtchn_upcall_mask = 0;					\
    2.26  	barrier(); /* unmask then check (avoid races) */		\
    2.27  	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
    2.28  		force_evtchn_callback();				\
    2.29 +	preempt_enable();						\
    2.30  } while (0)
    2.31  
    2.32  #define __save_flags(x)							\
    2.33  do {									\
    2.34 -	vcpu_info_t *_vcpu =						\
    2.35 -		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
    2.36 +	vcpu_info_t *_vcpu;						\
    2.37 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
    2.38  	(x) = _vcpu->evtchn_upcall_mask;				\
    2.39  } while (0)
    2.40  
    2.41  #define __restore_flags(x)						\
    2.42  do {									\
    2.43 -	vcpu_info_t *_vcpu =						\
    2.44 -		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
    2.45 +	vcpu_info_t *_vcpu;						\
    2.46  	barrier();							\
    2.47 -	if ( (_vcpu->evtchn_upcall_mask = (x)) == 0 ) {			\
    2.48 +	preempt_disable();						\
    2.49 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
    2.50 +	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
    2.51  		barrier(); /* unmask then check (avoid races) */	\
    2.52  		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
    2.53  			force_evtchn_callback();			\
    2.54 -	}								\
    2.55 +		preempt_enable();					\
    2.56 +	} else								\
    2.57 +		preempt_enable_no_resched();				\
    2.58  } while (0)
    2.59  
    2.60  #define safe_halt()		((void)0)
    2.61  
    2.62  #define __save_and_cli(x)						\
    2.63  do {									\
    2.64 -	vcpu_info_t *_vcpu =						\
    2.65 -		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
    2.66 +	vcpu_info_t *_vcpu;						\
    2.67 +	preempt_disable();						\
    2.68 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
    2.69  	(x) = _vcpu->evtchn_upcall_mask;				\
    2.70  	_vcpu->evtchn_upcall_mask = 1;					\
    2.71 +	preempt_enable_no_resched();					\
    2.72  	barrier();							\
    2.73  } while (0)
    2.74  
    2.75 -#define __save_and_sti(x)						\
    2.76 -do {									\
    2.77 -	vcpu_info_t *_vcpu =						\
    2.78 -		&HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
    2.79 -	barrier();							\
    2.80 -	(x) = _vcpu->evtchn_upcall_mask;				\
    2.81 -	_vcpu->evtchn_upcall_mask = 0;					\
    2.82 -	barrier(); /* unmask then check (avoid races) */		\
    2.83 -	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
    2.84 -		force_evtchn_callback();				\
    2.85 -} while (0)
    2.86 -
    2.87  #define local_irq_save(x)	__save_and_cli(x)
    2.88  #define local_irq_restore(x)	__restore_flags(x)
    2.89  #define local_save_flags(x)	__save_flags(x)
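
One subtlety in the reworked __restore_flags(): the path that unmasked events ends with preempt_enable(), since a pending reschedule can now be honoured safely, while the path that left events masked ends with preempt_enable_no_resched(), because entering the scheduler with event delivery blocked would be wrong. Callers are unaffected; typical usage stays the standard pattern (a sketch; the protected state is illustrative):

	unsigned long flags;

	local_irq_save(flags);		/* __save_and_cli: save mask, block events */
	/* ... manipulate per-CPU or shared state ... */
	local_irq_restore(flags);	/* __restore_flags: unblock only if
					 * the saved mask was clear */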