ia64/xen-unstable

changeset 4949:386956408063

bitkeeper revision 1.1427 (4289b612hOngj49yfggKcA17ckis2g)

Xen saves the upcall mask onto the guest stack when making an upcall to the
guest. The guest can use this saved mask to determine whether it must
re-enable event delivery on return from the upcall activation.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue May 17 09:14:58 2005 +0000 (2005-05-17)
parents 5b730cb3857a
children cdb951900d9d
files linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S xen/arch/x86/traps.c xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_32/seg_fixup.c xen/arch/x86/x86_64/asm-offsets.c xen/arch/x86/x86_64/entry.S xen/arch/x86/x86_64/traps.c xen/include/public/arch-x86_32.h xen/include/public/arch-x86_64.h
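
In brief: Xen now records the pre-upcall value of evtchn_upcall_mask in the
bounce frame it builds for the guest (bits 16-23 of the CS slot), instead of
the guest sampling the mask itself on every kernel entry. On exit, the guest
re-enables event delivery only if delivery was unmasked when the upcall
arrived. Below is a minimal C sketch of that guest-side check, mirroring the
restore_all logic in the i386 entry.S hunk; the struct layouts follow this
changeset's public headers, while the helper name upcall_return and the
force_evtchn_callback hook are illustrative, not part of the patch:

    #include <stdint.h>

    /* Per-VCPU shared state (abridged from the Xen public headers). */
    struct vcpu_info {
        uint8_t evtchn_upcall_pending;  /* events awaiting delivery      */
        uint8_t evtchn_upcall_mask;     /* nonzero => delivery is masked */
    };

    /* The CS slot of the frame Xen built for the upcall: selector in the
     * low 16 bits, the saved upcall mask in the byte above it. */
    struct upcall_frame {
        uint16_t cs;
        uint8_t  saved_upcall_mask;
        uint8_t  _pad0;
    };

    /* Return path of an upcall activation: re-enable event delivery only
     * if it is masked now but was unmasked when the upcall interrupted
     * the guest -- the mask & ~saved_mask test from restore_all. */
    static void upcall_return(struct upcall_frame *frame, struct vcpu_info *v)
    {
        if ((v->evtchn_upcall_mask & ~frame->saved_upcall_mask) & 1) {
            v->evtchn_upcall_mask = 0;       /* unmask events */
            if (v->evtchn_upcall_pending) {
                /* re-enter the event handler before the final return
                 * (force_evtchn_callback() -- hypothetical hook)     */
            }
        }
    }
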
line diff
     1.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S	Tue May 17 09:04:37 2005 +0000
     1.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S	Tue May 17 09:14:58 2005 +0000
     1.3 @@ -83,42 +83,28 @@ VM_MASK		= 0x00020000
     1.4  #define sizeof_vcpu_shift		3
     1.5  
     1.6  #ifdef CONFIG_SMP
     1.7 -#define XEN_GET_VCPU_INFO(reg)
     1.8  #define preempt_disable(reg)	incl TI_preempt_count(reg)
     1.9  #define preempt_enable(reg)	decl TI_preempt_count(reg)
    1.10 -#define XEN_LOCK_VCPU_INFO_SMP(reg) preempt_disable(%ebp)		; \
    1.11 +#define XEN_GET_VCPU_INFO(reg)	preempt_disable(%ebp)			; \
    1.12  				movl TI_cpu(%ebp),reg			; \
    1.13  				shl  $sizeof_vcpu_shift,reg		; \
    1.14  				addl HYPERVISOR_shared_info,reg
    1.15 -#define XEN_UNLOCK_VCPU_INFO_SMP(reg) preempt_enable(%ebp)
    1.16 -#define XEN_UNLOCK_VCPU_INFO_SMP_fixup .byte 0xff,0xff,0xff
    1.17 -#define Ux00 0xff
    1.18 -#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
    1.19 -#define XEN_BLOCK_EVENTS(reg)	XEN_LOCK_VCPU_INFO_SMP(reg)		; \
    1.20 -				XEN_LOCKED_BLOCK_EVENTS(reg)		; \
    1.21 -    				XEN_UNLOCK_VCPU_INFO_SMP(reg)
    1.22 -#define XEN_UNBLOCK_EVENTS(reg)	XEN_LOCK_VCPU_INFO_SMP(reg)		; \
    1.23 -				movb $0,evtchn_upcall_mask(reg)		; \
    1.24 -    				XEN_UNLOCK_VCPU_INFO_SMP(reg)
    1.25 -#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) GET_THREAD_INFO(%ebp)		; \
    1.26 -				XEN_LOCK_VCPU_INFO_SMP(reg)		; \
    1.27 -				movb evtchn_upcall_mask(reg), tmp	; \
    1.28 -				movb tmp, off(%esp)			; \
    1.29 -    				XEN_UNLOCK_VCPU_INFO_SMP(reg)
    1.30 +#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%ebp)
    1.31 +#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
    1.32  #else
    1.33 -#define XEN_GET_VCPU_INFO(reg)	movl HYPERVISOR_shared_info,reg
    1.34 -#define XEN_LOCK_VCPU_INFO_SMP(reg)
    1.35 -#define XEN_UNLOCK_VCPU_INFO_SMP(reg)
    1.36 -#define XEN_UNLOCK_VCPU_INFO_SMP_fixup
    1.37 -#define Ux00 0x00
    1.38 -#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
    1.39 -#define XEN_BLOCK_EVENTS(reg)	XEN_LOCKED_BLOCK_EVENTS(reg)
    1.40 -#define XEN_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
    1.41 -#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
    1.42 -	movb evtchn_upcall_mask(reg), tmp; \
    1.43 -	movb tmp, off(%esp)
    1.44 +#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
    1.45 +#define XEN_PUT_VCPU_INFO(reg)
    1.46 +#define XEN_PUT_VCPU_INFO_fixup
    1.47  #endif
    1.48  
    1.49 +#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
    1.50 +#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
    1.51 +#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
    1.52 +				XEN_LOCKED_BLOCK_EVENTS(reg)		; \
    1.53 +    				XEN_PUT_VCPU_INFO(reg)
    1.54 +#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
    1.55 +				XEN_LOCKED_UNBLOCK_EVENTS(reg)		; \
    1.56 +    				XEN_PUT_VCPU_INFO(reg)
    1.57  #define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
    1.58  
    1.59  #ifdef CONFIG_PREEMPT
    1.60 @@ -128,7 +114,7 @@ VM_MASK		= 0x00020000
    1.61  #define resume_kernel		restore_all
    1.62  #endif
    1.63  
    1.64 -#define SAVE_ALL_NO_EVENTMASK \
    1.65 +#define SAVE_ALL \
    1.66  	cld; \
    1.67  	pushl %es; \
    1.68  	pushl %ds; \
    1.69 @@ -141,12 +127,7 @@ VM_MASK		= 0x00020000
    1.70  	pushl %ebx; \
    1.71  	movl $(__USER_DS), %edx; \
    1.72  	movl %edx, %ds; \
    1.73 -	movl %edx, %es;
    1.74 -
    1.75 -#define SAVE_ALL \
    1.76 -	SAVE_ALL_NO_EVENTMASK; \
    1.77 -	XEN_GET_VCPU_INFO(%esi); \
    1.78 -	XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK)
    1.79 +	movl %edx, %es
    1.80  
    1.81  #define RESTORE_INT_REGS \
    1.82  	popl %ebx;	\
    1.83 @@ -196,7 +177,6 @@ ENTRY(ret_from_fork)
    1.84  	call schedule_tail
    1.85  	GET_THREAD_INFO(%ebp)
    1.86  	popl %eax
    1.87 -	XEN_GET_VCPU_INFO(%esi)
    1.88  	jmp syscall_exit
    1.89  
    1.90  /*
    1.91 @@ -217,7 +197,6 @@ ret_from_intr:
    1.92  	testl $(VM_MASK | 2), %eax
    1.93  	jz resume_kernel		# returning to kernel or vm86-space
    1.94  ENTRY(resume_userspace)
    1.95 -	XEN_GET_VCPU_INFO(%esi)
    1.96  	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
    1.97  					# setting need_resched or sigpending
    1.98  					# between sampling and the iret
    1.99 @@ -229,7 +208,6 @@ ENTRY(resume_userspace)
   1.100  
   1.101  #ifdef CONFIG_PREEMPT
   1.102  ENTRY(resume_kernel)
   1.103 -	XEN_GET_VCPU_INFO(%esi)
   1.104  	XEN_BLOCK_EVENTS(%esi)
   1.105  	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
   1.106  	jnz restore_all
   1.107 @@ -316,11 +294,11 @@ restore_all:
   1.108  	jnz resume_vm86
   1.109  	movb EVENT_MASK(%esp), %al
   1.110  	notb %al			# %al == ~saved_mask
   1.111 -	XEN_LOCK_VCPU_INFO_SMP(%esi)
   1.112 +	XEN_GET_VCPU_INFO(%esi)
   1.113  	andb evtchn_upcall_mask(%esi),%al
   1.114  	andb $1,%al			# %al == mask & ~saved_mask
   1.115  	jnz restore_all_enable_events	#     != 0 => reenable event delivery
   1.116 -	XEN_UNLOCK_VCPU_INFO_SMP(%esi)
   1.117 +	XEN_PUT_VCPU_INFO(%esi)
   1.118  	RESTORE_ALL
   1.119  
   1.120  resume_vm86:
   1.121 @@ -470,8 +448,6 @@ error_code:
   1.122  	movl %ecx, %ds
   1.123  	movl %ecx, %es
   1.124  	movl %esp,%eax			# pt_regs pointer
   1.125 -	XEN_GET_VCPU_INFO(%esi)
   1.126 -	XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
   1.127  	call *%edi
   1.128  	jmp ret_from_exception
   1.129  
   1.130 @@ -488,29 +464,27 @@ error_code:
   1.131  # activation and restart the handler using the previous one.
   1.132  ENTRY(hypervisor_callback)
   1.133  	pushl %eax
   1.134 -	SAVE_ALL_NO_EVENTMASK
   1.135 +	SAVE_ALL
   1.136  	movl EIP(%esp),%eax
   1.137  	cmpl $scrit,%eax
   1.138  	jb   11f
   1.139  	cmpl $ecrit,%eax
   1.140  	jb   critical_region_fixup
   1.141 -11:	XEN_GET_VCPU_INFO(%esi)
   1.142 -	movb $0, EVENT_MASK(%esp)
   1.143 -	push %esp
   1.144 +11:	push %esp
   1.145  	call evtchn_do_upcall
   1.146  	add  $4,%esp
   1.147  	jmp  ret_from_intr
   1.148  
   1.149          ALIGN
   1.150  restore_all_enable_events:  
   1.151 -	XEN_UNBLOCK_EVENTS(%esi)
   1.152 +	XEN_LOCKED_UNBLOCK_EVENTS(%esi)
   1.153  scrit:	/**** START OF CRITICAL REGION ****/
   1.154  	XEN_TEST_PENDING(%esi)
   1.155  	jnz  14f			# process more events if necessary...
   1.156 -	XEN_UNLOCK_VCPU_INFO_SMP(%esi)
   1.157 +	XEN_PUT_VCPU_INFO(%esi)
   1.158  	RESTORE_ALL
   1.159  14:	XEN_LOCKED_BLOCK_EVENTS(%esi)
   1.160 -	XEN_UNLOCK_VCPU_INFO_SMP(%esi)
   1.161 +	XEN_PUT_VCPU_INFO(%esi)
   1.162  	jmp  11b
   1.163  ecrit:  /**** END OF CRITICAL REGION ****/
   1.164  # [How we do the fixup]. We want to merge the current stack frame with the
   1.165 @@ -523,15 +497,12 @@ ecrit:  /**** END OF CRITICAL REGION ***
   1.166  critical_region_fixup:
   1.167  	addl $critical_fixup_table-scrit,%eax
   1.168  	movzbl (%eax),%eax		# %eax contains num bytes popped
   1.169 -#ifdef CONFIG_SMP
   1.170 -	cmpb $0xff,%al
   1.171 +	cmpb $0xff,%al                  # 0xff => vcpu_info critical region
   1.172  	jne  15f
   1.173 -	add  $1,%al
   1.174  	GET_THREAD_INFO(%ebp)
   1.175 -	XEN_UNLOCK_VCPU_INFO_SMP(%esi)
   1.176 -15:
   1.177 -#endif
   1.178 -    	mov  %esp,%esi
   1.179 +	XEN_PUT_VCPU_INFO(%esi)         # abort vcpu_info critical region
   1.180 +        xorl %eax,%eax
   1.181 +15:	mov  %esp,%esi
   1.182  	add  %eax,%esi			# %esi points at end of src region
   1.183  	mov  %esp,%edi
   1.184  	add  $0x34,%edi			# %edi points at end of dst region
   1.185 @@ -547,9 +518,9 @@ 17:	movl %edi,%esp			# final %edi is top
   1.186  	jmp  11b
   1.187  
   1.188  critical_fixup_table:
   1.189 -	.byte Ux00,Ux00,Ux00		# testb $0xff,(%esi) = XEN_TEST_PENDING
   1.190 -	.byte Ux00,Ux00			# jnz  14f
   1.191 -	XEN_UNLOCK_VCPU_INFO_SMP_fixup
   1.192 +	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = XEN_TEST_PENDING
   1.193 +	.byte 0xff,0xff			# jnz  14f
   1.194 +	XEN_PUT_VCPU_INFO_fixup
   1.195  	.byte 0x00			# pop  %ebx
   1.196  	.byte 0x04			# pop  %ecx
   1.197  	.byte 0x08			# pop  %edx
   1.198 @@ -561,8 +532,8 @@ critical_fixup_table:
   1.199  	.byte 0x20			# pop  %es
   1.200  	.byte 0x24,0x24,0x24		# add  $4,%esp
   1.201  	.byte 0x28			# iret
   1.202 -	.byte Ux00,Ux00,Ux00,Ux00	# movb $1,1(%esi)
   1.203 -	XEN_UNLOCK_VCPU_INFO_SMP_fixup
   1.204 +	.byte 0xff,0xff,0xff,0xff	# movb $1,1(%esi)
   1.205 +	XEN_PUT_VCPU_INFO_fixup
   1.206  	.byte 0x00,0x00			# jmp  11b
   1.207  
   1.208  # Hypervisor uses this for application faults while it executes.
   1.209 @@ -766,8 +737,6 @@ ENTRY(page_fault)
   1.210  	movl %eax, %ds
   1.211  	movl %eax, %es
   1.212  	movl %esp,%eax			/* pt_regs pointer */
   1.213 -	XEN_GET_VCPU_INFO(%esi)
   1.214 -	XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
   1.215  	call do_page_fault
   1.216  	jmp ret_from_exception
   1.217  
     2.1 --- a/xen/arch/x86/traps.c	Tue May 17 09:04:37 2005 +0000
     2.2 +++ b/xen/arch/x86/traps.c	Tue May 17 09:14:58 2005 +0000
     2.3 @@ -257,7 +257,7 @@ static inline int do_trap(int trapnr, ch
     2.4          tb->error_code = regs->error_code;
     2.5      }
     2.6      if ( TI_GET_IF(ti) )
     2.7 -        ed->vcpu_info->evtchn_upcall_mask = 1;
     2.8 +        tb->flags |= TBF_INTERRUPT;
     2.9      return 0;
    2.10  
    2.11   xen_fault:
    2.12 @@ -322,7 +322,7 @@ asmlinkage int do_int3(struct cpu_user_r
    2.13      tb->cs    = ti->cs;
    2.14      tb->eip   = ti->address;
    2.15      if ( TI_GET_IF(ti) )
    2.16 -        ed->vcpu_info->evtchn_upcall_mask = 1;
    2.17 +        tb->flags |= TBF_INTERRUPT;
    2.18  
    2.19      return 0;
    2.20  }
    2.21 @@ -345,7 +345,7 @@ void propagate_page_fault(unsigned long 
    2.22      tb->cs         = ti->cs;
    2.23      tb->eip        = ti->address;
    2.24      if ( TI_GET_IF(ti) )
    2.25 -        ed->vcpu_info->evtchn_upcall_mask = 1;
    2.26 +        tb->flags |= TBF_INTERRUPT;
    2.27  
    2.28      ed->arch.guest_cr2 = addr;
    2.29  }
    2.30 @@ -911,7 +911,7 @@ asmlinkage int do_general_protection(str
    2.31      tb->cs         = ti->cs;
    2.32      tb->eip        = ti->address;
    2.33      if ( TI_GET_IF(ti) )
    2.34 -        ed->vcpu_info->evtchn_upcall_mask = 1;
    2.35 +        tb->flags |= TBF_INTERRUPT;
    2.36      return 0;
    2.37  
    2.38   gp_in_kernel:
     3.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Tue May 17 09:04:37 2005 +0000
     3.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Tue May 17 09:14:58 2005 +0000
     3.3 @@ -42,6 +42,7 @@ void __dummy__(void)
     3.4      OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
     3.5      OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
     3.6      OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
     3.7 +    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
     3.8      OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
     3.9      DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    3.10      BLANK();
     4.1 --- a/xen/arch/x86/x86_32/entry.S	Tue May 17 09:04:37 2005 +0000
     4.2 +++ b/xen/arch/x86/x86_32/entry.S	Tue May 17 09:14:58 2005 +0000
     4.3 @@ -288,8 +288,6 @@ test_all_events:
     4.4          movw %ax,TRAPBOUNCE_cs(%edx)
     4.5          movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
     4.6          call create_bounce_frame
     4.7 -        movl EDOMAIN_vcpu_info(%ebx),%eax
     4.8 -        movb $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
     4.9          jmp  test_all_events
    4.10  
    4.11          ALIGN
    4.12 @@ -330,14 +328,20 @@ ring1:  /* obtain ss/esp from oldss/olde
    4.13          movl UREGS_esp+4(%esp),%esi
    4.14  FLT13:  movl UREGS_ss+4(%esp),%gs 
    4.15  1:      /* Construct a stack frame: EFLAGS, CS/EIP */
    4.16 +        movb TRAPBOUNCE_flags(%edx),%cl
    4.17          subl $12,%esi
    4.18          movl UREGS_eip+4(%esp),%eax
    4.19  FLT14:  movl %eax,%gs:(%esi) 
    4.20 -        movl UREGS_cs+4(%esp),%eax
    4.21 +        movl EDOMAIN_vcpu_info(%ebx),%eax
    4.22 +        pushl VCPUINFO_upcall_mask(%eax)
    4.23 +        testb $TBF_INTERRUPT,%cl
    4.24 +        setnz VCPUINFO_upcall_mask(%eax) # TBF_INTERRUPT -> set upcall mask
    4.25 +        popl %eax
    4.26 +        shll $16,%eax                    # Bits 16-23: saved_upcall_mask
    4.27 +        movw UREGS_cs+4(%esp),%ax        # Bits  0-15: CS
    4.28  FLT15:  movl %eax,%gs:4(%esi) 
    4.29          movl UREGS_eflags+4(%esp),%eax
    4.30  FLT16:  movl %eax,%gs:8(%esi)
    4.31 -        movb TRAPBOUNCE_flags(%edx),%cl
    4.32          test $TBF_EXCEPTION_ERRCODE,%cl
    4.33          jz   1f
    4.34          subl $4,%esi                    # push error_code onto guest frame
     5.1 --- a/xen/arch/x86/x86_32/seg_fixup.c	Tue May 17 09:04:37 2005 +0000
     5.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c	Tue May 17 09:14:58 2005 +0000
     5.3 @@ -275,7 +275,7 @@ int gpf_emulate_4gb(struct cpu_user_regs
     5.4      u32           disp32 = 0;
     5.5      u8            *eip;         /* ptr to instruction start */
     5.6      u8            *pb, b;       /* ptr into instr. / current instr. byte */
     5.7 -    u32           *pseg = NULL; /* segment for memory operand (NULL=default) */
     5.8 +    u16           *pseg = NULL; /* segment for memory operand (NULL=default) */
     5.9  
    5.10      /* WARNING: We only work for ring-3 segments. */
    5.11      if ( unlikely(VM86_MODE(regs)) || unlikely(!RING_3(regs)) )
    5.12 @@ -456,7 +456,7 @@ int gpf_emulate_4gb(struct cpu_user_regs
    5.13          tb->cs         = ti->cs;
    5.14          tb->eip        = ti->address;
    5.15          if ( TI_GET_IF(ti) )
    5.16 -            d->vcpu_info->evtchn_upcall_mask = 1;
    5.17 +            tb->flags |= TBF_INTERRUPT;
    5.18      }
    5.19  
    5.20      return EXCRET_fault_fixed;
     6.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Tue May 17 09:04:37 2005 +0000
     6.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Tue May 17 09:14:58 2005 +0000
     6.3 @@ -46,6 +46,7 @@ void __dummy__(void)
     6.4      OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
     6.5      OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
     6.6      OFFSET(UREGS_ss, struct cpu_user_regs, ss);
     6.7 +    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
     6.8      OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
     6.9      DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    6.10      BLANK();
     7.1 --- a/xen/arch/x86/x86_64/entry.S	Tue May 17 09:04:37 2005 +0000
     7.2 +++ b/xen/arch/x86/x86_64/entry.S	Tue May 17 09:14:58 2005 +0000
     7.3 @@ -147,8 +147,6 @@ test_all_events:
     7.4          movq  %rax,TRAPBOUNCE_eip(%rdx)
     7.5          movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
     7.6          call  create_bounce_frame
     7.7 -        movq  EDOMAIN_vcpu_info(%rbx),%rax
     7.8 -        movb  $1,VCPUINFO_upcall_mask(%rax) # Upcalls masked during delivery
     7.9          jmp   test_all_events
    7.10  
    7.11  #ifdef CONFIG_VMX
    7.12 @@ -305,18 +303,24 @@ 2:      movq  $HYPERVISOR_VIRT_START,%ra
    7.13          movq  $HYPERVISOR_VIRT_END+60,%rax
    7.14          cmpq  %rax,%rsi
    7.15          jb    domain_crash_synchronous  # Above Xen private area? Then okay.
    7.16 -1:      subq  $40,%rsi
    7.17 +1:      movb  TRAPBOUNCE_flags(%rdx),%cl
    7.18 +        subq  $40,%rsi
    7.19          movq  UREGS_ss+8(%rsp),%rax
    7.20  FLT2:   movq  %rax,32(%rsi)             # SS
    7.21          movq  UREGS_rsp+8(%rsp),%rax
    7.22  FLT3:   movq  %rax,24(%rsi)             # RSP
    7.23          movq  UREGS_eflags+8(%rsp),%rax
    7.24  FLT4:   movq  %rax,16(%rsi)             # RFLAGS
    7.25 -        movq  UREGS_cs+8(%rsp),%rax
    7.26 -FLT5:   movq  %rax,8(%rsi)              # CS
    7.27 +        movq  EDOMAIN_vcpu_info(%rbx),%rax
    7.28 +        pushq VCPUINFO_upcall_mask(%rax)
    7.29 +        testb $TBF_INTERRUPT,%cl
    7.30 +        setnz VCPUINFO_upcall_mask(%rax) # TBF_INTERRUPT -> set upcall mask
    7.31 +        popq  %rax
    7.32 +        shll  $16,%eax                  # Bits 16-23: saved_upcall_mask
    7.33 +        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
    7.34 +FLT5:   movq  %rax,8(%rsi)              # CS/saved_upcall_mask
    7.35          movq  UREGS_rip+8(%rsp),%rax
    7.36  FLT6:   movq  %rax,(%rsi)               # RIP
    7.37 -        movb  TRAPBOUNCE_flags(%rdx),%cl
    7.38          testb $TBF_EXCEPTION_ERRCODE,%cl
    7.39          jz    1f
    7.40          subq  $8,%rsi
     8.1 --- a/xen/arch/x86/x86_64/traps.c	Tue May 17 09:04:37 2005 +0000
     8.2 +++ b/xen/arch/x86/x86_64/traps.c	Tue May 17 09:14:58 2005 +0000
     8.3 @@ -12,7 +12,7 @@
     8.4  
     8.5  void show_registers(struct cpu_user_regs *regs)
     8.6  {
     8.7 -    printk("CPU:    %d\nEIP:    %04lx:[<%016lx>]      \nEFLAGS: %016lx\n",
     8.8 +    printk("CPU:    %d\nEIP:    %04x:[<%016lx>]      \nEFLAGS: %016lx\n",
     8.9             smp_processor_id(), 0xffff & regs->cs, regs->rip, regs->eflags);
    8.10      printk("rax: %016lx   rbx: %016lx   rcx: %016lx   rdx: %016lx\n",
    8.11             regs->rax, regs->rbx, regs->rcx, regs->rdx);
     9.1 --- a/xen/include/public/arch-x86_32.h	Tue May 17 09:04:37 2005 +0000
     9.2 +++ b/xen/include/public/arch-x86_32.h	Tue May 17 09:14:58 2005 +0000
     9.3 @@ -108,14 +108,16 @@ typedef struct cpu_user_regs {
     9.4      u16 error_code;    /* private */
     9.5      u16 entry_vector;  /* private */
     9.6      u32 eip;
     9.7 -    u32 cs;
     9.8 +    u16 cs;
     9.9 +    u8  saved_upcall_mask;
    9.10 +    u8  _pad0;
    9.11      u32 eflags;
    9.12      u32 esp;
    9.13 -    u32 ss;
    9.14 -    u32 es;
    9.15 -    u32 ds;
    9.16 -    u32 fs;
    9.17 -    u32 gs;
    9.18 +    u16 ss, _pad1;
    9.19 +    u16 es, _pad2;
    9.20 +    u16 ds, _pad3;
    9.21 +    u16 fs, _pad4;
    9.22 +    u16 gs, _pad5;
    9.23  } cpu_user_regs_t;
    9.24  
    9.25  typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
    10.1 --- a/xen/include/public/arch-x86_64.h	Tue May 17 09:04:37 2005 +0000
    10.2 +++ b/xen/include/public/arch-x86_64.h	Tue May 17 09:14:58 2005 +0000
    10.3 @@ -147,28 +147,30 @@ typedef struct cpu_user_regs {
    10.4      u64 r14;
    10.5      u64 r13;
    10.6      u64 r12;
    10.7 -    union { u64 rbp, ebp; } PACKED;
    10.8 -    union { u64 rbx, ebx; } PACKED;
    10.9 +    union { u64 rbp, ebp; };
   10.10 +    union { u64 rbx, ebx; };
   10.11      u64 r11;
   10.12      u64 r10;
   10.13      u64 r9;
   10.14      u64 r8;
   10.15 -    union { u64 rax, eax; } PACKED;
   10.16 -    union { u64 rcx, ecx; } PACKED;
   10.17 -    union { u64 rdx, edx; } PACKED;
   10.18 -    union { u64 rsi, esi; } PACKED;
   10.19 -    union { u64 rdi, edi; } PACKED;
   10.20 +    union { u64 rax, eax; };
   10.21 +    union { u64 rcx, ecx; };
   10.22 +    union { u64 rdx, edx; };
   10.23 +    union { u64 rsi, esi; };
   10.24 +    union { u64 rdi, edi; };
   10.25      u32 error_code;    /* private */
   10.26      u32 entry_vector;  /* private */
   10.27 -    union { u64 rip, eip; } PACKED;
   10.28 -    u64 cs;
   10.29 -    union { u64 rflags, eflags; } PACKED;
   10.30 -    union { u64 rsp, esp; } PACKED;
   10.31 -    u64 ss;
   10.32 -    u64 es;
   10.33 -    u64 ds;
   10.34 -    u64 fs;      /* Non-zero => takes precedence over fs_base.      */
   10.35 -    u64 gs;      /* Non-zero => takes precedence over gs_base_user. */
   10.36 +    union { u64 rip, eip; };
   10.37 +    u16 cs;
   10.38 +    u8  saved_upcall_mask;
   10.39 +    u8  _pad0[5];
   10.40 +    union { u64 rflags, eflags; };
   10.41 +    union { u64 rsp, esp; };
   10.42 +    u16 ss, _pad1[3];
   10.43 +    u16 es, _pad2[3];
   10.44 +    u16 ds, _pad3[3];
   10.45 +    u16 fs, _pad4[3]; /* Non-zero => takes precedence over fs_base.      */
   10.46 +    u16 gs, _pad5[3]; /* Non-zero => takes precedence over gs_base_user. */
   10.47  } cpu_user_regs_t;
   10.48  
   10.49  typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
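
As the entry.S comments above note ("Bits 16-23: saved_upcall_mask"), the
hypervisor packs the saved mask into the same word as the 16-bit CS selector
when building the bounce frame, which is exactly the layout the reworked
cpu_user_regs (u16 cs; u8 saved_upcall_mask) exposes to the guest. A hedged
sketch of that packing, with an illustrative helper name:

    #include <stdint.h>

    /* Build the CS slot of a guest bounce frame: CS selector in bits 0-15,
     * saved upcall mask in bits 16-23 -- the shll $16 / movw ...,%ax
     * sequence from create_bounce_frame above. */
    static uint32_t pack_cs_slot(uint16_t cs, uint8_t saved_upcall_mask)
    {
        return ((uint32_t)saved_upcall_mask << 16) | (uint32_t)cs;
    }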