ia64/xen-unstable
changeset 3170:08ca2c180189
bitkeeper revision 1.1159.183.19 (41a897fdy5UXp_KAyGlyQ4KVUBRHhQ)
Merge scramble.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-2.0-testing.bk
into scramble.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
| author | kaf24@scramble.cl.cam.ac.uk |
|---|---|
| date | Sat Nov 27 15:06:37 2004 +0000 (2004-11-27) |
| parents | 20290eb62e95 861d3cdc1dc5 |
| children | 456cf7ce1617 |
| files | linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S xen/arch/x86/pdb-stub.c xen/arch/x86/traps.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_32/seg_fixup.c xen/include/asm-x86/debugger.h xen/include/asm-x86/x86_32/asm_defns.h xen/include/asm-x86/x86_32/regs.h xen/include/public/xen.h |
line diff
--- a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S	Sat Nov 27 13:05:31 2004 +0000
+++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S	Sat Nov 27 15:06:37 2004 +0000
@@ -309,6 +309,8 @@ syscall_exit:
 	testw $_TIF_ALLWORK_MASK, %cx	# current->work
 	jne syscall_exit_work
 restore_all:
+	testl $VM_MASK, EFLAGS(%esp)
+	jnz resume_vm86
 	movb EVENT_MASK(%esp), %al
 	notb %al			# %al == ~saved_mask
 	andb evtchn_upcall_mask(%esi),%al
@@ -316,6 +318,13 @@ restore_all:
 	jnz restore_all_enable_events	# != 0 => reenable event delivery
 	RESTORE_ALL
 
+resume_vm86:
+	RESTORE_REGS
+	movl %eax,(%esp)
+	movl $__HYPERVISOR_switch_vm86,%eax
+	int $0x82			# Atomically enables event delivery
+	ud2
+
 	# perform work that needs to be done immediately before resumption
 	ALIGN
 work_pending:
@@ -546,7 +555,8 @@ 1:	popl %ds
 2:	popl %es
 3:	popl %fs
 4:	popl %gs
-5:	iret
+	SAVE_ALL
+	jmp ret_from_exception
 .section .fixup,"ax";	\
 6:	movl $0,(%esp);	\
 	jmp 1b;	\
@@ -556,12 +566,6 @@ 8:	movl $0,(%esp);	\
 	jmp 3b;	\
 9:	movl $0,(%esp);	\
 	jmp 4b;	\
-10:	pushl %ss;	\
-	popl %ds;	\
-	pushl %ss;	\
-	popl %es;	\
-	pushl $11;	\
-	call do_exit;	\
 .previous;	\
 .section __ex_table,"a";\
 	.align 4;	\
@@ -569,7 +573,6 @@ 10:	pushl %ss;	\
 	.long 2b,7b;	\
 	.long 3b,8b;	\
 	.long 4b,9b;	\
-	.long 5b,10b;	\
 	.previous
 
 ENTRY(coprocessor_error)
@@ -887,7 +890,7 @@ ENTRY(sys_call_table)
 	.long sys_iopl			/* 110 */
 	.long sys_vhangup
 	.long sys_ni_syscall		/* old "idle" system call */
-	.long sys_ni_syscall		/* disable sys_vm86old */
+	.long sys_vm86old
 	.long sys_wait4
 	.long sys_swapoff		/* 115 */
 	.long sys_sysinfo
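The new guest resume path above boils down to an ordinary hypercall: EAX carries `__HYPERVISOR_switch_vm86` and `int $0x82` traps into Xen, which rebuilds the vm86 frame from the guest kernel stack and does not return to the caller. A minimal illustrative sketch of that calling convention follows; the wrapper name is made up and is not part of this changeset.

```c
#ifndef __HYPERVISOR_switch_vm86
#define __HYPERVISOR_switch_vm86 23   /* value added to xen/include/public/xen.h below */
#endif

/*
 * Sketch only, not from the patch: the calling convention used by
 * resume_vm86 above.  Xen's do_switch_vm86 reads the saved EAX and the
 * vm86 frame {EIP,CS,EFLAGS,ESP,SS,ES,DS,FS,GS} directly from the guest
 * kernel (ring-1) stack, so this only makes sense when that frame is
 * already in place at the current stack pointer.
 */
static inline void hypothetical_switch_vm86(void)
{
    long result;
    __asm__ __volatile__ (
        "int $0x82"                        /* Xen hypercall trap gate */
        : "=a" (result)
        : "0" (__HYPERVISOR_switch_vm86)
        : "memory" );
    /* Not reached on success: Xen resumes the domain in vm86 mode. */
}
```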
--- a/xen/arch/x86/pdb-stub.c	Sat Nov 27 13:05:31 2004 +0000
+++ b/xen/arch/x86/pdb-stub.c	Sat Nov 27 15:06:37 2004 +0000
@@ -1083,13 +1083,17 @@ int pdb_handle_exception(int exceptionVe
     int watchdog_save;
     unsigned long cr3 = read_cr3();
 
+    /* No vm86 handling here as yet. */
+    if ( VM86_MODE(xen_regs) )
+        return 1;
+
     /* If the exception is an int3 from user space then pdb is only
        interested if it re-wrote an instruction set the breakpoint.
        This occurs when leaving a system call from a domain.
     */
-    if ( exceptionVector == 3 &&
-         (xen_regs->cs & 3) == 3 &&
-         xen_regs->eip != pdb_system_call_next_addr + 1)
+    if ( (exceptionVector == 3) &&
+         RING_3(xen_regs) &&
+         (xen_regs->eip != (pdb_system_call_next_addr + 1)) )
     {
         TRC(printf("pdb: user bkpt (0x%x) at 0x%x:0x%lx:0x%x\n",
                    exceptionVector, xen_regs->cs & 3, cr3, xen_regs->eip));
--- a/xen/arch/x86/traps.c	Sat Nov 27 13:05:31 2004 +0000
+++ b/xen/arch/x86/traps.c	Sat Nov 27 15:06:37 2004 +0000
@@ -54,6 +54,8 @@
 
 #if defined(__i386__)
 
+#define GUEST_FAULT(_r) (likely(VM86_MODE(_r) || !RING_0(_r)))
+
 #define DOUBLEFAULT_STACK_SIZE 1024
 static struct tss_struct doublefault_tss;
 static unsigned char doublefault_stack[DOUBLEFAULT_STACK_SIZE];
@@ -164,7 +166,7 @@ void show_registers(struct xen_regs *reg
     unsigned long esp;
     unsigned short ss, ds, es, fs, gs;
 
-    if ( regs->cs & 3 )
+    if ( GUEST_FAULT(regs) )
     {
         esp = regs->esp;
         ss  = regs->ss & 0xffff;
@@ -247,7 +249,7 @@ static inline int do_trap(int trapnr, ch
 
     DEBUGGER_trap_entry(trapnr, regs);
 
-    if ( !(regs->cs & 3) )
+    if ( !GUEST_FAULT(regs) )
         goto xen_fault;
 
     ti = current->thread.traps + trapnr;
@@ -313,7 +315,7 @@ asmlinkage int do_int3(struct xen_regs *
 
     DEBUGGER_trap_entry(TRAP_int3, regs);
 
-    if ( unlikely((regs->cs & 3) == 0) )
+    if ( !GUEST_FAULT(regs) )
     {
         DEBUGGER_trap_fatal(TRAP_int3, regs);
         show_registers(regs);
@@ -419,7 +421,7 @@ asmlinkage int do_page_fault(struct xen_
         return EXCRET_fault_fixed; /* successfully copied the mapping */
     }
 
-    if ( unlikely(!(regs->cs & 3)) )
+    if ( !GUEST_FAULT(regs) )
         goto xen_fault;
 
     ti = d->thread.traps + 14;
@@ -479,8 +481,10 @@ asmlinkage int do_general_protection(str
 
     DEBUGGER_trap_entry(TRAP_gp_fault, regs);
 
-    /* Badness if error in ring 0, or result of an interrupt. */
-    if ( !(regs->cs & 3) || (regs->error_code & 1) )
+    if ( regs->error_code & 1 )
+        goto hardware_gp;
+
+    if ( !GUEST_FAULT(regs) )
         goto gp_in_kernel;
 
     /*
@@ -507,7 +511,7 @@ asmlinkage int do_general_protection(str
     {
         /* This fault must be due to <INT n> instruction. */
         ti = current->thread.traps + (regs->error_code>>3);
-        if ( TI_GET_DPL(ti) >= (regs->cs & 3) )
+        if ( TI_GET_DPL(ti) >= (VM86_MODE(regs) ? 3 : (regs->cs & 3)) )
         {
             tb->flags = TBF_EXCEPTION;
             regs->eip += 2;
@@ -545,6 +549,7 @@ asmlinkage int do_general_protection(str
 
     DEBUGGER_trap_fatal(TRAP_gp_fault, regs);
 
+ hardware_gp:
     show_registers(regs);
     panic("CPU%d GENERAL PROTECTION FAULT\n[error_code=%04x]\n",
           smp_processor_id(), regs->error_code);
@@ -641,7 +646,7 @@ asmlinkage int do_debug(struct xen_regs 
         goto out;
     }
 
-    if ( (regs->cs & 3) == 0 )
+    if ( !GUEST_FAULT(regs) )
    {
         /* Clear TF just for absolute sanity. */
         regs->eflags &= ~EF_TF;
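One subtle case in do_general_protection above: a software interrupt (`<INT n>`) executed in vm86 mode effectively runs at CPL 3, but the saved CS is a real-mode segment whose low two bits read 0, so the DPL comparison now substitutes 3 whenever EFLAGS.VM is set. A standalone sketch of that decision (the trap-info encoding and selector values here are simplified illustrations, not Xen's actual structures):

```c
#include <stdio.h>

#define EF_VM 0x00020000                 /* EFLAGS.VM, as in regs.h below */

struct fake_regs { unsigned long eflags, cs; };

/* Simplified stand-in for TI_GET_DPL(): the DPL is stored directly. */
struct fake_trap_info { int dpl; };

/* May the guest take this software interrupt directly?  Mirrors the new
 * check: vm86 callers are treated as CPL 3, everyone else uses CS RPL. */
static int int_n_permitted(const struct fake_trap_info *ti,
                           const struct fake_regs *r)
{
    int cpl = (r->eflags & EF_VM) ? 3 : (int)(r->cs & 3);
    return ti->dpl >= cpl;
}

int main(void)
{
    struct fake_trap_info ti = { 1 };            /* gate usable from ring <= 1   */
    struct fake_regs ring1 = { 0,     0x0819 };  /* guest kernel, ring 1         */
    struct fake_regs vm86  = { EF_VM, 0xf000 };  /* vm86 task: CS RPL bits are 0 */
    printf("%d %d\n",
           int_n_permitted(&ti, &ring1),         /* 1: DPL 1 >= CPL 1 */
           int_n_permitted(&ti, &vm86));         /* 0: DPL 1 <  CPL 3 */
    return 0;
}
```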
--- a/xen/arch/x86/x86_32/entry.S	Sat Nov 27 13:05:31 2004 +0000
+++ b/xen/arch/x86/x86_32/entry.S	Sat Nov 27 15:06:37 2004 +0000
@@ -73,10 +73,13 @@ ENTRY(continue_nonidle_task)
 restore_all_guest:
         testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
         jnz  failsafe_callback
+        testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
+        jnz  restore_all_vm86
 FLT1:   movl XREGS_ds(%esp),%ds
 FLT2:   movl XREGS_es(%esp),%es
 FLT3:   movl XREGS_fs(%esp),%fs
 FLT4:   movl XREGS_gs(%esp),%gs
+restore_all_vm86:
         popl %ebx
         popl %ecx
         popl %edx
@@ -218,10 +221,11 @@ process_softirqs:
 /*   {EIP, CS, EFLAGS, [ESP, SS]}                                   */
 /* %edx == trap_bounce, %ebx == task_struct                         */
 /* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
-create_bounce_frame:        
+create_bounce_frame:
+        movl  XREGS_eflags+4(%esp),%ecx
         movb  XREGS_cs+4(%esp),%cl
-        testb $2,%cl
-        jz    1f /* jump if returning to an existing ring-1 activation */
+        testl $(2|X86_EFLAGS_VM),%ecx
+        jz    ring1 /* jump if returning to an existing ring-1 activation */
         /* obtain ss/esp from TSS -- no current ring-1 activations */
         movl DOMAIN_processor(%ebx),%eax
         /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
@@ -232,48 +236,73 @@ create_bounce_frame:
         addl $init_tss + 12,%eax
         movl (%eax),%esi /* tss->esp1 */
FLT7:   movl 4(%eax),%gs /* tss->ss1  */
-        /* base of stack frame must contain ss/esp (inter-priv iret) */
-        subl $8,%esi
+        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
+        jz   nvm86_1
+        subl $16,%esi       /* push ES/DS/FS/GS (VM86 stack frame) */
+        movl XREGS_es+4(%esp),%eax
+FLT8:   movl %eax,%gs:(%esi)
+        movl XREGS_ds+4(%esp),%eax
+FLT9:   movl %eax,%gs:4(%esi)
+        movl XREGS_fs+4(%esp),%eax
+FLT10:  movl %eax,%gs:8(%esi)
+        movl XREGS_gs+4(%esp),%eax
+FLT11:  movl %eax,%gs:12(%esi)
+nvm86_1:subl $8,%esi        /* push SS/ESP (inter-priv iret) */
         movl XREGS_esp+4(%esp),%eax
-FLT8:   movl %eax,%gs:(%esi)
+FLT12:  movl %eax,%gs:(%esi)
         movl XREGS_ss+4(%esp),%eax
-FLT9:   movl %eax,%gs:4(%esi)
-        jmp 2f
-1:      /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
+FLT13:  movl %eax,%gs:4(%esi)
+        jmp 1f
+ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
         movl XREGS_esp+4(%esp),%esi
-FLT10:  movl XREGS_ss+4(%esp),%gs
-2:      /* Construct a stack frame: EFLAGS, CS/EIP */
+FLT14:  movl XREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
         subl $12,%esi
         movl XREGS_eip+4(%esp),%eax
-FLT11:  movl %eax,%gs:(%esi)
+FLT15:  movl %eax,%gs:(%esi)
         movl XREGS_cs+4(%esp),%eax
-FLT12:  movl %eax,%gs:4(%esi)
+FLT16:  movl %eax,%gs:4(%esi)
         movl XREGS_eflags+4(%esp),%eax
-FLT13:  movl %eax,%gs:8(%esi)
+FLT17:  movl %eax,%gs:8(%esi)
         movb TRAPBOUNCE_flags(%edx),%cl
         test $TBF_EXCEPTION_ERRCODE,%cl
         jz   1f
         subl $4,%esi                    # push error_code onto guest frame
         movl TRAPBOUNCE_error_code(%edx),%eax
-FLT14:  movl %eax,%gs:(%esi)
+FLT18:  movl %eax,%gs:(%esi)
         testb $TBF_EXCEPTION_CR2,%cl
         jz   2f
         subl $4,%esi                    # push %cr2 onto guest frame
         movl TRAPBOUNCE_cr2(%edx),%eax
-FLT15:  movl %eax,%gs:(%esi)
+FLT19:  movl %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
         jz   2f
         subl $16,%esi                # add DS/ES/FS/GS to failsafe stack frame
-        movl XREGS_ds+4(%esp),%eax
-FLT16:  movl %eax,%gs:(%esi)
+        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
+        jz   nvm86_2
+        xorl %eax,%eax               # VM86: we write zero selector values
+FLT20:  movl %eax,%gs:(%esi)
+FLT21:  movl %eax,%gs:4(%esi)
+FLT22:  movl %eax,%gs:8(%esi)
+FLT23:  movl %eax,%gs:12(%esi)
+        jmp  2f
+nvm86_2:movl XREGS_ds+4(%esp),%eax   # non-VM86: write real selector values
+FLT24:  movl %eax,%gs:(%esi)
         movl XREGS_es+4(%esp),%eax
-FLT17:  movl %eax,%gs:4(%esi)
+FLT25:  movl %eax,%gs:4(%esi)
         movl XREGS_fs+4(%esp),%eax
-FLT18:  movl %eax,%gs:8(%esi)
+FLT26:  movl %eax,%gs:8(%esi)
         movl XREGS_gs+4(%esp),%eax
-FLT19:  movl %eax,%gs:12(%esi)
+FLT27:  movl %eax,%gs:12(%esi)
2:      movb $0,TRAPBOUNCE_flags(%edx)
-        /* Rewrite our stack frame and return to ring 1. */
+        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
+        jz   nvm86_3
+        xorl %eax,%eax      /* zero DS-GS, just as a real CPU would */
+        movl %eax,XREGS_ds+4(%esp)
+        movl %eax,XREGS_es+4(%esp)
+        movl %eax,XREGS_fs+4(%esp)
+        movl %eax,XREGS_gs+4(%esp)
+nvm86_3:/* Rewrite our stack frame and return to ring 1. */
         /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
         andl $0xfffcbeff,XREGS_eflags+4(%esp)
         movl %gs,XREGS_ss+4(%esp)
@@ -297,19 +326,11 @@ FIX7:   sti
DBLFLT2:jmp   process_guest_exception_and_events
.previous
.section __pre_ex_table,"a"
-        .long FLT7,FIX7
-        .long FLT8,FIX7
-        .long FLT9,FIX7
-        .long FLT10,FIX7
-        .long FLT11,FIX7
-        .long FLT12,FIX7
-        .long FLT13,FIX7
-        .long FLT14,FIX7
-        .long FLT15,FIX7
-        .long FLT16,FIX7
-        .long FLT17,FIX7
-        .long FLT18,FIX7
-        .long FLT19,FIX7
+        .long FLT7,FIX7  , FLT8,FIX7  , FLT9,FIX7  , FLT10,FIX7
+        .long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
+        .long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
+        .long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
+        .long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
.previous
.section __ex_table,"a"
        .long DBLFLT2,domain_crash
@@ -325,11 +346,12 @@ process_guest_exception_and_events:
 
        ALIGN
ENTRY(ret_from_intr)
-        GET_CURRENT(%ebx)
-        movb  XREGS_cs(%esp),%al
-        testb $3,%al    # return to non-supervisor?
-        jnz   test_all_events
-        jmp   restore_all_xen
+        GET_CURRENT(%ebx)
+        movl  XREGS_eflags(%esp),%eax
+        movb  XREGS_cs(%esp),%al
+        testl $(3|X86_EFLAGS_VM),%eax
+        jnz   test_all_events
+        jmp   restore_all_xen
 
ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
@@ -347,15 +369,18 @@ 1:      sti
        GET_CURRENT(%ebx)
        call  *SYMBOL_NAME(exception_table)(,%eax,4)
        addl  $4,%esp
+        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
-        testb $3,%al
+        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        jmp   process_guest_exception_and_events
 
exception_with_ints_disabled:
+        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
-        testb $3,%al                    # interrupts disabled outside Xen?
-        jnz   1b                        # it really does happen! (e.g., DOM0 X server)
+        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
+        jnz   1b                        # it really does happen!
+                                        #  (e.g., DOM0 X server)
        pushl XREGS_eip(%esp)
        call  search_pre_exception_table
        addl  $4,%esp
@@ -469,8 +494,9 @@ ENTRY(nmi)
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
+        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
-        testb $3,%al
+        testl $(3|X86_EFLAGS_VM),%eax
        jnz   do_watchdog_tick
        movl  XREGS_ds(%esp),%eax
        cmpw  $(__HYPERVISOR_DS),%ax
@@ -494,8 +520,9 @@ do_watchdog_tick:
        pushl %edx   # regs
        call  SYMBOL_NAME(do_nmi)
        addl  $8,%esp
+        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
-        testb $3,%al
+        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        GET_CURRENT(%ebx)
        jmp   restore_all_guest
@@ -539,7 +566,62 @@ nmi_io_err:
        call  SYMBOL_NAME(io_check_error)
        addl  $4,%esp
        jmp   ret_from_intr
- 
+
+
+ENTRY(setup_vm86_frame)
+        # Copies the entire stack frame forwards by 16 bytes.
+        .macro copy_vm86_words count=18
+        .if \count
+        pushl ((\count-1)*4)(%esp)
+        popl  ((\count-1)*4)+16(%esp)
+        copy_vm86_words "(\count-1)"
+        .endif
+        .endm
+        copy_vm86_words
+        addl $16,%esp
+        ret
+
+do_switch_vm86:
+        # Discard the return address
+        addl $4,%esp
+
+        movl XREGS_eflags(%esp),%ecx
+
+        # GS:ESI == Ring-1 stack activation
+        movl XREGS_esp(%esp),%esi
+VFLT1:  movl XREGS_ss(%esp),%gs
+
+        # ES:EDI == Ring-0 stack activation
+        leal XREGS_eip(%esp),%edi
+
+        # Restore the hypercall-number-clobbered EAX on our stack frame
+VFLT2:  movl %gs:(%esi),%eax
+        movl %eax,XREGS_eax(%esp)
+        addl $4,%esi
+
+        # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
+        movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
+VFLT3:  movl %gs:(%esi),%eax
+        stosl
+        addl $4,%esi
+        loop VFLT3
+
+        # Fix up EFLAGS
+        andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
+        andl $X86_EFLAGS_IOPL,%ecx  # Ignore attempts to change EFLAGS.IOPL
+        jnz  1f
+        orl  $X86_EFLAGS_IF,%ecx    # EFLAGS.IOPL=0 => no messing with EFLAGS.IF
+1:      orl  $X86_EFLAGS_VM,%ecx    # Force EFLAGS.VM
+        orl  %ecx,XREGS_eflags(%esp)
+
+        jmp  test_all_events
+
+.section __ex_table,"a"
+        .long VFLT1,domain_crash
+        .long VFLT2,domain_crash
+        .long VFLT3,domain_crash
+.previous
+
 .data
 
ENTRY(exception_table)
@@ -588,6 +670,7 @@ ENTRY(hypercall_table)
        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
        .long SYMBOL_NAME(do_vm_assist)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
+        .long SYMBOL_NAME(do_switch_vm86)
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long SYMBOL_NAME(do_ni_hypercall)
        .endr
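For reference, do_switch_vm86 above expects the guest to leave its ring-1 stack laid out exactly as the resume_vm86 path in the Linux patch does. A sketch of that layout as a C struct (inferred from the copy loop for illustration; no such structure is defined anywhere in the patch):

```c
/*
 * Inferred layout of the guest ring-1 stack at the point of the
 * switch_vm86 hypercall, lowest address first (10 words, 40 bytes on
 * x86-32).  do_switch_vm86 reads saved_eax from %gs:(%esi), then copies
 * the remaining nine words over its own frame starting at XREGS_eip.
 */
struct switch_vm86_stack {
    unsigned long saved_eax;                 /* guest EAX, clobbered by the hypercall number */
    unsigned long eip, cs, eflags, esp, ss;  /* vm86 IRET frame */
    unsigned long es, ds, fs, gs;            /* vm86 segment registers */
};
```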
--- a/xen/arch/x86/x86_32/seg_fixup.c	Sat Nov 27 13:05:31 2004 +0000
+++ b/xen/arch/x86/x86_32/seg_fixup.c	Sat Nov 27 15:06:37 2004 +0000
@@ -304,7 +304,7 @@ int gpf_emulate_4gb(struct xen_regs *reg
     unsigned int *pseg = NULL; /* segment for memory operand (NULL=default) */
 
     /* WARNING: We only work for ring-3 segments. */
-    if ( unlikely((regs->cs & 3) != 3) )
+    if ( unlikely(VM86_MODE(regs)) || unlikely(!RING_3(regs)) )
     {
         DPRINTK("Taken fault at bad CS %04x\n", regs->cs);
         goto fail;
--- a/xen/include/asm-x86/debugger.h	Sat Nov 27 13:05:31 2004 +0000
+++ b/xen/include/asm-x86/debugger.h	Sat Nov 27 15:06:37 2004 +0000
@@ -55,7 +55,8 @@ static inline int debugger_trap_entry(
         break;
 
     case TRAP_gp_fault:
-        if ( ((regs->cs & 3) != 0) && ((regs->error_code & 3) == 2) &&
+        if ( (VM86_MODE(regs) || !RING_0(regs)) &&
+             ((regs->error_code & 3) == 2) &&
             pdb_initialized && (pdb_ctx.system_call != 0) )
        {
            unsigned long cr3 = read_cr3();
--- a/xen/include/asm-x86/x86_32/asm_defns.h	Sat Nov 27 13:05:31 2004 +0000
+++ b/xen/include/asm-x86/x86_32/asm_defns.h	Sat Nov 27 15:06:37 2004 +0000
@@ -11,7 +11,7 @@
 /* Maybe auto-generate the following two cases (quoted vs. unquoted). */
 #ifndef __ASSEMBLY__
 
-#define __SAVE_ALL_PRE(_reg) \
+#define __SAVE_ALL_PRE \
     "cld;" \
     "pushl %eax;" \
     "pushl %ebp;" \
@@ -20,16 +20,20 @@
     "pushl %edx;" \
     "pushl %ecx;" \
     "pushl %ebx;" \
-    "movb "STR(XREGS_cs)"(%esp),%"STR(_reg)"l;" \
-    "testb $3,%"STR(_reg)"l;" \
-    "je 1f;" \
+    "testl $"STR(X86_EFLAGS_VM)","STR(XREGS_eflags)"(%esp);" \
+    "jz 2f;" \
+    "call setup_vm86_frame;" \
+    "jmp 3f;" \
+    "2:testb $3,"STR(XREGS_cs)"(%esp);" \
+    "jz 1f;" \
     "movl %ds,"STR(XREGS_ds)"(%esp);" \
     "movl %es,"STR(XREGS_es)"(%esp);" \
     "movl %fs,"STR(XREGS_fs)"(%esp);" \
-    "movl %gs,"STR(XREGS_gs)"(%esp);"
+    "movl %gs,"STR(XREGS_gs)"(%esp);" \
+    "3:"
 
 #define SAVE_ALL_NOSEGREGS(_reg) \
-    __SAVE_ALL_PRE(_reg) \
+    __SAVE_ALL_PRE \
     "1:"
 
 #define SET_XEN_SEGMENTS(_reg) \
@@ -38,13 +42,13 @@
     "movl %e"STR(_reg)"x,%es;"
 
 #define SAVE_ALL(_reg) \
-    __SAVE_ALL_PRE(_reg) \
+    __SAVE_ALL_PRE \
     SET_XEN_SEGMENTS(_reg) \
     "1:"
 
 #else
 
-#define __SAVE_ALL_PRE(_reg) \
+#define __SAVE_ALL_PRE \
         cld; \
         pushl %eax; \
         pushl %ebp; \
@@ -53,16 +57,20 @@
         pushl %edx; \
         pushl %ecx; \
         pushl %ebx; \
-        movb XREGS_cs(%esp),% ## _reg ## l; \
-        testb $3,% ## _reg ## l; \
-        je 1f; \
+        testl $X86_EFLAGS_VM,XREGS_eflags(%esp); \
+        jz 2f; \
+        call setup_vm86_frame; \
+        jmp 3f; \
+        2:testb $3,XREGS_cs(%esp); \
+        jz 1f; \
         movl %ds,XREGS_ds(%esp); \
         movl %es,XREGS_es(%esp); \
         movl %fs,XREGS_fs(%esp); \
-        movl %gs,XREGS_gs(%esp);
+        movl %gs,XREGS_gs(%esp); \
+        3:
 
 #define SAVE_ALL_NOSEGREGS(_reg) \
-        __SAVE_ALL_PRE(_reg) \
+        __SAVE_ALL_PRE \
         1:
 
 #define SET_XEN_SEGMENTS(_reg) \
@@ -71,7 +79,7 @@
         movl %e ## _reg ## x,%es;
 
 #define SAVE_ALL(_reg) \
-        __SAVE_ALL_PRE(_reg) \
+        __SAVE_ALL_PRE \
         SET_XEN_SEGMENTS(_reg) \
         1:
 
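A note on why the vm86 entry path calls setup_vm86_frame at all (my reading of the patch; the reason is not stated in it): Xen keeps the guest context at the very top of the ring-0 stack, with the four software-saved selector slots (XREGS_es..XREGS_gs) sitting above the hardware IRET frame. When the CPU enters from vm86 mode it pushes ES/DS/FS/GS itself, so the whole hardware frame starts 16 bytes lower than that layout expects; sliding the 18 words then on the stack (return address, seven GPRs, the error-code/vector word, and the nine-word vm86 frame) up by 16 bytes realigns it. A toy model of that move, under the same assumption:

```c
#include <string.h>

/*
 * Toy model (assumption, not Xen code): realigning a vm86 entry frame.
 * stack_words[0..17] hold the 18 words present when setup_vm86_frame is
 * called -- return address, 7 GPRs, the error-code/vector word, and the
 * 9-word hardware vm86 frame -- and the position 4 words (16 bytes)
 * towards the stack base is where the non-vm86 entry layout puts them,
 * leaving ES..GS in the top four slots.
 */
static void model_setup_vm86_frame(unsigned long *stack_words)
{
    /* memmove handles the overlap; the assembly macro gets the same effect
     * by copying the highest word first. */
    memmove(stack_words + 4, stack_words, 18 * sizeof(unsigned long));
}
```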
--- a/xen/include/asm-x86/x86_32/regs.h	Sat Nov 27 13:05:31 2004 +0000
+++ b/xen/include/asm-x86/x86_32/regs.h	Sat Nov 27 15:06:37 2004 +0000
@@ -49,4 +49,10 @@ enum EFLAGS {
     EF_ID   = 0x00200000,        /* id */
 };
 
+#define VM86_MODE(_r) ((_r)->eflags & EF_VM)
+#define RING_0(_r)    (((_r)->cs & 3) == 0)
+#define RING_1(_r)    (((_r)->cs & 3) == 1)
+#define RING_2(_r)    (((_r)->cs & 3) == 2)
+#define RING_3(_r)    (((_r)->cs & 3) == 3)
+
 #endif
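These helpers are what the rest of the changeset builds on: VM86_MODE() has to be consulted before any RING_n() test, because a real-mode CS such as 0xf000 has zero low bits even though the CPU is nowhere near ring 0. A tiny self-contained demonstration (the struct and selector value are illustrative stand-ins, not Xen's definitions):

```c
#include <assert.h>

/* Illustrative stand-ins for Xen's struct xen_regs and EF_VM. */
#define EF_VM 0x00020000
struct fake_regs { unsigned long eflags, cs; };

#define VM86_MODE(_r) ((_r)->eflags & EF_VM)
#define RING_0(_r)    (((_r)->cs & 3) == 0)
#define RING_3(_r)    (((_r)->cs & 3) == 3)

int main(void)
{
    struct fake_regs r = { EF_VM, 0xf000 };  /* vm86 code in a BIOS segment */
    /* The RPL bits of a real-mode CS are meaningless: RING_0() "matches"
     * even though the guest is unprivileged, so VM86_MODE() must win. */
    assert(VM86_MODE(&r) && RING_0(&r) && !RING_3(&r));
    return 0;
}
```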
--- a/xen/include/public/xen.h	Sat Nov 27 13:05:31 2004 +0000
+++ b/xen/include/public/xen.h	Sat Nov 27 15:06:37 2004 +0000
@@ -48,6 +48,7 @@
 #define __HYPERVISOR_grant_table_op       20
 #define __HYPERVISOR_vm_assist            21
 #define __HYPERVISOR_update_va_mapping_otherdomain 22
+#define __HYPERVISOR_switch_vm86          23
 
 /*
  * MULTICALLS