ia64/xen-unstable
changeset 12479:3fa6635d04b9
[XEN] Various selector and callback cleanups to simplify the tools
and assumptions about callback selector values on x86/32.
Original patch from Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kfraser@localhost.localdomain |
---|---|
date | Wed Nov 15 18:41:06 2006 +0000 (2006-11-15) |
parents | a9d2106313fa |
children | 66a169df22c5 |
files | tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/traps.c xen/arch/x86/x86_32/entry.S xen/include/asm-x86/x86_32/regs.h xen/include/asm-x86/x86_64/regs.h xen/include/xen/sched.h |
line diff
1.1 --- a/tools/libxc/xc_linux_build.c Wed Nov 15 16:53:43 2006 +0000 1.2 +++ b/tools/libxc/xc_linux_build.c Wed Nov 15 18:41:06 2006 +0000 1.3 @@ -1106,7 +1106,7 @@ static int xc_linux_build_internal(int x 1.4 { 1.5 struct xen_domctl launch_domctl; 1.6 DECLARE_DOMCTL; 1.7 - int rc, i; 1.8 + int rc; 1.9 struct vcpu_guest_context st_ctxt, *ctxt = &st_ctxt; 1.10 unsigned long vstartinfo_start, vkern_entry, vstack_start; 1.11 uint32_t features_bitmap[XENFEAT_NR_SUBMAPS] = { 0, }; 1.12 @@ -1180,21 +1180,8 @@ static int xc_linux_build_internal(int x 1.13 1.14 ctxt->flags = VGCF_IN_KERNEL; 1.15 1.16 - /* Virtual IDT is empty at start-of-day. */ 1.17 - for ( i = 0; i < 256; i++ ) 1.18 - { 1.19 - ctxt->trap_ctxt[i].vector = i; 1.20 - ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS; 1.21 - } 1.22 - 1.23 - /* Ring 1 stack is the initial stack. */ 1.24 - ctxt->kernel_ss = FLAT_KERNEL_SS; 1.25 - ctxt->kernel_sp = vstack_start + PAGE_SIZE; 1.26 - 1.27 -#if defined(__i386__) 1.28 - ctxt->event_callback_cs = FLAT_KERNEL_CS; 1.29 - ctxt->failsafe_callback_cs = FLAT_KERNEL_CS; 1.30 -#endif 1.31 + ctxt->kernel_ss = ctxt->user_regs.ss; 1.32 + ctxt->kernel_sp = ctxt->user_regs.esp; 1.33 #endif /* x86 */ 1.34 1.35 memset(&launch_domctl, 0, sizeof(launch_domctl));
2.1 --- a/tools/libxc/xc_linux_restore.c Wed Nov 15 16:53:43 2006 +0000 2.2 +++ b/tools/libxc/xc_linux_restore.c Wed Nov 15 18:41:06 2006 +0000 2.3 @@ -774,39 +774,6 @@ int xc_linux_restore(int xc_handle, int 2.4 memcpy(live_p2m, p2m, P2M_SIZE); 2.5 munmap(live_p2m, P2M_SIZE); 2.6 2.7 - /* 2.8 - * Safety checking of saved context: 2.9 - * 1. user_regs is fine, as Xen checks that on context switch. 2.10 - * 2. fpu_ctxt is fine, as it can't hurt Xen. 2.11 - * 3. trap_ctxt needs the code selectors checked. 2.12 - * 4. ldt base must be page-aligned, no more than 8192 ents, ... 2.13 - * 5. gdt already done, and further checking is done by Xen. 2.14 - * 6. check that kernel_ss is safe. 2.15 - * 7. pt_base is already done. 2.16 - * 8. debugregs are checked by Xen. 2.17 - * 9. callback code selectors need checking. 2.18 - */ 2.19 - for ( i = 0; i < 256; i++ ) { 2.20 - ctxt.trap_ctxt[i].vector = i; 2.21 - if ((ctxt.trap_ctxt[i].cs & 3) == 0) 2.22 - ctxt.trap_ctxt[i].cs = FLAT_KERNEL_CS; 2.23 - } 2.24 - if ((ctxt.kernel_ss & 3) == 0) 2.25 - ctxt.kernel_ss = FLAT_KERNEL_DS; 2.26 -#if defined(__i386__) 2.27 - if ((ctxt.event_callback_cs & 3) == 0) 2.28 - ctxt.event_callback_cs = FLAT_KERNEL_CS; 2.29 - if ((ctxt.failsafe_callback_cs & 3) == 0) 2.30 - ctxt.failsafe_callback_cs = FLAT_KERNEL_CS; 2.31 -#endif 2.32 - if (((ctxt.ldt_base & (PAGE_SIZE - 1)) != 0) || 2.33 - (ctxt.ldt_ents > 8192) || 2.34 - (ctxt.ldt_base > hvirt_start) || 2.35 - ((ctxt.ldt_base + ctxt.ldt_ents*8) > hvirt_start)) { 2.36 - ERROR("Bad LDT base or size"); 2.37 - goto out; 2.38 - } 2.39 - 2.40 DPRINTF("Domain ready to be built.\n"); 2.41 2.42 domctl.cmd = XEN_DOMCTL_setvcpucontext;
3.1 --- a/xen/arch/x86/domain.c Wed Nov 15 16:53:43 2006 +0000 3.2 +++ b/xen/arch/x86/domain.c Wed Nov 15 18:41:06 2006 +0000 3.3 @@ -294,6 +294,12 @@ int arch_set_info_guest( 3.4 3.5 for ( i = 0; i < 256; i++ ) 3.6 fixup_guest_code_selector(c->trap_ctxt[i].cs); 3.7 + 3.8 + /* LDT safety checks. */ 3.9 + if ( ((c->ldt_base & (PAGE_SIZE-1)) != 0) || 3.10 + (c->ldt_ents > 8192) || 3.11 + !array_access_ok(c->ldt_base, c->ldt_ents, LDT_ENTRY_SIZE) ) 3.12 + return -EINVAL; 3.13 } 3.14 3.15 clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags); 3.16 @@ -423,33 +429,6 @@ arch_do_vcpu_op( 3.17 return rc; 3.18 } 3.19 3.20 -void new_thread(struct vcpu *d, 3.21 - unsigned long start_pc, 3.22 - unsigned long start_stack, 3.23 - unsigned long start_info) 3.24 -{ 3.25 - struct cpu_user_regs *regs = &d->arch.guest_context.user_regs; 3.26 - 3.27 - /* 3.28 - * Initial register values: 3.29 - * DS,ES,FS,GS = FLAT_KERNEL_DS 3.30 - * CS:EIP = FLAT_KERNEL_CS:start_pc 3.31 - * SS:ESP = FLAT_KERNEL_SS:start_stack 3.32 - * ESI = start_info 3.33 - * [EAX,EBX,ECX,EDX,EDI,EBP are zero] 3.34 - */ 3.35 - regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS; 3.36 - regs->ss = FLAT_KERNEL_SS; 3.37 - regs->cs = FLAT_KERNEL_CS; 3.38 - regs->eip = start_pc; 3.39 - regs->esp = start_stack; 3.40 - regs->esi = start_info; 3.41 - 3.42 - __save_flags(regs->eflags); 3.43 - regs->eflags |= X86_EFLAGS_IF; 3.44 -} 3.45 - 3.46 - 3.47 #ifdef __x86_64__ 3.48 3.49 #define loadsegment(seg,value) ({ \
4.1 --- a/xen/arch/x86/domain_build.c Wed Nov 15 16:53:43 2006 +0000 4.2 +++ b/xen/arch/x86/domain_build.c Wed Nov 15 18:41:06 2006 +0000 4.3 @@ -249,6 +249,7 @@ int construct_dom0(struct domain *d, 4.4 char *cmdline) 4.5 { 4.6 int i, rc, dom0_pae, xen_pae, order; 4.7 + struct cpu_user_regs *regs; 4.8 unsigned long pfn, mfn; 4.9 unsigned long nr_pages; 4.10 unsigned long nr_pt_pages; 4.11 @@ -441,19 +442,7 @@ int construct_dom0(struct domain *d, 4.12 mpt_alloc = (vpt_start - dsi.v_start) + 4.13 (unsigned long)pfn_to_paddr(alloc_spfn); 4.14 4.15 - /* 4.16 - * We're basically forcing default RPLs to 1, so that our "what privilege 4.17 - * level are we returning to?" logic works. 4.18 - */ 4.19 - v->arch.guest_context.kernel_ss = FLAT_KERNEL_SS; 4.20 - for ( i = 0; i < 256; i++ ) 4.21 - v->arch.guest_context.trap_ctxt[i].cs = FLAT_KERNEL_CS; 4.22 - 4.23 #if defined(__i386__) 4.24 - 4.25 - v->arch.guest_context.failsafe_callback_cs = FLAT_KERNEL_CS; 4.26 - v->arch.guest_context.event_callback_cs = FLAT_KERNEL_CS; 4.27 - 4.28 /* 4.29 * Protect the lowest 1GB of memory. We use a temporary mapping there 4.30 * from which we copy the kernel and ramdisk images. 
4.31 @@ -816,7 +805,22 @@ int construct_dom0(struct domain *d, 4.32 4.33 set_bit(_VCPUF_initialised, &v->vcpu_flags); 4.34 4.35 - new_thread(v, dsi.v_kernentry, vstack_end, vstartinfo_start); 4.36 + /* 4.37 + * Initial register values: 4.38 + * DS,ES,FS,GS = FLAT_KERNEL_DS 4.39 + * CS:EIP = FLAT_KERNEL_CS:start_pc 4.40 + * SS:ESP = FLAT_KERNEL_SS:start_stack 4.41 + * ESI = start_info 4.42 + * [EAX,EBX,ECX,EDX,EDI,EBP are zero] 4.43 + */ 4.44 + regs = &d->arch.guest_context.user_regs; 4.45 + regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS; 4.46 + regs->ss = FLAT_KERNEL_SS; 4.47 + regs->cs = FLAT_KERNEL_CS; 4.48 + regs->eip = dsi.v_kernentry; 4.49 + regs->esp = vstack_end; 4.50 + regs->esi = vstartinfo_start; 4.51 + regs->eflags = X86_EFLAGS_IF; 4.52 4.53 if ( opt_dom0_shadow ) 4.54 if ( shadow_test_enable(d) == 0 )
5.1 --- a/xen/arch/x86/traps.c Wed Nov 15 16:53:43 2006 +0000 5.2 +++ b/xen/arch/x86/traps.c Wed Nov 15 18:41:06 2006 +0000 5.3 @@ -331,14 +331,9 @@ void show_execution_state(struct cpu_use 5.4 show_stack(regs); 5.5 } 5.6 5.7 -/* 5.8 - * This is called for faults at very unexpected times (e.g., when interrupts 5.9 - * are disabled). In such situations we can't do much that is safe. We try to 5.10 - * print out some tracing and then we just spin. 5.11 - */ 5.12 -asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs) 5.13 +char *trapstr(int trapnr) 5.14 { 5.15 - static char *trapstr[] = { 5.16 + static char *strings[] = { 5.17 "divide error", "debug", "nmi", "bkpt", "overflow", "bounds", 5.18 "invalid opcode", "device not available", "double fault", 5.19 "coprocessor segment", "invalid tss", "segment not found", 5.20 @@ -347,6 +342,19 @@ asmlinkage void fatal_trap(int trapnr, s 5.21 "machine check", "simd error" 5.22 }; 5.23 5.24 + if ( (trapnr < 0) || (trapnr >= ARRAY_SIZE(strings)) ) 5.25 + return "???"; 5.26 + 5.27 + return strings[trapnr]; 5.28 +} 5.29 + 5.30 +/* 5.31 + * This is called for faults at very unexpected times (e.g., when interrupts 5.32 + * are disabled). In such situations we can't do much that is safe. We try to 5.33 + * print out some tracing and then we just spin. 5.34 + */ 5.35 +asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs) 5.36 +{ 5.37 watchdog_disable(); 5.38 console_start_sync(); 5.39 5.40 @@ -361,38 +369,51 @@ asmlinkage void fatal_trap(int trapnr, s 5.41 5.42 panic("FATAL TRAP: vector = %d (%s)\n" 5.43 "[error_code=%04x] %s\n", 5.44 - trapnr, trapstr[trapnr], regs->error_code, 5.45 + trapnr, trapstr(trapnr), regs->error_code, 5.46 (regs->eflags & X86_EFLAGS_IF) ? 
"" : ", IN INTERRUPT CONTEXT"); 5.47 } 5.48 5.49 -static inline int do_trap(int trapnr, char *str, 5.50 - struct cpu_user_regs *regs, 5.51 - int use_error_code) 5.52 +static int do_guest_trap( 5.53 + int trapnr, const struct cpu_user_regs *regs, int use_error_code) 5.54 { 5.55 struct vcpu *v = current; 5.56 - struct trap_bounce *tb = &v->arch.trap_bounce; 5.57 - struct trap_info *ti; 5.58 - unsigned long fixup; 5.59 + struct trap_bounce *tb; 5.60 + const struct trap_info *ti; 5.61 5.62 - DEBUGGER_trap_entry(trapnr, regs); 5.63 + tb = &v->arch.trap_bounce; 5.64 + ti = &v->arch.guest_context.trap_ctxt[trapnr]; 5.65 5.66 - if ( !guest_mode(regs) ) 5.67 - goto xen_fault; 5.68 - 5.69 - ti = ¤t->arch.guest_context.trap_ctxt[trapnr]; 5.70 tb->flags = TBF_EXCEPTION; 5.71 tb->cs = ti->cs; 5.72 tb->eip = ti->address; 5.73 + 5.74 if ( use_error_code ) 5.75 { 5.76 tb->flags |= TBF_EXCEPTION_ERRCODE; 5.77 tb->error_code = regs->error_code; 5.78 } 5.79 + 5.80 if ( TI_GET_IF(ti) ) 5.81 tb->flags |= TBF_INTERRUPT; 5.82 + 5.83 + if ( unlikely(null_trap_bounce(tb)) ) 5.84 + gdprintk(XENLOG_WARNING, "Unhandled %s fault/trap [#%d] in " 5.85 + "domain %d on VCPU %d [ec=%04x]\n", 5.86 + trapstr(trapnr), trapnr, v->domain->domain_id, v->vcpu_id, 5.87 + regs->error_code); 5.88 + 5.89 return 0; 5.90 +} 5.91 5.92 - xen_fault: 5.93 +static inline int do_trap( 5.94 + int trapnr, struct cpu_user_regs *regs, int use_error_code) 5.95 +{ 5.96 + unsigned long fixup; 5.97 + 5.98 + DEBUGGER_trap_entry(trapnr, regs); 5.99 + 5.100 + if ( guest_mode(regs) ) 5.101 + return do_guest_trap(trapnr, regs, use_error_code); 5.102 5.103 if ( likely((fixup = search_exception_table(regs->eip)) != 0) ) 5.104 { 5.105 @@ -407,32 +428,32 @@ static inline int do_trap(int trapnr, ch 5.106 show_execution_state(regs); 5.107 panic("FATAL TRAP: vector = %d (%s)\n" 5.108 "[error_code=%04x]\n", 5.109 - trapnr, str, regs->error_code); 5.110 + trapnr, trapstr(trapnr), regs->error_code); 5.111 return 0; 5.112 } 5.113 5.114 
-#define DO_ERROR_NOCODE(trapnr, str, name) \ 5.115 +#define DO_ERROR_NOCODE(trapnr, name) \ 5.116 asmlinkage int do_##name(struct cpu_user_regs *regs) \ 5.117 { \ 5.118 - return do_trap(trapnr, str, regs, 0); \ 5.119 + return do_trap(trapnr, regs, 0); \ 5.120 } 5.121 5.122 -#define DO_ERROR(trapnr, str, name) \ 5.123 +#define DO_ERROR(trapnr, name) \ 5.124 asmlinkage int do_##name(struct cpu_user_regs *regs) \ 5.125 { \ 5.126 - return do_trap(trapnr, str, regs, 1); \ 5.127 + return do_trap(trapnr, regs, 1); \ 5.128 } 5.129 5.130 -DO_ERROR_NOCODE( 0, "divide error", divide_error) 5.131 -DO_ERROR_NOCODE( 4, "overflow", overflow) 5.132 -DO_ERROR_NOCODE( 5, "bounds", bounds) 5.133 -DO_ERROR_NOCODE( 9, "coprocessor segment overrun", coprocessor_segment_overrun) 5.134 -DO_ERROR(10, "invalid TSS", invalid_TSS) 5.135 -DO_ERROR(11, "segment not present", segment_not_present) 5.136 -DO_ERROR(12, "stack segment", stack_segment) 5.137 -DO_ERROR_NOCODE(16, "fpu error", coprocessor_error) 5.138 -DO_ERROR(17, "alignment check", alignment_check) 5.139 -DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error) 5.140 +DO_ERROR_NOCODE(TRAP_divide_error, divide_error) 5.141 +DO_ERROR_NOCODE(TRAP_overflow, overflow) 5.142 +DO_ERROR_NOCODE(TRAP_bounds, bounds) 5.143 +DO_ERROR_NOCODE(TRAP_copro_seg, coprocessor_segment_overrun) 5.144 +DO_ERROR( TRAP_invalid_tss, invalid_TSS) 5.145 +DO_ERROR( TRAP_no_segment, segment_not_present) 5.146 +DO_ERROR( TRAP_stack_error, stack_segment) 5.147 +DO_ERROR_NOCODE(TRAP_copro_error, coprocessor_error) 5.148 +DO_ERROR( TRAP_alignment_check, alignment_check) 5.149 +DO_ERROR_NOCODE(TRAP_simd_error, simd_coprocessor_error) 5.150 5.151 int rdmsr_hypervisor_regs( 5.152 uint32_t idx, uint32_t *eax, uint32_t *edx) 5.153 @@ -599,9 +620,6 @@ static int emulate_forced_invalid_op(str 5.154 5.155 asmlinkage int do_invalid_op(struct cpu_user_regs *regs) 5.156 { 5.157 - struct vcpu *v = current; 5.158 - struct trap_bounce *tb = &v->arch.trap_bounce; 5.159 - struct 
trap_info *ti; 5.160 int rc; 5.161 5.162 DEBUGGER_trap_entry(TRAP_invalid_op, regs); 5.163 @@ -625,22 +643,11 @@ asmlinkage int do_invalid_op(struct cpu_ 5.164 if ( (rc = emulate_forced_invalid_op(regs)) != 0 ) 5.165 return rc; 5.166 5.167 - ti = &current->arch.guest_context.trap_ctxt[TRAP_invalid_op]; 5.168 - tb->flags = TBF_EXCEPTION; 5.169 - tb->cs = ti->cs; 5.170 - tb->eip = ti->address; 5.171 - if ( TI_GET_IF(ti) ) 5.172 - tb->flags |= TBF_INTERRUPT; 5.173 - 5.174 - return 0; 5.175 + return do_guest_trap(TRAP_invalid_op, regs, 0); 5.176 } 5.177 5.178 asmlinkage int do_int3(struct cpu_user_regs *regs) 5.179 { 5.180 - struct vcpu *v = current; 5.181 - struct trap_bounce *tb = &v->arch.trap_bounce; 5.182 - struct trap_info *ti; 5.183 - 5.184 DEBUGGER_trap_entry(TRAP_int3, regs); 5.185 5.186 if ( !guest_mode(regs) ) 5.187 @@ -650,14 +657,7 @@ asmlinkage int do_int3(struct cpu_user_r 5.188 panic("FATAL TRAP: vector = 3 (Int3)\n"); 5.189 } 5.190 5.191 - ti = &current->arch.guest_context.trap_ctxt[TRAP_int3]; 5.192 - tb->flags = TBF_EXCEPTION; 5.193 - tb->cs = ti->cs; 5.194 - tb->eip = ti->address; 5.195 - if ( TI_GET_IF(ti) ) 5.196 - tb->flags |= TBF_INTERRUPT; 5.197 - 5.198 - return 0; 5.199 + return do_guest_trap(TRAP_int3, regs, 0); 5.200 } 5.201 5.202 asmlinkage int do_machine_check(struct cpu_user_regs *regs) 5.203 @@ -687,6 +687,12 @@ void propagate_page_fault(unsigned long 5.204 tb->eip = ti->address; 5.205 if ( TI_GET_IF(ti) ) 5.206 tb->flags |= TBF_INTERRUPT; 5.207 + if ( unlikely(null_trap_bounce(tb)) ) 5.208 + { 5.209 + printk("Unhandled page fault in domain %d on VCPU %d (ec=%04X)\n", 5.210 + v->domain->domain_id, v->vcpu_id, error_code); 5.211 + show_page_walk(addr); 5.212 + } 5.213 } 5.214 5.215 static int handle_gdt_ldt_mapping_fault( 5.216 @@ -1481,8 +1487,6 @@ static int emulate_privileged_op(struct 5.217 asmlinkage int do_general_protection(struct cpu_user_regs *regs) 5.218 { 5.219 struct vcpu *v = current; 5.220 - struct trap_bounce *tb = 
&v->arch.trap_bounce; 5.221 - struct trap_info *ti; 5.222 unsigned long fixup; 5.223 5.224 DEBUGGER_trap_entry(TRAP_gp_fault, regs); 5.225 @@ -1516,12 +1520,13 @@ asmlinkage int do_general_protection(str 5.226 if ( (regs->error_code & 3) == 2 ) 5.227 { 5.228 /* This fault must be due to <INT n> instruction. */ 5.229 - ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3]; 5.230 + const struct trap_info *ti; 5.231 + unsigned char vector = regs->error_code >> 3; 5.232 + ti = &v->arch.guest_context.trap_ctxt[vector]; 5.233 if ( permit_softint(TI_GET_DPL(ti), v, regs) ) 5.234 { 5.235 - tb->flags = TBF_EXCEPTION; 5.236 regs->eip += 2; 5.237 - goto finish_propagation; 5.238 + return do_guest_trap(vector, regs, 0); 5.239 } 5.240 } 5.241 5.242 @@ -1538,15 +1543,7 @@ asmlinkage int do_general_protection(str 5.243 #endif 5.244 5.245 /* Pass on GPF as is. */ 5.246 - ti = &current->arch.guest_context.trap_ctxt[TRAP_gp_fault]; 5.247 - tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE; 5.248 - tb->error_code = regs->error_code; 5.249 - finish_propagation: 5.250 - tb->cs = ti->cs; 5.251 - tb->eip = ti->address; 5.252 - if ( TI_GET_IF(ti) ) 5.253 - tb->flags |= TBF_INTERRUPT; 5.254 - return 0; 5.255 + return do_guest_trap(TRAP_gp_fault, regs, 1); 5.256 5.257 gp_in_kernel: 5.258 5.259 @@ -1684,22 +1681,11 @@ void unset_nmi_callback(void) 5.260 5.261 asmlinkage int math_state_restore(struct cpu_user_regs *regs) 5.262 { 5.263 - struct trap_bounce *tb; 5.264 - struct trap_info *ti; 5.265 - 5.266 setup_fpu(current); 5.267 5.268 if ( current->arch.guest_context.ctrlreg[0] & X86_CR0_TS ) 5.269 { 5.270 - tb = &current->arch.trap_bounce; 5.271 - ti = &current->arch.guest_context.trap_ctxt[TRAP_no_device]; 5.272 - 5.273 - tb->flags = TBF_EXCEPTION; 5.274 - tb->cs = ti->cs; 5.275 - tb->eip = ti->address; 5.276 - if ( TI_GET_IF(ti) ) 5.277 - tb->flags |= TBF_INTERRUPT; 5.278 - 5.279 + do_guest_trap(TRAP_no_device, regs, 0); 5.280 current->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS; 5.281 } 5.282 5.283 
@@ -1710,8 +1696,6 @@ asmlinkage int do_debug(struct cpu_user_ 5.284 { 5.285 unsigned long condition; 5.286 struct vcpu *v = current; 5.287 - struct trap_bounce *tb = &v->arch.trap_bounce; 5.288 - struct trap_info *ti; 5.289 5.290 __asm__ __volatile__("mov %%db6,%0" : "=r" (condition)); 5.291 5.292 @@ -1741,12 +1725,7 @@ asmlinkage int do_debug(struct cpu_user_ 5.293 /* Save debug status register where guest OS can peek at it */ 5.294 v->arch.guest_context.debugreg[6] = condition; 5.295 5.296 - ti = &v->arch.guest_context.trap_ctxt[TRAP_debug]; 5.297 - tb->flags = TBF_EXCEPTION; 5.298 - tb->cs = ti->cs; 5.299 - tb->eip = ti->address; 5.300 - if ( TI_GET_IF(ti) ) 5.301 - tb->flags |= TBF_INTERRUPT; 5.302 + return do_guest_trap(TRAP_debug, regs, 0); 5.303 5.304 out: 5.305 return EXCRET_not_a_fault;
6.1 --- a/xen/arch/x86/x86_32/entry.S Wed Nov 15 16:53:43 2006 +0000 6.2 +++ b/xen/arch/x86/x86_32/entry.S Wed Nov 15 18:41:06 2006 +0000 6.3 @@ -373,10 +373,11 @@ nvm86_3:/* Rewrite our stack frame and r 6.4 mov %gs,UREGS_ss+4(%esp) 6.5 movl %esi,UREGS_esp+4(%esp) 6.6 movzwl TRAPBOUNCE_cs(%edx),%eax 6.7 + /* Null selectors (0-3) are not allowed. */ 6.8 + testl $~3,%eax 6.9 + jz domain_crash_synchronous 6.10 movl %eax,UREGS_cs+4(%esp) 6.11 movl TRAPBOUNCE_eip(%edx),%eax 6.12 - test %eax,%eax 6.13 - jz domain_crash_synchronous 6.14 movl %eax,UREGS_eip+4(%esp) 6.15 movb $0,TRAPBOUNCE_flags(%edx) 6.16 ret
7.1 --- a/xen/include/asm-x86/x86_32/regs.h Wed Nov 15 16:53:43 2006 +0000 7.2 +++ b/xen/include/asm-x86/x86_32/regs.h Wed Nov 15 18:41:06 2006 +0000 7.3 @@ -16,6 +16,9 @@ 7.4 #define permit_softint(dpl, v, r) \ 7.5 ((dpl) >= (vm86_mode(r) ? 3 : ((r)->cs & 3))) 7.6 7.7 +/* Check for null trap callback handler: Is the selector null (0-3)? */ 7.8 +#define null_trap_bounce(tb) (((tb)->cs & ~3) == 0) 7.9 + 7.10 /* Number of bytes of on-stack execution state to be context-switched. */ 7.11 #define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs)) 7.12
8.1 --- a/xen/include/asm-x86/x86_64/regs.h Wed Nov 15 16:53:43 2006 +0000 8.2 +++ b/xen/include/asm-x86/x86_64/regs.h Wed Nov 15 18:41:06 2006 +0000 8.3 @@ -16,6 +16,9 @@ 8.4 #define permit_softint(dpl, v, r) \ 8.5 ((dpl) >= (guest_kernel_mode(v, r) ? 1 : 3)) 8.6 8.7 +/* Check for null trap callback handler: Is the EIP null? */ 8.8 +#define null_trap_bounce(tb) ((tb)->eip == 0) 8.9 + 8.10 /* Number of bytes of on-stack execution state to be context-switched. */ 8.11 /* NB. Segment registers and bases are not saved/restored on x86/64 stack. */ 8.12 #define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es))
9.1 --- a/xen/include/xen/sched.h Wed Nov 15 16:53:43 2006 +0000 9.2 +++ b/xen/include/xen/sched.h Wed Nov 15 18:41:06 2006 +0000 9.3 @@ -282,11 +282,6 @@ void __domain_crash_synchronous(void) __ 9.4 __domain_crash_synchronous(); \ 9.5 } while (0) 9.6 9.7 -void new_thread(struct vcpu *d, 9.8 - unsigned long start_pc, 9.9 - unsigned long start_stack, 9.10 - unsigned long start_info); 9.11 - 9.12 #define set_current_state(_s) do { current->state = (_s); } while (0) 9.13 void scheduler_init(void); 9.14 void schedulers_start(void);