ia64/xen-unstable
changeset 16304:b41333afc9cc
Merge with ia64/xen-unstable.hg
author      Keir Fraser <keir@xensource.com>
date        Fri Nov 02 09:16:57 2007 +0000 (2007-11-02)
parents     cbf8224779c6 2462265f09ae
children    3582a7a52c10
--- a/tools/console/daemon/io.c	Thu Nov 01 10:30:12 2007 -0600
+++ b/tools/console/daemon/io.c	Fri Nov 02 09:16:57 2007 +0000
@@ -628,7 +628,6 @@ static void shutdown_domain(struct domai
 	if (d->xce_handle != -1)
 		xc_evtchn_close(d->xce_handle);
 	d->xce_handle = -1;
-	cleanup_domain(d);
 }
 
 void enum_domains(void)
@@ -674,6 +673,9 @@ static void handle_tty_read(struct domai
 	struct xencons_interface *intf = dom->interface;
 	XENCONS_RING_IDX prod;
 
+	if (dom->is_dead)
+		return;
+
 	len = ring_free_bytes(dom);
 	if (len == 0)
 		return;
@@ -711,6 +713,9 @@ static void handle_tty_write(struct doma
 {
 	ssize_t len;
 
+	if (dom->is_dead)
+		return;
+
 	len = write(dom->tty_fd, dom->buffer.data + dom->buffer.consumed,
 		    dom->buffer.size - dom->buffer.consumed);
 	if (len < 1) {
@@ -734,6 +739,9 @@ static void handle_ring_read(struct doma
 {
 	evtchn_port_or_error_t port;
 
+	if (dom->is_dead)
+		return;
+
 	if ((port = xc_evtchn_pending(dom->xce_handle)) == -1)
 		return;
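Note (illustration, not part of the changeset): the console daemon no longer tears the domain structure down from shutdown_domain(); instead each I/O handler checks an is_dead flag before touching the ring or tty. A minimal sketch of that guard pattern, with every name other than is_dead and xce_handle treated as hypothetical:

    /* Hedged sketch: the domain context outlives shutdown, and handlers
     * bail out early instead of dereferencing torn-down resources. */
    struct domain_ctx {
        int is_dead;       /* set once shutdown has run */
        int xce_handle;    /* -1 after the event channel is closed */
        int tty_fd;
    };

    static void handle_io_event(struct domain_ctx *dom)
    {
        if (dom->is_dead)  /* nothing safe left to do for this domain */
            return;
        /* ... normal ring/tty processing would follow here ... */
    }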
--- a/tools/ioemu/target-i386-dm/helper2.c	Thu Nov 01 10:30:12 2007 -0600
+++ b/tools/ioemu/target-i386-dm/helper2.c	Fri Nov 02 09:16:57 2007 +0000
@@ -553,20 +553,17 @@ void __handle_buffered_iopage(CPUState *
                                            IOREQ_BUFFER_SLOT_NUM];
         req.size = 1UL << buf_req->size;
         req.count = 1;
+        req.addr = buf_req->addr;
         req.data = buf_req->data;
         req.state = STATE_IOREQ_READY;
         req.dir = buf_req->dir;
         req.type = buf_req->type;
         qw = req.size == 8;
         if (qw) {
-            req.data |= ((uint64_t)buf_req->addr) << 16;
             buf_req = &buffered_io_page->buf_ioreq[(buffered_io_page->read_pointer+1) %
                                                    IOREQ_BUFFER_SLOT_NUM];
             req.data |= ((uint64_t)buf_req->data) << 32;
-            req.data |= ((uint64_t)buf_req->addr) << 48;
         }
-        else
-            req.addr = buf_req->addr;
 
         __handle_ioreq(env, &req);
 
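Note (illustration, not part of the changeset): the helper2.c hunk stops packing the I/O address into the data word and records it in req.addr unconditionally; for an 8-byte request the data still spans two consecutive buffer slots. A hedged sketch of the resulting reassembly, with the struct layout simplified for illustration only:

    #include <stdint.h>

    /* Simplified stand-in for the real buf_ioreq layout. */
    struct buf_ioreq { uint32_t addr; uint32_t data; };

    /* Combine one or two slots into the 64-bit payload; the address now
     * travels separately instead of occupying the upper data bits. */
    static uint64_t assemble_data(const struct buf_ioreq *slot0,
                                  const struct buf_ioreq *slot1, int qw)
    {
        uint64_t data = slot0->data;
        if (qw)
            data |= (uint64_t)slot1->data << 32;
        return data;
    }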
--- a/xen/arch/x86/acpi/suspend.c	Thu Nov 01 10:30:12 2007 -0600
+++ b/xen/arch/x86/acpi/suspend.c	Fri Nov 02 09:16:57 2007 +0000
@@ -29,9 +29,6 @@ void save_rest_processor_state(void)
 #endif
 }
 
-#define loaddebug(_v,_reg) \
-    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
-
 void restore_rest_processor_state(void)
 {
     int cpu = smp_processor_id();
@@ -54,15 +51,15 @@ void restore_rest_processor_state(void)
 #endif
 
     /* Maybe load the debug registers. */
+    BUG_ON(is_hvm_vcpu(v));
     if ( !is_idle_vcpu(v) && unlikely(v->arch.guest_context.debugreg[7]) )
     {
-        loaddebug(&v->arch.guest_context, 0);
-        loaddebug(&v->arch.guest_context, 1);
-        loaddebug(&v->arch.guest_context, 2);
-        loaddebug(&v->arch.guest_context, 3);
-        /* no 4 and 5 */
-        loaddebug(&v->arch.guest_context, 6);
-        loaddebug(&v->arch.guest_context, 7);
+        write_debugreg(0, v->arch.guest_context.debugreg[0]);
+        write_debugreg(1, v->arch.guest_context.debugreg[1]);
+        write_debugreg(2, v->arch.guest_context.debugreg[2]);
+        write_debugreg(3, v->arch.guest_context.debugreg[3]);
+        write_debugreg(6, v->arch.guest_context.debugreg[6]);
+        write_debugreg(7, v->arch.guest_context.debugreg[7]);
     }
 
     /* Reload FPU state on next FPU use. */
--- a/xen/arch/x86/domain.c	Thu Nov 01 10:30:12 2007 -0600
+++ b/xen/arch/x86/domain.c	Fri Nov 02 09:16:57 2007 +0000
@@ -687,14 +687,14 @@ int arch_set_info_guest(
     v->arch.guest_context.ctrlreg[4] =
         (cr4 == 0) ? mmu_cr4_features : pv_guest_cr4_fixup(cr4);
 
-    if ( v->is_initialised )
-        goto out;
-
     memset(v->arch.guest_context.debugreg, 0,
            sizeof(v->arch.guest_context.debugreg));
     for ( i = 0; i < 8; i++ )
         (void)set_debugreg(v, i, c(debugreg[i]));
 
+    if ( v->is_initialised )
+        goto out;
+
     if ( v->vcpu_id == 0 )
         d->vm_assist = c(vm_assist);
 
@@ -1210,6 +1210,15 @@ static inline void switch_kernel_stack(s
 static void paravirt_ctxt_switch_from(struct vcpu *v)
 {
     save_segments(v);
+
+    /*
+     * Disable debug breakpoints. We do this aggressively because if we switch
+     * to an HVM guest we may load DR0-DR3 with values that can cause #DE
+     * inside Xen, before we get a chance to reload DR7, and this cannot always
+     * safely be handled.
+     */
+    if ( unlikely(v->arch.guest_context.debugreg[7]) )
+        write_debugreg(7, 0);
 }
 
 static void paravirt_ctxt_switch_to(struct vcpu *v)
@@ -1219,11 +1228,18 @@ static void paravirt_ctxt_switch_to(stru
 
     if ( unlikely(read_cr4() != v->arch.guest_context.ctrlreg[4]) )
         write_cr4(v->arch.guest_context.ctrlreg[4]);
+
+    if ( unlikely(v->arch.guest_context.debugreg[7]) )
+    {
+        write_debugreg(0, v->arch.guest_context.debugreg[0]);
+        write_debugreg(1, v->arch.guest_context.debugreg[1]);
+        write_debugreg(2, v->arch.guest_context.debugreg[2]);
+        write_debugreg(3, v->arch.guest_context.debugreg[3]);
+        write_debugreg(6, v->arch.guest_context.debugreg[6]);
+        write_debugreg(7, v->arch.guest_context.debugreg[7]);
+    }
 }
 
-#define loaddebug(_v,_reg) \
-    asm volatile ( "mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]) )
-
 static void __context_switch(void)
 {
     struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
@@ -1248,18 +1264,6 @@ static void __context_switch(void)
     memcpy(stack_regs,
            &n->arch.guest_context.user_regs,
            CTXT_SWITCH_STACK_BYTES);
-
-    /* Maybe switch the debug registers. */
-    if ( unlikely(n->arch.guest_context.debugreg[7]) )
-    {
-        loaddebug(&n->arch.guest_context, 0);
-        loaddebug(&n->arch.guest_context, 1);
-        loaddebug(&n->arch.guest_context, 2);
-        loaddebug(&n->arch.guest_context, 3);
-        /* no 4 and 5 */
-        loaddebug(&n->arch.guest_context, 6);
-        loaddebug(&n->arch.guest_context, 7);
-    }
     n->arch.ctxt_switch_to(n);
 }
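Note (illustration, not part of the changeset): the domain.c hunks make PV debug-register handling lazy: on the way out of a vcpu, DR7 is zeroed so stale DR0-DR3 values cannot fire inside Xen; on the way in, the registers are reloaded only when the incoming vcpu actually has DR7 armed. A compressed sketch of that policy (function names shortened for illustration; the real code is in the two hunks above):

    static void switch_from(struct vcpu *v)
    {
        if ( unlikely(v->arch.guest_context.debugreg[7]) )
            write_debugreg(7, 0);    /* disarm breakpoints while in Xen */
    }

    static void switch_to(struct vcpu *v)
    {
        if ( unlikely(v->arch.guest_context.debugreg[7]) )
        {
            /* DR0-DR3 and DR6 first, DR7 last, matching the order above. */
            write_debugreg(0, v->arch.guest_context.debugreg[0]);
            /* ... DR1-DR3 and DR6 likewise ... */
            write_debugreg(7, v->arch.guest_context.debugreg[7]);
        }
    }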
--- a/xen/arch/x86/hvm/svm/svm.c	Thu Nov 01 10:30:12 2007 -0600
+++ b/xen/arch/x86/hvm/svm/svm.c	Fri Nov 02 09:16:57 2007 +0000
@@ -137,12 +137,6 @@ static enum handler_return long_mode_do_
     return HNDL_done;
 }
 
-
-#define loaddebug(_v,_reg) \
-    asm volatile ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
-#define savedebug(_v,_reg) \
-    asm volatile ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
-
 static void svm_save_dr(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -152,26 +146,45 @@ static void svm_save_dr(struct vcpu *v)
 
     /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
     v->arch.hvm_vcpu.flag_dr_dirty = 0;
-    v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
+    v->arch.hvm_svm.vmcb->dr_intercepts = ~0u;
 
-    savedebug(&v->arch.guest_context, 0);
-    savedebug(&v->arch.guest_context, 1);
-    savedebug(&v->arch.guest_context, 2);
-    savedebug(&v->arch.guest_context, 3);
+    v->arch.guest_context.debugreg[0] = read_debugreg(0);
+    v->arch.guest_context.debugreg[1] = read_debugreg(1);
+    v->arch.guest_context.debugreg[2] = read_debugreg(2);
+    v->arch.guest_context.debugreg[3] = read_debugreg(3);
     v->arch.guest_context.debugreg[6] = vmcb->dr6;
     v->arch.guest_context.debugreg[7] = vmcb->dr7;
 }
 
-
 static void __restore_debug_registers(struct vcpu *v)
 {
-    loaddebug(&v->arch.guest_context, 0);
-    loaddebug(&v->arch.guest_context, 1);
-    loaddebug(&v->arch.guest_context, 2);
-    loaddebug(&v->arch.guest_context, 3);
-    /* DR6 and DR7 are loaded from the VMCB. */
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
+    v->arch.hvm_vcpu.flag_dr_dirty = 1;
+    vmcb->dr_intercepts = 0;
+
+    write_debugreg(0, v->arch.guest_context.debugreg[0]);
+    write_debugreg(1, v->arch.guest_context.debugreg[1]);
+    write_debugreg(2, v->arch.guest_context.debugreg[2]);
+    write_debugreg(3, v->arch.guest_context.debugreg[3]);
+    vmcb->dr6 = v->arch.guest_context.debugreg[6];
+    vmcb->dr7 = v->arch.guest_context.debugreg[7];
 }
 
+/*
+ * DR7 is saved and restored on every vmexit. Other debug registers only
+ * need to be restored if their value is going to affect execution -- i.e.,
+ * if one of the breakpoints is enabled. So mask out all bits that don't
+ * enable some breakpoint functionality.
+ */
+#define DR7_ACTIVE_MASK 0xff
+
+static void svm_restore_dr(struct vcpu *v)
+{
+    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
+        __restore_debug_registers(v);
+}
 
 int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {
@@ -351,9 +364,6 @@ int svm_vmcb_restore(struct vcpu *v, str
         vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
     }
 
-    vmcb->dr6 = c->dr6;
-    vmcb->dr7 = c->dr7;
-
     if ( c->pending_valid )
     {
         gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
@@ -421,12 +431,6 @@ static int svm_load_vmcb_ctxt(struct vcp
     return 0;
 }
 
-static void svm_restore_dr(struct vcpu *v)
-{
-    if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
-        __restore_debug_registers(v);
-}
-
 static enum hvm_intblk svm_interrupt_blocked(
     struct vcpu *v, struct hvm_intack intack)
 {
@@ -1147,16 +1151,8 @@ static void set_reg(
 
 static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
     HVMTRACE_0D(DR_WRITE, v);
-
-    v->arch.hvm_vcpu.flag_dr_dirty = 1;
-
     __restore_debug_registers(v);
-
-    /* allow the guest full access to the debug registers */
-    vmcb->dr_intercepts = 0;
 }
 
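Note (illustration, not part of the changeset): DR7_ACTIVE_MASK is 0xff because bits 0-7 of DR7 are the per-breakpoint local/global enable bits (L0/G0 through L3/G3); if that byte is clear, no hardware breakpoint can trigger, so DR0-DR3 and DR6 need not be reloaded on this path. As a predicate:

    /* True when at least one of the four hardware breakpoints is enabled. */
    static inline int dr7_breakpoints_armed(unsigned long dr7)
    {
        return (dr7 & 0xff) != 0;    /* same test as DR7_ACTIVE_MASK */
    }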
--- a/xen/arch/x86/hvm/svm/vmcb.c	Thu Nov 01 10:30:12 2007 -0600
+++ b/xen/arch/x86/hvm/svm/vmcb.c	Fri Nov 02 09:16:57 2007 +0000
@@ -130,7 +130,7 @@ static int construct_vmcb(struct vcpu *v
         GENERAL2_INTERCEPT_SKINIT | GENERAL2_INTERCEPT_RDTSCP;
 
     /* Intercept all debug-register writes. */
-    vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
+    vmcb->dr_intercepts = ~0u;
 
     /* Intercept all control-register accesses except for CR2 and CR8. */
     vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
--- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 01 10:30:12 2007 -0600
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Nov 02 09:16:57 2007 +0000
@@ -381,11 +381,6 @@ static enum handler_return long_mode_do_
 
 #endif /* __i386__ */
 
-#define loaddebug(_v,_reg) \
-    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
-#define savedebug(_v,_reg) \
-    __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
-
 static int vmx_guest_x86_mode(struct vcpu *v)
 {
     unsigned int cs_ar_bytes;
@@ -411,25 +406,43 @@ static void vmx_save_dr(struct vcpu *v)
     v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
     __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
 
-    savedebug(&v->arch.guest_context, 0);
-    savedebug(&v->arch.guest_context, 1);
-    savedebug(&v->arch.guest_context, 2);
-    savedebug(&v->arch.guest_context, 3);
-    savedebug(&v->arch.guest_context, 6);
+    v->arch.guest_context.debugreg[0] = read_debugreg(0);
+    v->arch.guest_context.debugreg[1] = read_debugreg(1);
+    v->arch.guest_context.debugreg[2] = read_debugreg(2);
+    v->arch.guest_context.debugreg[3] = read_debugreg(3);
+    v->arch.guest_context.debugreg[6] = read_debugreg(6);
+    /* DR7 must be saved as it is used by vmx_restore_dr(). */
     v->arch.guest_context.debugreg[7] = __vmread(GUEST_DR7);
 }
 
 static void __restore_debug_registers(struct vcpu *v)
 {
-    loaddebug(&v->arch.guest_context, 0);
-    loaddebug(&v->arch.guest_context, 1);
-    loaddebug(&v->arch.guest_context, 2);
-    loaddebug(&v->arch.guest_context, 3);
-    /* No 4 and 5 */
-    loaddebug(&v->arch.guest_context, 6);
+    ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
+    v->arch.hvm_vcpu.flag_dr_dirty = 1;
+
+    write_debugreg(0, v->arch.guest_context.debugreg[0]);
+    write_debugreg(1, v->arch.guest_context.debugreg[1]);
+    write_debugreg(2, v->arch.guest_context.debugreg[2]);
+    write_debugreg(3, v->arch.guest_context.debugreg[3]);
+    write_debugreg(6, v->arch.guest_context.debugreg[6]);
     /* DR7 is loaded from the VMCS. */
 }
 
+/*
+ * DR7 is saved and restored on every vmexit. Other debug registers only
+ * need to be restored if their value is going to affect execution -- i.e.,
+ * if one of the breakpoints is enabled. So mask out all bits that don't
+ * enable some breakpoint functionality.
+ */
+#define DR7_ACTIVE_MASK 0xff
+
+static void vmx_restore_dr(struct vcpu *v)
+{
+    /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
+    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
+        __restore_debug_registers(v);
+}
+
 void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     uint32_t ev;
@@ -703,21 +716,6 @@ static int vmx_load_vmcs_ctxt(struct vcp
     return 0;
 }
 
-/*
- * DR7 is saved and restored on every vmexit. Other debug registers only
- * need to be restored if their value is going to affect execution -- i.e.,
- * if one of the breakpoints is enabled. So mask out all bits that don't
- * enable some breakpoint functionality.
- */
-#define DR7_ACTIVE_MASK 0xff
-
-static void vmx_restore_dr(struct vcpu *v)
-{
-    /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
-    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
-        __restore_debug_registers(v);
-}
-
 static void vmx_ctxt_switch_from(struct vcpu *v)
 {
     vmx_save_guest_msrs(v);
@@ -1322,15 +1320,12 @@ static void vmx_dr_access(unsigned long
 
     HVMTRACE_0D(DR_WRITE, v);
 
-    v->arch.hvm_vcpu.flag_dr_dirty = 1;
-
-    /* We could probably be smarter about this */
-    __restore_debug_registers(v);
+    if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+        __restore_debug_registers(v);
 
     /* Allow guest direct access to DR registers */
     v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-              v->arch.hvm_vmx.exec_control);
+    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
 }
 
 /*
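Note (illustration, not part of the changeset): both the SVM and VMX paths now follow the same dirty-flag protocol: the first guest access to a debug register restores the guest's DRs, sets flag_dr_dirty and drops the DR intercepts; the next save path writes the hardware values back, clears the flag and re-arms the intercepts. A hedged, standalone sketch of that state machine (helper names are illustrative, not Xen functions):

    struct dr_state {
        int dirty;               /* hardware currently holds guest values */
        unsigned long dr[8];     /* saved guest DR0-DR7 */
    };

    static void on_guest_dr_access(struct dr_state *s)
    {
        if (!s->dirty) {
            s->dirty = 1;        /* load guest DRs once, stop intercepting */
        }
    }

    static void on_vcpu_save(struct dr_state *s)
    {
        if (s->dirty) {
            s->dirty = 0;        /* read DRs back into s->dr, re-arm intercepts */
        }
    }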
--- a/xen/arch/x86/traps.c	Thu Nov 01 10:30:12 2007 -0600
+++ b/xen/arch/x86/traps.c	Fri Nov 02 09:16:57 2007 +0000
@@ -2493,50 +2493,44 @@ asmlinkage int do_device_not_available(s
 
 asmlinkage int do_debug(struct cpu_user_regs *regs)
 {
-    unsigned long condition;
     struct vcpu *v = current;
 
-    asm volatile ( "mov %%db6,%0" : "=r" (condition) );
-
-    /* Mask out spurious debug traps due to lazy DR7 setting */
-    if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
-         (v->arch.guest_context.debugreg[7] == 0) )
-    {
-        asm volatile ( "mov %0,%%db7" : : "r" (0UL) );
-        goto out;
-    }
-
     DEBUGGER_trap_entry(TRAP_debug, regs);
 
     if ( !guest_mode(regs) )
     {
+        if ( regs->eflags & EF_TF )
+        {
 #ifdef __x86_64__
-        void sysenter_entry(void);
-        void sysenter_eflags_saved(void);
-        /* In SYSENTER entry path we cannot zap TF until EFLAGS is saved. */
-        if ( (regs->rip >= (unsigned long)sysenter_entry) &&
-             (regs->rip < (unsigned long)sysenter_eflags_saved) )
-            goto out;
-        WARN_ON(regs->rip != (unsigned long)sysenter_eflags_saved);
+            void sysenter_entry(void);
+            void sysenter_eflags_saved(void);
+            /* In SYSENTER entry path we can't zap TF until EFLAGS is saved. */
+            if ( (regs->rip >= (unsigned long)sysenter_entry) &&
+                 (regs->rip < (unsigned long)sysenter_eflags_saved) )
+                goto out;
+            WARN_ON(regs->rip != (unsigned long)sysenter_eflags_saved);
 #else
-        WARN_ON(1);
+            WARN_ON(1);
 #endif
-        /* Clear TF just for absolute sanity. */
-        regs->eflags &= ~EF_TF;
-        /*
-         * We ignore watchpoints when they trigger within Xen. This may happen
-         * when a buffer is passed to us which previously had a watchpoint set
-         * on it. No need to bump EIP; the only faulting trap is an instruction
-         * breakpoint, which can't happen to us.
-         */
+            regs->eflags &= ~EF_TF;
+        }
+        else
+        {
+            /*
+             * We ignore watchpoints when they trigger within Xen. This may
+             * happen when a buffer is passed to us which previously had a
+             * watchpoint set on it. No need to bump EIP; the only faulting
+             * trap is an instruction breakpoint, which can't happen to us.
+             */
+            WARN_ON(!search_exception_table(regs->eip));
+        }
         goto out;
-    } 
+    }
 
     /* Save debug status register where guest OS can peek at it */
-    v->arch.guest_context.debugreg[6] = condition;
+    v->arch.guest_context.debugreg[6] = read_debugreg(6);
 
     ler_enable();
-
     return do_guest_trap(TRAP_debug, regs, 0);
 
 out:
@@ -2750,25 +2744,25 @@ long set_debugreg(struct vcpu *v, int re
         if ( !access_ok(value, sizeof(long)) )
             return -EPERM;
         if ( v == curr )
-            asm volatile ( "mov %0, %%db0" : : "r" (value) );
+            write_debugreg(0, value);
         break;
     case 1:
         if ( !access_ok(value, sizeof(long)) )
            return -EPERM;
        if ( v == curr )
-            asm volatile ( "mov %0, %%db1" : : "r" (value) );
+            write_debugreg(1, value);
        break;
    case 2:
        if ( !access_ok(value, sizeof(long)) )
            return -EPERM;
        if ( v == curr )
-            asm volatile ( "mov %0, %%db2" : : "r" (value) );
+            write_debugreg(2, value);
        break;
    case 3:
        if ( !access_ok(value, sizeof(long)) )
            return -EPERM;
        if ( v == curr )
-            asm volatile ( "mov %0, %%db3" : : "r" (value) );
+            write_debugreg(3, value);
        break;
    case 6:
        /*
@@ -2778,7 +2772,7 @@ long set_debugreg(struct vcpu *v, int re
        value &= 0xffffefff; /* reserved bits => 0 */
        value |= 0xffff0ff0; /* reserved bits => 1 */
        if ( v == curr )
-            asm volatile ( "mov %0, %%db6" : : "r" (value) );
+            write_debugreg(6, value);
        break;
    case 7:
        /*
@@ -2797,9 +2791,22 @@ long set_debugreg(struct vcpu *v, int re
            if ( (value & (1<<13)) != 0 ) return -EPERM;
            for ( i = 0; i < 16; i += 2 )
                if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
+            /*
+             * If DR7 was previously clear then we need to load all other
+             * debug registers at this point as they were not restored during
+             * context switch.
+             */
+            if ( (v == curr) && (v->arch.guest_context.debugreg[7] == 0) )
+            {
+                write_debugreg(0, v->arch.guest_context.debugreg[0]);
+                write_debugreg(1, v->arch.guest_context.debugreg[1]);
+                write_debugreg(2, v->arch.guest_context.debugreg[2]);
+                write_debugreg(3, v->arch.guest_context.debugreg[3]);
+                write_debugreg(6, v->arch.guest_context.debugreg[6]);
+            }
        }
-        if ( v == current )
-            asm volatile ( "mov %0, %%db7" : : "r" (value) );
+        if ( v == curr )
+            write_debugreg(7, value);
        break;
    default:
        return -EINVAL;
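Note (illustration, not part of the changeset): the new case-7 block in set_debugreg() exists because the context-switch path above no longer loads DR0-DR3/DR6 while DR7 is clear; arming DR7 from zero must therefore first push the other saved registers into hardware. A hedged sketch of the invariant (struct and helper names are hypothetical):

    struct dbg_ctx { unsigned long dr[8]; };

    static void load_hw_dr(int reg, unsigned long val) { /* mov to %dbN */ }

    static void arm_dr7(struct dbg_ctx *c, unsigned long new_dr7)
    {
        if (c->dr[7] == 0 && new_dr7 != 0) {
            load_hw_dr(0, c->dr[0]);   /* hardware DRs were stale until now */
            load_hw_dr(1, c->dr[1]);
            load_hw_dr(2, c->dr[2]);
            load_hw_dr(3, c->dr[3]);
            load_hw_dr(6, c->dr[6]);
        }
        load_hw_dr(7, new_dr7);        /* enable bits go in last */
        c->dr[7] = new_dr7;
    }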
--- a/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Nov 01 10:30:12 2007 -0600
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Fri Nov 02 09:16:57 2007 +0000
@@ -151,13 +151,6 @@ enum DRInterceptBits
     DR_INTERCEPT_DR15_WRITE = 1 << 31,
 };
 
-/* for lazy save/restore we'd like to intercept all DR writes */
-#define DR_INTERCEPT_ALL_WRITES \
-    (DR_INTERCEPT_DR0_WRITE|DR_INTERCEPT_DR1_WRITE|DR_INTERCEPT_DR2_WRITE \
-    |DR_INTERCEPT_DR3_WRITE|DR_INTERCEPT_DR4_WRITE|DR_INTERCEPT_DR5_WRITE \
-    |DR_INTERCEPT_DR6_WRITE|DR_INTERCEPT_DR7_WRITE)
-
-
 enum VMEXIT_EXITCODE
 {
     /* control register read exitcodes */
--- a/xen/include/asm-x86/processor.h	Thu Nov 01 10:30:12 2007 -0600
+++ b/xen/include/asm-x86/processor.h	Fri Nov 02 09:16:57 2007 +0000
@@ -481,6 +481,15 @@ long set_gdt(struct vcpu *d,
              unsigned long *frames,
              unsigned int entries);
 
+#define write_debugreg(reg, val) do {                       \
+    unsigned long __val = val;                              \
+    asm volatile ( "mov %0,%%db" #reg : : "r" (__val) );    \
+} while (0)
+#define read_debugreg(reg) ({                               \
+    unsigned long __val;                                    \
+    asm volatile ( "mov %%db" #reg ",%0" : "=r" (__val) );  \
+    __val;                                                  \
+})
 long set_debugreg(struct vcpu *p, int reg, unsigned long value);
 
 struct microcode_header {
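Note (illustration, not part of the changeset): write_debugreg() and read_debugreg() token-paste the register number into the "mov %%dbN" mnemonic, so the reg argument must be a plain literal (0-3, 6 or 7). A minimal usage sketch, mirroring the DR6 sanitising done in set_debugreg() above:

    unsigned long dr6 = read_debugreg(6);   /* snapshot %db6 */
    dr6 &= 0xffffefff;                      /* reserved bits => 0 */
    dr6 |= 0xffff0ff0;                      /* reserved bits => 1 */
    write_debugreg(6, dr6);                 /* write the cleaned value back */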