
changeset 16301:338f3c34e656

x86: Fix various problems with debug-register handling.

Introduce write_debugreg()/read_debugreg() accessors in place of the
open-coded loaddebug()/savedebug() macros that were duplicated across
four files, clear DR7 when context-switching away from a PV vcpu,
intercept debug-register reads as well as writes under SVM, and reload
the remaining debug registers when a guest arms a previously clear DR7
via set_debugreg().

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>

author    Keir Fraser <keir@xensource.com>
date      Thu Nov 01 16:16:25 2007 +0000 (2007-11-01)
parents   adefbadab27c
children  7cd040290f82
files     xen/arch/x86/acpi/suspend.c xen/arch/x86/domain.c
          xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c
          xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/traps.c
          xen/include/asm-x86/hvm/svm/vmcb.h xen/include/asm-x86/processor.h

     1.1 --- a/xen/arch/x86/acpi/suspend.c	Thu Nov 01 10:56:56 2007 +0000
     1.2 +++ b/xen/arch/x86/acpi/suspend.c	Thu Nov 01 16:16:25 2007 +0000
     1.3 @@ -29,9 +29,6 @@ void save_rest_processor_state(void)
     1.4  #endif
     1.5  }
     1.6  
     1.7 -#define loaddebug(_v,_reg) \
     1.8 -    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
     1.9 -
    1.10  void restore_rest_processor_state(void)
    1.11  {
    1.12      int cpu = smp_processor_id();
    1.13 @@ -54,15 +51,15 @@ void restore_rest_processor_state(void)
    1.14  #endif
    1.15  
    1.16      /* Maybe load the debug registers. */
    1.17 +    BUG_ON(is_hvm_vcpu(v));
    1.18      if ( !is_idle_vcpu(v) && unlikely(v->arch.guest_context.debugreg[7]) )
    1.19      {
    1.20 -        loaddebug(&v->arch.guest_context, 0);
    1.21 -        loaddebug(&v->arch.guest_context, 1);
    1.22 -        loaddebug(&v->arch.guest_context, 2);
    1.23 -        loaddebug(&v->arch.guest_context, 3);
    1.24 -        /* no 4 and 5 */
    1.25 -        loaddebug(&v->arch.guest_context, 6);
    1.26 -        loaddebug(&v->arch.guest_context, 7);
    1.27 +        write_debugreg(0, v->arch.guest_context.debugreg[0]);
    1.28 +        write_debugreg(1, v->arch.guest_context.debugreg[1]);
    1.29 +        write_debugreg(2, v->arch.guest_context.debugreg[2]);
    1.30 +        write_debugreg(3, v->arch.guest_context.debugreg[3]);
    1.31 +        write_debugreg(6, v->arch.guest_context.debugreg[6]);
    1.32 +        write_debugreg(7, v->arch.guest_context.debugreg[7]);
    1.33      }
    1.34  
    1.35      /* Reload FPU state on next FPU use. */
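
DR4 and DR5 are legacy aliases of DR6 and DR7 (they fault with #UD when
CR4.DE is set), which is why every save/restore sequence in this patch
touches only DR0-DR3, DR6 and DR7. The six-write pattern above recurs
throughout the patch; a minimal sketch of a helper that would consolidate
it, built on the write_debugreg() macro this changeset introduces (the
helper name restore_guest_debugregs() is hypothetical, not part of the
patch):

    /* Hypothetical consolidation of the recurring restore sequence.
     * DR4/DR5 are skipped: they merely alias DR6/DR7.  DR7 is written
     * last so breakpoints only arm once DR0-DR3 hold valid addresses. */
    static inline void restore_guest_debugregs(const struct vcpu *v)
    {
        write_debugreg(0, v->arch.guest_context.debugreg[0]);
        write_debugreg(1, v->arch.guest_context.debugreg[1]);
        write_debugreg(2, v->arch.guest_context.debugreg[2]);
        write_debugreg(3, v->arch.guest_context.debugreg[3]);
        write_debugreg(6, v->arch.guest_context.debugreg[6]);
        write_debugreg(7, v->arch.guest_context.debugreg[7]);
    }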
     2.1 --- a/xen/arch/x86/domain.c	Thu Nov 01 10:56:56 2007 +0000
     2.2 +++ b/xen/arch/x86/domain.c	Thu Nov 01 16:16:25 2007 +0000
     2.3 @@ -687,14 +687,14 @@ int arch_set_info_guest(
     2.4      v->arch.guest_context.ctrlreg[4] =
     2.5          (cr4 == 0) ? mmu_cr4_features : pv_guest_cr4_fixup(cr4);
     2.6  
     2.7 -    if ( v->is_initialised )
     2.8 -        goto out;
     2.9 -
    2.10      memset(v->arch.guest_context.debugreg, 0,
    2.11             sizeof(v->arch.guest_context.debugreg));
    2.12      for ( i = 0; i < 8; i++ )
    2.13          (void)set_debugreg(v, i, c(debugreg[i]));
    2.14  
    2.15 +    if ( v->is_initialised )
    2.16 +        goto out;
    2.17 +
    2.18      if ( v->vcpu_id == 0 )
    2.19          d->vm_assist = c(vm_assist);
    2.20  
    2.21 @@ -1210,6 +1210,15 @@ static inline void switch_kernel_stack(s
    2.22  static void paravirt_ctxt_switch_from(struct vcpu *v)
    2.23  {
    2.24      save_segments(v);
    2.25 +
    2.26 +    /*
    2.27 +     * Disable debug breakpoints. We do this aggressively because if we switch
     2.28 +     * to an HVM guest we may load DR0-DR3 with values that can cause #DB
    2.29 +     * inside Xen, before we get a chance to reload DR7, and this cannot always
    2.30 +     * safely be handled.
    2.31 +     */
    2.32 +    if ( unlikely(v->arch.guest_context.debugreg[7]) )
    2.33 +        write_debugreg(7, 0);
    2.34  }
    2.35  
    2.36  static void paravirt_ctxt_switch_to(struct vcpu *v)
    2.37 @@ -1219,10 +1228,17 @@ static void paravirt_ctxt_switch_to(stru
    2.38  
    2.39      if ( unlikely(read_cr4() != v->arch.guest_context.ctrlreg[4]) )
    2.40          write_cr4(v->arch.guest_context.ctrlreg[4]);
    2.41 -}
    2.42  
    2.43 -#define loaddebug(_v,_reg) \
    2.44 -    asm volatile ( "mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]) )
    2.45 +    if ( unlikely(v->arch.guest_context.debugreg[7]) )
    2.46 +    {
    2.47 +        write_debugreg(0, v->arch.guest_context.debugreg[0]);
    2.48 +        write_debugreg(1, v->arch.guest_context.debugreg[1]);
    2.49 +        write_debugreg(2, v->arch.guest_context.debugreg[2]);
    2.50 +        write_debugreg(3, v->arch.guest_context.debugreg[3]);
    2.51 +        write_debugreg(6, v->arch.guest_context.debugreg[6]);
    2.52 +        write_debugreg(7, v->arch.guest_context.debugreg[7]);
    2.53 +    }
    2.54 +}
    2.55  
    2.56  static void __context_switch(void)
    2.57  {
    2.58 @@ -1248,18 +1264,6 @@ static void __context_switch(void)
    2.59          memcpy(stack_regs,
    2.60                 &n->arch.guest_context.user_regs,
    2.61                 CTXT_SWITCH_STACK_BYTES);
    2.62 -
    2.63 -        /* Maybe switch the debug registers. */
    2.64 -        if ( unlikely(n->arch.guest_context.debugreg[7]) )
    2.65 -        {
    2.66 -            loaddebug(&n->arch.guest_context, 0);
    2.67 -            loaddebug(&n->arch.guest_context, 1);
    2.68 -            loaddebug(&n->arch.guest_context, 2);
    2.69 -            loaddebug(&n->arch.guest_context, 3);
    2.70 -            /* no 4 and 5 */
    2.71 -            loaddebug(&n->arch.guest_context, 6);
    2.72 -            loaddebug(&n->arch.guest_context, 7);
    2.73 -        }
    2.74          n->arch.ctxt_switch_to(n);
    2.75      }
    2.76  
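
The ordering in the two context-switch hooks above is deliberate: the
switch-out path zaps DR7 first, and the switch-in path writes DR7 last,
so breakpoints are never armed while DR0-DR3 still hold another
context's addresses. A condensed sketch of the resulting protocol for PV
vcpus (illustrative only; restore_guest_debugregs() is the hypothetical
helper sketched earlier):

    /* Lazy DR protocol for PV vcpus, as established by this changeset. */
    static void sketch_ctxt_switch_from(struct vcpu *v)
    {
        /* Disarm breakpoints before any other context's DR0-DR3 load,
         * so stale addresses can never raise #DB inside Xen. */
        if ( unlikely(v->arch.guest_context.debugreg[7]) )
            write_debugreg(7, 0);
    }

    static void sketch_ctxt_switch_to(struct vcpu *v)
    {
        /* Reload only when some breakpoint state exists; DR7 last. */
        if ( unlikely(v->arch.guest_context.debugreg[7]) )
            restore_guest_debugregs(v);
    }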
     3.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Nov 01 10:56:56 2007 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Nov 01 16:16:25 2007 +0000
     3.3 @@ -137,12 +137,6 @@ static enum handler_return long_mode_do_
     3.4      return HNDL_done;
     3.5  }
     3.6  
     3.7 -
     3.8 -#define loaddebug(_v,_reg) \
     3.9 -    asm volatile ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
    3.10 -#define savedebug(_v,_reg) \
    3.11 -    asm volatile ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
    3.12 -
    3.13  static void svm_save_dr(struct vcpu *v)
    3.14  {
    3.15      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.16 @@ -152,26 +146,45 @@ static void svm_save_dr(struct vcpu *v)
    3.17  
    3.18      /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
    3.19      v->arch.hvm_vcpu.flag_dr_dirty = 0;
    3.20 -    v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
    3.21 +    v->arch.hvm_svm.vmcb->dr_intercepts = ~0u;
    3.22  
    3.23 -    savedebug(&v->arch.guest_context, 0);
    3.24 -    savedebug(&v->arch.guest_context, 1);
    3.25 -    savedebug(&v->arch.guest_context, 2);
    3.26 -    savedebug(&v->arch.guest_context, 3);
    3.27 +    v->arch.guest_context.debugreg[0] = read_debugreg(0);
    3.28 +    v->arch.guest_context.debugreg[1] = read_debugreg(1);
    3.29 +    v->arch.guest_context.debugreg[2] = read_debugreg(2);
    3.30 +    v->arch.guest_context.debugreg[3] = read_debugreg(3);
    3.31      v->arch.guest_context.debugreg[6] = vmcb->dr6;
    3.32      v->arch.guest_context.debugreg[7] = vmcb->dr7;
    3.33  }
    3.34  
    3.35 -
    3.36  static void __restore_debug_registers(struct vcpu *v)
    3.37  {
    3.38 -    loaddebug(&v->arch.guest_context, 0);
    3.39 -    loaddebug(&v->arch.guest_context, 1);
    3.40 -    loaddebug(&v->arch.guest_context, 2);
    3.41 -    loaddebug(&v->arch.guest_context, 3);
    3.42 -    /* DR6 and DR7 are loaded from the VMCB. */
    3.43 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.44 +
    3.45 +    ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
    3.46 +    v->arch.hvm_vcpu.flag_dr_dirty = 1;
    3.47 +    vmcb->dr_intercepts = 0;
    3.48 +
    3.49 +    write_debugreg(0, v->arch.guest_context.debugreg[0]);
    3.50 +    write_debugreg(1, v->arch.guest_context.debugreg[1]);
    3.51 +    write_debugreg(2, v->arch.guest_context.debugreg[2]);
    3.52 +    write_debugreg(3, v->arch.guest_context.debugreg[3]);
    3.53 +    vmcb->dr6 = v->arch.guest_context.debugreg[6];
    3.54 +    vmcb->dr7 = v->arch.guest_context.debugreg[7];
    3.55  }
    3.56  
    3.57 +/*
    3.58 + * DR7 is saved and restored on every vmexit.  Other debug registers only
    3.59 + * need to be restored if their value is going to affect execution -- i.e.,
    3.60 + * if one of the breakpoints is enabled.  So mask out all bits that don't
    3.61 + * enable some breakpoint functionality.
    3.62 + */
    3.63 +#define DR7_ACTIVE_MASK 0xff
    3.64 +
    3.65 +static void svm_restore_dr(struct vcpu *v)
    3.66 +{
    3.67 +    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
    3.68 +        __restore_debug_registers(v);
    3.69 +}
    3.70  
    3.71  int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
    3.72  {
    3.73 @@ -351,9 +364,6 @@ int svm_vmcb_restore(struct vcpu *v, str
    3.74          vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
    3.75      }
    3.76  
    3.77 -    vmcb->dr6 = c->dr6;
    3.78 -    vmcb->dr7 = c->dr7;
    3.79 -
    3.80      if ( c->pending_valid ) 
    3.81      {
    3.82          gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
    3.83 @@ -421,12 +431,6 @@ static int svm_load_vmcb_ctxt(struct vcp
    3.84      return 0;
    3.85  }
    3.86  
    3.87 -static void svm_restore_dr(struct vcpu *v)
    3.88 -{
    3.89 -    if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
    3.90 -        __restore_debug_registers(v);
    3.91 -}
    3.92 -
    3.93  static enum hvm_intblk svm_interrupt_blocked(
    3.94      struct vcpu *v, struct hvm_intack intack)
    3.95  {
    3.96 @@ -1147,16 +1151,8 @@ static void set_reg(
    3.97  
    3.98  static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
    3.99  {
   3.100 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   3.101 -
   3.102      HVMTRACE_0D(DR_WRITE, v);
   3.103 -
   3.104 -    v->arch.hvm_vcpu.flag_dr_dirty = 1;
   3.105 -
   3.106      __restore_debug_registers(v);
   3.107 -
   3.108 -    /* allow the guest full access to the debug registers */
   3.109 -    vmcb->dr_intercepts = 0;
   3.110  }
   3.111  
   3.112  
     4.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Thu Nov 01 10:56:56 2007 +0000
     4.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu Nov 01 16:16:25 2007 +0000
     4.3 @@ -130,7 +130,7 @@ static int construct_vmcb(struct vcpu *v
     4.4          GENERAL2_INTERCEPT_SKINIT      | GENERAL2_INTERCEPT_RDTSCP;
     4.5  
     4.6      /* Intercept all debug-register writes. */
     4.7 -    vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
     4.8 +    vmcb->dr_intercepts = ~0u;
     4.9  
    4.10      /* Intercept all control-register accesses except for CR2 and CR8. */
    4.11      vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
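
In the VMCB, dr_intercepts packs read intercepts for DR0-DR15 into bits
0-15 and write intercepts into bits 16-31 (see the DRInterceptBits enum
in the vmcb.h hunk below), so ~0u arms all thirty-two. A sketch of how
the new mask relates to the removed one (these macro names are
illustrative, not from the source):

    /* Bit layout of vmcb->dr_intercepts, per enum DRInterceptBits:
     *   bits  0-15 : intercept reads of DR0..DR15
     *   bits 16-31 : intercept writes of DR0..DR15 */
    #define DR_READ_INTERCEPTS_SKETCH   0x0000ffffu
    #define DR_WRITE_INTERCEPTS_SKETCH  0xffff0000u
    /* The removed DR_INTERCEPT_ALL_WRITES covered only DR0-DR7 writes,
     * i.e. 0x00ff0000; the new ~0u is
     * DR_READ_INTERCEPTS_SKETCH | DR_WRITE_INTERCEPTS_SKETCH. */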
     5.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 01 10:56:56 2007 +0000
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 01 16:16:25 2007 +0000
     5.3 @@ -381,11 +381,6 @@ static enum handler_return long_mode_do_
     5.4  
     5.5  #endif /* __i386__ */
     5.6  
     5.7 -#define loaddebug(_v,_reg)  \
     5.8 -    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
     5.9 -#define savedebug(_v,_reg)  \
    5.10 -    __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
    5.11 -
    5.12  static int vmx_guest_x86_mode(struct vcpu *v)
    5.13  {
    5.14      unsigned int cs_ar_bytes;
    5.15 @@ -411,25 +406,43 @@ static void vmx_save_dr(struct vcpu *v)
    5.16      v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
    5.17      __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
    5.18  
    5.19 -    savedebug(&v->arch.guest_context, 0);
    5.20 -    savedebug(&v->arch.guest_context, 1);
    5.21 -    savedebug(&v->arch.guest_context, 2);
    5.22 -    savedebug(&v->arch.guest_context, 3);
    5.23 -    savedebug(&v->arch.guest_context, 6);
    5.24 +    v->arch.guest_context.debugreg[0] = read_debugreg(0);
    5.25 +    v->arch.guest_context.debugreg[1] = read_debugreg(1);
    5.26 +    v->arch.guest_context.debugreg[2] = read_debugreg(2);
    5.27 +    v->arch.guest_context.debugreg[3] = read_debugreg(3);
    5.28 +    v->arch.guest_context.debugreg[6] = read_debugreg(6);
    5.29 +    /* DR7 must be saved as it is used by vmx_restore_dr(). */
    5.30      v->arch.guest_context.debugreg[7] = __vmread(GUEST_DR7);
    5.31  }
    5.32  
    5.33  static void __restore_debug_registers(struct vcpu *v)
    5.34  {
    5.35 -    loaddebug(&v->arch.guest_context, 0);
    5.36 -    loaddebug(&v->arch.guest_context, 1);
    5.37 -    loaddebug(&v->arch.guest_context, 2);
    5.38 -    loaddebug(&v->arch.guest_context, 3);
    5.39 -    /* No 4 and 5 */
    5.40 -    loaddebug(&v->arch.guest_context, 6);
    5.41 +    ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
    5.42 +    v->arch.hvm_vcpu.flag_dr_dirty = 1;
    5.43 +
    5.44 +    write_debugreg(0, v->arch.guest_context.debugreg[0]);
    5.45 +    write_debugreg(1, v->arch.guest_context.debugreg[1]);
    5.46 +    write_debugreg(2, v->arch.guest_context.debugreg[2]);
    5.47 +    write_debugreg(3, v->arch.guest_context.debugreg[3]);
    5.48 +    write_debugreg(6, v->arch.guest_context.debugreg[6]);
    5.49      /* DR7 is loaded from the VMCS. */
    5.50  }
    5.51  
    5.52 +/*
    5.53 + * DR7 is saved and restored on every vmexit.  Other debug registers only
    5.54 + * need to be restored if their value is going to affect execution -- i.e.,
    5.55 + * if one of the breakpoints is enabled.  So mask out all bits that don't
    5.56 + * enable some breakpoint functionality.
    5.57 + */
    5.58 +#define DR7_ACTIVE_MASK 0xff
    5.59 +
    5.60 +static void vmx_restore_dr(struct vcpu *v)
    5.61 +{
    5.62 +    /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
    5.63 +    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
    5.64 +        __restore_debug_registers(v);
    5.65 +}
    5.66 +
    5.67  void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
    5.68  {
    5.69      uint32_t ev;
    5.70 @@ -703,21 +716,6 @@ static int vmx_load_vmcs_ctxt(struct vcp
    5.71      return 0;
    5.72  }
    5.73  
    5.74 -/*
    5.75 - * DR7 is saved and restored on every vmexit.  Other debug registers only
    5.76 - * need to be restored if their value is going to affect execution -- i.e.,
    5.77 - * if one of the breakpoints is enabled.  So mask out all bits that don't
    5.78 - * enable some breakpoint functionality.
    5.79 - */
    5.80 -#define DR7_ACTIVE_MASK 0xff
    5.81 -
    5.82 -static void vmx_restore_dr(struct vcpu *v)
    5.83 -{
    5.84 -    /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
    5.85 -    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
    5.86 -        __restore_debug_registers(v);
    5.87 -}
    5.88 -
    5.89  static void vmx_ctxt_switch_from(struct vcpu *v)
    5.90  {
    5.91      vmx_save_guest_msrs(v);
    5.92 @@ -1322,15 +1320,12 @@ static void vmx_dr_access(unsigned long 
    5.93  
    5.94      HVMTRACE_0D(DR_WRITE, v);
    5.95  
    5.96 -    v->arch.hvm_vcpu.flag_dr_dirty = 1;
    5.97 -
    5.98 -    /* We could probably be smarter about this */
    5.99 -    __restore_debug_registers(v);
   5.100 +    if ( !v->arch.hvm_vcpu.flag_dr_dirty )
   5.101 +        __restore_debug_registers(v);
   5.102  
   5.103      /* Allow guest direct access to DR registers */
   5.104      v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
   5.105 -    __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
   5.106 -              v->arch.hvm_vmx.exec_control);
   5.107 +    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
   5.108  }
   5.109  
   5.110  /*
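
On VMX the shape is the same, with one difference: DR7 lives in the VMCS
(GUEST_DR7) and is swapped by hardware on every vmentry/vmexit, so only
DR0-DR3 and DR6 are handled lazily. DR7's low byte carries the
L0/G0..L3/G3 enable bits, which is exactly what DR7_ACTIVE_MASK (0xff)
tests; vmx_restore_dr() must make that decision from the saved
debugreg[7] because, as its comment notes, __vmread() is unusable at
that point. A worked check (the example values are illustrative):

    /* DR7 enable bits L0,G0 .. L3,G3 occupy bits 0-7. */
    unsigned long dr7_disarmed  = 0x00000400UL; /* only reserved bit 10 */
    unsigned long dr7_bp0_armed = 0x00000401UL; /* bit 0 (L0): bp 0 on  */

    /* (dr7_disarmed  & DR7_ACTIVE_MASK) == 0 -> restore skipped        */
    /* (dr7_bp0_armed & DR7_ACTIVE_MASK) != 0 -> __restore_debug_registers */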
     6.1 --- a/xen/arch/x86/traps.c	Thu Nov 01 10:56:56 2007 +0000
     6.2 +++ b/xen/arch/x86/traps.c	Thu Nov 01 16:16:25 2007 +0000
     6.3 @@ -2493,50 +2493,44 @@ asmlinkage int do_device_not_available(s
     6.4  
     6.5  asmlinkage int do_debug(struct cpu_user_regs *regs)
     6.6  {
     6.7 -    unsigned long condition;
     6.8      struct vcpu *v = current;
     6.9  
    6.10 -    asm volatile ( "mov %%db6,%0" : "=r" (condition) );
    6.11 -
    6.12 -    /* Mask out spurious debug traps due to lazy DR7 setting */
    6.13 -    if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
    6.14 -         (v->arch.guest_context.debugreg[7] == 0) )
    6.15 -    {
    6.16 -        asm volatile ( "mov %0,%%db7" : : "r" (0UL) );
    6.17 -        goto out;
    6.18 -    }
    6.19 -
    6.20      DEBUGGER_trap_entry(TRAP_debug, regs);
    6.21  
    6.22      if ( !guest_mode(regs) )
    6.23      {
    6.24 +        if ( regs->eflags & EF_TF )
    6.25 +        {
    6.26  #ifdef __x86_64__
    6.27 -        void sysenter_entry(void);
    6.28 -        void sysenter_eflags_saved(void);
    6.29 -        /* In SYSENTER entry path we cannot zap TF until EFLAGS is saved. */
    6.30 -        if ( (regs->rip >= (unsigned long)sysenter_entry) &&
    6.31 -             (regs->rip < (unsigned long)sysenter_eflags_saved) )
    6.32 -            goto out;
    6.33 -        WARN_ON(regs->rip != (unsigned long)sysenter_eflags_saved);
    6.34 +            void sysenter_entry(void);
    6.35 +            void sysenter_eflags_saved(void);
    6.36 +            /* In SYSENTER entry path we can't zap TF until EFLAGS is saved. */
    6.37 +            if ( (regs->rip >= (unsigned long)sysenter_entry) &&
    6.38 +                 (regs->rip < (unsigned long)sysenter_eflags_saved) )
    6.39 +                goto out;
    6.40 +            WARN_ON(regs->rip != (unsigned long)sysenter_eflags_saved);
    6.41  #else
    6.42 -        WARN_ON(1);
    6.43 +            WARN_ON(1);
    6.44  #endif
    6.45 -        /* Clear TF just for absolute sanity. */
    6.46 -        regs->eflags &= ~EF_TF;
    6.47 -        /*
    6.48 -         * We ignore watchpoints when they trigger within Xen. This may happen
    6.49 -         * when a buffer is passed to us which previously had a watchpoint set
    6.50 -         * on it. No need to bump EIP; the only faulting trap is an instruction
    6.51 -         * breakpoint, which can't happen to us.
    6.52 -         */
    6.53 +            regs->eflags &= ~EF_TF;
    6.54 +        }
    6.55 +        else
    6.56 +        {
    6.57 +            /*
    6.58 +             * We ignore watchpoints when they trigger within Xen. This may
    6.59 +             * happen when a buffer is passed to us which previously had a
    6.60 +             * watchpoint set on it. No need to bump EIP; the only faulting
    6.61 +             * trap is an instruction breakpoint, which can't happen to us.
    6.62 +             */
    6.63 +            WARN_ON(!search_exception_table(regs->eip));
    6.64 +        }
    6.65          goto out;
    6.66 -    } 
    6.67 +    }
    6.68  
    6.69      /* Save debug status register where guest OS can peek at it */
    6.70 -    v->arch.guest_context.debugreg[6] = condition;
    6.71 +    v->arch.guest_context.debugreg[6] = read_debugreg(6);
    6.72  
    6.73      ler_enable();
    6.74 -
    6.75      return do_guest_trap(TRAP_debug, regs, 0);
    6.76  
    6.77   out:
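
With DR7 now reliably cleared on the switch-out path, do_debug() no
longer needs the old "mask out spurious debug traps" workaround, and it
forwards DR6 to the guest verbatim via read_debugreg(6). A minimal
sketch of how a guest kernel might decode that value (illustrative, not
part of the patch):

    /* Guest-side decoding of the DR6 value that do_debug() stores into
     * v->arch.guest_context.debugreg[6].  Bits 0-3 (B0-B3) report which
     * hardware breakpoint fired; none set means single-step or another
     * cause. */
    static int first_triggered_breakpoint(unsigned long dr6)
    {
        int i;
        for ( i = 0; i < 4; i++ )
            if ( dr6 & (1UL << i) )
                return i;
        return -1;
    }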
    6.78 @@ -2750,25 +2744,25 @@ long set_debugreg(struct vcpu *v, int re
    6.79          if ( !access_ok(value, sizeof(long)) )
    6.80              return -EPERM;
    6.81          if ( v == curr ) 
    6.82 -            asm volatile ( "mov %0, %%db0" : : "r" (value) );
    6.83 +            write_debugreg(0, value);
    6.84          break;
    6.85      case 1: 
    6.86          if ( !access_ok(value, sizeof(long)) )
    6.87              return -EPERM;
    6.88          if ( v == curr ) 
    6.89 -            asm volatile ( "mov %0, %%db1" : : "r" (value) );
    6.90 +            write_debugreg(1, value);
    6.91          break;
    6.92      case 2: 
    6.93          if ( !access_ok(value, sizeof(long)) )
    6.94              return -EPERM;
    6.95          if ( v == curr ) 
    6.96 -            asm volatile ( "mov %0, %%db2" : : "r" (value) );
    6.97 +            write_debugreg(2, value);
    6.98          break;
    6.99      case 3:
   6.100          if ( !access_ok(value, sizeof(long)) )
   6.101              return -EPERM;
   6.102          if ( v == curr ) 
   6.103 -            asm volatile ( "mov %0, %%db3" : : "r" (value) );
   6.104 +            write_debugreg(3, value);
   6.105          break;
   6.106      case 6:
   6.107          /*
   6.108 @@ -2778,7 +2772,7 @@ long set_debugreg(struct vcpu *v, int re
   6.109          value &= 0xffffefff; /* reserved bits => 0 */
   6.110          value |= 0xffff0ff0; /* reserved bits => 1 */
   6.111          if ( v == curr ) 
   6.112 -            asm volatile ( "mov %0, %%db6" : : "r" (value) );
   6.113 +            write_debugreg(6, value);
   6.114          break;
   6.115      case 7:
   6.116          /*
   6.117 @@ -2797,9 +2791,22 @@ long set_debugreg(struct vcpu *v, int re
   6.118              if ( (value & (1<<13)) != 0 ) return -EPERM;
   6.119              for ( i = 0; i < 16; i += 2 )
   6.120                  if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
   6.121 +            /*
   6.122 +             * If DR7 was previously clear then we need to load all other
   6.123 +             * debug registers at this point as they were not restored during
   6.124 +             * context switch.
   6.125 +             */
   6.126 +            if ( (v == curr) && (v->arch.guest_context.debugreg[7] == 0) )
   6.127 +            {
   6.128 +                write_debugreg(0, v->arch.guest_context.debugreg[0]);
   6.129 +                write_debugreg(1, v->arch.guest_context.debugreg[1]);
   6.130 +                write_debugreg(2, v->arch.guest_context.debugreg[2]);
   6.131 +                write_debugreg(3, v->arch.guest_context.debugreg[3]);
   6.132 +                write_debugreg(6, v->arch.guest_context.debugreg[6]);
   6.133 +            }
   6.134          }
   6.135 -        if ( v == current ) 
   6.136 -            asm volatile ( "mov %0, %%db7" : : "r" (value) );
   6.137 +        if ( v == curr ) 
   6.138 +            write_debugreg(7, value);
   6.139          break;
   6.140      default:
   6.141          return -EINVAL;
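
The DR7 validation above walks the upper sixteen bits in 2-bit fields,
refusing the value 2 in any of them: for the R/W fields this rejects I/O
breakpoints (which would require CR4.DE), and as a side effect it also
rejects the 8-byte LEN encoding. Combined with the bit-13 (GD) check, a
worked trace for one accepted value (the value is illustrative):

    /* value = 0x000d0401: bit 0 (L0) arms breakpoint 0; bit 10 is the
     * always-set reserved bit; bits 16-19 = 0xd configure bp 0.
     *   GD check:  0x000d0401 & (1<<13)   == 0                  -> OK
     *   i = 0:     (0x000d0401 >> 16) & 3 == 1 (R/W0: data write)
     *   i = 2:     (0x000d0401 >> 18) & 3 == 3 (LEN0: 4 bytes)
     *   i = 4..14: remaining fields are 0                       -> OK
     * Since the old debugreg[7] was 0, DR0-DR3 and DR6 are reloaded
     * before DR7 itself is written. */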
     7.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Nov 01 10:56:56 2007 +0000
     7.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Nov 01 16:16:25 2007 +0000
     7.3 @@ -151,13 +151,6 @@ enum DRInterceptBits
     7.4      DR_INTERCEPT_DR15_WRITE = 1 << 31,
     7.5  };
     7.6  
     7.7 -/* for lazy save/restore we'd like to intercept all DR writes */
     7.8 -#define DR_INTERCEPT_ALL_WRITES \
     7.9 -    (DR_INTERCEPT_DR0_WRITE|DR_INTERCEPT_DR1_WRITE|DR_INTERCEPT_DR2_WRITE \
    7.10 -    |DR_INTERCEPT_DR3_WRITE|DR_INTERCEPT_DR4_WRITE|DR_INTERCEPT_DR5_WRITE \
    7.11 -    |DR_INTERCEPT_DR6_WRITE|DR_INTERCEPT_DR7_WRITE) 
    7.12 -
    7.13 -
    7.14  enum VMEXIT_EXITCODE
    7.15  {
    7.16      /* control register read exitcodes */
     8.1 --- a/xen/include/asm-x86/processor.h	Thu Nov 01 10:56:56 2007 +0000
     8.2 +++ b/xen/include/asm-x86/processor.h	Thu Nov 01 16:16:25 2007 +0000
     8.3 @@ -481,6 +481,15 @@ long set_gdt(struct vcpu *d,
     8.4               unsigned long *frames, 
     8.5               unsigned int entries);
     8.6  
     8.7 +#define write_debugreg(reg, val) do {                       \
     8.8 +    unsigned long __val = val;                              \
     8.9 +    asm volatile ( "mov %0,%%db" #reg : : "r" (__val) );    \
    8.10 +} while (0)
    8.11 +#define read_debugreg(reg) ({                               \
    8.12 +    unsigned long __val;                                    \
    8.13 +    asm volatile ( "mov %%db" #reg ",%0" : "=r" (__val) );  \
    8.14 +    __val;                                                  \
    8.15 +})
    8.16  long set_debugreg(struct vcpu *p, int reg, unsigned long value);
    8.17  
    8.18  struct microcode_header {
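
write_debugreg() and read_debugreg() stringise the register number into
the instruction template ("#reg" plus string-literal concatenation), so
reg must expand to a literal constant 0-7 rather than a variable; the
__val temporary also guarantees val is evaluated exactly once, as an
unsigned long. A usage sketch (illustrative):

    /* write_debugreg(7, 0) expands to:
     *     asm volatile ( "mov %0,%%db7" : : "r" (__val) );
     * A non-literal index would yield "%%dbn" and fail to assemble. */
    unsigned long dr6 = read_debugreg(6);   /* OK: literal index      */
    write_debugreg(7, dr6 & ~0xfUL);        /* OK: val evaluated once */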