ia64/xen-unstable

changeset 16428:69b56d3289f5

x86: emulate I/O port access breakpoints

Emulate trapping on I/O port accesses (debug-register I/O breakpoints) when emulating IN/OUT instructions.

Also allow 8-byte breakpoints on x86-64 (and on i686 if the hardware
supports them), and tighten the condition for loading debug registers
during context switch.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Nov 22 19:23:40 2007 +0000 (2007-11-22)
parents fd3f6d814f6d
children f2711b7eae95
files xen/arch/x86/domain.c xen/arch/x86/domctl.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/traps.c xen/include/asm-x86/debugreg.h xen/include/asm-x86/domain.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Thu Nov 22 18:28:47 2007 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Thu Nov 22 19:23:40 2007 +0000
     1.3 @@ -42,6 +42,7 @@
     1.4  #include <asm/hypercall.h>
     1.5  #include <asm/hvm/hvm.h>
     1.6  #include <asm/hvm/support.h>
     1.7 +#include <asm/debugreg.h>
     1.8  #include <asm/msr.h>
     1.9  #include <asm/nmi.h>
    1.10  #include <asm/iommu.h>
    1.11 @@ -583,7 +584,7 @@ unsigned long pv_guest_cr4_fixup(unsigne
    1.12      if ( (guest_cr4 & hv_cr4_mask) != (hv_cr4 & hv_cr4_mask) )
    1.13          gdprintk(XENLOG_WARNING,
    1.14                   "Attempt to change CR4 flags %08lx -> %08lx\n",
    1.15 -                 hv_cr4 & ~(X86_CR4_PGE|X86_CR4_PSE), guest_cr4);
    1.16 +                 hv_cr4, guest_cr4);
    1.17  
    1.18      return (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask);
    1.19  }
    1.20 @@ -1219,7 +1220,7 @@ static void paravirt_ctxt_switch_from(st
    1.21       * inside Xen, before we get a chance to reload DR7, and this cannot always
    1.22       * safely be handled.
    1.23       */
    1.24 -    if ( unlikely(v->arch.guest_context.debugreg[7]) )
    1.25 +    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
    1.26          write_debugreg(7, 0);
    1.27  }
    1.28  
    1.29 @@ -1234,7 +1235,7 @@ static void paravirt_ctxt_switch_to(stru
    1.30      if ( unlikely(cr4 != read_cr4()) )
    1.31          write_cr4(cr4);
    1.32  
    1.33 -    if ( unlikely(v->arch.guest_context.debugreg[7]) )
    1.34 +    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
    1.35      {
    1.36          write_debugreg(0, v->arch.guest_context.debugreg[0]);
    1.37          write_debugreg(1, v->arch.guest_context.debugreg[1]);
     2.1 --- a/xen/arch/x86/domctl.c	Thu Nov 22 18:28:47 2007 +0000
     2.2 +++ b/xen/arch/x86/domctl.c	Thu Nov 22 19:23:40 2007 +0000
     2.3 @@ -825,12 +825,20 @@ void arch_get_info_guest(struct vcpu *v,
     2.4                  c.nat->ctrlreg[1] = xen_pfn_to_cr3(
     2.5                      pagetable_get_pfn(v->arch.guest_table_user));
     2.6  #endif
     2.7 +
     2.8 +            /* Merge shadow DR7 bits into real DR7. */
     2.9 +            c.nat->debugreg[7] |= c.nat->debugreg[5];
    2.10 +            c.nat->debugreg[5] = 0;
    2.11          }
    2.12  #ifdef CONFIG_COMPAT
    2.13          else
    2.14          {
    2.15              l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
    2.16              c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
    2.17 +
    2.18 +            /* Merge shadow DR7 bits into real DR7. */
    2.19 +            c.cmp->debugreg[7] |= c.cmp->debugreg[5];
    2.20 +            c.cmp->debugreg[5] = 0;
    2.21          }
    2.22  #endif
    2.23  
     3.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Nov 22 18:28:47 2007 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Nov 22 19:23:40 2007 +0000
     3.3 @@ -34,6 +34,7 @@
     3.4  #include <asm/cpufeature.h>
     3.5  #include <asm/processor.h>
     3.6  #include <asm/types.h>
     3.7 +#include <asm/debugreg.h>
     3.8  #include <asm/msr.h>
     3.9  #include <asm/spinlock.h>
    3.10  #include <asm/hvm/hvm.h>
    3.11 @@ -176,8 +177,6 @@ static void __restore_debug_registers(st
    3.12   * if one of the breakpoints is enabled.  So mask out all bits that don't
    3.13   * enable some breakpoint functionality.
    3.14   */
    3.15 -#define DR7_ACTIVE_MASK 0xff
    3.16 -
    3.17  static void svm_restore_dr(struct vcpu *v)
    3.18  {
    3.19      if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
     4.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 22 18:28:47 2007 +0000
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 22 19:23:40 2007 +0000
     4.3 @@ -32,6 +32,7 @@
     4.4  #include <asm/cpufeature.h>
     4.5  #include <asm/processor.h>
     4.6  #include <asm/types.h>
     4.7 +#include <asm/debugreg.h>
     4.8  #include <asm/msr.h>
     4.9  #include <asm/spinlock.h>
    4.10  #include <asm/paging.h>
    4.11 @@ -435,8 +436,6 @@ static void __restore_debug_registers(st
    4.12   * if one of the breakpoints is enabled.  So mask out all bits that don't
    4.13   * enable some breakpoint functionality.
    4.14   */
    4.15 -#define DR7_ACTIVE_MASK 0xff
    4.16 -
    4.17  static void vmx_restore_dr(struct vcpu *v)
    4.18  {
    4.19      /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
     5.1 --- a/xen/arch/x86/traps.c	Thu Nov 22 18:28:47 2007 +0000
     5.2 +++ b/xen/arch/x86/traps.c	Thu Nov 22 19:23:40 2007 +0000
     5.3 @@ -414,17 +414,55 @@ static int do_guest_trap(
     5.4      return 0;
     5.5  }
     5.6  
     5.7 -static void instruction_done(struct cpu_user_regs *regs, unsigned long eip)
     5.8 +static void instruction_done(
     5.9 +    struct cpu_user_regs *regs, unsigned long eip, unsigned int bpmatch)
    5.10  {
    5.11      regs->eip = eip;
    5.12      regs->eflags &= ~X86_EFLAGS_RF;
    5.13 -    if ( regs->eflags & X86_EFLAGS_TF )
    5.14 +    if ( bpmatch || (regs->eflags & X86_EFLAGS_TF) )
    5.15      {
    5.16 -        current->arch.guest_context.debugreg[6] |= 0xffff4ff0;
    5.17 +        current->arch.guest_context.debugreg[6] |= bpmatch | 0xffff0ff0;
    5.18 +        if ( regs->eflags & X86_EFLAGS_TF )
    5.19 +            current->arch.guest_context.debugreg[6] |= 0x4000;
    5.20          do_guest_trap(TRAP_debug, regs, 0);
    5.21      }
    5.22  }
    5.23  
    5.24 +static unsigned int check_guest_io_breakpoint(struct vcpu *v,
    5.25 +    unsigned int port, unsigned int len)
    5.26 +{
    5.27 +    unsigned int width, i, match = 0;
    5.28 +    unsigned long start;
    5.29 +
    5.30 +    if ( !(v->arch.guest_context.debugreg[5]) || 
    5.31 +         !(v->arch.guest_context.ctrlreg[4] & X86_CR4_DE) )
    5.32 +        return 0;
    5.33 +
    5.34 +    for ( i = 0; i < 4; i++ )
    5.35 +    {
    5.36 +        if ( !(v->arch.guest_context.debugreg[5] &
    5.37 +               (3 << (i * DR_ENABLE_SIZE))) )
    5.38 +            continue;
    5.39 +
    5.40 +        start = v->arch.guest_context.debugreg[i];
    5.41 +        width = 0;
    5.42 +
    5.43 +        switch ( (v->arch.guest_context.debugreg[7] >>
    5.44 +                  (DR_CONTROL_SHIFT + i * DR_CONTROL_SIZE)) & 0xc )
    5.45 +        {
    5.46 +        case DR_LEN_1: width = 1; break;
    5.47 +        case DR_LEN_2: width = 2; break;
    5.48 +        case DR_LEN_4: width = 4; break;
    5.49 +        case DR_LEN_8: width = 8; break;
    5.50 +        }
    5.51 +
    5.52 +        if ( (start < (port + len)) && ((start + width) > port) )
    5.53 +            match |= 1 << i;
    5.54 +    }
    5.55 +
    5.56 +    return match;
    5.57 +}
    5.58 +
    5.59  /*
    5.60   * Called from asm to set up the NMI trapbounce info.
    5.61   * Returns 0 if no callback is set up, else 1.
    5.62 @@ -639,7 +677,6 @@ static int emulate_forced_invalid_op(str
    5.63      {
    5.64          /* Modify Feature Information. */
    5.65          clear_bit(X86_FEATURE_VME, &d);
    5.66 -        clear_bit(X86_FEATURE_DE,  &d);
    5.67          clear_bit(X86_FEATURE_PSE, &d);
    5.68          clear_bit(X86_FEATURE_PGE, &d);
    5.69          if ( !cpu_has_sep )
    5.70 @@ -669,7 +706,7 @@ static int emulate_forced_invalid_op(str
    5.71      regs->ecx = c;
    5.72      regs->edx = d;
    5.73  
    5.74 -    instruction_done(regs, eip);
    5.75 +    instruction_done(regs, eip, 0);
    5.76  
    5.77      trace_trap_one_addr(TRC_PV_FORCED_INVALID_OP, regs->eip);
    5.78  
    5.79 @@ -1329,7 +1366,7 @@ static int emulate_privileged_op(struct 
    5.80      unsigned long *reg, eip = regs->eip, res;
    5.81      u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0, lock = 0, rex = 0;
    5.82      enum { lm_seg_none, lm_seg_fs, lm_seg_gs } lm_ovr = lm_seg_none;
    5.83 -    unsigned int port, i, data_sel, ar, data, rc;
    5.84 +    unsigned int port, i, data_sel, ar, data, rc, bpmatch = 0;
    5.85      unsigned int op_bytes, op_default, ad_bytes, ad_default;
    5.86  #define rd_ad(reg) (ad_bytes >= sizeof(regs->reg) \
    5.87                      ? regs->reg \
    5.88 @@ -1479,6 +1516,8 @@ static int emulate_privileged_op(struct 
    5.89          }
    5.90  #endif
    5.91  
    5.92 +        port = (u16)regs->edx;
    5.93 +
    5.94      continue_io_string:
    5.95          switch ( opcode )
    5.96          {
    5.97 @@ -1487,9 +1526,8 @@ static int emulate_privileged_op(struct 
    5.98          case 0x6d: /* INSW/INSL */
    5.99              if ( data_limit < op_bytes - 1 ||
   5.100                   rd_ad(edi) > data_limit - (op_bytes - 1) ||
   5.101 -                 !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
   5.102 +                 !guest_io_okay(port, op_bytes, v, regs) )
   5.103                  goto fail;
   5.104 -            port = (u16)regs->edx;
   5.105              switch ( op_bytes )
   5.106              {
   5.107              case 1:
   5.108 @@ -1519,7 +1557,7 @@ static int emulate_privileged_op(struct 
   5.109          case 0x6f: /* OUTSW/OUTSL */
   5.110              if ( data_limit < op_bytes - 1 ||
   5.111                   rd_ad(esi) > data_limit - (op_bytes - 1) ||
   5.112 -                 !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
   5.113 +                 !guest_io_okay(port, op_bytes, v, regs) )
   5.114                  goto fail;
   5.115              rc = copy_from_user(&data, (void *)data_base + rd_ad(esi), op_bytes);
   5.116              if ( rc != 0 )
   5.117 @@ -1527,7 +1565,6 @@ static int emulate_privileged_op(struct 
   5.118                  propagate_page_fault(data_base + rd_ad(esi) + op_bytes - rc, 0);
   5.119                  return EXCRET_fault_fixed;
   5.120              }
   5.121 -            port = (u16)regs->edx;
   5.122              switch ( op_bytes )
   5.123              {
   5.124              case 1:
   5.125 @@ -1553,9 +1590,11 @@ static int emulate_privileged_op(struct 
   5.126              break;
   5.127          }
   5.128  
   5.129 +        bpmatch = check_guest_io_breakpoint(v, port, op_bytes);
   5.130 +
   5.131          if ( rep_prefix && (wr_ad(ecx, regs->ecx - 1) != 0) )
   5.132          {
   5.133 -            if ( !hypercall_preempt_check() )
   5.134 +            if ( !bpmatch && !hypercall_preempt_check() )
   5.135                  goto continue_io_string;
   5.136              eip = regs->eip;
   5.137          }
   5.138 @@ -1634,6 +1673,7 @@ static int emulate_privileged_op(struct 
   5.139                  regs->eax = (u32)~0;
   5.140              break;
   5.141          }
   5.142 +        bpmatch = check_guest_io_breakpoint(v, port, op_bytes);
   5.143          goto done;
   5.144  
   5.145      case 0xec: /* IN %dx,%al */
   5.146 @@ -1671,6 +1711,7 @@ static int emulate_privileged_op(struct 
   5.147                  io_emul(regs);
   5.148              break;
   5.149          }
   5.150 +        bpmatch = check_guest_io_breakpoint(v, port, op_bytes);
   5.151          goto done;
   5.152  
   5.153      case 0xee: /* OUT %al,%dx */
   5.154 @@ -1964,7 +2005,7 @@ static int emulate_privileged_op(struct 
   5.155  #undef rd_ad
   5.156  
   5.157   done:
   5.158 -    instruction_done(regs, eip);
   5.159 +    instruction_done(regs, eip, bpmatch);
   5.160      return EXCRET_fault_fixed;
   5.161  
   5.162   fail:
   5.163 @@ -2295,7 +2336,7 @@ static int emulate_gate_op(struct cpu_us
   5.164          sel |= (regs->cs & 3);
   5.165  
   5.166      regs->cs = sel;
   5.167 -    instruction_done(regs, off);
   5.168 +    instruction_done(regs, off, 0);
   5.169  #endif
   5.170  
   5.171      return 0;
   5.172 @@ -2805,25 +2846,47 @@ long set_debugreg(struct vcpu *v, int re
   5.173          /*
   5.174           * DR7: Bit 10 reserved (set to 1).
   5.175           *      Bits 11-12,14-15 reserved (set to 0).
   5.176 +         */
   5.177 +        value &= ~DR_CONTROL_RESERVED_ZERO; /* reserved bits => 0 */
   5.178 +        value |=  DR_CONTROL_RESERVED_ONE;  /* reserved bits => 1 */
   5.179 +        /*
   5.180           * Privileged bits:
   5.181           *      GD (bit 13): must be 0.
   5.182 -         *      R/Wn (bits 16-17,20-21,24-25,28-29): mustn't be 10.
   5.183 -         *      LENn (bits 18-19,22-23,26-27,30-31): mustn't be 10.
   5.184           */
   5.185 -        /* DR7 == 0 => debugging disabled for this domain. */
   5.186 -        if ( value != 0 )
   5.187 +        if ( value & DR_GENERAL_DETECT )
   5.188 +            return -EPERM;
   5.189 +        /* DR7.{G,L}E = 0 => debugging disabled for this domain. */
   5.190 +        if ( value & DR7_ACTIVE_MASK )
   5.191          {
   5.192 -            value &= 0xffff27ff; /* reserved bits => 0 */
   5.193 -            value |= 0x00000400; /* reserved bits => 1 */
   5.194 -            if ( (value & (1<<13)) != 0 ) return -EPERM;
   5.195 -            for ( i = 0; i < 16; i += 2 )
   5.196 -                if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
   5.197 +            unsigned int io_enable = 0;
   5.198 +
   5.199 +            for ( i = DR_CONTROL_SHIFT; i < 32; i += DR_CONTROL_SIZE )
   5.200 +            {
   5.201 +                if ( ((value >> i) & 3) == DR_IO )
   5.202 +                {
   5.203 +                    if ( !(v->arch.guest_context.ctrlreg[4] & X86_CR4_DE) )
   5.204 +                        return -EPERM;
   5.205 +                    io_enable |= value & (3 << ((i - 16) >> 1));
   5.206 +                }
   5.207 +#ifdef __i386__
   5.208 +                if ( ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) ||
   5.209 +                      !boot_cpu_has(X86_FEATURE_LM)) &&
   5.210 +                     (((value >> i) & 0xc) == DR_LEN_8) )
   5.211 +                    return -EPERM;
   5.212 +#endif
   5.213 +            }
   5.214 +
   5.215 +            /* Guest DR5 is a handy stash for I/O intercept information. */
   5.216 +            v->arch.guest_context.debugreg[5] = io_enable;
   5.217 +            value &= ~io_enable;
   5.218 +
   5.219              /*
   5.220               * If DR7 was previously clear then we need to load all other
   5.221               * debug registers at this point as they were not restored during
   5.222               * context switch.
   5.223               */
   5.224 -            if ( (v == curr) && (v->arch.guest_context.debugreg[7] == 0) )
   5.225 +            if ( (v == curr) &&
   5.226 +                 !(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
   5.227              {
   5.228                  write_debugreg(0, v->arch.guest_context.debugreg[0]);
   5.229                  write_debugreg(1, v->arch.guest_context.debugreg[1]);
   5.230 @@ -2832,7 +2895,7 @@ long set_debugreg(struct vcpu *v, int re
   5.231                  write_debugreg(6, v->arch.guest_context.debugreg[6]);
   5.232              }
   5.233          }
   5.234 -        if ( v == curr ) 
   5.235 +        if ( v == curr )
   5.236              write_debugreg(7, value);
   5.237          break;
   5.238      default:
   5.239 @@ -2850,8 +2913,22 @@ long do_set_debugreg(int reg, unsigned l
   5.240  
   5.241  unsigned long do_get_debugreg(int reg)
   5.242  {
   5.243 -    if ( (reg < 0) || (reg > 7) ) return -EINVAL;
   5.244 -    return current->arch.guest_context.debugreg[reg];
   5.245 +    struct vcpu *curr = current;
   5.246 +
   5.247 +    switch ( reg )
   5.248 +    {
   5.249 +    case 0 ... 3:
   5.250 +    case 6:
   5.251 +        return curr->arch.guest_context.debugreg[reg];
   5.252 +    case 7:
   5.253 +        return (curr->arch.guest_context.debugreg[7] |
   5.254 +                curr->arch.guest_context.debugreg[5]);
   5.255 +    case 4 ... 5:
   5.256 +        return ((curr->arch.guest_context.ctrlreg[4] & X86_CR4_DE) ?
   5.257 +                curr->arch.guest_context.debugreg[reg + 2] : 0);
   5.258 +    }
   5.259 +
   5.260 +    return -EINVAL;
   5.261  }
   5.262  
   5.263  /*
     6.1 --- a/xen/include/asm-x86/debugreg.h	Thu Nov 22 18:28:47 2007 +0000
     6.2 +++ b/xen/include/asm-x86/debugreg.h	Thu Nov 22 19:23:40 2007 +0000
     6.3 @@ -4,23 +4,22 @@
     6.4  
     6.5  /* Indicate the register numbers for a number of the specific
     6.6     debug registers.  Registers 0-3 contain the addresses we wish to trap on */
     6.7 -#define DR_FIRSTADDR 0        /* u_debugreg[DR_FIRSTADDR] */
     6.8 -#define DR_LASTADDR 3         /* u_debugreg[DR_LASTADDR]  */
     6.9  
    6.10 -#define DR_STATUS 6           /* u_debugreg[DR_STATUS]     */
    6.11 -#define DR_CONTROL 7          /* u_debugreg[DR_CONTROL] */
    6.12 +#define DR_FIRSTADDR 0
    6.13 +#define DR_LASTADDR  3
    6.14 +#define DR_STATUS    6
    6.15 +#define DR_CONTROL   7
    6.16  
    6.17  /* Define a few things for the status register.  We can use this to determine
    6.18     which debugging register was responsible for the trap.  The other bits
    6.19     are either reserved or not of interest to us. */
    6.20  
    6.21 -#define DR_TRAP0	(0x1)		/* db0 */
    6.22 -#define DR_TRAP1	(0x2)		/* db1 */
    6.23 -#define DR_TRAP2	(0x4)		/* db2 */
    6.24 -#define DR_TRAP3	(0x8)		/* db3 */
    6.25 -
    6.26 -#define DR_STEP		(0x4000)	/* single-step */
    6.27 -#define DR_SWITCH	(0x8000)	/* task switch */
    6.28 +#define DR_TRAP0        (0x1)           /* db0 */
    6.29 +#define DR_TRAP1        (0x2)           /* db1 */
    6.30 +#define DR_TRAP2        (0x4)           /* db2 */
    6.31 +#define DR_TRAP3        (0x8)           /* db3 */
    6.32 +#define DR_STEP         (0x4000)        /* single-step */
    6.33 +#define DR_SWITCH       (0x8000)        /* task switch */
    6.34  
    6.35  /* Now define a bunch of things for manipulating the control register.
    6.36     The top two bytes of the control register consist of 4 fields of 4
    6.37 @@ -29,36 +28,40 @@
    6.38     field is that we are looking at */
    6.39  
    6.40  #define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
    6.41 -#define DR_CONTROL_SIZE 4   /* 4 control bits per register */
    6.42 +#define DR_CONTROL_SIZE   4 /* 4 control bits per register */
    6.43  
    6.44 -#define DR_RW_EXECUTE (0x0)   /* Settings for the access types to trap on */
    6.45 -#define DR_RW_WRITE (0x1)
    6.46 -#define DR_RW_READ (0x3)
    6.47 +#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
    6.48 +#define DR_RW_WRITE   (0x1)
    6.49 +#define DR_IO         (0x2)
    6.50 +#define DR_RW_READ    (0x3)
    6.51  
    6.52 -#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
    6.53 -#define DR_LEN_2 (0x4)
    6.54 -#define DR_LEN_4 (0xC)
    6.55 +#define DR_LEN_1      (0x0) /* Settings for data length to trap on */
    6.56 +#define DR_LEN_2      (0x4)
    6.57 +#define DR_LEN_4      (0xC)
    6.58 +#define DR_LEN_8      (0x8)
    6.59  
    6.60  /* The low byte to the control register determine which registers are
    6.61     enabled.  There are 4 fields of two bits.  One bit is "local", meaning
    6.62     that the processor will reset the bit after a task switch and the other
    6.63 -   is global meaning that we have to explicitly reset the bit.  With linux,
    6.64 -   you can use either one, since we explicitly zero the register when we enter
    6.65 -   kernel mode. */
    6.66 +   is global meaning that we have to explicitly reset the bit. */
    6.67  
    6.68 -#define DR_LOCAL_ENABLE_SHIFT 0    /* Extra shift to the local enable bit */
    6.69 +#define DR_LOCAL_ENABLE_SHIFT  0   /* Extra shift to the local enable bit */
    6.70  #define DR_GLOBAL_ENABLE_SHIFT 1   /* Extra shift to the global enable bit */
    6.71 -#define DR_ENABLE_SIZE 2           /* 2 enable bits per register */
    6.72 +#define DR_ENABLE_SIZE         2   /* 2 enable bits per register */
    6.73  
    6.74  #define DR_LOCAL_ENABLE_MASK (0x55)  /* Set  local bits for all 4 regs */
    6.75  #define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
    6.76  
    6.77 +#define DR7_ACTIVE_MASK (DR_LOCAL_ENABLE_MASK|DR_GLOBAL_ENABLE_MASK)
    6.78 +
    6.79  /* The second byte to the control register has a few special things.
    6.80     We can slow the instruction pipeline for instructions coming via the
    6.81     gdt or the ldt if we want to.  I am not sure why this is an advantage */
    6.82  
    6.83 -#define DR_CONTROL_RESERVED (~0xFFFF03FFUL) /* Reserved by Intel */
    6.84 -#define DR_LOCAL_SLOWDOWN (0x100)   /* Local slow the pipeline */
    6.85 -#define DR_GLOBAL_SLOWDOWN (0x200)  /* Global slow the pipeline */
    6.86 +#define DR_CONTROL_RESERVED_ZERO (0x0000d800ul) /* Reserved, read as zero */
    6.87 +#define DR_CONTROL_RESERVED_ONE  (0x00000400ul) /* Reserved, read as one */
    6.88 +#define DR_LOCAL_EXACT_ENABLE    (0x00000100ul) /* Local exact enable */
    6.89 +#define DR_GLOBAL_EXACT_ENABLE   (0x00000200ul) /* Global exact enable */
    6.90 +#define DR_GENERAL_DETECT        (0x00002000ul) /* General detect enable */
    6.91  
    6.92  #endif /* _X86_DEBUGREG_H */
     7.1 --- a/xen/include/asm-x86/domain.h	Thu Nov 22 18:28:47 2007 +0000
     7.2 +++ b/xen/include/asm-x86/domain.h	Thu Nov 22 19:23:40 2007 +0000
     7.3 @@ -356,7 +356,7 @@ unsigned long pv_guest_cr4_fixup(unsigne
     7.4  
     7.5  /* Convert between guest-visible and real CR4 values. */
     7.6  #define pv_guest_cr4_to_real_cr4(c) \
     7.7 -    ((c) | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE)))
     7.8 +    (((c) | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE))) & ~X86_CR4_DE)
     7.9  #define real_cr4_to_pv_guest_cr4(c) \
    7.10      ((c) & ~(X86_CR4_PGE | X86_CR4_PSE))
    7.11