ia64/xen-unstable

changeset 15735:9ef1c3e6c48e

hvm: Handle hw_cr[] array a bit more sanely.
SVM for the most part does not need to use it at all, and this makes
the code clearer.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Aug 08 17:28:13 2007 +0100 (2007-08-08)
parents 25e5c1b9faad
children 484848f240e8
files xen/arch/x86/hvm/hvm.c
      xen/arch/x86/hvm/svm/svm.c
      xen/arch/x86/hvm/svm/vmcb.c
      xen/arch/x86/hvm/vmx/vmcs.c
      xen/arch/x86/hvm/vmx/vmx.c
      xen/include/asm-x86/hvm/vcpu.h
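
The conceptual pivot of this changeset is the vendor hook behind hvm_update_guest_cr(). A minimal sketch of that dispatch, assuming the thin hvm_funcs wrapper as it appears in xen/include/asm-x86/hvm/hvm.h of this era:

    /*
     * Sketch (not part of this patch): the dispatch the patch pivots on.
     * Common code stores the guest-visible CR value and lets the vendor
     * hook derive whatever the hardware needs to see.
     */
    static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
    {
        /* Resolves to vmx_update_guest_cr() or svm_update_guest_cr(). */
        hvm_funcs.update_guest_cr(v, cr);
    }

Everything below moves hw_cr[] derivation out of common code and behind this hook: VMX keeps hw_cr[0]/hw_cr[4] as a cache of the VMCS contents, while SVM writes the VMCB directly and retains only hw_cr[3].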
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Wed Aug 08 16:09:17 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Aug 08 17:28:13 2007 +0100
     1.3 @@ -596,9 +596,6 @@ int hvm_set_cr0(unsigned long value)
     1.4      }
     1.5  
     1.6      v->arch.hvm_vcpu.guest_cr[0] = value;
     1.7 -    v->arch.hvm_vcpu.hw_cr[0] = value;
     1.8 -    if ( !paging_mode_hap(v->domain) )
     1.9 -        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_PG | X86_CR0_WP;
    1.10      hvm_update_guest_cr(v, 0);
    1.11  
    1.12      if ( (value ^ old_value) & X86_CR0_PG )
    1.13 @@ -672,10 +669,6 @@ int hvm_set_cr4(unsigned long value)
    1.14  
    1.15      old_cr = v->arch.hvm_vcpu.guest_cr[4];
    1.16      v->arch.hvm_vcpu.guest_cr[4] = value;
    1.17 -    v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
    1.18 -    if ( paging_mode_hap(v->domain) )
    1.19 -        v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
    1.20 -    v->arch.hvm_vcpu.hw_cr[4] |= value;
    1.21      hvm_update_guest_cr(v, 4);
    1.22    
    1.23      /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
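
With the two hunks above applied, hvm_set_cr0() and hvm_set_cr4() record only the guest-visible value and leave hardware-register derivation to the vendor hook. A condensed sketch of the resulting shape (the _sketch name and explicit vcpu parameter are illustrative; the real functions' validity checks, EFER/LMA handling and error paths are omitted):

    /* hvm_set_cr4() follows the same pattern with guest_cr[4]. */
    static int hvm_set_cr0_sketch(struct vcpu *v, unsigned long value)
    {
        unsigned long old_value = v->arch.hvm_vcpu.guest_cr[0];

        v->arch.hvm_vcpu.guest_cr[0] = value;  /* guest-visible value only */
        hvm_update_guest_cr(v, 0);             /* vendor hook derives hw CR0 */

        if ( (value ^ old_value) & X86_CR0_PG )
            paging_update_paging_modes(v);     /* body elided in the diff;
                                                  paging mode is recomputed here */
        return 1;
    }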
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Aug 08 16:09:17 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Aug 08 17:28:13 2007 +0100
     2.3 @@ -59,8 +59,9 @@ int inst_copy_from_guest(unsigned char *
     2.4                           int inst_len);
     2.5  asmlinkage void do_IRQ(struct cpu_user_regs *);
     2.6  
     2.7 -static int svm_reset_to_realmode(struct vcpu *v,
     2.8 -                                 struct cpu_user_regs *regs);
     2.9 +static int svm_reset_to_realmode(
    2.10 +    struct vcpu *v, struct cpu_user_regs *regs);
    2.11 +static void svm_update_guest_cr(struct vcpu *v, unsigned int cr);
    2.12  
    2.13  /* va of hardware host save area     */
    2.14  static void *hsa[NR_CPUS] __read_mostly;
    2.15 @@ -343,48 +344,26 @@ int svm_vmcb_restore(struct vcpu *v, str
    2.16      vmcb->rsp    = c->rsp;
    2.17      vmcb->rflags = c->rflags;
    2.18  
    2.19 -    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
    2.20 -    vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] = 
    2.21 -        c->cr0 | X86_CR0_WP | X86_CR0_ET | X86_CR0_PG;
    2.22 +    v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
    2.23 +    svm_update_guest_cr(v, 0);
    2.24  
    2.25      v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
    2.26 +    svm_update_guest_cr(v, 2);
    2.27  
    2.28 +    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
    2.29 +    svm_update_guest_cr(v, 4);
    2.30 +    
    2.31  #ifdef HVM_DEBUG_SUSPEND
    2.32      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
    2.33 -           __func__,
    2.34 -            c->cr3,
    2.35 -            c->cr0,
    2.36 -            c->cr4);
    2.37 +           __func__, c->cr3, c->cr0, c->cr4);
    2.38  #endif
    2.39  
    2.40 -    if ( !hvm_paging_enabled(v) ) 
    2.41 -    {
    2.42 -        printk("%s: paging not enabled.\n", __func__);
    2.43 -        goto skip_cr3;
    2.44 -    }
    2.45 -
    2.46 -    if ( c->cr3 == v->arch.hvm_vcpu.guest_cr[3] ) 
    2.47 +    if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
    2.48      {
    2.49 -        /*
    2.50 -         * This is simple TLB flush, implying the guest has
    2.51 -         * removed some translation or changed page attributes.
    2.52 -         * We simply invalidate the shadow.
    2.53 -         */
    2.54 -        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
    2.55 -        if ( mfn != pagetable_get_pfn(v->arch.guest_table) ) 
    2.56 -            goto bad_cr3;
    2.57 -    } 
    2.58 -    else 
    2.59 -    {
    2.60 -        /*
    2.61 -         * If different, make a shadow. Check if the PDBR is valid
    2.62 -         * first.
    2.63 -         */
    2.64          HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64, c->cr3);
    2.65          mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
    2.66          if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) ) 
    2.67              goto bad_cr3;
    2.68 -
    2.69          old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
    2.70          v->arch.guest_table = pagetable_from_pfn(mfn);
    2.71          if (old_base_mfn)
    2.72 @@ -392,10 +371,6 @@ int svm_vmcb_restore(struct vcpu *v, str
    2.73          v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
    2.74      }
    2.75  
    2.76 - skip_cr3:
    2.77 -    vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] = c->cr4 | HVM_CR4_HOST_MASK;
    2.78 -    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
    2.79 -    
    2.80      vmcb->idtr.limit = c->idtr_limit;
    2.81      vmcb->idtr.base  = c->idtr_base;
    2.82  
    2.83 @@ -449,10 +424,6 @@ int svm_vmcb_restore(struct vcpu *v, str
    2.84  
    2.85      if ( paging_mode_hap(v->domain) )
    2.86      {
    2.87 -        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] = v->arch.hvm_vcpu.guest_cr[0];
    2.88 -        vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] =
    2.89 -            v->arch.hvm_vcpu.guest_cr[4] | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
    2.90 -        vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3] = c->cr3;
    2.91          vmcb->np_enable = 1;
    2.92          vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
    2.93          vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
    2.94 @@ -586,17 +557,22 @@ static void svm_update_guest_cr(struct v
    2.95      switch ( cr )
    2.96      {
    2.97      case 0:
    2.98 -        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0];
    2.99 +        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
   2.100 +        if ( !paging_mode_hap(v->domain) )
   2.101 +            vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
   2.102          break;
   2.103      case 2:
   2.104 -        vmcb->cr2 = v->arch.hvm_vcpu.hw_cr[2];
   2.105 +        vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2];
   2.106          break;
   2.107      case 3:
   2.108          vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
   2.109          svm_asid_inv_asid(v);
   2.110          break;
   2.111      case 4:
   2.112 -        vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4];
   2.113 +        vmcb->cr4 = HVM_CR4_HOST_MASK;
   2.114 +        if ( paging_mode_hap(v->domain) )
   2.115 +            vmcb->cr4 &= ~X86_CR4_PAE;
   2.116 +        vmcb->cr4 |= v->arch.hvm_vcpu.guest_cr[4];
   2.117          break;
   2.118      default:
   2.119          BUG();
   2.120 @@ -724,7 +700,7 @@ static void svm_stts(struct vcpu *v)
   2.121      if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
   2.122      {
   2.123          v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
   2.124 -        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
   2.125 +        vmcb->cr0 |= X86_CR0_TS;
   2.126      }
   2.127  }
   2.128  
   2.129 @@ -1045,7 +1021,7 @@ static void svm_do_no_device_fault(struc
   2.130      vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
   2.131  
   2.132      if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
   2.133 -        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
   2.134 +        vmcb->cr0 &= ~X86_CR0_TS;
   2.135  }
   2.136  
   2.137  /* Reserved bits ECX: [31:14], [12:4], [2:1]*/
   2.138 @@ -1774,7 +1750,7 @@ static void svm_cr_access(
   2.139          /* TS being cleared means that it's time to restore fpu state. */
   2.140          setup_fpu(current);
   2.141          vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
   2.142 -        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS; /* clear TS */
   2.143 +        vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
   2.144          v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
   2.145          break;
   2.146  
   2.147 @@ -2085,22 +2061,16 @@ static int svm_reset_to_realmode(struct 
   2.148  
   2.149      memset(regs, 0, sizeof(struct cpu_user_regs));
   2.150  
   2.151 -    vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] =
   2.152 -        X86_CR0_ET | X86_CR0_PG | X86_CR0_WP;
   2.153      v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
   2.154 -
   2.155 -    vmcb->cr2 = 0;
   2.156 -    vmcb->efer = EFER_SVME;
   2.157 +    svm_update_guest_cr(v, 0);
   2.158  
   2.159 -    vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
   2.160 -    v->arch.hvm_vcpu.guest_cr[4] = 0;
   2.161 +    v->arch.hvm_vcpu.guest_cr[2] = 0;
   2.162 +    svm_update_guest_cr(v, 2);
   2.163  
   2.164 -    if ( paging_mode_hap(v->domain) )
   2.165 -    {
   2.166 -        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] = v->arch.hvm_vcpu.guest_cr[0];
   2.167 -        vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] =
   2.168 -            v->arch.hvm_vcpu.guest_cr[4] | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
   2.169 -    }
   2.170 +    v->arch.hvm_vcpu.guest_cr[4] = 0;
   2.171 +    svm_update_guest_cr(v, 4);
   2.172 +
   2.173 +    vmcb->efer = EFER_SVME;
   2.174  
   2.175      /* This will jump to ROMBIOS */
   2.176      vmcb->rip = 0xFFF0;
   2.177 @@ -2231,7 +2201,7 @@ asmlinkage void svm_vmexit_handler(struc
   2.178          unsigned long va;
   2.179          va = vmcb->exitinfo2;
   2.180          regs->error_code = vmcb->exitinfo1;
   2.181 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, 
   2.182 +        HVM_DBG_LOG(DBG_LEVEL_VMMU,
   2.183                      "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
   2.184                      (unsigned long)regs->eax, (unsigned long)regs->ebx,
   2.185                      (unsigned long)regs->ecx, (unsigned long)regs->edx,
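
Assembled from the svm_update_guest_cr() hunk above, the complete post-patch function reads as follows (the vmcb local is assumed from the unchanged function prologue):

    static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
    {
        struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

        switch ( cr )
        {
        case 0:
            vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
            if ( !paging_mode_hap(v->domain) )
                vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP; /* shadow: paging always on */
            break;
        case 2:
            vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2];
            break;
        case 3:
            vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];    /* sole remaining hw_cr[] use */
            svm_asid_inv_asid(v);
            break;
        case 4:
            vmcb->cr4 = HVM_CR4_HOST_MASK;
            if ( paging_mode_hap(v->domain) )
                vmcb->cr4 &= ~X86_CR4_PAE;            /* NPT: guest owns PAE */
            vmcb->cr4 |= v->arch.hvm_vcpu.guest_cr[4];
            break;
        default:
            BUG();
        }
    }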
     3.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Wed Aug 08 16:09:17 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Wed Aug 08 17:28:13 2007 +0100
     3.3 @@ -216,28 +216,19 @@ static int construct_vmcb(struct vcpu *v
     3.4      vmcb->tr.base = 0;
     3.5      vmcb->tr.limit = 0xff;
     3.6  
     3.7 -    /* Guest CR0. */
     3.8 -    vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] = read_cr0();
     3.9 -    v->arch.hvm_vcpu.guest_cr[0] =
    3.10 -        v->arch.hvm_vcpu.hw_cr[0] & ~(X86_CR0_PG | X86_CR0_TS);
    3.11 +    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_TS;
    3.12 +    hvm_update_guest_cr(v, 0);
    3.13  
    3.14 -    /* Guest CR4. */
    3.15 -    v->arch.hvm_vcpu.guest_cr[4] =
    3.16 -        read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
    3.17 -    vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] =
    3.18 -        v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK;
    3.19 +    v->arch.hvm_vcpu.guest_cr[4] = 0;
    3.20 +    hvm_update_guest_cr(v, 4);
    3.21  
    3.22      paging_update_paging_modes(v);
    3.23 -    vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3]; 
    3.24  
    3.25      if ( paging_mode_hap(v->domain) )
    3.26      {
    3.27 -        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] = v->arch.hvm_vcpu.guest_cr[0];
    3.28          vmcb->np_enable = 1; /* enable nested paging */
    3.29          vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
    3.30          vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
    3.31 -        vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] = v->arch.hvm_vcpu.guest_cr[4] =
    3.32 -            HVM_CR4_HOST_MASK & ~X86_CR4_PAE;
    3.33          vmcb->exception_intercepts = HVM_TRAP_MASK;
    3.34  
    3.35          /* No point in intercepting CR3/4 reads, because the hardware 
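
Tracing the new construct_vmcb() through svm_update_guest_cr()'s case 0 gives the initial hardware CR0 directly. A worked expansion, with the standard x86 bit masks shown for illustration:

    /*
     * Worked example (illustrative): the initial hardware CR0 a fresh SVM
     * vCPU gets from the code above. Masks: PE=0x00000001, TS=0x00000008,
     * WP=0x00010000, PG=0x80000000.
     */
    unsigned long guest_cr0  = X86_CR0_PE | X86_CR0_TS;             /* 0x00000009 */
    unsigned long cr0_shadow = guest_cr0 | X86_CR0_PG | X86_CR0_WP; /* 0x80010009 */
    unsigned long cr0_hap    = guest_cr0;               /* 0x00000009: unpaged */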
     4.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Aug 08 16:09:17 2007 +0100
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Aug 08 17:28:13 2007 +0100
     4.3 @@ -315,34 +315,69 @@ void vmx_cpu_down(void)
     4.4      local_irq_restore(flags);
     4.5  }
     4.6  
     4.7 +struct foreign_vmcs {
     4.8 +    struct vcpu *v;
     4.9 +    unsigned int count;
    4.10 +};
    4.11 +static DEFINE_PER_CPU(struct foreign_vmcs, foreign_vmcs);
    4.12 +
    4.13  void vmx_vmcs_enter(struct vcpu *v)
    4.14  {
    4.15 +    struct foreign_vmcs *fv;
    4.16 +
    4.17      /*
    4.18       * NB. We must *always* run an HVM VCPU on its own VMCS, except for
    4.19       * vmx_vmcs_enter/exit critical regions.
    4.20       */
    4.21 -    if ( v == current )
    4.22 +    if ( likely(v == current) )
    4.23          return;
    4.24  
    4.25 -    vcpu_pause(v);
    4.26 -    spin_lock(&v->arch.hvm_vmx.vmcs_lock);
    4.27 +    fv = &this_cpu(foreign_vmcs);
    4.28  
    4.29 -    vmx_clear_vmcs(v);
    4.30 -    vmx_load_vmcs(v);
    4.31 +    if ( fv->v == v )
    4.32 +    {
    4.33 +        BUG_ON(fv->count == 0);
    4.34 +    }
    4.35 +    else
    4.36 +    {
    4.37 +        BUG_ON(fv->v != NULL);
    4.38 +        BUG_ON(fv->count != 0);
    4.39 +
    4.40 +        vcpu_pause(v);
    4.41 +        spin_lock(&v->arch.hvm_vmx.vmcs_lock);
    4.42 +
    4.43 +        vmx_clear_vmcs(v);
    4.44 +        vmx_load_vmcs(v);
    4.45 +
    4.46 +        fv->v = v;
    4.47 +    }
    4.48 +
    4.49 +    fv->count++;
    4.50  }
    4.51  
    4.52  void vmx_vmcs_exit(struct vcpu *v)
    4.53  {
    4.54 -    if ( v == current )
    4.55 +    struct foreign_vmcs *fv;
    4.56 +
    4.57 +    if ( likely(v == current) )
    4.58          return;
    4.59  
    4.60 -    /* Don't confuse vmx_do_resume (for @v or @current!) */
    4.61 -    vmx_clear_vmcs(v);
    4.62 -    if ( is_hvm_vcpu(current) )
    4.63 -        vmx_load_vmcs(current);
    4.64 +    fv = &this_cpu(foreign_vmcs);
    4.65 +    BUG_ON(fv->v != v);
    4.66 +    BUG_ON(fv->count == 0);
    4.67  
    4.68 -    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
    4.69 -    vcpu_unpause(v);
    4.70 +    if ( --fv->count == 0 )
    4.71 +    {
    4.72 +        /* Don't confuse vmx_do_resume (for @v or @current!) */
    4.73 +        vmx_clear_vmcs(v);
    4.74 +        if ( is_hvm_vcpu(current) )
    4.75 +            vmx_load_vmcs(current);
    4.76 +
    4.77 +        spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
    4.78 +        vcpu_unpause(v);
    4.79 +
    4.80 +        fv->v = NULL;
    4.81 +    }
    4.82  }
    4.83  
    4.84  struct xgt_desc {
    4.85 @@ -380,7 +415,6 @@ static void vmx_set_host_env(struct vcpu
    4.86  
    4.87  static void construct_vmcs(struct vcpu *v)
    4.88  {
    4.89 -    unsigned long cr0, cr4;
    4.90      union vmcs_arbytes arbytes;
    4.91  
    4.92      vmx_vmcs_enter(v);
    4.93 @@ -504,19 +538,11 @@ static void construct_vmcs(struct vcpu *
    4.94  
    4.95      __vmwrite(EXCEPTION_BITMAP, HVM_TRAP_MASK | (1U << TRAP_page_fault));
    4.96  
    4.97 -    /* Guest CR0. */
    4.98 -    cr0 = read_cr0();
    4.99 -    v->arch.hvm_vcpu.hw_cr[0] = cr0;
   4.100 -    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
   4.101 -    v->arch.hvm_vcpu.guest_cr[0] = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
   4.102 -    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
   4.103 +    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
   4.104 +    hvm_update_guest_cr(v, 0);
   4.105  
   4.106 -    /* Guest CR4. */
   4.107 -    cr4 = read_cr4();
   4.108 -    __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
   4.109 -    v->arch.hvm_vcpu.guest_cr[4] =
   4.110 -        cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
   4.111 -    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
   4.112 +    v->arch.hvm_vcpu.guest_cr[4] = 0;
   4.113 +    hvm_update_guest_cr(v, 4);
   4.114  
   4.115      if ( cpu_has_vmx_tpr_shadow )
   4.116      {
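
The foreign_vmcs bookkeeping above turns vmx_vmcs_enter()/vmx_vmcs_exit() into a per-CPU refcounted pair, so they may now nest on the same remote vCPU. A hypothetical caller illustrating the counting (not part of the patch):

    void inspect_foreign_vcpu(struct vcpu *v)
    {
        vmx_vmcs_enter(v);   /* count 0 -> 1: pause v, take lock, load VMCS */
        vmx_vmcs_enter(v);   /* count 1 -> 2: nested re-entry, now a no-op  */
        /* ... __vmread()/__vmwrite() against v's VMCS here ... */
        vmx_vmcs_exit(v);    /* count 2 -> 1: VMCS stays loaded             */
        vmx_vmcs_exit(v);    /* count 1 -> 0: clear VMCS, unlock, unpause v */
    }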
     5.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 08 16:09:17 2007 +0100
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 08 17:28:13 2007 +0100
     5.3 @@ -963,6 +963,9 @@ static void vmx_get_segment_register(str
     5.4      }
     5.5  
     5.6      reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
     5.7 +    /* Unusable flag is folded into Present flag. */
     5.8 +    if ( attr & (1u<<16) )
     5.9 +        reg->attr.fields.p = 0;
    5.10  }
    5.11  
    5.12  /* Make sure that xen intercepts any FP accesses from current */
    5.13 @@ -1062,7 +1065,9 @@ static void vmx_update_guest_cr(struct v
    5.14      switch ( cr )
    5.15      {
    5.16      case 0:
    5.17 -        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_PE | X86_CR0_NE;
    5.18 +        v->arch.hvm_vcpu.hw_cr[0] =
    5.19 +            v->arch.hvm_vcpu.guest_cr[0] |
    5.20 +            X86_CR0_PE | X86_CR0_NE | X86_CR0_PG | X86_CR0_WP;
    5.21          __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
    5.22          __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
    5.23          break;
    5.24 @@ -1073,6 +1078,8 @@ static void vmx_update_guest_cr(struct v
    5.25          __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
    5.26          break;
    5.27      case 4:
    5.28 +        v->arch.hvm_vcpu.hw_cr[4] =
    5.29 +            v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK;
    5.30          __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
    5.31          __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
    5.32          break;
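
Mirroring the SVM side, the two hunks above make vmx_update_guest_cr() derive hw_cr[0] and hw_cr[4] from guest_cr[] on every call, rather than patching a value precomputed in common code. Reconstructed from the hunks (cases 2 and 3 elided; the _sketch name is illustrative):

    static void vmx_update_guest_cr_sketch(struct vcpu *v, unsigned int cr)
    {
        switch ( cr )
        {
        case 0:
            v->arch.hvm_vcpu.hw_cr[0] =
                v->arch.hvm_vcpu.guest_cr[0] |
                X86_CR0_PE | X86_CR0_NE | X86_CR0_PG | X86_CR0_WP;
            __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
            __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
            break;
        case 4:
            v->arch.hvm_vcpu.hw_cr[4] =
                v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK;
            __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
            __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
            break;
        /* case 2, case 3: unchanged by this patch, elided here. */
        }
    }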
     6.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Wed Aug 08 16:09:17 2007 +0100
     6.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Wed Aug 08 17:28:13 2007 +0100
     6.3 @@ -33,7 +33,12 @@ struct hvm_vcpu {
     6.4      unsigned long       guest_cr[5];
     6.5      unsigned long       guest_efer;
     6.6  
     6.7 -    /* Processor-visible control-register values, while guest executes. */
     6.8 +    /*
     6.9 +     * Processor-visible control-register values, while guest executes.
    6.10 +     *  CR0, CR4: Used as a cache of VMCS contents by VMX only.
    6.11 +     *  CR1, CR2: Never used (guest_cr[2] is always processor-visible CR2).
    6.12 +     *  CR3:      Always used and kept up to date by paging subsystem.
    6.13 +     */
    6.14      unsigned long       hw_cr[5];
    6.15  
    6.16      struct hvm_io_op    io_op;
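
Restating the new vcpu.h comment as a quick reference (the hw_cr[3] line matches the case-3 handlers visible in the svm.c and vmx.c hunks):

    /*
     * hw_cr[] contract after this changeset:
     *   hw_cr[0], hw_cr[4] -- VMX only: cache of GUEST_CR0/GUEST_CR4 as
     *                         last written to the VMCS.
     *   hw_cr[1], hw_cr[2] -- never used; guest_cr[2] already holds the
     *                         processor-visible CR2.
     *   hw_cr[3]           -- both vendors: maintained by the paging
     *                         subsystem, consumed by the update hooks.
     */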