ia64/xen-unstable

changeset 15731:35337d5c83f9

Merge
author Tim Deegan <Tim.Deegan@xensource.com>
date Wed Aug 08 12:27:23 2007 +0100 (2007-08-08)
parents 123ad31e9c3b da2c7dab1a3a
children 00aa18fd722e
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Wed Aug 08 12:26:21 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Aug 08 12:27:23 2007 +0100
     1.3 @@ -520,6 +520,87 @@ void hvm_triple_fault(void)
     1.4      domain_shutdown(v->domain, SHUTDOWN_reboot);
     1.5  }
     1.6  
     1.7 +int hvm_set_cr3(unsigned long value)
     1.8 +{
     1.9 +    unsigned long old_base_mfn, mfn;
    1.10 +    struct vcpu *v = current;
    1.11 +
    1.12 +    if ( paging_mode_hap(v->domain) || !hvm_paging_enabled(v) )
    1.13 +    {
    1.14 +        /* Nothing to do. */
    1.15 +    }
    1.16 +    else if ( value == v->arch.hvm_vcpu.guest_cr[3] )
    1.17 +    {
    1.18 +        /* Shadow-mode TLB flush. Invalidate the shadow. */
    1.19 +        mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
    1.20 +        if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
    1.21 +            goto bad_cr3;
    1.22 +    }
    1.23 +    else 
    1.24 +    {
    1.25 +        /* Shadow-mode CR3 change. Check PDBR and then make a new shadow. */
    1.26 +        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
    1.27 +        mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
    1.28 +        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
    1.29 +            goto bad_cr3;
    1.30 +
    1.31 +        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
    1.32 +        v->arch.guest_table = pagetable_from_pfn(mfn);
    1.33 +
    1.34 +        if ( old_base_mfn )
    1.35 +            put_page(mfn_to_page(old_base_mfn));
    1.36 +
    1.37 +        HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
    1.38 +    }
    1.39 +
    1.40 +    v->arch.hvm_vcpu.guest_cr[3] = value;
    1.41 +    paging_update_cr3(v);
    1.42 +    return 1;
    1.43 +
    1.44 + bad_cr3:
    1.45 +    gdprintk(XENLOG_ERR, "Invalid CR3\n");
    1.46 +    domain_crash(v->domain);
    1.47 +    return 0;
    1.48 +}
    1.49 +
    1.50 +int hvm_set_cr4(unsigned long value)
    1.51 +{
    1.52 +    struct vcpu *v = current;
    1.53 +    unsigned long old_cr;
    1.54 +
    1.55 +    if ( value & HVM_CR4_GUEST_RESERVED_BITS )
    1.56 +    {
    1.57 +        HVM_DBG_LOG(DBG_LEVEL_1,
    1.58 +                    "Guest attempts to set reserved bit in CR4: %lx",
    1.59 +                    value);
    1.60 +        goto gpf;
    1.61 +    }
    1.62 +
    1.63 +    if ( !(value & X86_CR4_PAE) && hvm_long_mode_enabled(v) )
    1.64 +    {
    1.65 +        HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
    1.66 +                    "EFER.LMA is set");
    1.67 +        goto gpf;
    1.68 +    }
    1.69 +
    1.70 +    old_cr = v->arch.hvm_vcpu.guest_cr[4];
    1.71 +    v->arch.hvm_vcpu.guest_cr[4] = value;
    1.72 +    v->arch.hvm_vcpu.hw_cr[4] = value | HVM_CR4_HOST_MASK;
    1.73 +    if ( paging_mode_hap(v->domain) )
    1.74 +        v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
    1.75 +    hvm_update_guest_cr(v, 4);
    1.76 +  
    1.77 +    /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
    1.78 +    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
    1.79 +        paging_update_paging_modes(v);
    1.80 +
    1.81 +    return 1;
    1.82 +
    1.83 + gpf:
    1.84 +    hvm_inject_exception(TRAP_gp_fault, 0, 0);
    1.85 +    return 0;
    1.86 +}
    1.87 +
    1.88  /*
    1.89   * __hvm_copy():
    1.90   *  @buf  = hypervisor buffer
    1.91 @@ -668,7 +749,6 @@ typedef unsigned long hvm_hypercall_t(
    1.92  static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
    1.93      HYPERCALL(memory_op),
    1.94      [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
    1.95 -    HYPERCALL(multicall),
    1.96      HYPERCALL(xen_version),
    1.97      HYPERCALL(grant_table_op),
    1.98      HYPERCALL(event_channel_op),
    1.99 @@ -813,12 +893,6 @@ int hvm_do_hypercall(struct cpu_user_reg
   1.100              flush ? HVM_HCALL_invalidate : HVM_HCALL_completed);
   1.101  }
   1.102  
   1.103 -void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
   1.104 -{
   1.105 -    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
   1.106 -    hvm_funcs.update_guest_cr3(v);
   1.107 -}
   1.108 -
   1.109  static void hvm_latch_shinfo_size(struct domain *d)
   1.110  {
   1.111      /*
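Note on the hvm.c hunks above: hvm_set_cr4() calls hvm_update_guest_cr(v, 4), whose definition sits in a header outside the hunks shown here; it supersedes the removed hvm_update_guest_cr3(). By analogy with that removed wrapper and the .update_guest_cr hooks registered in the SVM and VMX function tables below, its presumed shape is roughly the following sketch (an assumption, not text from this changeset):

    /* Presumed replacement for hvm_update_guest_cr3(): a thin wrapper that
     * forwards to the per-vendor hook now registered as .update_guest_cr
     * in the SVM and VMX function tables later in this diff. */
    static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
    {
        hvm_funcs.update_guest_cr(v, cr);
    }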
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Aug 08 12:26:21 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Aug 08 12:27:23 2007 +0100
     2.3 @@ -78,7 +78,7 @@ static void svm_inject_exception(
     2.4      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     2.5  
     2.6      if ( trap == TRAP_page_fault )
     2.7 -        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_svm.cpu_cr2, error_code);
     2.8 +        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
     2.9      else
    2.10          HVMTRACE_2D(INJ_EXC, v, trap, error_code);
    2.11  
    2.12 @@ -97,55 +97,14 @@ static void svm_cpu_down(void)
    2.13      write_efer(read_efer() & ~EFER_SVME);
    2.14  }
    2.15  
    2.16 -#ifdef __x86_64__
    2.17 -
    2.18  static int svm_lme_is_set(struct vcpu *v)
    2.19  {
    2.20 -    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
    2.21 +#ifdef __x86_64__
    2.22 +    u64 guest_efer = v->arch.hvm_vcpu.guest_efer;
    2.23      return guest_efer & EFER_LME;
    2.24 -}
    2.25 -
    2.26 -static int svm_long_mode_enabled(struct vcpu *v)
    2.27 -{
    2.28 -    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
    2.29 -    return guest_efer & EFER_LMA;
    2.30 -}
    2.31 -
    2.32 -#else /* __i386__ */
    2.33 -
    2.34 -static int svm_lme_is_set(struct vcpu *v)
    2.35 -{ return 0; }
    2.36 -static int svm_long_mode_enabled(struct vcpu *v)
    2.37 -{ return 0; }
    2.38 -
    2.39 +#else
    2.40 +    return 0;
    2.41  #endif
    2.42 -
    2.43 -static int svm_cr4_pae_is_set(struct vcpu *v)
    2.44 -{
    2.45 -    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
    2.46 -    return guest_cr4 & X86_CR4_PAE;
    2.47 -}
    2.48 -
    2.49 -static int svm_paging_enabled(struct vcpu *v)
    2.50 -{
    2.51 -    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
    2.52 -    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
    2.53 -}
    2.54 -
    2.55 -static int svm_pae_enabled(struct vcpu *v)
    2.56 -{
    2.57 -    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
    2.58 -    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
    2.59 -}
    2.60 -
    2.61 -static int svm_nx_enabled(struct vcpu *v)
    2.62 -{
    2.63 -    return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
    2.64 -}
    2.65 -
    2.66 -static int svm_pgbit_test(struct vcpu *v)
    2.67 -{
    2.68 -    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
    2.69  }
    2.70  
    2.71  static void svm_store_cpu_guest_regs(
    2.72 @@ -165,10 +124,10 @@ static void svm_store_cpu_guest_regs(
    2.73      if ( crs != NULL )
    2.74      {
    2.75          /* Returning the guest's regs */
    2.76 -        crs[0] = v->arch.hvm_svm.cpu_shadow_cr0;
    2.77 -        crs[2] = v->arch.hvm_svm.cpu_cr2;
    2.78 -        crs[3] = v->arch.hvm_svm.cpu_cr3;
    2.79 -        crs[4] = v->arch.hvm_svm.cpu_shadow_cr4;
    2.80 +        crs[0] = v->arch.hvm_vcpu.guest_cr[0];
    2.81 +        crs[2] = v->arch.hvm_vcpu.guest_cr[2];
    2.82 +        crs[3] = v->arch.hvm_vcpu.guest_cr[3];
    2.83 +        crs[4] = v->arch.hvm_vcpu.guest_cr[4];
    2.84      }
    2.85  }
    2.86  
    2.87 @@ -202,7 +161,8 @@ static enum handler_return long_mode_do_
    2.88          if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
    2.89          {
    2.90              /* EFER.LME transition from 0 to 1. */
    2.91 -            if ( svm_paging_enabled(v) || !svm_cr4_pae_is_set(v) )
    2.92 +            if ( hvm_paging_enabled(v) ||
    2.93 +                 !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
    2.94              {
    2.95                  gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
    2.96                           "in paging mode or PAE bit is not set\n");
    2.97 @@ -212,7 +172,7 @@ static enum handler_return long_mode_do_
    2.98          else if ( !(msr_content & EFER_LME) && svm_lme_is_set(v) )
    2.99          {
    2.100              /* EFER.LME transition from 1 to 0. */
   2.101 -            if ( svm_paging_enabled(v) )
   2.102 +            if ( hvm_paging_enabled(v) )
   2.103              {
   2.104                  gdprintk(XENLOG_WARNING, 
   2.105                           "Trying to clear EFER.LME while paging enabled\n");
   2.106 @@ -220,9 +180,9 @@ static enum handler_return long_mode_do_
   2.107              }
   2.108          }
   2.109  
   2.110 -        v->arch.hvm_svm.cpu_shadow_efer = msr_content;
   2.111 +        v->arch.hvm_vcpu.guest_efer = msr_content;
   2.112          vmcb->efer = msr_content | EFER_SVME;
   2.113 -        if ( !svm_paging_enabled(v) )
   2.114 +        if ( !hvm_paging_enabled(v) )
   2.115              vmcb->efer &= ~(EFER_LME | EFER_LMA);
   2.116  
   2.117          break;
   2.118 @@ -297,10 +257,10 @@ int svm_vmcb_save(struct vcpu *v, struct
   2.119      c->rsp = vmcb->rsp;
   2.120      c->rflags = vmcb->rflags;
   2.121  
   2.122 -    c->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
   2.123 -    c->cr2 = v->arch.hvm_svm.cpu_cr2;
   2.124 -    c->cr3 = v->arch.hvm_svm.cpu_cr3;
   2.125 -    c->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
   2.126 +    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
   2.127 +    c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
   2.128 +    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
   2.129 +    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
   2.130  
   2.131  #ifdef HVM_DEBUG_SUSPEND
   2.132      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   2.133 @@ -383,10 +343,10 @@ int svm_vmcb_restore(struct vcpu *v, str
   2.134      vmcb->rsp    = c->rsp;
   2.135      vmcb->rflags = c->rflags;
   2.136  
   2.137 -    v->arch.hvm_svm.cpu_shadow_cr0 = c->cr0;
   2.138 +    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
   2.139      vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET | X86_CR0_PG;
   2.140  
   2.141 -    v->arch.hvm_svm.cpu_cr2 = c->cr2;
   2.142 +    v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
   2.143  
   2.144  #ifdef HVM_DEBUG_SUSPEND
   2.145      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   2.146 @@ -396,13 +356,13 @@ int svm_vmcb_restore(struct vcpu *v, str
   2.147              c->cr4);
   2.148  #endif
   2.149  
   2.150 -    if ( !svm_paging_enabled(v) ) 
   2.151 +    if ( !hvm_paging_enabled(v) ) 
   2.152      {
   2.153          printk("%s: paging not enabled.\n", __func__);
   2.154          goto skip_cr3;
   2.155      }
   2.156  
   2.157 -    if ( c->cr3 == v->arch.hvm_svm.cpu_cr3 ) 
   2.158 +    if ( c->cr3 == v->arch.hvm_vcpu.guest_cr[3] ) 
   2.159      {
   2.160          /*
   2.161           * This is simple TLB flush, implying the guest has
   2.162 @@ -428,12 +388,12 @@ int svm_vmcb_restore(struct vcpu *v, str
   2.163          v->arch.guest_table = pagetable_from_pfn(mfn);
   2.164          if (old_base_mfn)
   2.165               put_page(mfn_to_page(old_base_mfn));
   2.166 -        v->arch.hvm_svm.cpu_cr3 = c->cr3;
   2.167 +        v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
   2.168      }
   2.169  
   2.170   skip_cr3:
   2.171      vmcb->cr4 = c->cr4 | HVM_CR4_HOST_MASK;
   2.172 -    v->arch.hvm_svm.cpu_shadow_cr4 = c->cr4;
   2.173 +    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
   2.174      
   2.175      vmcb->idtr.limit = c->idtr_limit;
   2.176      vmcb->idtr.base  = c->idtr_base;
   2.177 @@ -488,8 +448,8 @@ int svm_vmcb_restore(struct vcpu *v, str
   2.178  
   2.179      if ( paging_mode_hap(v->domain) )
   2.180      {
   2.181 -        vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
   2.182 -        vmcb->cr4 = (v->arch.hvm_svm.cpu_shadow_cr4 |
   2.183 +        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
   2.184 +        vmcb->cr4 = (v->arch.hvm_vcpu.guest_cr[4] |
   2.185                       (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
   2.186          vmcb->cr3 = c->cr3;
   2.187          vmcb->np_enable = 1;
   2.188 @@ -521,7 +481,6 @@ int svm_vmcb_restore(struct vcpu *v, str
   2.189      }
   2.190  
   2.191      paging_update_paging_modes(v);
   2.192 -    svm_asid_g_update_paging(v);
   2.193  
   2.194      return 0;
   2.195   
   2.196 @@ -540,7 +499,7 @@ static void svm_save_cpu_state(struct vc
   2.197      data->msr_star         = vmcb->star;
   2.198      data->msr_cstar        = vmcb->cstar;
   2.199      data->msr_syscall_mask = vmcb->sfmask;
   2.200 -    data->msr_efer         = v->arch.hvm_svm.cpu_shadow_efer;
   2.201 +    data->msr_efer         = v->arch.hvm_vcpu.guest_efer;
   2.202      data->msr_flags        = -1ULL;
   2.203  
   2.204      data->tsc = hvm_get_guest_time(v);
   2.205 @@ -556,7 +515,7 @@ static void svm_load_cpu_state(struct vc
   2.206      vmcb->star       = data->msr_star;
   2.207      vmcb->cstar      = data->msr_cstar;
   2.208      vmcb->sfmask     = data->msr_syscall_mask;
   2.209 -    v->arch.hvm_svm.cpu_shadow_efer = data->msr_efer;
   2.210 +    v->arch.hvm_vcpu.guest_efer = data->msr_efer;
   2.211      vmcb->efer       = data->msr_efer | EFER_SVME;
   2.212      /* VMCB's EFER.LME isn't set unless we're actually in long mode
   2.213       * (see long_mode_do_msr_write()) */
   2.214 @@ -605,11 +564,11 @@ static int svm_guest_x86_mode(struct vcp
   2.215  {
   2.216      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.217  
   2.218 -    if ( unlikely(!(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PE)) )
   2.219 +    if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
   2.220          return 0;
   2.221      if ( unlikely(vmcb->rflags & X86_EFLAGS_VM) )
   2.222          return 1;
   2.223 -    if ( svm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
   2.224 +    if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
   2.225          return 8;
   2.226      return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
   2.227  }
   2.228 @@ -619,9 +578,20 @@ static void svm_update_host_cr3(struct v
   2.229      /* SVM doesn't have a HOST_CR3 equivalent to update. */
   2.230  }
   2.231  
   2.232 -static void svm_update_guest_cr3(struct vcpu *v)
   2.233 +static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
   2.234  {
   2.235 -    v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
   2.236 +    switch ( cr )
   2.237 +    {
   2.238 +    case 3:
   2.239 +        v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
   2.240 +        svm_asid_inv_asid(v);
   2.241 +        break;
   2.242 +    case 4:
   2.243 +        v->arch.hvm_svm.vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4];
   2.244 +        break;
   2.245 +    default:
   2.246 +        BUG();
   2.247 +    }
   2.248  }
   2.249  
   2.250  static void svm_flush_guest_tlbs(void)
   2.251 @@ -639,24 +609,6 @@ static void svm_update_vtpr(struct vcpu 
   2.252      vmcb->vintr.fields.tpr = value & 0x0f;
   2.253  }
   2.254  
   2.255 -static unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
   2.256 -{
   2.257 -    switch ( num )
   2.258 -    {
   2.259 -    case 0:
   2.260 -        return v->arch.hvm_svm.cpu_shadow_cr0;
   2.261 -    case 2:
   2.262 -        return v->arch.hvm_svm.cpu_cr2;
   2.263 -    case 3:
   2.264 -        return v->arch.hvm_svm.cpu_cr3;
   2.265 -    case 4:
   2.266 -        return v->arch.hvm_svm.cpu_shadow_cr4;
   2.267 -    default:
   2.268 -        BUG();
   2.269 -    }
   2.270 -    return 0;                   /* dummy */
   2.271 -}
   2.272 -
   2.273  static void svm_sync_vmcb(struct vcpu *v)
   2.274  {
   2.275      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
   2.276 @@ -674,7 +626,7 @@ static void svm_sync_vmcb(struct vcpu *v
   2.277  static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
   2.278  {
   2.279      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.280 -    int long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
   2.281 +    int long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);
   2.282  
   2.283      switch ( seg )
   2.284      {
   2.285 @@ -748,7 +700,7 @@ static void svm_stts(struct vcpu *v)
   2.286       * then this is not necessary: no FPU activity can occur until the guest 
   2.287       * clears CR0.TS, and we will initialise the FPU when that happens.
   2.288       */
   2.289 -    if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
   2.290 +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
   2.291      {
   2.292          v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
   2.293          vmcb->cr0 |= X86_CR0_TS;
   2.294 @@ -949,7 +901,7 @@ static void svm_hvm_inject_exception(
   2.295  {
   2.296      struct vcpu *v = current;
   2.297      if ( trapnr == TRAP_page_fault )
   2.298 -        v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_svm.cpu_cr2 = cr2;
   2.299 +        v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2] = cr2;
   2.300      svm_inject_exception(v, trapnr, (errcode != -1), errcode);
   2.301  }
   2.302  
   2.303 @@ -970,17 +922,12 @@ static struct hvm_function_table svm_fun
   2.304      .load_cpu_guest_regs  = svm_load_cpu_guest_regs,
   2.305      .save_cpu_ctxt        = svm_save_vmcb_ctxt,
   2.306      .load_cpu_ctxt        = svm_load_vmcb_ctxt,
   2.307 -    .paging_enabled       = svm_paging_enabled,
   2.308 -    .long_mode_enabled    = svm_long_mode_enabled,
   2.309 -    .pae_enabled          = svm_pae_enabled,
   2.310 -    .nx_enabled           = svm_nx_enabled,
   2.311      .interrupts_enabled   = svm_interrupts_enabled,
   2.312      .guest_x86_mode       = svm_guest_x86_mode,
   2.313 -    .get_guest_ctrl_reg   = svm_get_ctrl_reg,
   2.314      .get_segment_base     = svm_get_segment_base,
   2.315      .get_segment_register = svm_get_segment_register,
   2.316      .update_host_cr3      = svm_update_host_cr3,
   2.317 -    .update_guest_cr3     = svm_update_guest_cr3,
   2.318 +    .update_guest_cr      = svm_update_guest_cr,
   2.319      .flush_guest_tlbs     = svm_flush_guest_tlbs,
   2.320      .update_vtpr          = svm_update_vtpr,
   2.321      .stts                 = svm_stts,
   2.322 @@ -1075,7 +1022,7 @@ static void svm_do_no_device_fault(struc
   2.323      setup_fpu(v);    
   2.324      vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
   2.325  
   2.326 -    if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
   2.327 +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
   2.328          vmcb->cr0 &= ~X86_CR0_TS;
   2.329  }
   2.330  
   2.331 @@ -1347,7 +1294,7 @@ static int svm_get_io_address(
   2.332      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.333  
   2.334      /* If we're in long mode, don't check the segment presence & limit */
   2.335 -    long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
   2.336 +    long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);
   2.337  
   2.338      /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit. 
   2.339       * l field combined with EFER_LMA says whether it's 16 or 64 bit. 
   2.340 @@ -1650,7 +1597,7 @@ static void svm_io_instruction(struct vc
   2.341  static int svm_set_cr0(unsigned long value)
   2.342  {
   2.343      struct vcpu *v = current;
   2.344 -    unsigned long mfn, old_value = v->arch.hvm_svm.cpu_shadow_cr0;
   2.345 +    unsigned long mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
   2.346      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.347      unsigned long old_base_mfn;
   2.348    
   2.349 @@ -1687,25 +1634,25 @@ static int svm_set_cr0(unsigned long val
   2.350      {
   2.351          if ( svm_lme_is_set(v) )
   2.352          {
   2.353 -            if ( !svm_cr4_pae_is_set(v) )
   2.354 +            if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
   2.355              {
   2.356                  HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
   2.357                  svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   2.358                  return 0;
   2.359              }
   2.360              HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode");
   2.361 -            v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
   2.362 +            v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
   2.363              vmcb->efer |= EFER_LMA | EFER_LME;
   2.364          }
   2.365  
   2.366          if ( !paging_mode_hap(v->domain) )
   2.367          {
   2.368              /* The guest CR3 must be pointing to the guest physical. */
   2.369 -            mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
   2.370 +            mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
   2.371              if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain))
   2.372              {
   2.373                  gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
   2.374 -                         v->arch.hvm_svm.cpu_cr3, mfn);
   2.375 +                         v->arch.hvm_vcpu.guest_cr[3], mfn);
   2.376                  domain_crash(v->domain);
   2.377                  return 0;
   2.378              }
   2.379 @@ -1717,42 +1664,36 @@ static int svm_set_cr0(unsigned long val
   2.380                  put_page(mfn_to_page(old_base_mfn));
   2.381  
   2.382              HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
   2.383 -                        v->arch.hvm_vmx.cpu_cr3, mfn);
   2.384 +                        v->arch.hvm_vcpu.guest_cr[3], mfn);
   2.385          }
   2.386      }
   2.387      else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
   2.388      {
   2.389          /* When CR0.PG is cleared, LMA is cleared immediately. */
   2.390 -        if ( svm_long_mode_enabled(v) )
   2.391 +        if ( hvm_long_mode_enabled(v) )
   2.392          {
   2.393              vmcb->efer &= ~(EFER_LME | EFER_LMA);
   2.394 -            v->arch.hvm_svm.cpu_shadow_efer &= ~EFER_LMA;
   2.395 +            v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
   2.396          }
   2.397  
   2.398 -        if ( !paging_mode_hap(v->domain) && v->arch.hvm_svm.cpu_cr3 )
   2.399 +        if ( !paging_mode_hap(v->domain) && v->arch.hvm_vcpu.guest_cr[3] )
   2.400          {
   2.401              put_page(mfn_to_page(get_mfn_from_gpfn(
   2.402 -                v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
   2.403 +                v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
   2.404              v->arch.guest_table = pagetable_null();
   2.405          }
   2.406      }
   2.407  
   2.408 -    vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0 = value;
   2.409 +    vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0] = value;
   2.410      if ( !paging_mode_hap(v->domain) )
   2.411          vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
   2.412  
   2.413      if ( (value ^ old_value) & X86_CR0_PG )
   2.414 -    {
   2.415          paging_update_paging_modes(v);
   2.416 -        svm_asid_g_update_paging(v);
   2.417 -    }
   2.418  
   2.419      return 1;
   2.420  }
   2.421  
   2.422 -/*
   2.423 - * Read from control registers. CR0 and CR4 are read from the shadow.
   2.424 - */
   2.425  static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
   2.426  {
   2.427      unsigned long value = 0;
   2.428 @@ -1763,16 +1704,16 @@ static void mov_from_cr(int cr, int gp, 
   2.429      switch ( cr )
   2.430      {
   2.431      case 0:
   2.432 -        value = v->arch.hvm_svm.cpu_shadow_cr0;
   2.433 +        value = v->arch.hvm_vcpu.guest_cr[0];
   2.434          break;
   2.435      case 2:
   2.436          value = vmcb->cr2;
   2.437          break;
   2.438      case 3:
   2.439 -        value = (unsigned long)v->arch.hvm_svm.cpu_cr3;
   2.440 +        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
   2.441          break;
   2.442      case 4:
   2.443 -        value = (unsigned long)v->arch.hvm_svm.cpu_shadow_cr4;
   2.444 +        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[4];
   2.445          break;
   2.446      case 8:
   2.447          value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
   2.448 @@ -1791,13 +1732,9 @@ static void mov_from_cr(int cr, int gp, 
   2.449      HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx", cr, value);
   2.450  }
   2.451  
   2.452 -
   2.453 -/*
   2.454 - * Write to control registers
   2.455 - */
   2.456  static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
   2.457  {
   2.458 -    unsigned long value, old_cr, old_base_mfn, mfn;
   2.459 +    unsigned long value;
   2.460      struct vcpu *v = current;
   2.461      struct vlapic *vlapic = vcpu_vlapic(v);
   2.462      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.463 @@ -1815,131 +1752,10 @@ static int mov_to_cr(int gpreg, int cr, 
   2.464          return svm_set_cr0(value);
   2.465  
   2.466      case 3:
   2.467 -        if ( paging_mode_hap(v->domain) )
   2.468 -        {
   2.469 -            vmcb->cr3 = v->arch.hvm_svm.cpu_cr3 = value;
   2.470 -            break;
   2.471 -        }
   2.472 -
   2.473 -        /* If paging is not enabled yet, simply copy the value to CR3. */
   2.474 -        if ( !svm_paging_enabled(v) )
   2.475 -        {
   2.476 -            v->arch.hvm_svm.cpu_cr3 = value;
   2.477 -            break;
   2.478 -        }
   2.479 -
   2.480 -        /* We make a new one if the shadow does not exist. */
   2.481 -        if ( value == v->arch.hvm_svm.cpu_cr3 )
   2.482 -        {
   2.483 -            /* 
   2.484 -             * This is simple TLB flush, implying the guest has 
   2.485 -             * removed some translation or changed page attributes.
   2.486 -             * We simply invalidate the shadow.
   2.487 -             */
   2.488 -            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
   2.489 -            if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
   2.490 -                goto bad_cr3;
   2.491 -            paging_update_cr3(v);
   2.492 -            /* signal paging update to ASID handler */
   2.493 -            svm_asid_g_mov_to_cr3 (v);
   2.494 -        }
   2.495 -        else 
   2.496 -        {
   2.497 -            /*
   2.498 -             * If different, make a shadow. Check if the PDBR is valid
   2.499 -             * first.
   2.500 -             */
   2.501 -            HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
   2.502 -            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
   2.503 -            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
   2.504 -                goto bad_cr3;
   2.505 -
   2.506 -            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   2.507 -            v->arch.guest_table = pagetable_from_pfn(mfn);
   2.508 -
   2.509 -            if ( old_base_mfn )
   2.510 -                put_page(mfn_to_page(old_base_mfn));
   2.511 -
   2.512 -            v->arch.hvm_svm.cpu_cr3 = value;
   2.513 -            update_cr3(v);
   2.514 -            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
   2.515 -            /* signal paging update to ASID handler */
   2.516 -            svm_asid_g_mov_to_cr3 (v);
   2.517 -        }
   2.518 -        break;
   2.519 -
   2.520 -    case 4: /* CR4 */
   2.521 -        if ( value & HVM_CR4_GUEST_RESERVED_BITS )
   2.522 -        {
   2.523 -            HVM_DBG_LOG(DBG_LEVEL_1,
   2.524 -                        "Guest attempts to set reserved bit in CR4: %lx",
   2.525 -                        value);
   2.526 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   2.527 -            break;
   2.528 -        }
   2.529 +        return hvm_set_cr3(value);
   2.530  
   2.531 -        if ( paging_mode_hap(v->domain) )
   2.532 -        {
   2.533 -            v->arch.hvm_svm.cpu_shadow_cr4 = value;
   2.534 -            vmcb->cr4 = value | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
   2.535 -            paging_update_paging_modes(v);
   2.536 -            /* signal paging update to ASID handler */
   2.537 -            svm_asid_g_update_paging (v);
   2.538 -            break;
   2.539 -        }
   2.540 -
   2.541 -        old_cr = v->arch.hvm_svm.cpu_shadow_cr4;
   2.542 -        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
   2.543 -        {
   2.544 -            if ( svm_pgbit_test(v) )
   2.545 -            {
   2.546 -#if CONFIG_PAGING_LEVELS >= 3
   2.547 -                /* The guest is a 32-bit PAE guest. */
   2.548 -                unsigned long mfn, old_base_mfn;
   2.549 -                mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
   2.550 -                if ( !mfn_valid(mfn) || 
   2.551 -                     !get_page(mfn_to_page(mfn), v->domain) )
   2.552 -                    goto bad_cr3;
   2.553 -
   2.554 -                /*
   2.555 -                 * Now arch.guest_table points to machine physical.
   2.556 -                 */
   2.557 -                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   2.558 -                v->arch.guest_table = pagetable_from_pfn(mfn);
   2.559 -                if ( old_base_mfn )
   2.560 -                    put_page(mfn_to_page(old_base_mfn));
   2.561 -                paging_update_paging_modes(v);
   2.562 -                /* signal paging update to ASID handler */
   2.563 -                svm_asid_g_update_paging (v);
   2.564 -
   2.565 -                HVM_DBG_LOG(DBG_LEVEL_VMMU, 
   2.566 -                            "Update CR3 value = %lx, mfn = %lx",
   2.567 -                            v->arch.hvm_svm.cpu_cr3, mfn);
   2.568 -#endif
   2.569 -            }
   2.570 -        } 
   2.571 -        else if ( !(value & X86_CR4_PAE) )
   2.572 -        {
   2.573 -            if ( svm_long_mode_enabled(v) )
   2.574 -            {
   2.575 -                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   2.576 -            }
   2.577 -        }
   2.578 -
   2.579 -        v->arch.hvm_svm.cpu_shadow_cr4 = value;
   2.580 -        vmcb->cr4 = value | HVM_CR4_HOST_MASK;
   2.581 -  
   2.582 -        /*
   2.583 -         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
   2.584 -         * all TLB entries except global entries.
   2.585 -         */
   2.586 -        if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
   2.587 -        {
   2.588 -            paging_update_paging_modes(v);
   2.589 -            /* signal paging update to ASID handler */
   2.590 -            svm_asid_g_update_paging (v);
   2.591 -        }
   2.592 -        break;
   2.593 +    case 4:
   2.594 +        return hvm_set_cr4(value);
   2.595  
   2.596      case 8:
   2.597          vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
   2.598 @@ -1953,19 +1769,11 @@ static int mov_to_cr(int gpreg, int cr, 
   2.599      }
   2.600  
   2.601      return 1;
   2.602 -
   2.603 - bad_cr3:
   2.604 -    gdprintk(XENLOG_ERR, "Invalid CR3\n");
   2.605 -    domain_crash(v->domain);
   2.606 -    return 0;
   2.607  }
   2.608  
   2.609 -
   2.610 -#define ARR_SIZE(x) (sizeof(x) / sizeof(x[0]))
   2.611 -
   2.612 -
   2.613 -static int svm_cr_access(struct vcpu *v, unsigned int cr, unsigned int type,
   2.614 -                         struct cpu_user_regs *regs)
   2.615 +static void svm_cr_access(
   2.616 +    struct vcpu *v, unsigned int cr, unsigned int type,
   2.617 +    struct cpu_user_regs *regs)
   2.618  {
   2.619      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.620      int inst_len = 0;
   2.621 @@ -1990,12 +1798,12 @@ static int svm_cr_access(struct vcpu *v,
   2.622      if ( type == TYPE_MOV_TO_CR )
   2.623      {
   2.624          inst_len = __get_instruction_length_from_list(
   2.625 -            v, list_a, ARR_SIZE(list_a), &buffer[index], &match);
   2.626 +            v, list_a, ARRAY_SIZE(list_a), &buffer[index], &match);
   2.627      }
   2.628      else /* type == TYPE_MOV_FROM_CR */
   2.629      {
   2.630          inst_len = __get_instruction_length_from_list(
   2.631 -            v, list_b, ARR_SIZE(list_b), &buffer[index], &match);
   2.632 +            v, list_b, ARRAY_SIZE(list_b), &buffer[index], &match);
   2.633      }
   2.634  
   2.635      ASSERT(inst_len > 0);
   2.636 @@ -2008,7 +1816,8 @@ static int svm_cr_access(struct vcpu *v,
   2.637  
   2.638      HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long) vmcb->rip);
   2.639  
   2.640 -    switch (match) 
   2.641 +    switch ( match )
   2.642 +
   2.643      {
   2.644      case INSTR_MOV2CR:
   2.645          gpreg = decode_src_reg(prefix, buffer[index+2]);
   2.646 @@ -2025,18 +1834,18 @@ static int svm_cr_access(struct vcpu *v,
   2.647          setup_fpu(current);
   2.648          vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
   2.649          vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
   2.650 -        v->arch.hvm_svm.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
   2.651 +        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
   2.652          break;
   2.653  
   2.654      case INSTR_LMSW:
   2.655          gpreg = decode_src_reg(prefix, buffer[index+2]);
   2.656          value = get_reg(gpreg, regs, vmcb) & 0xF;
   2.657 -        value = (v->arch.hvm_svm.cpu_shadow_cr0 & ~0xF) | value;
   2.658 +        value = (v->arch.hvm_vcpu.guest_cr[0] & ~0xF) | value;
   2.659          result = svm_set_cr0(value);
   2.660          break;
   2.661  
   2.662      case INSTR_SMSW:
   2.663 -        value = v->arch.hvm_svm.cpu_shadow_cr0 & 0xFFFF;
   2.664 +        value = v->arch.hvm_vcpu.guest_cr[0] & 0xFFFF;
   2.665          modrm = buffer[index+2];
   2.666          addr_size = svm_guest_x86_mode(v);
   2.667          if ( addr_size < 2 )
   2.668 @@ -2099,9 +1908,8 @@ static int svm_cr_access(struct vcpu *v,
   2.669  
   2.670      ASSERT(inst_len);
   2.671  
   2.672 -    __update_guest_eip(vmcb, inst_len);
   2.673 -    
   2.674 -    return result;
   2.675 +    if ( result )
   2.676 +        __update_guest_eip(vmcb, inst_len);
   2.677  }
   2.678  
   2.679  static void svm_do_msr_access(
   2.680 @@ -2129,7 +1937,7 @@ static void svm_do_msr_access(
   2.681              break;
   2.682  
   2.683          case MSR_EFER:
   2.684 -            msr_content = v->arch.hvm_svm.cpu_shadow_efer;
   2.685 +            msr_content = v->arch.hvm_vcpu.guest_efer;
   2.686              break;
   2.687  
   2.688          case MSR_K8_MC4_MISC: /* Threshold register */
   2.689 @@ -2319,8 +2127,7 @@ void svm_handle_invlpg(const short invlp
   2.690      HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
   2.691  
   2.692      paging_invlpg(v, g_vaddr);
   2.693 -    /* signal invplg to ASID handler */
   2.694 -    svm_asid_g_invlpg (v, g_vaddr);
   2.695 +    svm_asid_g_invlpg(v, g_vaddr);
   2.696  }
   2.697  
   2.698  
   2.699 @@ -2335,29 +2142,28 @@ static int svm_reset_to_realmode(struct 
   2.700  {
   2.701      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.702  
   2.703 -    /* clear the vmcb and user regs */
   2.704      memset(regs, 0, sizeof(struct cpu_user_regs));
   2.705 -   
   2.706 -    /* VMCB State */
   2.707 +
   2.708      vmcb->cr0 = X86_CR0_ET | X86_CR0_PG | X86_CR0_WP;
   2.709 -    v->arch.hvm_svm.cpu_shadow_cr0 = X86_CR0_ET;
   2.710 +    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
   2.711  
   2.712      vmcb->cr2 = 0;
   2.713      vmcb->efer = EFER_SVME;
   2.714  
   2.715      vmcb->cr4 = HVM_CR4_HOST_MASK;
   2.716 -    v->arch.hvm_svm.cpu_shadow_cr4 = 0;
   2.717 +    v->arch.hvm_vcpu.guest_cr[4] = 0;
   2.718  
   2.719 -    if ( paging_mode_hap(v->domain) ) {
   2.720 -        vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
   2.721 -        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 |
   2.722 -                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
   2.723 +    if ( paging_mode_hap(v->domain) )
   2.724 +    {
   2.725 +        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
   2.726 +        vmcb->cr4 = (v->arch.hvm_vcpu.guest_cr[4] |
   2.727 +                     (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
   2.728      }
   2.729  
   2.730      /* This will jump to ROMBIOS */
   2.731      vmcb->rip = 0xFFF0;
   2.732  
   2.733 -    /* setup the segment registers and all their hidden states */
   2.734 +    /* Set up the segment registers and all their hidden states. */
   2.735      vmcb->cs.sel = 0xF000;
   2.736      vmcb->cs.attr.bytes = 0x089b;
   2.737      vmcb->cs.limit = 0xffff;
   2.738 @@ -2495,7 +2301,7 @@ asmlinkage void svm_vmexit_handler(struc
   2.739              break;
   2.740          }
   2.741  
   2.742 -        v->arch.hvm_svm.cpu_cr2 = vmcb->cr2 = va;
   2.743 +        v->arch.hvm_vcpu.guest_cr[2] = vmcb->cr2 = va;
   2.744          svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
   2.745          break;
   2.746      }
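The new svm_update_guest_cr() above reads the value from v->arch.hvm_vcpu.hw_cr[3]/hw_cr[4] rather than taking it as an argument, so callers in the common HVM and paging code are expected to publish the machine value first and then invoke the hook, replacing the removed hvm_update_guest_cr3(v, value) convention. A hedged sketch of that caller pattern (machine_cr3 is a hypothetical variable; the real callers are outside the hunks shown):

    /* Assumed caller pattern: store the machine CR3 in hw_cr[3], then ask
     * the vendor code to flush it into the VMCB (SVM) or VMCS (VMX). */
    v->arch.hvm_vcpu.hw_cr[3] = machine_cr3;   /* hypothetical value */
    hvm_update_guest_cr(v, 3);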
     3.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Wed Aug 08 12:26:21 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Wed Aug 08 12:27:23 2007 +0100
     3.3 @@ -111,7 +111,7 @@ static int construct_vmcb(struct vcpu *v
     3.4      svm_segment_attributes_t attrib;
     3.5  
      3.6      /* TLB control, and ASID assignment. */
     3.7 -    svm_asid_init_vcpu (v);
     3.8 +    svm_asid_init_vcpu(v);
     3.9  
    3.10      vmcb->general1_intercepts = 
    3.11          GENERAL1_INTERCEPT_INTR         | GENERAL1_INTERCEPT_NMI         |
    3.12 @@ -218,25 +218,24 @@ static int construct_vmcb(struct vcpu *v
    3.13  
    3.14      /* Guest CR0. */
    3.15      vmcb->cr0 = read_cr0();
    3.16 -    arch_svm->cpu_shadow_cr0 = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS);
    3.17 -    vmcb->cr0 |= X86_CR0_WP;
    3.18 +    v->arch.hvm_vcpu.guest_cr[0] = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS);
    3.19  
    3.20      /* Guest CR4. */
    3.21 -    arch_svm->cpu_shadow_cr4 =
    3.22 +    v->arch.hvm_vcpu.guest_cr[4] =
    3.23          read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
    3.24 -    vmcb->cr4 = arch_svm->cpu_shadow_cr4 | HVM_CR4_HOST_MASK;
    3.25 +    vmcb->cr4 = v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK;
    3.26  
    3.27      paging_update_paging_modes(v);
    3.28 -    vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
    3.29 +    vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3]; 
    3.30  
    3.31      if ( paging_mode_hap(v->domain) )
    3.32      {
    3.33 -        vmcb->cr0 = arch_svm->cpu_shadow_cr0;
    3.34 +        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
    3.35          vmcb->np_enable = 1; /* enable nested paging */
    3.36          vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
    3.37          vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
    3.38 -        vmcb->cr4 = arch_svm->cpu_shadow_cr4 =
    3.39 -                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
    3.40 +        vmcb->cr4 = v->arch.hvm_vcpu.guest_cr[4] =
    3.41 +            HVM_CR4_HOST_MASK & ~X86_CR4_PAE;
    3.42          vmcb->exception_intercepts = HVM_TRAP_MASK;
    3.43  
    3.44          /* No point in intercepting CR3/4 reads, because the hardware 
     4.1 --- a/xen/arch/x86/hvm/vioapic.c	Wed Aug 08 12:26:21 2007 +0100
     4.2 +++ b/xen/arch/x86/hvm/vioapic.c	Wed Aug 08 12:27:23 2007 +0100
     4.3 @@ -43,10 +43,6 @@
     4.4  /* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
     4.5  #define IRQ0_SPECIAL_ROUTING 1
     4.6  
     4.7 -#if defined(__ia64__)
     4.8 -#define opt_hvm_debug_level opt_vmx_debug_level
     4.9 -#endif
    4.10 -
    4.11  static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq);
    4.12  
    4.13  static unsigned long vioapic_read_indirect(struct hvm_hw_vioapic *vioapic,
     5.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Aug 08 12:26:21 2007 +0100
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Aug 08 12:27:23 2007 +0100
     5.3 @@ -506,17 +506,17 @@ static void construct_vmcs(struct vcpu *
     5.4  
     5.5      /* Guest CR0. */
     5.6      cr0 = read_cr0();
     5.7 -    v->arch.hvm_vmx.cpu_cr0 = cr0;
     5.8 -    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
     5.9 -    v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
    5.10 -    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
    5.11 +    v->arch.hvm_vcpu.hw_cr[0] = cr0;
    5.12 +    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
    5.13 +    v->arch.hvm_vcpu.guest_cr[0] = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
    5.14 +    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
    5.15  
    5.16      /* Guest CR4. */
    5.17      cr4 = read_cr4();
    5.18      __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
    5.19 -    v->arch.hvm_vmx.cpu_shadow_cr4 =
    5.20 +    v->arch.hvm_vcpu.guest_cr[4] =
    5.21          cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
    5.22 -    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
    5.23 +    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
    5.24  
    5.25      if ( cpu_has_vmx_tpr_shadow )
    5.26      {
     6.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 08 12:26:21 2007 +0100
     6.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 08 12:27:23 2007 +0100
     6.3 @@ -100,39 +100,11 @@ static void vmx_vcpu_destroy(struct vcpu
     6.4      vmx_destroy_vmcs(v);
     6.5  }
     6.6  
     6.7 -static int vmx_paging_enabled(struct vcpu *v)
     6.8 -{
     6.9 -    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    6.10 -    return (cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
    6.11 -}
    6.12 -
    6.13 -static int vmx_pgbit_test(struct vcpu *v)
    6.14 -{
    6.15 -    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    6.16 -    return cr0 & X86_CR0_PG;
    6.17 -}
    6.18 -
    6.19 -static int vmx_pae_enabled(struct vcpu *v)
    6.20 -{
    6.21 -    unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
    6.22 -    return vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE);
    6.23 -}
    6.24 -
    6.25 -static int vmx_nx_enabled(struct vcpu *v)
    6.26 -{
    6.27 -    return v->arch.hvm_vmx.efer & EFER_NX;
    6.28 -}
    6.29 -
    6.30  #ifdef __x86_64__
    6.31  
    6.32  static int vmx_lme_is_set(struct vcpu *v)
    6.33  {
    6.34 -    return v->arch.hvm_vmx.efer & EFER_LME;
    6.35 -}
    6.36 -
    6.37 -static int vmx_long_mode_enabled(struct vcpu *v)
    6.38 -{
    6.39 -    return v->arch.hvm_vmx.efer & EFER_LMA;
    6.40 +    return v->arch.hvm_vcpu.guest_efer & EFER_LME;
    6.41  }
    6.42  
    6.43  static void vmx_enable_long_mode(struct vcpu *v)
    6.44 @@ -143,7 +115,7 @@ static void vmx_enable_long_mode(struct 
    6.45      vm_entry_value |= VM_ENTRY_IA32E_MODE;
    6.46      __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
    6.47  
    6.48 -    v->arch.hvm_vmx.efer |= EFER_LMA;
    6.49 +    v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
    6.50  }
    6.51  
    6.52  static void vmx_disable_long_mode(struct vcpu *v)
    6.53 @@ -154,7 +126,7 @@ static void vmx_disable_long_mode(struct
    6.54      vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
    6.55      __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
    6.56  
    6.57 -    v->arch.hvm_vmx.efer &= ~EFER_LMA;
    6.58 +    v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
    6.59  }
    6.60  
    6.61  static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
    6.62 @@ -190,7 +162,7 @@ static enum handler_return long_mode_do_
    6.63      switch ( ecx )
    6.64      {
    6.65      case MSR_EFER:
    6.66 -        msr_content = v->arch.hvm_vmx.efer;
    6.67 +        msr_content = v->arch.hvm_vcpu.guest_efer;
    6.68          break;
    6.69  
    6.70      case MSR_FS_BASE:
    6.71 @@ -204,7 +176,7 @@ static enum handler_return long_mode_do_
    6.72      case MSR_SHADOW_GS_BASE:
    6.73          msr_content = v->arch.hvm_vmx.shadow_gs;
    6.74      check_long_mode:
    6.75 -        if ( !(vmx_long_mode_enabled(v)) )
    6.76 +        if ( !(hvm_long_mode_enabled(v)) )
    6.77          {
    6.78              vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    6.79              return HNDL_exception_raised;
    6.80 @@ -263,9 +235,9 @@ static enum handler_return long_mode_do_
    6.81          }
    6.82  
    6.83          if ( (msr_content & EFER_LME)
    6.84 -             &&  !(v->arch.hvm_vmx.efer & EFER_LME) )
    6.85 +             &&  !(v->arch.hvm_vcpu.guest_efer & EFER_LME) )
    6.86          {
    6.87 -            if ( unlikely(vmx_paging_enabled(v)) )
    6.88 +            if ( unlikely(hvm_paging_enabled(v)) )
    6.89              {
    6.90                  gdprintk(XENLOG_WARNING,
    6.91                           "Trying to set EFER.LME with paging enabled\n");
    6.92 @@ -273,9 +245,9 @@ static enum handler_return long_mode_do_
    6.93              }
    6.94          }
    6.95          else if ( !(msr_content & EFER_LME)
    6.96 -                  && (v->arch.hvm_vmx.efer & EFER_LME) )
    6.97 +                  && (v->arch.hvm_vcpu.guest_efer & EFER_LME) )
    6.98          {
    6.99 -            if ( unlikely(vmx_paging_enabled(v)) )
   6.100 +            if ( unlikely(hvm_paging_enabled(v)) )
   6.101              {
   6.102                  gdprintk(XENLOG_WARNING,
   6.103                           "Trying to clear EFER.LME with paging enabled\n");
   6.104 @@ -283,17 +255,17 @@ static enum handler_return long_mode_do_
   6.105              }
   6.106          }
   6.107  
   6.108 -        if ( (msr_content ^ v->arch.hvm_vmx.efer) & (EFER_NX|EFER_SCE) )
   6.109 +        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & (EFER_NX|EFER_SCE) )
   6.110              write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
   6.111                         (msr_content & (EFER_NX|EFER_SCE)));
   6.112  
   6.113 -        v->arch.hvm_vmx.efer = msr_content;
   6.114 +        v->arch.hvm_vcpu.guest_efer = msr_content;
   6.115          break;
   6.116  
   6.117      case MSR_FS_BASE:
   6.118      case MSR_GS_BASE:
   6.119      case MSR_SHADOW_GS_BASE:
   6.120 -        if ( !vmx_long_mode_enabled(v) )
   6.121 +        if ( !hvm_long_mode_enabled(v) )
   6.122              goto gp_fault;
   6.123  
   6.124          if ( !is_canonical_address(msr_content) )
   6.125 @@ -394,13 +366,13 @@ static void vmx_restore_guest_msrs(struc
   6.126          clear_bit(i, &guest_flags);
   6.127      }
   6.128  
   6.129 -    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
   6.130 +    if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
   6.131      {
   6.132          HVM_DBG_LOG(DBG_LEVEL_2,
   6.133                      "restore guest's EFER with value %lx",
   6.134 -                    v->arch.hvm_vmx.efer);
   6.135 +                    v->arch.hvm_vcpu.guest_efer);
   6.136          write_efer((read_efer() & ~(EFER_NX | EFER_SCE)) |
   6.137 -                   (v->arch.hvm_vmx.efer & (EFER_NX | EFER_SCE)));
   6.138 +                   (v->arch.hvm_vcpu.guest_efer & (EFER_NX | EFER_SCE)));
   6.139      }
   6.140  }
   6.141  
   6.142 @@ -408,8 +380,6 @@ static void vmx_restore_guest_msrs(struc
   6.143  
   6.144  static int vmx_lme_is_set(struct vcpu *v)
   6.145  { return 0; }
   6.146 -static int vmx_long_mode_enabled(struct vcpu *v)
   6.147 -{ return 0; }
   6.148  static void vmx_enable_long_mode(struct vcpu *v)
   6.149  { BUG(); }
   6.150  static void vmx_disable_long_mode(struct vcpu *v)
   6.151 @@ -427,13 +397,13 @@ static void vmx_restore_host_msrs(void)
   6.152  
   6.153  static void vmx_restore_guest_msrs(struct vcpu *v)
   6.154  {
   6.155 -    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & EFER_NX )
   6.156 +    if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & EFER_NX )
   6.157      {
   6.158          HVM_DBG_LOG(DBG_LEVEL_2,
   6.159                      "restore guest's EFER with value %lx",
   6.160 -                    v->arch.hvm_vmx.efer);
   6.161 +                    v->arch.hvm_vcpu.guest_efer);
   6.162          write_efer((read_efer() & ~EFER_NX) |
   6.163 -                   (v->arch.hvm_vmx.efer & EFER_NX));
   6.164 +                   (v->arch.hvm_vcpu.guest_efer & EFER_NX));
   6.165      }
   6.166  }
   6.167  
   6.168 @@ -444,7 +414,7 @@ static enum handler_return long_mode_do_
   6.169  
   6.170      switch ( regs->ecx ) {
   6.171      case MSR_EFER:
   6.172 -        msr_content = v->arch.hvm_vmx.efer;
   6.173 +        msr_content = v->arch.hvm_vcpu.guest_efer;
   6.174          break;
   6.175  
   6.176      default:
   6.177 @@ -475,10 +445,10 @@ static enum handler_return long_mode_do_
   6.178              return HNDL_exception_raised;
   6.179          }
   6.180  
   6.181 -        if ( (msr_content ^ v->arch.hvm_vmx.efer) & EFER_NX )
   6.182 +        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & EFER_NX )
   6.183              write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));
   6.184  
   6.185 -        v->arch.hvm_vmx.efer = msr_content;
   6.186 +        v->arch.hvm_vcpu.guest_efer = msr_content;
   6.187          break;
   6.188  
   6.189      default:
   6.190 @@ -501,12 +471,12 @@ static int vmx_guest_x86_mode(struct vcp
   6.191  
   6.192      ASSERT(v == current);
   6.193  
   6.194 -    if ( unlikely(!(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_PE)) )
   6.195 +    if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
   6.196          return 0;
   6.197      if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
   6.198          return 1;
   6.199      cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
   6.200 -    if ( vmx_long_mode_enabled(v) &&
   6.201 +    if ( hvm_long_mode_enabled(v) &&
   6.202           likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
   6.203          return 8;
   6.204      return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
   6.205 @@ -551,12 +521,12 @@ void vmx_vmcs_save(struct vcpu *v, struc
   6.206      c->rsp = __vmread(GUEST_RSP);
   6.207      c->rflags = __vmread(GUEST_RFLAGS);
   6.208  
   6.209 -    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
   6.210 -    c->cr2 = v->arch.hvm_vmx.cpu_cr2;
   6.211 -    c->cr3 = v->arch.hvm_vmx.cpu_cr3;
   6.212 -    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
   6.213 -
   6.214 -    c->msr_efer = v->arch.hvm_vmx.efer;
   6.215 +    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
   6.216 +    c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
   6.217 +    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
   6.218 +    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
   6.219 +
   6.220 +    c->msr_efer = v->arch.hvm_vcpu.guest_efer;
   6.221  
   6.222  #ifdef HVM_DEBUG_SUSPEND
   6.223      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   6.224 @@ -635,22 +605,22 @@ int vmx_vmcs_restore(struct vcpu *v, str
   6.225      __vmwrite(GUEST_RSP, c->rsp);
   6.226      __vmwrite(GUEST_RFLAGS, c->rflags);
   6.227  
   6.228 -    v->arch.hvm_vmx.cpu_cr0 = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
   6.229 -                               X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
   6.230 -    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   6.231 -    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
   6.232 -    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   6.233 -
   6.234 -    v->arch.hvm_vmx.cpu_cr2 = c->cr2;
   6.235 -
   6.236 -    v->arch.hvm_vmx.efer = c->msr_efer;
   6.237 +    v->arch.hvm_vcpu.hw_cr[0] = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
   6.238 +                                 X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
   6.239 +    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
   6.240 +    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
   6.241 +    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
   6.242 +
   6.243 +    v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
   6.244 +
   6.245 +    v->arch.hvm_vcpu.guest_efer = c->msr_efer;
   6.246  
   6.247  #ifdef HVM_DEBUG_SUSPEND
   6.248      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   6.249             __func__, c->cr3, c->cr0, c->cr4);
   6.250  #endif
   6.251  
   6.252 -    if ( !vmx_paging_enabled(v) )
   6.253 +    if ( !hvm_paging_enabled(v) )
   6.254      {
   6.255          HVM_DBG_LOG(DBG_LEVEL_VMMU, "%s: paging not enabled.", __func__);
   6.256          goto skip_cr3;
   6.257 @@ -672,14 +642,14 @@ int vmx_vmcs_restore(struct vcpu *v, str
   6.258          put_page(mfn_to_page(old_base_mfn));
   6.259  
   6.260   skip_cr3:
   6.261 -    v->arch.hvm_vmx.cpu_cr3 = c->cr3;
   6.262 -
   6.263 -    if ( vmx_long_mode_enabled(v) )
   6.264 +    v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
   6.265 +
   6.266 +    if ( hvm_long_mode_enabled(v) )
   6.267          vmx_enable_long_mode(v);
   6.268  
   6.269      __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
   6.270 -    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
   6.271 -    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   6.272 +    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
   6.273 +    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
   6.274  
   6.275      __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
   6.276      __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
   6.277 @@ -884,10 +854,10 @@ static void vmx_store_cpu_guest_regs(
   6.278  
   6.279      if ( crs != NULL )
   6.280      {
   6.281 -        crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
   6.282 -        crs[2] = v->arch.hvm_vmx.cpu_cr2;
   6.283 -        crs[3] = v->arch.hvm_vmx.cpu_cr3;
   6.284 -        crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
   6.285 +        crs[0] = v->arch.hvm_vcpu.guest_cr[0];
   6.286 +        crs[2] = v->arch.hvm_vcpu.guest_cr[2];
   6.287 +        crs[3] = v->arch.hvm_vcpu.guest_cr[3];
   6.288 +        crs[4] = v->arch.hvm_vcpu.guest_cr[4];
   6.289      }
   6.290  
   6.291      vmx_vmcs_exit(v);
   6.292 @@ -928,24 +898,6 @@ static void vmx_load_cpu_guest_regs(stru
   6.293      vmx_vmcs_exit(v);
   6.294  }
   6.295  
   6.296 -static unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
   6.297 -{
   6.298 -    switch ( num )
   6.299 -    {
   6.300 -    case 0:
   6.301 -        return v->arch.hvm_vmx.cpu_cr0;
   6.302 -    case 2:
   6.303 -        return v->arch.hvm_vmx.cpu_cr2;
   6.304 -    case 3:
   6.305 -        return v->arch.hvm_vmx.cpu_cr3;
   6.306 -    case 4:
   6.307 -        return v->arch.hvm_vmx.cpu_shadow_cr4;
   6.308 -    default:
   6.309 -        BUG();
   6.310 -    }
   6.311 -    return 0;                   /* dummy */
   6.312 -}
   6.313 -
   6.314  static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
   6.315  {
   6.316      unsigned long base = 0;
   6.317 @@ -953,7 +905,7 @@ static unsigned long vmx_get_segment_bas
   6.318  
   6.319      ASSERT(v == current);
   6.320  
   6.321 -    if ( vmx_long_mode_enabled(v) &&
   6.322 +    if ( hvm_long_mode_enabled(v) &&
   6.323           (__vmread(GUEST_CS_AR_BYTES) & X86_SEG_AR_CS_LM_ACTIVE) )
   6.324          long_mode = 1;
   6.325  
   6.326 @@ -1059,10 +1011,10 @@ static void vmx_stts(struct vcpu *v)
   6.327       * then this is not necessary: no FPU activity can occur until the guest
   6.328       * clears CR0.TS, and we will initialise the FPU when that happens.
   6.329       */
   6.330 -    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
   6.331 +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
   6.332      {
   6.333 -        v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS;
   6.334 -        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   6.335 +        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
   6.336 +        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
   6.337          __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
   6.338      }
   6.339  }
   6.340 @@ -1135,11 +1087,25 @@ static void vmx_update_host_cr3(struct v
   6.341      vmx_vmcs_exit(v);
   6.342  }
   6.343  
   6.344 -static void vmx_update_guest_cr3(struct vcpu *v)
   6.345 +static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
   6.346  {
   6.347      ASSERT((v == current) || !vcpu_runnable(v));
   6.348 +
   6.349      vmx_vmcs_enter(v);
   6.350 -    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
   6.351 +
   6.352 +    switch ( cr )
   6.353 +    {
   6.354 +    case 3:
   6.355 +        __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
   6.356 +        break;
   6.357 +    case 4:
   6.358 +        __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
   6.359 +        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
   6.360 +        break;
   6.361 +    default:
   6.362 +        BUG();
   6.363 +    }
   6.364 +
   6.365      vmx_vmcs_exit(v);
   6.366  }
   6.367  
   6.368 @@ -1156,7 +1122,7 @@ static void vmx_inject_exception(
   6.369      struct vcpu *v = current;
   6.370      vmx_inject_hw_exception(v, trapnr, errcode);
   6.371      if ( trapnr == TRAP_page_fault )
   6.372 -        v->arch.hvm_vmx.cpu_cr2 = cr2;
   6.373 +        v->arch.hvm_vcpu.guest_cr[2] = cr2;
   6.374  }
   6.375  
   6.376  static void vmx_update_vtpr(struct vcpu *v, unsigned long value)
   6.377 @@ -1200,17 +1166,12 @@ static struct hvm_function_table vmx_fun
   6.378      .load_cpu_guest_regs  = vmx_load_cpu_guest_regs,
   6.379      .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
   6.380      .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
   6.381 -    .paging_enabled       = vmx_paging_enabled,
   6.382 -    .long_mode_enabled    = vmx_long_mode_enabled,
   6.383 -    .pae_enabled          = vmx_pae_enabled,
   6.384 -    .nx_enabled           = vmx_nx_enabled,
   6.385      .interrupts_enabled   = vmx_interrupts_enabled,
   6.386      .guest_x86_mode       = vmx_guest_x86_mode,
   6.387 -    .get_guest_ctrl_reg   = vmx_get_ctrl_reg,
   6.388      .get_segment_base     = vmx_get_segment_base,
   6.389      .get_segment_register = vmx_get_segment_register,
   6.390      .update_host_cr3      = vmx_update_host_cr3,
   6.391 -    .update_guest_cr3     = vmx_update_guest_cr3,
   6.392 +    .update_guest_cr      = vmx_update_guest_cr,
   6.393      .flush_guest_tlbs     = vmx_flush_guest_tlbs,
   6.394      .update_vtpr          = vmx_update_vtpr,
   6.395      .stts                 = vmx_stts,
   6.396 @@ -1315,10 +1276,10 @@ static void vmx_do_no_device_fault(void)
   6.397      __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
   6.398  
   6.399      /* Disable TS in guest CR0 unless the guest wants the exception too. */
   6.400 -    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
   6.401 +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
   6.402      {
   6.403 -        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS;
   6.404 -        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   6.405 +        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
   6.406 +        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
   6.407      }
   6.408  }
   6.409  
   6.410 @@ -1773,7 +1734,7 @@ static void vmx_do_str_pio(unsigned long
   6.411  
   6.412      sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
   6.413      ar_bytes = __vmread(GUEST_CS_AR_BYTES);
   6.414 -    if ( vmx_long_mode_enabled(current) &&
   6.415 +    if ( hvm_long_mode_enabled(current) &&
   6.416           (ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
   6.417          long_mode = 1;
   6.418      addr = __vmread(GUEST_LINEAR_ADDRESS);
   6.419 @@ -1900,9 +1861,9 @@ static void vmx_world_save(struct vcpu *
   6.420      c->esp = __vmread(GUEST_RSP);
   6.421      c->eflags = __vmread(GUEST_RFLAGS) & ~X86_EFLAGS_RF;
   6.422  
   6.423 -    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
   6.424 -    c->cr3 = v->arch.hvm_vmx.cpu_cr3;
   6.425 -    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
   6.426 +    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
   6.427 +    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
   6.428 +    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
   6.429  
   6.430      c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
   6.431      c->idtr_base = __vmread(GUEST_IDTR_BASE);
   6.432 @@ -1959,13 +1920,13 @@ static int vmx_world_restore(struct vcpu
   6.433      __vmwrite(GUEST_RSP, c->esp);
   6.434      __vmwrite(GUEST_RFLAGS, c->eflags);
   6.435  
   6.436 -    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
   6.437 -    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   6.438 -
   6.439 -    if ( !vmx_paging_enabled(v) )
   6.440 +    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
   6.441 +    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
   6.442 +
   6.443 +    if ( !hvm_paging_enabled(v) )
   6.444          goto skip_cr3;
   6.445  
   6.446 -    if ( c->cr3 == v->arch.hvm_vmx.cpu_cr3 )
   6.447 +    if ( c->cr3 == v->arch.hvm_vcpu.guest_cr[3] )
   6.448      {
   6.449          /*
   6.450           * This is simple TLB flush, implying the guest has
   6.451 @@ -1990,18 +1951,18 @@ static int vmx_world_restore(struct vcpu
   6.452          v->arch.guest_table = pagetable_from_pfn(mfn);
   6.453          if ( old_base_mfn )
   6.454               put_page(mfn_to_page(old_base_mfn));
   6.455 -        v->arch.hvm_vmx.cpu_cr3 = c->cr3;
   6.456 +        v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
   6.457      }
   6.458  
   6.459   skip_cr3:
   6.460 -    if ( !vmx_paging_enabled(v) )
   6.461 +    if ( !hvm_paging_enabled(v) )
   6.462          HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
   6.463      else
   6.464          HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
   6.465  
   6.466      __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
   6.467 -    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
   6.468 -    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   6.469 +    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
   6.470 +    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
   6.471  
   6.472      __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
   6.473      __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
   6.474 @@ -2184,22 +2145,22 @@ static int vmx_set_cr0(unsigned long val
   6.475          __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
   6.476      }
   6.477  
   6.478 -    old_cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
   6.479 +    old_cr0 = v->arch.hvm_vcpu.guest_cr[0];
   6.480      paging_enabled = old_cr0 & X86_CR0_PG;
   6.481  
   6.482 -    v->arch.hvm_vmx.cpu_cr0 = (value | X86_CR0_PE | X86_CR0_PG
   6.483 -                               | X86_CR0_NE | X86_CR0_WP);
   6.484 -    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   6.485 -
   6.486 -    v->arch.hvm_vmx.cpu_shadow_cr0 = value;
   6.487 -    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   6.488 +    v->arch.hvm_vcpu.hw_cr[0] = (value | X86_CR0_PE | X86_CR0_PG |
   6.489 +                                 X86_CR0_NE | X86_CR0_WP);
   6.490 +    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
   6.491 +
   6.492 +    v->arch.hvm_vcpu.guest_cr[0] = value;
   6.493 +    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
   6.494  
   6.495      /* Trying to enable paging. */
   6.496      if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
   6.497      {
   6.498 -        if ( vmx_lme_is_set(v) && !vmx_long_mode_enabled(v) )
   6.499 +        if ( vmx_lme_is_set(v) && !hvm_long_mode_enabled(v) )
   6.500          {
   6.501 -            if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
   6.502 +            if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
   6.503              {
   6.504                  HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
   6.505                              "with EFER.LME set but not CR4.PAE");
   6.506 @@ -2214,11 +2175,11 @@ static int vmx_set_cr0(unsigned long val
   6.507          /*
   6.508           * The guest CR3 must be pointing to the guest physical.
   6.509           */
   6.510 -        mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
   6.511 +        mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
   6.512          if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
   6.513          {
   6.514              gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
   6.515 -                     v->arch.hvm_vmx.cpu_cr3, mfn);
   6.516 +                     v->arch.hvm_vcpu.guest_cr[3], mfn);
   6.517              domain_crash(v->domain);
   6.518              return 0;
   6.519          }
   6.520 @@ -2232,7 +2193,7 @@ static int vmx_set_cr0(unsigned long val
   6.521              put_page(mfn_to_page(old_base_mfn));
   6.522  
   6.523          HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
   6.524 -                    v->arch.hvm_vmx.cpu_cr3, mfn);
   6.525 +                    v->arch.hvm_vcpu.guest_cr[3], mfn);
   6.526  
   6.527          paging_update_paging_modes(v);
   6.528      }
   6.529 @@ -2242,13 +2203,13 @@ static int vmx_set_cr0(unsigned long val
   6.530           paging_enabled )
   6.531      {
   6.532          /* When CR0.PG is cleared, LMA is cleared immediately. */
   6.533 -        if ( vmx_long_mode_enabled(v) )
   6.534 +        if ( hvm_long_mode_enabled(v) )
   6.535              vmx_disable_long_mode(v);
   6.536  
   6.537 -        if ( v->arch.hvm_vmx.cpu_cr3 )
   6.538 +        if ( v->arch.hvm_vcpu.guest_cr[3] )
   6.539          {
   6.540              put_page(mfn_to_page(get_mfn_from_gpfn(
   6.541 -                      v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
   6.542 +                      v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
   6.543              v->arch.guest_table = pagetable_null();
   6.544          }
   6.545      }
   6.546 @@ -2316,12 +2277,9 @@ static int vmx_set_cr0(unsigned long val
   6.547      CASE_ ## T ## ET_REG(R15, r15)
   6.548  #endif
   6.549  
   6.550 -/*
   6.551 - * Write to control registers
   6.552 - */
   6.553  static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
   6.554  {
   6.555 -    unsigned long value, old_cr, old_base_mfn, mfn;
   6.556 +    unsigned long value;
   6.557      struct vcpu *v = current;
   6.558      struct vlapic *vlapic = vcpu_vlapic(v);
   6.559  
   6.560 @@ -2353,108 +2311,10 @@ static int mov_to_cr(int gp, int cr, str
   6.561          return vmx_set_cr0(value);
   6.562  
   6.563      case 3:
   6.564 -        /*
   6.565 -         * If paging is not enabled yet, simply copy the value to CR3.
   6.566 -         */
   6.567 -        if ( !vmx_paging_enabled(v) )
   6.568 -        {
   6.569 -            v->arch.hvm_vmx.cpu_cr3 = value;
   6.570 -            break;
   6.571 -        }
   6.572 -
   6.573 -        /*
   6.574 -         * We make a new one if the shadow does not exist.
   6.575 -         */
   6.576 -        if ( value == v->arch.hvm_vmx.cpu_cr3 ) {
   6.577 -            /*
   6.578 -             * This is simple TLB flush, implying the guest has
   6.579 -             * removed some translation or changed page attributes.
   6.580 -             * We simply invalidate the shadow.
   6.581 -             */
   6.582 -            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
   6.583 -            if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
   6.584 -                goto bad_cr3;
   6.585 -            paging_update_cr3(v);
   6.586 -        } else {
   6.587 -            /*
   6.588 -             * If different, make a shadow. Check if the PDBR is valid
   6.589 -             * first.
   6.590 -             */
   6.591 -            HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
   6.592 -            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
   6.593 -            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
   6.594 -                goto bad_cr3;
   6.595 -            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   6.596 -            v->arch.guest_table = pagetable_from_pfn(mfn);
   6.597 -            if ( old_base_mfn )
   6.598 -                put_page(mfn_to_page(old_base_mfn));
   6.599 -            v->arch.hvm_vmx.cpu_cr3 = value;
   6.600 -            update_cr3(v);
   6.601 -            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
   6.602 -        }
   6.603 -        break;
   6.604 -
   6.605 -    case 4: /* CR4 */
   6.606 -        old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
   6.607 -
   6.608 -        if ( value & HVM_CR4_GUEST_RESERVED_BITS )
   6.609 -        {
   6.610 -            HVM_DBG_LOG(DBG_LEVEL_1,
   6.611 -                        "Guest attempts to set reserved bit in CR4: %lx",
   6.612 -                        value);
   6.613 -            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
   6.614 -            return 0;
   6.615 -        }
   6.616 -
   6.617 -        if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
   6.618 -        {
   6.619 -            if ( vmx_pgbit_test(v) )
   6.620 -            {
   6.621 -#if CONFIG_PAGING_LEVELS >= 3
   6.622 -                /* The guest is a 32-bit PAE guest. */
   6.623 -                unsigned long mfn, old_base_mfn;
   6.624 -                mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
   6.625 -                if ( !mfn_valid(mfn) ||
   6.626 -                     !get_page(mfn_to_page(mfn), v->domain) )
   6.627 -                    goto bad_cr3;
   6.628 -
   6.629 -                /*
   6.630 -                 * Now arch.guest_table points to machine physical.
   6.631 -                 */
   6.632 -                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   6.633 -                v->arch.guest_table = pagetable_from_pfn(mfn);
   6.634 -                if ( old_base_mfn )
   6.635 -                    put_page(mfn_to_page(old_base_mfn));
   6.636 -
   6.637 -                HVM_DBG_LOG(DBG_LEVEL_VMMU,
   6.638 -                            "Update CR3 value = %lx, mfn = %lx",
   6.639 -                            v->arch.hvm_vmx.cpu_cr3, mfn);
   6.640 -#endif
   6.641 -            }
   6.642 -        }
   6.643 -        else if ( !(value & X86_CR4_PAE) )
   6.644 -        {
   6.645 -            if ( unlikely(vmx_long_mode_enabled(v)) )
   6.646 -            {
   6.647 -                HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
   6.648 -                            "EFER.LMA is set");
   6.649 -                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
   6.650 -                return 0;
   6.651 -            }
   6.652 -        }
   6.653 -
   6.654 -        __vmwrite(GUEST_CR4, value | HVM_CR4_HOST_MASK);
   6.655 -        v->arch.hvm_vmx.cpu_shadow_cr4 = value;
   6.656 -        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   6.657 -
   6.658 -        /*
   6.659 -         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
   6.660 -         * all TLB entries except global entries.
   6.661 -         */
   6.662 -        if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
   6.663 -            paging_update_paging_modes(v);
   6.664 -
   6.665 -        break;
   6.666 +        return hvm_set_cr3(value);
   6.667 +
   6.668 +    case 4:
   6.669 +        return hvm_set_cr4(value);
   6.670  
   6.671      case 8:
   6.672          vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
   6.673 @@ -2462,14 +2322,11 @@ static int mov_to_cr(int gp, int cr, str
   6.674  
   6.675      default:
   6.676          gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
   6.677 -        domain_crash(v->domain);
   6.678 -        return 0;
   6.679 +        goto exit_and_crash;
   6.680      }
   6.681  
   6.682      return 1;
   6.683  
   6.684 - bad_cr3:
   6.685 -    gdprintk(XENLOG_ERR, "Invalid CR3\n");
   6.686   exit_and_crash:
   6.687      domain_crash(v->domain);
   6.688      return 0;
   6.689 @@ -2487,7 +2344,7 @@ static void mov_from_cr(int cr, int gp, 
   6.690      switch ( cr )
   6.691      {
   6.692      case 3:
   6.693 -        value = (unsigned long)v->arch.hvm_vmx.cpu_cr3;
   6.694 +        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
   6.695          break;
   6.696      case 8:
   6.697          value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
   6.698 @@ -2530,7 +2387,8 @@ static int vmx_cr_access(unsigned long e
   6.699      unsigned long value;
   6.700      struct vcpu *v = current;
   6.701  
   6.702 -    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE ) {
   6.703 +    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE )
   6.704 +    {
   6.705      case TYPE_MOV_TO_CR:
   6.706          gp = exit_qualification & CONTROL_REG_ACCESS_REG;
   6.707          cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
   6.708 @@ -2545,14 +2403,14 @@ static int vmx_cr_access(unsigned long e
   6.709          setup_fpu(v);
   6.710          __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
   6.711  
   6.712 -        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */
   6.713 -        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   6.714 -
   6.715 -        v->arch.hvm_vmx.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
   6.716 -        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   6.717 +        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS; /* clear TS */
   6.718 +        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
   6.719 +
   6.720 +        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
   6.721 +        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
   6.722          break;
   6.723      case TYPE_LMSW:
   6.724 -        value = v->arch.hvm_vmx.cpu_shadow_cr0;
   6.725 +        value = v->arch.hvm_vcpu.guest_cr[0];
   6.726          value = (value & ~0xF) |
   6.727              (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
   6.728          return vmx_set_cr0(value);
   6.729 @@ -2943,7 +2801,7 @@ asmlinkage void vmx_vmexit_handler(struc
   6.730                  break;
   6.731              }
   6.732  
   6.733 -            v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
   6.734 +            v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;
   6.735              vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
   6.736              break;
   6.737          case TRAP_nmi:
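
Net effect of the vmx.c hunks above: VMX stops keeping private cpu_cr*/cpu_shadow_cr* copies, and guest writes to CR3 and CR4 are delegated to the common hvm_set_cr3()/hvm_set_cr4() helpers instead of open-coded VMX logic. A hedged usage sketch of the return-value convention mov_to_cr() now relies on (value and the surrounding handler are illustrative placeholders):

    /* In a CR-access exit handler: delegate, then report success or failure. */
    if ( !hvm_set_cr4(value) )
        return 0;   /* the helper already raised a fault or crashed the domain */
    return 1;       /* handled; the caller advances the guest RIP as usual     */
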
     7.1 --- a/xen/arch/x86/hvm/vmx/x86_32/exits.S	Wed Aug 08 12:26:21 2007 +0100
     7.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S	Wed Aug 08 12:27:23 2007 +0100
     7.3 @@ -74,7 +74,7 @@ ENTRY(vmx_asm_do_vmentry)
     7.4          jnz  vmx_process_softirqs
     7.5  
     7.6          call vmx_intr_assist
     7.7 -        movl VCPU_vmx_cr2(%ebx),%eax
     7.8 +        movl VCPU_hvm_guest_cr2(%ebx),%eax
     7.9          movl %eax,%cr2
    7.10          call vmx_trace_vmentry
    7.11  
     8.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Wed Aug 08 12:26:21 2007 +0100
     8.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Wed Aug 08 12:27:23 2007 +0100
     8.3 @@ -88,7 +88,7 @@ ENTRY(vmx_asm_do_vmentry)
     8.4          jnz   vmx_process_softirqs
     8.5  
     8.6          call vmx_intr_assist
     8.7 -        movq VCPU_vmx_cr2(%rbx),%rax
     8.8 +        movq VCPU_hvm_guest_cr2(%rbx),%rax
     8.9          movq %rax,%cr2
    8.10          call vmx_trace_vmentry
    8.11  
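
The two exits.S hunks pair with the page-fault hunk in vmx.c above: the exit handler records the faulting address in guest_cr[2], and the VM-entry stub reloads the real %cr2 from that field just before resuming the guest. Roughly (32-bit flavour shown; the 64-bit stub differs only in register width):

    /* VM-exit path (vmx.c): remember the guest's faulting address. */
    v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;
    vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);

    # VM-entry path (exits.S): restore it into the hardware register.
    movl VCPU_hvm_guest_cr2(%ebx),%eax
    movl %eax,%cr2
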
     9.1 --- a/xen/arch/x86/mm.c	Wed Aug 08 12:26:21 2007 +0100
     9.2 +++ b/xen/arch/x86/mm.c	Wed Aug 08 12:27:23 2007 +0100
     9.3 @@ -394,8 +394,8 @@ void write_ptbase(struct vcpu *v)
     9.4      write_cr3(v->arch.cr3);
     9.5  }
     9.6  
     9.7 -/* Should be called after CR3 is updated.
     9.8 - * Updates vcpu->arch.cr3 and, for HVM guests, vcpu->arch.hvm_vcpu.cpu_cr3.
     9.9 +/*
    9.10 + * Should be called after CR3 is updated.
    9.11   * 
    9.12   * Uses values found in vcpu->arch.(guest_table and guest_table_user), and
    9.13   * for HVM guests, arch.monitor_table and hvm's guest CR3.
    10.1 --- a/xen/arch/x86/mm/hap/guest_walk.c	Wed Aug 08 12:26:21 2007 +0100
    10.2 +++ b/xen/arch/x86/mm/hap/guest_walk.c	Wed Aug 08 12:27:23 2007 +0100
    10.3 @@ -62,7 +62,7 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
    10.4  unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
    10.5      struct vcpu *v, unsigned long gva)
    10.6  {
    10.7 -    unsigned long gcr3 = hvm_get_guest_ctrl_reg(v, 3);
    10.8 +    unsigned long gcr3 = v->arch.hvm_vcpu.guest_cr[3];
    10.9      int mode = GUEST_PAGING_LEVELS;
   10.10      int lev, index;
   10.11      paddr_t gpa = 0;
    11.1 --- a/xen/arch/x86/mm/hap/hap.c	Wed Aug 08 12:26:21 2007 +0100
    11.2 +++ b/xen/arch/x86/mm/hap/hap.c	Wed Aug 08 12:27:23 2007 +0100
    11.3 @@ -603,48 +603,37 @@ static int hap_invlpg(struct vcpu *v, un
    11.4      return 0;
    11.5  }
    11.6  
    11.7 -/*
    11.8 - * HAP guests do not need to take any action on CR3 writes (they are still
    11.9 - * intercepted, so that Xen's copy of the guest's CR3 can be kept in sync.)
   11.10 - */
   11.11  static void hap_update_cr3(struct vcpu *v, int do_locking)
   11.12  {
   11.13 +    v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
   11.14 +    hvm_update_guest_cr(v, 3);
   11.15  }
   11.16  
   11.17  static void hap_update_paging_modes(struct vcpu *v)
   11.18  {
   11.19 -    struct domain *d;
   11.20 +    struct domain *d = v->domain;
   11.21  
   11.22 -    d = v->domain;
   11.23      hap_lock(d);
   11.24  
   11.25 -    /* update guest paging mode. Note that we rely on hvm functions to detect
   11.26 -     * guest's paging mode. So, make sure the shadow registers (CR0, CR4, EFER)
   11.27 -     * reflect guest's status correctly.
   11.28 -     */
   11.29 -    if ( hvm_paging_enabled(v) )
   11.30 -    {
   11.31 -        if ( hvm_long_mode_enabled(v) )
   11.32 -            v->arch.paging.mode = &hap_paging_long_mode;
   11.33 -        else if ( hvm_pae_enabled(v) )
   11.34 -            v->arch.paging.mode = &hap_paging_pae_mode;
   11.35 -        else
   11.36 -            v->arch.paging.mode = &hap_paging_protected_mode;
   11.37 -    }
   11.38 -    else
   11.39 -    {
   11.40 -        v->arch.paging.mode = &hap_paging_real_mode;
   11.41 -    }
   11.42 +    v->arch.paging.mode =
   11.43 +        !hvm_paging_enabled(v)   ? &hap_paging_real_mode :
   11.44 +        hvm_long_mode_enabled(v) ? &hap_paging_long_mode :
   11.45 +        hvm_pae_enabled(v)       ? &hap_paging_pae_mode  :
   11.46 +                                   &hap_paging_protected_mode;
   11.47  
   11.48 -    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
   11.49 +    v->arch.paging.translate_enabled = hvm_paging_enabled(v);
   11.50  
   11.51      if ( pagetable_is_null(v->arch.monitor_table) )
   11.52      {
   11.53          mfn_t mmfn = hap_make_monitor_table(v);
   11.54          v->arch.monitor_table = pagetable_from_mfn(mmfn);
   11.55          make_cr3(v, mfn_x(mmfn));
   11.56 +        hvm_update_host_cr3(v);
   11.57      }
   11.58  
   11.59 +    /* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
   11.60 +    hap_update_cr3(v, 0);
   11.61 +
   11.62      hap_unlock(d);
   11.63  }
   11.64  
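
hap_update_cr3() stops being a no-op: with hardware-assisted paging the processor walks the guest's own page tables, so the value handed to hardware is simply the guest's CR3. A hedged sketch of the propagation step, the HAP counterpart of the shadow-mode sketch given after vmx_update_guest_cr() above (how guest_cr[3] was filled in is up to the common CR3-write path):

    /* HAP: hardware runs directly on the guest's page tables. */
    v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
    hvm_update_guest_cr(v, 3);    /* VMX: __vmwrite(GUEST_CR3, hw_cr[3]) */

The added hvm_update_host_cr3(v) call covers the first-run case: once the monitor table has been built, the vendor layer must also refresh its notion of the host CR3 used around VM exits.
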
    12.1 --- a/xen/arch/x86/mm/shadow/common.c	Wed Aug 08 12:26:21 2007 +0100
    12.2 +++ b/xen/arch/x86/mm/shadow/common.c	Wed Aug 08 12:27:23 2007 +0100
    12.3 @@ -2266,7 +2266,7 @@ static void sh_update_paging_modes(struc
    12.4          ASSERT(shadow_mode_translate(d));
    12.5          ASSERT(shadow_mode_external(d));
    12.6  
    12.7 -        v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
    12.8 +        v->arch.paging.translate_enabled = hvm_paging_enabled(v);
    12.9          if ( !v->arch.paging.translate_enabled )
   12.10          {
   12.11              /* Set v->arch.guest_table to use the p2m map, and choose
   12.12 @@ -2347,7 +2347,7 @@ static void sh_update_paging_modes(struc
   12.13              SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
   12.14                            "(was g=%u s=%u)\n",
   12.15                            d->domain_id, v->vcpu_id,
   12.16 -                          is_hvm_domain(d) ? !!hvm_paging_enabled(v) : 1,
   12.17 +                          is_hvm_domain(d) ? hvm_paging_enabled(v) : 1,
   12.18                            v->arch.paging.mode->guest_levels,
   12.19                            v->arch.paging.mode->shadow.shadow_levels,
   12.20                            old_mode ? old_mode->guest_levels : 0,
    13.1 --- a/xen/arch/x86/mm/shadow/multi.c	Wed Aug 08 12:26:21 2007 +0100
    13.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Wed Aug 08 12:27:23 2007 +0100
    13.3 @@ -175,7 +175,7 @@ guest_supports_superpages(struct vcpu *v
    13.4      /* The _PAGE_PSE bit must be honoured in HVM guests, whenever
    13.5       * CR4.PSE is set or the guest is in PAE or long mode */
    13.6      return (is_hvm_vcpu(v) && (GUEST_PAGING_LEVELS != 2 
    13.7 -                             || (hvm_get_guest_ctrl_reg(v, 4) & X86_CR4_PSE)));
    13.8 +                             || (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE)));
    13.9  }
   13.10  
   13.11  static inline int
   13.12 @@ -3483,7 +3483,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
   13.13   * Paravirtual guests should set v->arch.guest_table (and guest_table_user,
   13.14   * if appropriate).
   13.15   * HVM guests should also make sure hvm_get_guest_cntl_reg(v, 3) works;
   13.16 - * this function will call hvm_update_guest_cr3() to tell them where the 
   13.17 + * this function will call hvm_update_guest_cr(v, 3) to tell them where the 
   13.18   * shadow tables are.
   13.19   * If do_locking != 0, assume we are being called from outside the 
   13.20   * shadow code, and must take and release the shadow lock; otherwise 
   13.21 @@ -3525,7 +3525,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
   13.22          // Is paging enabled on this vcpu?
   13.23          if ( paging_vcpu_mode_translate(v) )
   13.24          {
   13.25 -            gfn = _gfn(paddr_to_pfn(hvm_get_guest_ctrl_reg(v, 3)));
   13.26 +            gfn = _gfn(paddr_to_pfn(v->arch.hvm_vcpu.guest_cr[3]));
   13.27              gmfn = vcpu_gfn_to_mfn(v, gfn);
   13.28              ASSERT(mfn_valid(gmfn));
   13.29              ASSERT(pagetable_get_pfn(v->arch.guest_table) == mfn_x(gmfn));
   13.30 @@ -3576,11 +3576,11 @@ sh_update_cr3(struct vcpu *v, int do_loc
   13.31   
   13.32       if ( shadow_mode_external(d) && paging_vcpu_mode_translate(v) ) 
   13.33           /* Paging enabled: find where in the page the l3 table is */
   13.34 -         guest_idx = guest_index((void *)hvm_get_guest_ctrl_reg(v, 3));
   13.35 -    else
   13.36 -        /* Paging disabled or PV: l3 is at the start of a page */ 
   13.37 -        guest_idx = 0; 
   13.38 -     
   13.39 +         guest_idx = guest_index((void *)v->arch.hvm_vcpu.guest_cr[3]);
   13.40 +     else
   13.41 +         /* Paging disabled or PV: l3 is at the start of a page */ 
   13.42 +         guest_idx = 0; 
   13.43 +
   13.44       // Ignore the low 2 bits of guest_idx -- they are really just
   13.45       // cache control.
   13.46       guest_idx &= ~3;
   13.47 @@ -3718,18 +3718,21 @@ sh_update_cr3(struct vcpu *v, int do_loc
   13.48  
   13.49  
   13.50      ///
   13.51 -    /// v->arch.hvm_vcpu.hw_cr3
   13.52 +    /// v->arch.hvm_vcpu.hw_cr[3]
   13.53      ///
   13.54      if ( shadow_mode_external(d) )
   13.55      {
   13.56          ASSERT(is_hvm_domain(d));
   13.57  #if SHADOW_PAGING_LEVELS == 3
   13.58          /* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
   13.59 -        hvm_update_guest_cr3(v, virt_to_maddr(&v->arch.paging.shadow.l3table));
   13.60 +        v->arch.hvm_vcpu.hw_cr[3] =
   13.61 +            virt_to_maddr(&v->arch.paging.shadow.l3table);
   13.62  #else
   13.63          /* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
   13.64 -        hvm_update_guest_cr3(v, pagetable_get_paddr(v->arch.shadow_table[0]));
   13.65 +        v->arch.hvm_vcpu.hw_cr[3] =
   13.66 +            pagetable_get_paddr(v->arch.shadow_table[0]);
   13.67  #endif
   13.68 +        hvm_update_guest_cr(v, 3);
   13.69      }
   13.70  
   13.71      /* Fix up the linear pagetable mappings */
    14.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Wed Aug 08 12:26:21 2007 +0100
    14.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Wed Aug 08 12:27:23 2007 +0100
    14.3 @@ -85,7 +85,7 @@ void __dummy__(void)
    14.4      BLANK();
    14.5  
    14.6      OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    14.7 -    OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
    14.8 +    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
    14.9      BLANK();
   14.10  
   14.11      OFFSET(VMCB_rax, struct vmcb_struct, rax);
    15.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Wed Aug 08 12:26:21 2007 +0100
    15.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Wed Aug 08 12:27:23 2007 +0100
    15.3 @@ -88,7 +88,7 @@ void __dummy__(void)
    15.4      BLANK();
    15.5  
    15.6      OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    15.7 -    OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
    15.8 +    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
    15.9      BLANK();
   15.10  
   15.11      OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
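
The asm-offsets.c hunks rename the constant consumed by the VM-entry stubs shown earlier. These OFFSET() lines are never executed; the file is compiled and post-processed at build time to generate the numeric offsets the assembly includes. A sketch of the conventional mechanism, not the exact macro bodies in this tree:

    /* asm-offsets.c (sketch): emit the structure offset where the build
     * system can scrape it out of the compiler's assembly output. */
    #define OFFSET(sym, str, mem) \
        __asm__ __volatile__ ( "\n.ascii \"==>" #sym " %0\"" \
                               : : "i" (offsetof(str, mem)) )

    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);

    /* The generated header then provides
     *     #define VCPU_hvm_guest_cr2 <byte offset>
     * which is what "movl VCPU_hvm_guest_cr2(%ebx),%eax" in exits.S uses. */
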
    16.1 --- a/xen/include/asm-x86/hvm/hvm.h	Wed Aug 08 12:26:21 2007 +0100
    16.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Wed Aug 08 12:27:23 2007 +0100
    16.3 @@ -95,36 +95,26 @@ struct hvm_function_table {
    16.4  
    16.5      /*
    16.6       * Examine specifics of the guest state:
    16.7 -     * 1) determine whether paging is enabled,
    16.8 -     * 2) determine whether long mode is enabled,
    16.9 -     * 3) determine whether PAE paging is enabled,
   16.10 -     * 4) determine whether NX is enabled,
   16.11 -     * 5) determine whether interrupts are enabled or not,
   16.12 -     * 6) determine the mode the guest is running in,
   16.13 -     * 7) return the current guest control-register value
   16.14 -     * 8) return the current guest segment descriptor base
   16.15 -     * 9) return the current guest segment descriptor
   16.16 +     * 1) determine whether interrupts are enabled or not
   16.17 +     * 2) determine the mode the guest is running in
   16.18 +     * 3) return the current guest segment descriptor base
   16.19 +     * 4) return the current guest segment descriptor
   16.20       */
   16.21 -    int (*paging_enabled)(struct vcpu *v);
   16.22 -    int (*long_mode_enabled)(struct vcpu *v);
   16.23 -    int (*pae_enabled)(struct vcpu *v);
   16.24 -    int (*nx_enabled)(struct vcpu *v);
   16.25      int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
   16.26      int (*guest_x86_mode)(struct vcpu *v);
   16.27 -    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
   16.28      unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
   16.29      void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
   16.30                                   struct segment_register *reg);
   16.31  
   16.32      /* 
   16.33 -     * Re-set the value of CR3 that Xen runs on when handling VM exits
   16.34 +     * Re-set the value of CR3 that Xen runs on when handling VM exits.
   16.35       */
   16.36      void (*update_host_cr3)(struct vcpu *v);
   16.37  
   16.38      /*
   16.39 -     * Called to inform HVM layer that a guest cr3 has changed
   16.40 +     * Called to inform HVM layer that a guest control register has changed.
   16.41       */
   16.42 -    void (*update_guest_cr3)(struct vcpu *v);
   16.43 +    void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
   16.44  
   16.45      /*
   16.46       * Called to ensure than all guest-specific mappings in a tagged TLB
   16.47 @@ -189,41 +179,27 @@ hvm_load_cpu_guest_regs(struct vcpu *v, 
   16.48  void hvm_set_guest_time(struct vcpu *v, u64 gtime);
   16.49  u64 hvm_get_guest_time(struct vcpu *v);
   16.50  
   16.51 -static inline int
   16.52 -hvm_paging_enabled(struct vcpu *v)
   16.53 -{
   16.54 -    return hvm_funcs.paging_enabled(v);
   16.55 -}
   16.56 +#define hvm_paging_enabled(v) \
   16.57 +    (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
   16.58 +#define hvm_pae_enabled(v) \
   16.59 +    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
   16.60 +#define hvm_nx_enabled(v) \
   16.61 +    (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
   16.62  
   16.63  #ifdef __x86_64__
   16.64 -static inline int
   16.65 -hvm_long_mode_enabled(struct vcpu *v)
   16.66 -{
   16.67 -    return hvm_funcs.long_mode_enabled(v);
   16.68 -}
   16.69 +#define hvm_long_mode_enabled(v) \
   16.70 +    ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
   16.71  #else
   16.72  #define hvm_long_mode_enabled(v) (v,0)
   16.73  #endif
   16.74  
   16.75  static inline int
   16.76 -hvm_pae_enabled(struct vcpu *v)
   16.77 -{
   16.78 -    return hvm_funcs.pae_enabled(v);
   16.79 -}
   16.80 -
   16.81 -static inline int
   16.82  hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
   16.83  {
   16.84      return hvm_funcs.interrupts_enabled(v, type);
   16.85  }
   16.86  
   16.87  static inline int
   16.88 -hvm_nx_enabled(struct vcpu *v)
   16.89 -{
   16.90 -    return hvm_funcs.nx_enabled(v);
   16.91 -}
   16.92 -
   16.93 -static inline int
   16.94  hvm_guest_x86_mode(struct vcpu *v)
   16.95  {
   16.96      return hvm_funcs.guest_x86_mode(v);
   16.97 @@ -244,7 +220,10 @@ hvm_update_vtpr(struct vcpu *v, unsigned
   16.98      hvm_funcs.update_vtpr(v, value);
   16.99  }
  16.100  
  16.101 -void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);
  16.102 +static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
  16.103 +{
  16.104 +    hvm_funcs.update_guest_cr(v, cr);
  16.105 +}
  16.106  
  16.107  static inline void 
  16.108  hvm_flush_guest_tlbs(void)
  16.109 @@ -257,12 +236,6 @@ void hvm_hypercall_page_initialise(struc
  16.110                                     void *hypercall_page);
  16.111  
  16.112  static inline unsigned long
  16.113 -hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
  16.114 -{
  16.115 -    return hvm_funcs.get_guest_ctrl_reg(v, num);
  16.116 -}
  16.117 -
  16.118 -static inline unsigned long
  16.119  hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
  16.120  {
  16.121      return hvm_funcs.get_segment_base(v, seg);
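
With the hunk above, the paging/PAE/NX/long-mode queries stop being indirect calls through hvm_funcs and become direct tests on state cached in struct hvm_vcpu (the guest_cr[] and guest_efer fields introduced later in this changeset). A small usage example, mirroring the mode selection in hap.c earlier in this patch, with comments noting which cached bit each macro now tests:

    v->arch.paging.mode =
        !hvm_paging_enabled(v)   ? &hap_paging_real_mode :      /* CR0.PG clear */
        hvm_long_mode_enabled(v) ? &hap_paging_long_mode :      /* EFER.LMA set */
        hvm_pae_enabled(v)       ? &hap_paging_pae_mode  :      /* CR4.PAE set  */
                                   &hap_paging_protected_mode;  /* 2-level      */
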
    17.1 --- a/xen/include/asm-x86/hvm/support.h	Wed Aug 08 12:26:21 2007 +0100
    17.2 +++ b/xen/include/asm-x86/hvm/support.h	Wed Aug 08 12:27:23 2007 +0100
    17.3 @@ -234,4 +234,7 @@ int hvm_do_hypercall(struct cpu_user_reg
    17.4  void hvm_hlt(unsigned long rflags);
    17.5  void hvm_triple_fault(void);
    17.6  
    17.7 +int hvm_set_cr3(unsigned long value);
    17.8 +int hvm_set_cr4(unsigned long value);
    17.9 +
   17.10  #endif /* __ASM_X86_HVM_SUPPORT_H__ */
    18.1 --- a/xen/include/asm-x86/hvm/svm/asid.h	Wed Aug 08 12:26:21 2007 +0100
    18.2 +++ b/xen/include/asm-x86/hvm/svm/asid.h	Wed Aug 08 12:27:23 2007 +0100
    18.3 @@ -32,20 +32,6 @@ void svm_asid_init_vcpu(struct vcpu *v);
    18.4  void svm_asid_inv_asid(struct vcpu *v);
    18.5  void svm_asid_inc_generation(void);
    18.6  
    18.7 -/*
    18.8 - * ASID related, guest triggered events.
    18.9 - */
   18.10 -
   18.11 -static inline void svm_asid_g_update_paging(struct vcpu *v)
   18.12 -{
   18.13 -    svm_asid_inv_asid(v);
   18.14 -}
   18.15 -
   18.16 -static inline void svm_asid_g_mov_to_cr3(struct vcpu *v)
   18.17 -{
   18.18 -    svm_asid_inv_asid(v);
   18.19 -}
   18.20 -
   18.21  static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
   18.22  {
   18.23  #if 0
    19.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Wed Aug 08 12:26:21 2007 +0100
    19.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Wed Aug 08 12:27:23 2007 +0100
    19.3 @@ -440,11 +440,6 @@ struct arch_svm_struct {
    19.4      u32                *msrpm;
    19.5      int                 launch_core;
    19.6      bool_t              vmcb_in_sync;     /* VMCB sync'ed with VMSAVE? */
    19.7 -    unsigned long       cpu_shadow_cr0;   /* Guest value for CR0 */
    19.8 -    unsigned long       cpu_shadow_cr4;   /* Guest value for CR4 */
    19.9 -    unsigned long       cpu_shadow_efer;  /* Guest value for EFER */
   19.10 -    unsigned long       cpu_cr2;
   19.11 -    unsigned long       cpu_cr3;
   19.12  };
   19.13  
   19.14  struct vmcb_struct *alloc_vmcb(void);
    20.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Wed Aug 08 12:26:21 2007 +0100
    20.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Wed Aug 08 12:27:23 2007 +0100
    20.3 @@ -29,7 +29,17 @@
    20.4  #define HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI     1
    20.5  
    20.6  struct hvm_vcpu {
    20.7 -    unsigned long       hw_cr3;     /* value we give to HW to use */
    20.8 +    /* Guest control-register and EFER values, just as the guest sees them. */
    20.9 +    unsigned long       guest_cr[5];
   20.10 +    unsigned long       guest_efer;
   20.11 +
   20.12 +    /*
   20.13 +     * Processor-visible CR0-4 while guest executes.
   20.14 +     * Only CR3 is guaranteed to be valid: all other array entries are private
   20.15 +     * to the specific HVM implementation (e.g., VMX, SVM).
   20.16 +     */
   20.17 +    unsigned long       hw_cr[5];
   20.18 +
   20.19      struct hvm_io_op    io_op;
   20.20      struct vlapic       vlapic;
   20.21      s64                 cache_tsc_offset;
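
struct hvm_vcpu now carries both views of the control registers, indexed by the architectural register number (CR0..CR4; index 1 is unused since there is no CR1). A hedged sketch of the intended division of labour, with placeholder names for the values involved:

    struct hvm_vcpu *hv = &v->arch.hvm_vcpu;

    /* Guest-visible state, maintained by the common CR emulation: */
    hv->guest_cr[0] = cr0_written_by_guest;        /* placeholder value */
    hv->guest_cr[2] = faulting_address;            /* placeholder value */

    /* Processor-visible state, owned by the vendor and paging code: */
    hv->hw_cr[0] = hv->guest_cr[0] | X86_CR0_TS;   /* e.g. lazy-FPU trapping */
    hv->hw_cr[3] = shadow_or_guest_table_maddr;    /* placeholder value */
    hvm_update_guest_cr(v, 3);                     /* propagate to the VMCS/VMCB */
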
    21.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Aug 08 12:26:21 2007 +0100
    21.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Aug 08 12:27:23 2007 +0100
    21.3 @@ -67,17 +67,11 @@ struct arch_vmx_struct {
    21.4      /* Cache of cpu execution control. */
    21.5      u32                  exec_control;
    21.6  
    21.7 -    unsigned long        cpu_cr0; /* copy of guest CR0 */
    21.8 -    unsigned long        cpu_shadow_cr0; /* copy of guest read shadow CR0 */
    21.9 -    unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
   21.10 -    unsigned long        cpu_cr2; /* save CR2 */
   21.11 -    unsigned long        cpu_cr3;
   21.12  #ifdef __x86_64__
   21.13      struct vmx_msr_state msr_state;
   21.14      unsigned long        shadow_gs;
   21.15      unsigned long        cstar;
   21.16  #endif
   21.17 -    unsigned long        efer;
   21.18  
   21.19      /* Following fields are all specific to vmxassist. */
   21.20      unsigned long        vmxassist_enabled:1;
    22.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Wed Aug 08 12:26:21 2007 +0100
    22.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Wed Aug 08 12:27:23 2007 +0100
    22.3 @@ -279,8 +279,8 @@ static inline void __vmx_inject_exceptio
    22.4  
    22.5      __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
    22.6  
    22.7 -    if (trap == TRAP_page_fault)
    22.8 -        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vmx.cpu_cr2, error_code);
    22.9 +    if ( trap == TRAP_page_fault )
   22.10 +        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
   22.11      else
   22.12          HVMTRACE_2D(INJ_EXC, v, trap, error_code);
   22.13  }