ia64/xen-unstable

changeset 12263:1db00df48218

[VMX] Get rid of special vm_launch schedule tail.
This required various hacks, including getting rid
of the implicit vcpu==current assumption in __vmwrite(),
and a couple of tweaks to the shadow code.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Mon Nov 06 15:40:30 2006 +0000 (2006-11-06)
parents b2668cc03914
children a910bf123e58
files xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/hvm/vmx/vmx.h
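
In short: a VMX vcpu used to defer most of its VMCS setup to a one-shot
"launch" schedule tail that ran the first time the vcpu was scheduled, and
which implicitly assumed it was operating on the current vcpu. This
changeset moves all of that setup into construct_vmcs(), performed at
vcpu-creation time under vmx_vmcs_enter()/vmx_vmcs_exit(), so every vcpu
uses the ordinary resume tail from the start. A minimal sketch of the
control-flow change (names taken from the patch below, bodies elided):

    /* Before: a one-shot launch tail finished VMCS setup on first run. */
    v->arch.schedule_tail = arch_vmx_do_launch;
    void arch_vmx_do_launch(struct vcpu *v)
    {
        vmx_load_vmcs(v);
        vmx_do_launch(v);   /* wrote CR0/CR4, LDTR/TR, CR3, ...; then
                             * flipped the tail to arch_vmx_do_resume */
        reset_stack_and_jump(vmx_asm_do_vmentry);
    }

    /* After: construct_vmcs() writes that state at creation time, so the
     * tail is simply the resume path for the vcpu's whole lifetime. */
    v->arch.schedule_tail = arch_vmx_do_resume;
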
     1.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Mon Nov 06 13:13:04 2006 +0000
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Mon Nov 06 15:40:30 2006 +0000
     1.3 @@ -285,12 +285,9 @@ static void vmx_set_host_env(struct vcpu
     1.4      error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
     1.5  }
     1.6  
     1.7 -/* Update CR3, CR0, CR4, GDT, LDT, TR */
     1.8 +#if 0
     1.9  static void vmx_do_launch(struct vcpu *v)
    1.10  {
    1.11 -    unsigned int  error = 0;
    1.12 -    unsigned long cr0, cr4;
    1.13 -
    1.14      if ( v->vcpu_id != 0 )
    1.15      {
    1.16          /* Sync AP's TSC with BSP's */
    1.17 @@ -298,62 +295,13 @@ static void vmx_do_launch(struct vcpu *v
    1.18              v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
    1.19          hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
    1.20      }
    1.21 -
    1.22 -    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
    1.23 -
    1.24 -    error |= __vmwrite(GUEST_CR0, cr0);
    1.25 -    cr0 &= ~X86_CR0_PG;
    1.26 -    error |= __vmwrite(CR0_READ_SHADOW, cr0);
    1.27 -    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
    1.28 -    v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
    1.29 -
    1.30 -    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (cr4) : );
    1.31 -
    1.32 -    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
    1.33 -    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
    1.34 -
    1.35 -    error |= __vmwrite(CR4_READ_SHADOW, cr4);
    1.36 -
    1.37 -    hvm_stts(v);
    1.38 -
    1.39 -    if ( vlapic_init(v) == 0 )
    1.40 -    {
    1.41 -#ifdef __x86_64__ 
    1.42 -        u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
    1.43 -        u64  vapic_page_addr = 
    1.44 -                        page_to_maddr(v->arch.hvm_vcpu.vlapic->regs_page);
    1.45 -
    1.46 -        *cpu_exec_control   |= CPU_BASED_TPR_SHADOW;
    1.47 -        *cpu_exec_control   &= ~CPU_BASED_CR8_STORE_EXITING;
    1.48 -        *cpu_exec_control   &= ~CPU_BASED_CR8_LOAD_EXITING;
    1.49 -        error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
    1.50 -        error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR, vapic_page_addr);
    1.51 -        error |= __vmwrite(TPR_THRESHOLD, 0);
    1.52 +}
    1.53  #endif
    1.54 -    }
    1.55 -
    1.56 -    vmx_set_host_env(v);
    1.57 -    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
    1.58 -
    1.59 -    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
    1.60 -    error |= __vmwrite(GUEST_LDTR_BASE, 0);
    1.61 -    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
    1.62 -
    1.63 -    error |= __vmwrite(GUEST_TR_BASE, 0);
    1.64 -    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
    1.65 -
    1.66 -    shadow_update_paging_modes(v);
    1.67 -
    1.68 -    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
    1.69 -    __vmwrite(HOST_CR3, v->arch.cr3);
    1.70 -
    1.71 -    v->arch.schedule_tail = arch_vmx_do_resume;
    1.72 -}
    1.73  
    1.74  static int construct_vmcs(struct vcpu *v)
    1.75  {
    1.76      int error = 0;
    1.77 -    unsigned long tmp;
    1.78 +    unsigned long tmp, cr0, cr4;
    1.79      union vmcs_arbytes arbytes;
    1.80  
    1.81      vmx_vmcs_enter(v);
    1.82 @@ -362,6 +310,8 @@ static int construct_vmcs(struct vcpu *v
    1.83      error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
    1.84      error |= __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
    1.85      error |= __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
    1.86 +    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
    1.87 +    v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
    1.88  
    1.89      /* Host data selectors. */
    1.90      error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    1.91 @@ -465,6 +415,48 @@ static int construct_vmcs(struct vcpu *v
    1.92      error |= __vmwrite(EXCEPTION_BITMAP,
    1.93                         MONITOR_DEFAULT_EXCEPTION_BITMAP);
    1.94  
    1.95 +    /* Guest CR0. */
    1.96 +    cr0 = read_cr0();
    1.97 +    v->arch.hvm_vmx.cpu_cr0 = cr0;
    1.98 +    error |= __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
    1.99 +    v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
   1.100 +    error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   1.101 +
   1.102 +    /* Guest CR4. */
   1.103 +    cr4 = read_cr4();
   1.104 +    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
   1.105 +    v->arch.hvm_vmx.cpu_shadow_cr4 =
   1.106 +        cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
   1.107 +    error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   1.108 +
   1.109 +    /* XXX Move this out. */
   1.110 +    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
   1.111 +    if ( vlapic_init(v) != 0 )
   1.112 +        return -1;
   1.113 +
   1.114 +#ifdef __x86_64__ 
   1.115 +    /* VLAPIC TPR optimisation. */
   1.116 +    v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
   1.117 +    v->arch.hvm_vcpu.u.vmx.exec_control &=
   1.118 +        ~(CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING);
   1.119 +    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
   1.120 +                       v->arch.hvm_vcpu.u.vmx.exec_control);
   1.121 +    error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
   1.122 +                       page_to_maddr(v->arch.hvm_vcpu.vlapic->regs_page));
   1.123 +    error |= __vmwrite(TPR_THRESHOLD, 0);
   1.124 +#endif
   1.125 +
   1.126 +    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
   1.127 +    error |= __vmwrite(GUEST_LDTR_BASE, 0);
   1.128 +    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
   1.129 +
   1.130 +    error |= __vmwrite(GUEST_TR_BASE, 0);
   1.131 +    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
   1.132 +
   1.133 +    shadow_update_paging_modes(v);
   1.134 +    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
   1.135 +    __vmwrite(HOST_CR3, v->arch.cr3);
   1.136 +
   1.137      vmx_vmcs_exit(v);
   1.138  
   1.139      return error;
   1.140 @@ -534,14 +526,6 @@ void arch_vmx_do_resume(struct vcpu *v)
   1.141      reset_stack_and_jump(vmx_asm_do_vmentry);
   1.142  }
   1.143  
   1.144 -void arch_vmx_do_launch(struct vcpu *v)
   1.145 -{
   1.146 -    vmx_load_vmcs(v);
   1.147 -    vmx_do_launch(v);
   1.148 -    reset_stack_and_jump(vmx_asm_do_vmentry);
   1.149 -}
   1.150 -
   1.151 -
   1.152  /* Dump a section of VMCS */
   1.153  static void print_section(char *header, uint32_t start, 
   1.154                            uint32_t end, int incr)
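
Note how the CR0/CR4 initialisation that moved into construct_vmcs() keeps
an authoritative copy in arch_vmx_struct (cpu_cr0, cpu_shadow_cr0,
cpu_shadow_cr4) alongside each VMCS write. A minimal sketch of the
invariant the rest of the patch relies on (update_guest_cr0() is a
hypothetical helper, not part of the patch):

    /* Hypothetical helper: update the cached copy first, then mirror it
     * into the VMCS. Readers can then consult the cache directly instead
     * of __vmread(), which only works against the currently loaded VMCS. */
    static inline void update_guest_cr0(struct vcpu *v, unsigned long cr0)
    {
        v->arch.hvm_vmx.cpu_cr0 = cr0;
        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
    }
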
     2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Nov 06 13:13:04 2006 +0000
     2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Nov 06 15:40:30 2006 +0000
     2.3 @@ -59,7 +59,7 @@ static int vmx_vcpu_initialise(struct vc
     2.4  
     2.5      spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
     2.6  
     2.7 -    v->arch.schedule_tail    = arch_vmx_do_launch;
     2.8 +    v->arch.schedule_tail    = arch_vmx_do_resume;
     2.9      v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
    2.10      v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
    2.11  
    2.12 @@ -474,10 +474,10 @@ static void vmx_store_cpu_guest_regs(
    2.13  
    2.14      if ( crs != NULL )
    2.15      {
    2.16 -        __vmread(CR0_READ_SHADOW, &crs[0]);
    2.17 +        crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
    2.18          crs[2] = v->arch.hvm_vmx.cpu_cr2;
    2.19          __vmread(GUEST_CR3, &crs[3]);
    2.20 -        __vmread(CR4_READ_SHADOW, &crs[4]);
    2.21 +        crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
    2.22      }
    2.23  
    2.24      vmx_vmcs_exit(v);
    2.25 @@ -570,8 +570,6 @@ static unsigned long vmx_get_ctrl_reg(st
    2.26  /* Make sure that xen intercepts any FP accesses from current */
    2.27  static void vmx_stts(struct vcpu *v)
    2.28  {
    2.29 -    unsigned long cr0;
    2.30 -
    2.31      /* VMX depends on operating on the current vcpu */
    2.32      ASSERT(v == current);
    2.33  
    2.34 @@ -581,11 +579,10 @@ static void vmx_stts(struct vcpu *v)
    2.35       * then this is not necessary: no FPU activity can occur until the guest
    2.36       * clears CR0.TS, and we will initialise the FPU when that happens.
    2.37       */
    2.38 -    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    2.39 -    if ( !(cr0 & X86_CR0_TS) )
    2.40 +    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
    2.41      {
    2.42 -        __vmread_vcpu(v, GUEST_CR0, &cr0);
    2.43 -        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
    2.44 +        v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS;
    2.45 +        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
    2.46          __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
    2.47      }
    2.48  }
    2.49 @@ -662,6 +659,12 @@ static int vmx_guest_x86_mode(struct vcp
    2.50              X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16);
    2.51  }
    2.52  
    2.53 +static int vmx_pae_enabled(struct vcpu *v)
    2.54 +{
    2.55 +    unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
    2.56 +    return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
    2.57 +}
    2.58 +
    2.59  /* Setup HVM interfaces */
    2.60  static void vmx_setup_hvm_funcs(void)
    2.61  {
    2.62 @@ -811,19 +814,16 @@ static int vmx_do_page_fault(unsigned lo
    2.63  
    2.64  static void vmx_do_no_device_fault(void)
    2.65  {
    2.66 -    unsigned long cr0;
    2.67      struct vcpu *v = current;
    2.68  
    2.69      setup_fpu(current);
    2.70      __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
    2.71  
    2.72      /* Disable TS in guest CR0 unless the guest wants the exception too. */
    2.73 -    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    2.74 -    if ( !(cr0 & X86_CR0_TS) )
    2.75 +    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
    2.76      {
    2.77 -        __vmread_vcpu(v, GUEST_CR0, &cr0);
    2.78 -        cr0 &= ~X86_CR0_TS;
    2.79 -        __vmwrite(GUEST_CR0, cr0);
    2.80 +        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS;
    2.81 +        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
    2.82      }
    2.83  }
    2.84  
    2.85 @@ -1158,9 +1158,9 @@ static int vmx_world_save(struct vcpu *v
    2.86      error |= __vmread(GUEST_RSP, &c->esp);
    2.87      error |= __vmread(GUEST_RFLAGS, &c->eflags);
    2.88  
    2.89 -    error |= __vmread(CR0_READ_SHADOW, &c->cr0);
    2.90 +    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    2.91      c->cr3 = v->arch.hvm_vmx.cpu_cr3;
    2.92 -    error |= __vmread(CR4_READ_SHADOW, &c->cr4);
    2.93 +    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
    2.94  
    2.95      error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
    2.96      error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
    2.97 @@ -1220,7 +1220,8 @@ static int vmx_world_restore(struct vcpu
    2.98      error |= __vmwrite(GUEST_RSP, c->esp);
    2.99      error |= __vmwrite(GUEST_RFLAGS, c->eflags);
   2.100  
   2.101 -    error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
   2.102 +    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
   2.103 +    error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   2.104  
   2.105      if (!vmx_paging_enabled(v))
   2.106          goto skip_cr3;
   2.107 @@ -1270,7 +1271,8 @@ static int vmx_world_restore(struct vcpu
   2.108          HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
   2.109  
   2.110      error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
   2.111 -    error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
   2.112 +    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
   2.113 +    error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   2.114  
   2.115      error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
   2.116      error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
   2.117 @@ -1408,7 +1410,7 @@ static int vmx_set_cr0(unsigned long val
   2.118      /*
   2.119       * CR0: We don't want to lose PE and PG.
   2.120       */
   2.121 -    __vmread_vcpu(v, CR0_READ_SHADOW, &old_cr0);
   2.122 +    old_cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
   2.123      paging_enabled = (old_cr0 & X86_CR0_PE) && (old_cr0 & X86_CR0_PG);
   2.124  
   2.125      /* TS cleared? Then initialise FPU now. */
   2.126 @@ -1418,8 +1420,11 @@ static int vmx_set_cr0(unsigned long val
   2.127          __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
   2.128      }
   2.129  
   2.130 -    __vmwrite(GUEST_CR0, value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE);
   2.131 -    __vmwrite(CR0_READ_SHADOW, value);
   2.132 +    v->arch.hvm_vmx.cpu_cr0 = value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE;
   2.133 +    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   2.134 +
   2.135 +    v->arch.hvm_vmx.cpu_shadow_cr0 = value;
   2.136 +    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   2.137  
   2.138      HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
   2.139  
   2.140 @@ -1655,9 +1660,9 @@ static int mov_to_cr(int gp, int cr, str
   2.141      }
   2.142      case 4: /* CR4 */
   2.143      {
   2.144 -        __vmread(CR4_READ_SHADOW, &old_cr);
   2.145 +        old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
   2.146  
   2.147 -        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
   2.148 +        if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
   2.149          {
   2.150              if ( vmx_pgbit_test(v) )
   2.151              {
   2.152 @@ -1706,7 +1711,8 @@ static int mov_to_cr(int gp, int cr, str
   2.153          }
   2.154  
   2.155          __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
   2.156 -        __vmwrite(CR4_READ_SHADOW, value);
   2.157 +        v->arch.hvm_vmx.cpu_shadow_cr4 = value;
   2.158 +        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   2.159  
   2.160          /*
   2.161           * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
   2.162 @@ -1804,16 +1810,14 @@ static int vmx_cr_access(unsigned long e
   2.163          setup_fpu(v);
   2.164          __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
   2.165  
   2.166 -        __vmread_vcpu(v, GUEST_CR0, &value);
   2.167 -        value &= ~X86_CR0_TS; /* clear TS */
   2.168 -        __vmwrite(GUEST_CR0, value);
   2.169 +        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */
   2.170 +        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   2.171  
   2.172 -        __vmread_vcpu(v, CR0_READ_SHADOW, &value);
   2.173 -        value &= ~X86_CR0_TS; /* clear TS */
   2.174 -        __vmwrite(CR0_READ_SHADOW, value);
   2.175 +        v->arch.hvm_vmx.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
   2.176 +        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   2.177          break;
   2.178      case TYPE_LMSW:
   2.179 -        __vmread_vcpu(v, CR0_READ_SHADOW, &value);
   2.180 +        value = v->arch.hvm_vmx.cpu_shadow_cr0;
   2.181          value = (value & ~0xF) |
   2.182              (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
   2.183          TRACE_VMEXIT(1, TYPE_LMSW);
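
The vmx.c changes are the payoff: each former __vmread()/__vmread_vcpu() of
CR0_READ_SHADOW, GUEST_CR0 or CR4_READ_SHADOW becomes a read of the cached
copy, which works for any vcpu rather than just current. For example,
vmx_store_cpu_guest_regs() above now mixes cached and VMCS-resident state
when inspecting a (possibly foreign) vcpu:

    vmx_vmcs_enter(v);                        /* v need not be current */
    crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;  /* cached; no VMCS access */
    __vmread(GUEST_CR3, &crs[3]);             /* uncached fields still
                                               * come from the VMCS */
    vmx_vmcs_exit(v);
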
     3.1 --- a/xen/arch/x86/mm/shadow/common.c	Mon Nov 06 13:13:04 2006 +0000
     3.2 +++ b/xen/arch/x86/mm/shadow/common.c	Mon Nov 06 15:40:30 2006 +0000
     3.3 @@ -2273,15 +2273,6 @@ void sh_update_paging_modes(struct vcpu 
     3.4      //     - changes in CR0.PG, CR4.PAE, CR4.PSE, or CR4.PGE
     3.5      //
     3.6  
     3.7 -    // Avoid determining the current shadow mode for uninitialized CPUs, as
     3.8 -    // we can not yet determine whether it is an HVM or PV domain.
     3.9 -    //
    3.10 -    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
    3.11 -    {
    3.12 -        SHADOW_PRINTK("%s: postponing determination of shadow mode\n", __func__);
    3.13 -        return;
    3.14 -    }
    3.15 -
    3.16      // First, tear down any old shadow tables held by this vcpu.
    3.17      //
    3.18      shadow_detach_old_tables(v);
    3.19 @@ -2316,7 +2307,6 @@ void sh_update_paging_modes(struct vcpu 
    3.20          v->arch.shadow.translate_enabled = !!hvm_paging_enabled(v);
    3.21          if ( !v->arch.shadow.translate_enabled )
    3.22          {
    3.23 -            
    3.24              /* Set v->arch.guest_table to use the p2m map, and choose
    3.25               * the appropriate shadow mode */
    3.26              old_guest_table = pagetable_get_mfn(v->arch.guest_table);
     4.1 --- a/xen/arch/x86/mm/shadow/multi.c	Mon Nov 06 13:13:04 2006 +0000
     4.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Mon Nov 06 15:40:30 2006 +0000
     4.3 @@ -3357,7 +3357,7 @@ sh_update_cr3(struct vcpu *v)
     4.4  
     4.5      sh_detach_old_tables(v);
     4.6  
     4.7 -    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
     4.8 +    if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
     4.9      {
    4.10          ASSERT(v->arch.cr3 == 0);
    4.11          return;
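
The two shadow-code tweaks are linked: construct_vmcs() now calls
shadow_update_paging_modes() before the vcpu is marked initialised, so the
blanket early-return in sh_update_paging_modes() is dropped and the guard
that remains in sh_update_cr3() is narrowed to PV vcpus only:

    /* Only PV vcpus still defer until initialised; HVM vcpus have their
     * shadow mode set up during VMCS construction. */
    if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
    {
        ASSERT(v->arch.cr3 == 0);
        return;
    }
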
     5.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Mon Nov 06 13:13:04 2006 +0000
     5.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Mon Nov 06 15:40:30 2006 +0000
     5.3 @@ -76,7 +76,6 @@ struct arch_vmx_struct {
     5.4      unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
     5.5      unsigned long        cpu_cr2; /* save CR2 */
     5.6      unsigned long        cpu_cr3;
     5.7 -    unsigned long        cpu_based_exec_control;
     5.8      struct vmx_msr_state msr_content;
     5.9      unsigned long        vmxassist_enabled:1; 
    5.10  };
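
With __vmwrite() no longer snooping writes into per-vcpu fields (see the
vmx.h hunk below), the cpu_based_exec_control mirror in arch_vmx_struct
loses its only writer and is removed. The explicit cache is
v->arch.hvm_vcpu.u.vmx.exec_control, updated by hand wherever the control
word changes, as in the TPR-shadow setup above:

    v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
              v->arch.hvm_vcpu.u.vmx.exec_control);
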
     6.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Nov 06 13:13:04 2006 +0000
     6.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Nov 06 15:40:30 2006 +0000
     6.3 @@ -30,7 +30,6 @@ extern void vmx_asm_vmexit_handler(struc
     6.4  extern void vmx_asm_do_vmentry(void);
     6.5  extern void vmx_intr_assist(void);
     6.6  extern void vmx_migrate_timers(struct vcpu *v);
     6.7 -extern void arch_vmx_do_launch(struct vcpu *);
     6.8  extern void arch_vmx_do_resume(struct vcpu *);
     6.9  extern void set_guest_time(struct vcpu *v, u64 gtime);
    6.10  
    6.11 @@ -220,54 +219,8 @@ static always_inline int ___vmread(
    6.12      return rc;
    6.13  }
    6.14  
    6.15 -
    6.16 -static always_inline void __vmwrite_vcpu(
    6.17 -    struct vcpu *v, unsigned long field, unsigned long value)
    6.18 -{
    6.19 -    switch ( field ) {
    6.20 -    case CR0_READ_SHADOW:
    6.21 -        v->arch.hvm_vmx.cpu_shadow_cr0 = value;
    6.22 -        break;
    6.23 -    case GUEST_CR0:
    6.24 -        v->arch.hvm_vmx.cpu_cr0 = value;
    6.25 -        break;
    6.26 -    case CR4_READ_SHADOW:
    6.27 -        v->arch.hvm_vmx.cpu_shadow_cr4 = value;
    6.28 -        break;
    6.29 -    case CPU_BASED_VM_EXEC_CONTROL:
    6.30 -        v->arch.hvm_vmx.cpu_based_exec_control = value;
    6.31 -        break;
    6.32 -    default:
    6.33 -        printk("__vmwrite_cpu: invalid field %lx\n", field);
    6.34 -        break;
    6.35 -    }
    6.36 -}
    6.37 -
    6.38 -static always_inline void __vmread_vcpu(
    6.39 -    struct vcpu *v, unsigned long field, unsigned long *value)
    6.40 -{
    6.41 -    switch ( field ) {
    6.42 -    case CR0_READ_SHADOW:
    6.43 -        *value = v->arch.hvm_vmx.cpu_shadow_cr0;
    6.44 -        break;
    6.45 -    case GUEST_CR0:
    6.46 -        *value = v->arch.hvm_vmx.cpu_cr0;
    6.47 -        break;
    6.48 -    case CR4_READ_SHADOW:
    6.49 -        *value = v->arch.hvm_vmx.cpu_shadow_cr4;
    6.50 -        break;
    6.51 -    case CPU_BASED_VM_EXEC_CONTROL:
    6.52 -        *value = v->arch.hvm_vmx.cpu_based_exec_control;
    6.53 -        break;
    6.54 -    default:
    6.55 -        printk("__vmread_vcpu: invalid field %lx\n", field);
    6.56 -        break;
    6.57 -    }
    6.58 -}
    6.59 -
    6.60  static inline int __vmwrite(unsigned long field, unsigned long value)
    6.61  {
    6.62 -    struct vcpu *v = current;
    6.63      int rc;
    6.64  
    6.65      __asm__ __volatile__ ( VMWRITE_OPCODE
    6.66 @@ -278,15 +231,6 @@ static inline int __vmwrite(unsigned lon
    6.67                             : "0" (0), "a" (field) , "c" (value)
    6.68                             : "memory");
    6.69  
    6.70 -    switch ( field ) {
    6.71 -    case CR0_READ_SHADOW:
    6.72 -    case GUEST_CR0:
    6.73 -    case CR4_READ_SHADOW:
    6.74 -    case CPU_BASED_VM_EXEC_CONTROL:
    6.75 -        __vmwrite_vcpu(v, field, value);
    6.76 -        break;
    6.77 -    }
    6.78 -
    6.79      return rc;
    6.80  }
    6.81  
    6.82 @@ -337,18 +281,10 @@ static inline int __vmxon (u64 addr)
    6.83  
    6.84  static inline int vmx_paging_enabled(struct vcpu *v)
    6.85  {
    6.86 -    unsigned long cr0;
    6.87 -    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    6.88 +    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    6.89      return ((cr0 & (X86_CR0_PE|X86_CR0_PG)) == (X86_CR0_PE|X86_CR0_PG));
    6.90  }
    6.91  
    6.92 -static inline int vmx_pae_enabled(struct vcpu *v)
    6.93 -{
    6.94 -    unsigned long cr4;
    6.95 -    __vmread_vcpu(v, CR4_READ_SHADOW, &cr4);
    6.96 -    return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
    6.97 -}
    6.98 -
    6.99  static inline int vmx_long_mode_enabled(struct vcpu *v)
   6.100  {
   6.101      u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
   6.102 @@ -370,9 +306,7 @@ static inline void vmx_update_host_cr3(s
   6.103  
   6.104  static inline int vmx_pgbit_test(struct vcpu *v)
   6.105  {
   6.106 -    unsigned long cr0;
   6.107 -
   6.108 -    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
   6.109 +    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
   6.110      return (cr0 & X86_CR0_PG);
   6.111  }
    6.112  }
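
Net effect of the vmx.h hunk: __vmwrite() is now a plain VMWRITE wrapper
with no reference to current, and __vmread_vcpu()/__vmwrite_vcpu() go away
in favour of the cached fields. That removes the implicit vcpu==current
assumption named in the commit message, so any vcpu's VMCS fields can be
written under the enter/exit pair -- the pattern construct_vmcs() itself
uses:

    vmx_vmcs_enter(v);        /* loads v's VMCS; v need not be current */
    __vmwrite(TPR_THRESHOLD, 0);
    vmx_vmcs_exit(v);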