direct-io.hg

changeset 15498:41c8284cfc0c

vmx: A cleanup to vmx.c:
1) move some functions from vmx.h to vmx.c, since they are private.
2) adjust vmx_set_cr0 handling.
3) mainly coding style tidying.
Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Sat Jul 07 11:08:57 2007 +0100 (2007-07-07)
parents 8528da5be577
children 50c18666d660
files xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/vmx.h
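
In outline, the functional part of item 2 is that vmx_set_cr0() now checks EFER.LME/CR4.PAE consistency before validating the guest CR3, and toggles EFER.LMA together with VM_ENTRY_IA32E_MODE through the new vmx_enable_long_mode()/vmx_disable_long_mode() helpers instead of open-coding the __vmread/__vmwrite of VM_ENTRY_CONTROLS. Condensed from the hunks below (an illustrative excerpt, not the verbatim patched code):

    /* Trying to enable paging. */
    if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
    {
        if ( vmx_lme_is_set(v) && !vmx_long_mode_enabled(v) )
        {
            if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
                /* EFER.LME set but CR4.PAE clear: fault the guest. */
                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
            else
                /* Sets EFER.LMA and VM_ENTRY_IA32E_MODE in one place. */
                vmx_enable_long_mode(v);
        }
        /* ... then validate and install the guest CR3 as before ... */
    }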
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Sat Jul 07 10:31:15 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Sat Jul 07 11:08:57 2007 +0100
     1.3 @@ -98,8 +98,63 @@ static void vmx_vcpu_destroy(struct vcpu
     1.4      vmx_destroy_vmcs(v);
     1.5  }
     1.6  
     1.7 +static int vmx_paging_enabled(struct vcpu *v)
     1.8 +{
     1.9 +    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    1.10 +    return (cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
    1.11 +}
    1.12 +
    1.13 +static int vmx_pgbit_test(struct vcpu *v)
    1.14 +{
    1.15 +    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    1.16 +    return cr0 & X86_CR0_PG;
    1.17 +}
    1.18 +
    1.19 +static int vmx_pae_enabled(struct vcpu *v)
    1.20 +{
    1.21 +    unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
    1.22 +    return vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE);
    1.23 +}
    1.24 +
    1.25 +static int vmx_nx_enabled(struct vcpu *v)
    1.26 +{
    1.27 +    return v->arch.hvm_vmx.efer & EFER_NX;
    1.28 +}
    1.29 +
    1.30  #ifdef __x86_64__
    1.31  
    1.32 +static int vmx_lme_is_set(struct vcpu *v)
    1.33 +{
    1.34 +    return v->arch.hvm_vmx.efer & EFER_LME;
    1.35 +}
    1.36 +
    1.37 +static int vmx_long_mode_enabled(struct vcpu *v)
    1.38 +{
    1.39 +    return v->arch.hvm_vmx.efer & EFER_LMA;
    1.40 +}
    1.41 +
    1.42 +static void vmx_enable_long_mode(struct vcpu *v)
    1.43 +{
    1.44 +    unsigned long vm_entry_value;
    1.45 +
    1.46 +    vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
    1.47 +    vm_entry_value |= VM_ENTRY_IA32E_MODE;
    1.48 +    __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
    1.49 +
    1.50 +    v->arch.hvm_vmx.efer |= EFER_LMA;
    1.51 +}
    1.52 +
    1.53 +static void vmx_disable_long_mode(struct vcpu *v)
    1.54 +{
    1.55 +    unsigned long vm_entry_value;
    1.56 +
    1.57 +    vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
    1.58 +    vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
    1.59 +    __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
    1.60 +
    1.61 +    v->arch.hvm_vmx.efer &= ~EFER_LMA;
    1.62 +}
    1.63 +
    1.64  static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
    1.65  
    1.66  static u32 msr_index[VMX_MSR_COUNT] =
    1.67 @@ -123,7 +178,7 @@ static void vmx_save_host_msrs(void)
    1.68          set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags);     \
    1.69          break
    1.70  
    1.71 -static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
    1.72 +static int long_mode_do_msr_read(struct cpu_user_regs *regs)
    1.73  {
    1.74      u64 msr_content = 0;
    1.75      u32 ecx = regs->ecx;
    1.76 @@ -181,7 +236,7 @@ static inline int long_mode_do_msr_read(
    1.77      return 1;
    1.78  }
    1.79  
    1.80 -static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
    1.81 +static int long_mode_do_msr_write(struct cpu_user_regs *regs)
    1.82  {
    1.83      u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
    1.84      u32 ecx = regs->ecx;
    1.85 @@ -300,6 +355,7 @@ static void vmx_restore_host_msrs(void)
    1.86          wrmsrl(msr_index[i], host_msr_state->msrs[i]);
    1.87          clear_bit(i, &host_msr_state->flags);
    1.88      }
    1.89 +
    1.90      if ( cpu_has_nx && !(read_efer() & EFER_NX) )
    1.91          write_efer(read_efer() | EFER_NX);
    1.92  }
    1.93 @@ -323,7 +379,8 @@ static void vmx_restore_guest_msrs(struc
    1.94  
    1.95      guest_flags = guest_msr_state->flags;
    1.96  
    1.97 -    while ( guest_flags ) {
    1.98 +    while ( guest_flags )
    1.99 +    {
   1.100          i = find_first_set_bit(guest_flags);
   1.101  
   1.102          HVM_DBG_LOG(DBG_LEVEL_2,
   1.103 @@ -334,18 +391,27 @@ static void vmx_restore_guest_msrs(struc
   1.104          clear_bit(i, &guest_flags);
   1.105      }
   1.106  
   1.107 -    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & (EFER_NX|EFER_SCE) )
   1.108 +    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
   1.109      {
   1.110          HVM_DBG_LOG(DBG_LEVEL_2,
   1.111                      "restore guest's EFER with value %lx",
   1.112                      v->arch.hvm_vmx.efer);
   1.113 -        write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
   1.114 -                   (v->arch.hvm_vmx.efer & (EFER_NX|EFER_SCE)));
   1.115 +        write_efer((read_efer() & ~(EFER_NX | EFER_SCE)) |
   1.116 +                   (v->arch.hvm_vmx.efer & (EFER_NX | EFER_SCE)));
   1.117      }
   1.118  }
   1.119  
   1.120  #else  /* __i386__ */
   1.121  
   1.122 +static int vmx_lme_is_set(struct vcpu *v)
   1.123 +{ return 0; }
   1.124 +static int vmx_long_mode_enabled(struct vcpu *v)
   1.125 +{ return 0; }
   1.126 +static void vmx_enable_long_mode(struct vcpu *v)
   1.127 +{ BUG(); }
   1.128 +static void vmx_disable_long_mode(struct vcpu *v)
   1.129 +{ BUG(); }
   1.130 +
   1.131  #define vmx_save_host_msrs()        ((void)0)
   1.132  
   1.133  static void vmx_restore_host_msrs(void)
   1.134 @@ -368,7 +434,7 @@ static void vmx_restore_guest_msrs(struc
   1.135      }
   1.136  }
   1.137  
   1.138 -static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
   1.139 +static int long_mode_do_msr_read(struct cpu_user_regs *regs)
   1.140  {
   1.141      u64 msr_content = 0;
   1.142      struct vcpu *v = current;
   1.143 @@ -388,7 +454,7 @@ static inline int long_mode_do_msr_read(
   1.144      return 1;
   1.145  }
   1.146  
   1.147 -static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
   1.148 +static int long_mode_do_msr_write(struct cpu_user_regs *regs)
   1.149  {
   1.150      u64 msr_content = regs->eax | ((u64)regs->edx << 32);
   1.151      struct vcpu *v = current;
   1.152 @@ -426,7 +492,24 @@ static inline int long_mode_do_msr_write
   1.153  #define savedebug(_v,_reg)  \
   1.154      __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
   1.155  
   1.156 -static inline void vmx_save_dr(struct vcpu *v)
   1.157 +static int vmx_guest_x86_mode(struct vcpu *v)
   1.158 +{
   1.159 +    unsigned int cs_ar_bytes;
   1.160 +
   1.161 +    ASSERT(v == current);
   1.162 +
   1.163 +    if ( unlikely(!(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_PE)) )
   1.164 +        return 0;
   1.165 +    if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
   1.166 +        return 1;
   1.167 +    cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
   1.168 +    if ( vmx_long_mode_enabled(v) &&
   1.169 +         likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
   1.170 +        return 8;
   1.171 +    return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
   1.172 +}
   1.173 +
   1.174 +static void vmx_save_dr(struct vcpu *v)
   1.175  {
   1.176      if ( !v->arch.hvm_vcpu.flag_dr_dirty )
   1.177          return;
   1.178 @@ -444,7 +527,7 @@ static inline void vmx_save_dr(struct vc
   1.179      v->arch.guest_context.debugreg[7] = __vmread(GUEST_DR7);
   1.180  }
   1.181  
   1.182 -static inline void __restore_debug_registers(struct vcpu *v)
   1.183 +static void __restore_debug_registers(struct vcpu *v)
   1.184  {
   1.185      loaddebug(&v->arch.guest_context, 0);
   1.186      loaddebug(&v->arch.guest_context, 1);
   1.187 @@ -455,10 +538,12 @@ static inline void __restore_debug_regis
   1.188      /* DR7 is loaded from the VMCS. */
   1.189  }
   1.190  
   1.191 -int vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
   1.192 -{    
   1.193 +void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
   1.194 +{
   1.195      uint32_t ev;
   1.196  
   1.197 +    vmx_vmcs_enter(v);
   1.198 +
   1.199      c->rip = __vmread(GUEST_RIP);
   1.200      c->rsp = __vmread(GUEST_RSP);
   1.201      c->rflags = __vmread(GUEST_RFLAGS);
   1.202 @@ -468,11 +553,11 @@ int vmx_vmcs_save(struct vcpu *v, struct
   1.203      c->cr3 = v->arch.hvm_vmx.cpu_cr3;
   1.204      c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
   1.205  
   1.206 +    c->msr_efer = v->arch.hvm_vmx.efer;
   1.207 +
   1.208  #ifdef HVM_DEBUG_SUSPEND
   1.209 -    printk("vmx_vmcs_save: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   1.210 -            c->cr3,
   1.211 -            c->cr0,
   1.212 -            c->cr4);
   1.213 +    printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   1.214 +           __func__, c->cr3, c->cr0, c->cr4);
   1.215  #endif
   1.216  
   1.217      c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
   1.218 @@ -525,29 +610,31 @@ int vmx_vmcs_save(struct vcpu *v, struct
   1.219      c->sysenter_esp = __vmread(GUEST_SYSENTER_ESP);
   1.220      c->sysenter_eip = __vmread(GUEST_SYSENTER_EIP);
   1.221  
   1.222 -    /* Save any event/interrupt that was being injected when we last
   1.223 -     * exited.  IDT_VECTORING_INFO_FIELD has priority, as anything in
   1.224 +    /*
   1.225 +     * Save any event/interrupt that was being injected when we last
   1.226 +     * exited. IDT_VECTORING_INFO_FIELD has priority, as anything in
   1.227       * VM_ENTRY_INTR_INFO_FIELD is either a fault caused by the first
   1.228       * event, which will happen the next time, or an interrupt, which we
   1.229 -     * never inject when IDT_VECTORING_INFO_FIELD is valid.*/
   1.230 -    if ( (ev = __vmread(IDT_VECTORING_INFO_FIELD)) & INTR_INFO_VALID_MASK ) 
   1.231 +     * never inject when IDT_VECTORING_INFO_FIELD is valid.
   1.232 +     */
   1.233 +    if ( (ev = __vmread(IDT_VECTORING_INFO_FIELD)) & INTR_INFO_VALID_MASK )
   1.234      {
   1.235          c->pending_event = ev;
   1.236          c->error_code = __vmread(IDT_VECTORING_ERROR_CODE);
   1.237      }
   1.238 -    else if ( (ev = __vmread(VM_ENTRY_INTR_INFO_FIELD)) 
   1.239 -              & INTR_INFO_VALID_MASK ) 
   1.240 +    else if ( (ev = __vmread(VM_ENTRY_INTR_INFO_FIELD)) &
   1.241 +              INTR_INFO_VALID_MASK )
   1.242      {
   1.243          c->pending_event = ev;
   1.244          c->error_code = __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE);
   1.245      }
   1.246 -    else 
   1.247 +    else
   1.248      {
   1.249          c->pending_event = 0;
   1.250          c->error_code = 0;
   1.251      }
   1.252  
   1.253 -    return 1;
   1.254 +    vmx_vmcs_exit(v);
   1.255  }
   1.256  
   1.257  int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
   1.258 @@ -560,47 +647,47 @@ int vmx_vmcs_restore(struct vcpu *v, str
   1.259      __vmwrite(GUEST_RSP, c->rsp);
   1.260      __vmwrite(GUEST_RFLAGS, c->rflags);
   1.261  
   1.262 -    v->arch.hvm_vmx.cpu_cr0 = (c->cr0 | X86_CR0_PE | X86_CR0_PG 
   1.263 -                               | X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
   1.264 +    v->arch.hvm_vmx.cpu_cr0 = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
   1.265 +                               X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
   1.266      __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   1.267      v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
   1.268      __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   1.269  
   1.270      v->arch.hvm_vmx.cpu_cr2 = c->cr2;
   1.271  
   1.272 +    v->arch.hvm_vmx.efer = c->msr_efer;
   1.273 +
   1.274  #ifdef HVM_DEBUG_SUSPEND
   1.275 -    printk("vmx_vmcs_restore: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   1.276 -            c->cr3,
   1.277 -            c->cr0,
   1.278 -            c->cr4);
   1.279 +    printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   1.280 +           __func__, c->cr3, c->cr0, c->cr4);
   1.281  #endif
   1.282  
   1.283 -    if (!vmx_paging_enabled(v)) {
   1.284 -        printk("vmx_vmcs_restore: paging not enabled.");
   1.285 +    if ( !vmx_paging_enabled(v) )
   1.286 +    {
   1.287 +        HVM_DBG_LOG(DBG_LEVEL_VMMU, "%s: paging not enabled.", __func__);
   1.288          goto skip_cr3;
   1.289      }
   1.290  
   1.291 -    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64, c->cr3);
   1.292 +    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
   1.293      /* current!=vcpu as not called by arch_vmx_do_launch */
   1.294      mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
   1.295 -    if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain)) {
   1.296 -        goto bad_cr3;
   1.297 +    if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
   1.298 +    {
   1.299 +        gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64".\n", c->cr3);
   1.300 +        vmx_vmcs_exit(v);
   1.301 +        return -EINVAL;
   1.302      }
   1.303 +
   1.304      old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   1.305      v->arch.guest_table = pagetable_from_pfn(mfn);
   1.306 -    if (old_base_mfn)
   1.307 +    if ( old_base_mfn )
   1.308          put_page(mfn_to_page(old_base_mfn));
   1.309 +
   1.310      v->arch.hvm_vmx.cpu_cr3 = c->cr3;
   1.311  
   1.312   skip_cr3:
   1.313 -#if defined(__x86_64__)
   1.314 -    if (vmx_long_mode_enabled(v)) {
   1.315 -        unsigned long vm_entry_value;
   1.316 -        vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
   1.317 -        vm_entry_value |= VM_ENTRY_IA32E_MODE;
   1.318 -        __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   1.319 -    }
   1.320 -#endif
   1.321 +    if ( vmx_long_mode_enabled(v) )
   1.322 +        vmx_enable_long_mode(v);
   1.323  
   1.324      __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
   1.325      v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
   1.326 @@ -662,53 +749,53 @@ int vmx_vmcs_restore(struct vcpu *v, str
   1.327  
   1.328      paging_update_paging_modes(v);
   1.329  
   1.330 -    if ( c->pending_valid ) 
   1.331 +    if ( c->pending_valid )
   1.332      {
   1.333          vmx_vmcs_enter(v);
   1.334 +
   1.335          gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
   1.336                   c->pending_event, c->error_code);
   1.337  
   1.338          /* SVM uses type 3 ("Exception") for #OF and #BP; VMX uses type 6 */
   1.339 -        if ( c->pending_type == 3 
   1.340 -             && (c->pending_vector == 3 || c->pending_vector == 4) ) 
   1.341 +        if ( (c->pending_type == 3) &&
   1.342 +             ((c->pending_vector == 3) || (c->pending_vector == 4)) )
   1.343              c->pending_type = 6;
   1.344  
   1.345 -        /* For software exceptions, we need to tell the hardware the 
   1.346 +        /* For software exceptions, we need to tell the hardware the
   1.347           * instruction length as well (hmmm). */
   1.348 -        if ( c->pending_type > 4 ) 
   1.349 +        if ( c->pending_type > 4 )
   1.350          {
   1.351 -            int addrbytes, ilen; 
   1.352 -            if ( (c->cs_arbytes & (1u<<13)) && (c->msr_efer & EFER_LMA) ) 
   1.353 +            int addrbytes, ilen;
   1.354 +            if ( (c->cs_arbytes & X86_SEG_AR_CS_LM_ACTIVE) &&
   1.355 +                 (c->msr_efer & EFER_LMA) )
   1.356                  addrbytes = 8;
   1.357 -            else if ( (c->cs_arbytes & (1u<<14)) ) 
   1.358 +            else if ( c->cs_arbytes & X86_SEG_AR_DEF_OP_SIZE )
   1.359                  addrbytes = 4;
   1.360 -            else 
   1.361 +            else
   1.362                  addrbytes = 2;
   1.363 +
   1.364              ilen = hvm_instruction_length(c->rip, addrbytes);
   1.365              __vmwrite(VM_ENTRY_INSTRUCTION_LEN, ilen);
   1.366          }
   1.367  
   1.368          /* Sanity check */
   1.369 -        if ( c->pending_type == 1 || c->pending_type > 6
   1.370 -             || c->pending_reserved != 0 )
   1.371 +        if ( (c->pending_type == 1) || (c->pending_type > 6) ||
   1.372 +             (c->pending_reserved != 0) )
   1.373          {
   1.374 -            gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32"\n", 
   1.375 +            gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32".\n",
   1.376                       c->pending_event);
   1.377              return -EINVAL;
   1.378          }
   1.379 +
   1.380          /* Re-inject the exception */
   1.381          __vmwrite(VM_ENTRY_INTR_INFO_FIELD, c->pending_event);
   1.382          __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, c->error_code);
   1.383          v->arch.hvm_vmx.vector_injected = 1;
   1.384 +
   1.385          vmx_vmcs_exit(v);
   1.386      }
   1.387  
   1.388      return 0;
   1.389 -
   1.390 - bad_cr3:
   1.391 -    gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"", c->cr3);
   1.392 -    vmx_vmcs_exit(v);
   1.393 -    return -EINVAL;
   1.394  }
   1.395  
   1.396  #if defined(__x86_64__) && defined(HVM_DEBUG_SUSPEND)
   1.397 @@ -717,14 +804,14 @@ static void dump_msr_state(struct vmx_ms
   1.398      int i = 0;
   1.399      printk("**** msr state ****\n");
   1.400      printk("shadow_gs=0x%lx, flags=0x%lx, msr_items:", m->shadow_gs, m->flags);
   1.401 -    for (i = 0; i < VMX_MSR_COUNT; i++)
   1.402 +    for ( i = 0; i < VMX_MSR_COUNT; i++ )
   1.403          printk("0x%lx,", m->msrs[i]);
   1.404      printk("\n");
   1.405  }
   1.406  #else
   1.407  #define dump_msr_state(m) ((void)0)
   1.408  #endif
   1.409 -        
   1.410 +
   1.411  static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
   1.412  {
   1.413  #ifdef __x86_64__
   1.414 @@ -741,10 +828,8 @@ static void vmx_save_cpu_state(struct vc
   1.415      data->msr_syscall_mask = guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
   1.416  #endif
   1.417  
   1.418 -    data->msr_efer = v->arch.hvm_vmx.efer;
   1.419 -
   1.420      data->tsc = hvm_get_guest_time(v);
   1.421 -    
   1.422 +
   1.423      dump_msr_state(guest_state);
   1.424  }
   1.425  
   1.426 @@ -763,8 +848,6 @@ static void vmx_load_cpu_state(struct vc
   1.427      v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
   1.428  #endif
   1.429  
   1.430 -    v->arch.hvm_vmx.efer = data->msr_efer;
   1.431 -
   1.432      v->arch.hvm_vmx.vmxassist_enabled = !(data->cr0 & X86_CR0_PE);
   1.433  
   1.434      hvm_set_guest_time(v, data->tsc);
   1.435 @@ -776,16 +859,16 @@ static void vmx_load_cpu_state(struct vc
   1.436  static void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
   1.437  {
   1.438      vmx_save_cpu_state(v, ctxt);
   1.439 -    vmx_vmcs_enter(v);
   1.440      vmx_vmcs_save(v, ctxt);
   1.441 -    vmx_vmcs_exit(v);
   1.442  }
   1.443  
   1.444  static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
   1.445  {
   1.446      vmx_load_cpu_state(v, ctxt);
   1.447 -    if (vmx_vmcs_restore(v, ctxt)) {
   1.448 -        printk("vmx_vmcs restore failed!\n");
   1.449 +
   1.450 +    if ( vmx_vmcs_restore(v, ctxt) )
   1.451 +    {
   1.452 +        gdprintk(XENLOG_ERR, "vmx_vmcs restore failed!\n");
   1.453          domain_crash(v->domain);
   1.454          return -EINVAL;
   1.455      }
   1.456 @@ -801,7 +884,7 @@ static int vmx_load_vmcs_ctxt(struct vcp
   1.457   */
   1.458  #define DR7_ACTIVE_MASK 0xff
   1.459  
   1.460 -static inline void vmx_restore_dr(struct vcpu *v)
   1.461 +static void vmx_restore_dr(struct vcpu *v)
   1.462  {
   1.463      /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
   1.464      if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
   1.465 @@ -915,11 +998,9 @@ static unsigned long vmx_get_segment_bas
   1.466  
   1.467      ASSERT(v == current);
   1.468  
   1.469 -#ifdef __x86_64__
   1.470      if ( vmx_long_mode_enabled(v) &&
   1.471           (__vmread(GUEST_CS_AR_BYTES) & X86_SEG_AR_CS_LM_ACTIVE) )
   1.472          long_mode = 1;
   1.473 -#endif
   1.474  
   1.475      switch ( seg )
   1.476      {
   1.477 @@ -1072,34 +1153,6 @@ static void vmx_init_hypercall_page(stru
   1.478      *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
   1.479  }
   1.480  
   1.481 -static int vmx_guest_x86_mode(struct vcpu *v)
   1.482 -{
   1.483 -    unsigned int cs_ar_bytes;
   1.484 -
   1.485 -    ASSERT(v == current);
   1.486 -
   1.487 -    if ( unlikely(!(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_PE)) )
   1.488 -        return 0;
   1.489 -    if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
   1.490 -        return 1;
   1.491 -    cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
   1.492 -    if ( vmx_long_mode_enabled(v) && likely(cs_ar_bytes &
   1.493 -                                            X86_SEG_AR_CS_LM_ACTIVE) )
   1.494 -        return 8;
   1.495 -    return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
   1.496 -}
   1.497 -
   1.498 -static int vmx_pae_enabled(struct vcpu *v)
   1.499 -{
   1.500 -    unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
   1.501 -    return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
   1.502 -}
   1.503 -
   1.504 -static int vmx_nx_enabled(struct vcpu *v)
   1.505 -{
   1.506 -    return v->arch.hvm_vmx.efer & EFER_NX;
   1.507 -}
   1.508 -
   1.509  static int vmx_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
   1.510  {
   1.511      unsigned long intr_shadow, eflags;
   1.512 @@ -1137,7 +1190,7 @@ static void vmx_update_guest_cr3(struct 
   1.513  
   1.514  static void vmx_flush_guest_tlbs(void)
   1.515  {
   1.516 -    /* No tagged TLB support on VMX yet.  The fact that we're in Xen 
   1.517 +    /* No tagged TLB support on VMX yet.  The fact that we're in Xen
   1.518       * at all means any guest will have a clean TLB when it's next run,
   1.519       * because VMRESUME will flush it for us. */
   1.520  }
   1.521 @@ -1302,7 +1355,7 @@ static int __get_instruction_length(void
   1.522      return len;
   1.523  }
   1.524  
   1.525 -static void inline __update_guest_eip(unsigned long inst_len)
   1.526 +static void __update_guest_eip(unsigned long inst_len)
   1.527  {
   1.528      unsigned long x;
   1.529  
   1.530 @@ -1605,10 +1658,10 @@ static int vmx_str_pio_check_descriptor(
   1.531  }
   1.532  
   1.533  
   1.534 -static inline void vmx_str_pio_check_limit(u32 limit, unsigned int size,
   1.535 -                                           u32 ar_bytes, unsigned long addr,
   1.536 -                                           unsigned long base, int df,
   1.537 -                                           unsigned long *count)
   1.538 +static void vmx_str_pio_check_limit(u32 limit, unsigned int size,
   1.539 +                                    u32 ar_bytes, unsigned long addr,
   1.540 +                                    unsigned long base, int df,
   1.541 +                                    unsigned long *count)
   1.542  {
   1.543      unsigned long ea = addr - base;
   1.544  
   1.545 @@ -1664,10 +1717,10 @@ static inline void vmx_str_pio_check_lim
   1.546  }
   1.547  
   1.548  #ifdef __x86_64__
   1.549 -static inline void vmx_str_pio_lm_check_limit(struct cpu_user_regs *regs,
   1.550 -                                              unsigned int size,
   1.551 -                                              unsigned long addr,
   1.552 -                                              unsigned long *count)
   1.553 +static void vmx_str_pio_lm_check_limit(struct cpu_user_regs *regs,
   1.554 +                                       unsigned int size,
   1.555 +                                       unsigned long addr,
   1.556 +                                       unsigned long *count)
   1.557  {
   1.558      if ( !is_canonical_address(addr) ||
   1.559           !is_canonical_address(addr + size - 1) )
   1.560 @@ -1693,12 +1746,12 @@ static inline void vmx_str_pio_lm_check_
   1.561  }
   1.562  #endif
   1.563  
   1.564 -static inline void vmx_send_str_pio(struct cpu_user_regs *regs,
   1.565 -                                    struct hvm_io_op *pio_opp,
   1.566 -                                    unsigned long inst_len, unsigned int port,
   1.567 -                                    int sign, unsigned int size, int dir,
   1.568 -                                    int df, unsigned long addr,
   1.569 -                                    unsigned long paddr, unsigned long count)
   1.570 +static void vmx_send_str_pio(struct cpu_user_regs *regs,
   1.571 +                             struct hvm_io_op *pio_opp,
   1.572 +                             unsigned long inst_len, unsigned int port,
   1.573 +                             int sign, unsigned int size, int dir,
   1.574 +                             int df, unsigned long addr,
   1.575 +                             unsigned long paddr, unsigned long count)
   1.576  {
   1.577      /*
   1.578       * Handle string pio instructions that cross pages or that
   1.579 @@ -1754,10 +1807,10 @@ static inline void vmx_send_str_pio(stru
   1.580      }
   1.581  }
   1.582  
   1.583 -static void vmx_str_pio_handler(unsigned long exit_qualification,
   1.584 -                                unsigned long inst_len,
   1.585 -                                struct cpu_user_regs *regs,
   1.586 -                                struct hvm_io_op *pio_opp)
   1.587 +static void vmx_do_str_pio(unsigned long exit_qualification,
   1.588 +                           unsigned long inst_len,
   1.589 +                           struct cpu_user_regs *regs,
   1.590 +                           struct hvm_io_op *pio_opp)
   1.591  {
   1.592      unsigned int port, size;
   1.593      int dir, df, vm86;
   1.594 @@ -1786,11 +1839,9 @@ static void vmx_str_pio_handler(unsigned
   1.595  
   1.596      sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
   1.597      ar_bytes = __vmread(GUEST_CS_AR_BYTES);
   1.598 -#ifdef __x86_64__
   1.599      if ( vmx_long_mode_enabled(current) &&
   1.600           (ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
   1.601          long_mode = 1;
   1.602 -#endif
   1.603      addr = __vmread(GUEST_LINEAR_ADDRESS);
   1.604  
   1.605      if ( test_bit(5, &exit_qualification) ) { /* "rep" prefix */
   1.606 @@ -1874,7 +1925,7 @@ static void vmx_io_instruction(unsigned 
   1.607                  regs->cs, (unsigned long)regs->eip, exit_qualification);
   1.608  
   1.609      if ( test_bit(4, &exit_qualification) ) /* string instrucation */
   1.610 -        vmx_str_pio_handler(exit_qualification, inst_len, regs, pio_opp);
   1.611 +        vmx_do_str_pio(exit_qualification, inst_len, regs, pio_opp);
   1.612      else
   1.613      {
   1.614          unsigned int port, size;
   1.615 @@ -2000,7 +2051,7 @@ static int vmx_world_restore(struct vcpu
   1.616              goto bad_cr3;
   1.617          old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   1.618          v->arch.guest_table = pagetable_from_pfn(mfn);
   1.619 -        if (old_base_mfn)
   1.620 +        if ( old_base_mfn )
   1.621               put_page(mfn_to_page(old_base_mfn));
   1.622          v->arch.hvm_vmx.cpu_cr3 = c->cr3;
   1.623      }
   1.624 @@ -2078,13 +2129,13 @@ static int vmx_assist(struct vcpu *v, in
   1.625      u32 magic, cp;
   1.626  
   1.627      /* make sure vmxassist exists (this is not an error) */
   1.628 -    if (hvm_copy_from_guest_phys(&magic, VMXASSIST_MAGIC_OFFSET,
   1.629 -                                 sizeof(magic)))
   1.630 +    if ( hvm_copy_from_guest_phys(&magic, VMXASSIST_MAGIC_OFFSET,
   1.631 +                                  sizeof(magic)) )
   1.632          return 0;
   1.633 -    if (magic != VMXASSIST_MAGIC)
   1.634 +    if ( magic != VMXASSIST_MAGIC )
   1.635          return 0;
   1.636  
   1.637 -    switch (mode) {
   1.638 +    switch ( mode ) {
   1.639          /*
   1.640           * Transfer control to vmxassist.
   1.641           * Store the current context in VMXASSIST_OLD_CONTEXT and load
   1.642 @@ -2093,19 +2144,19 @@ static int vmx_assist(struct vcpu *v, in
   1.643           */
   1.644      case VMX_ASSIST_INVOKE:
   1.645          /* save the old context */
   1.646 -        if (hvm_copy_from_guest_phys(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp)))
   1.647 +        if ( hvm_copy_from_guest_phys(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp)) )
   1.648              goto error;
   1.649 -        if (cp != 0) {
   1.650 +        if ( cp != 0 ) {
   1.651              vmx_world_save(v, &c);
   1.652 -            if (hvm_copy_to_guest_phys(cp, &c, sizeof(c)))
   1.653 +            if ( hvm_copy_to_guest_phys(cp, &c, sizeof(c)))
   1.654                  goto error;
   1.655          }
   1.656  
   1.657          /* restore the new context, this should activate vmxassist */
   1.658 -        if (hvm_copy_from_guest_phys(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp)))
   1.659 +        if ( hvm_copy_from_guest_phys(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp)) )
   1.660              goto error;
   1.661 -        if (cp != 0) {
   1.662 -            if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
   1.663 +        if ( cp != 0 ) {
   1.664 +            if ( hvm_copy_from_guest_phys(&c, cp, sizeof(c)) )
   1.665                  goto error;
   1.666              if ( vmx_world_restore(v, &c) != 0 )
   1.667                  goto error;
   1.668 @@ -2124,10 +2175,10 @@ static int vmx_assist(struct vcpu *v, in
   1.669           */
   1.670      case VMX_ASSIST_RESTORE:
   1.671          /* save the old context */
   1.672 -        if (hvm_copy_from_guest_phys(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp)))
   1.673 +        if ( hvm_copy_from_guest_phys(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp)) )
   1.674              goto error;
   1.675 -        if (cp != 0) {
   1.676 -            if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
   1.677 +        if ( cp != 0 ) {
   1.678 +            if ( hvm_copy_from_guest_phys(&c, cp, sizeof(c)) )
   1.679                  goto error;
   1.680              if ( vmx_world_restore(v, &c) != 0 )
   1.681                  goto error;
   1.682 @@ -2156,7 +2207,6 @@ static int vmx_set_cr0(unsigned long val
   1.683      unsigned long mfn;
   1.684      unsigned long eip;
   1.685      int paging_enabled;
   1.686 -    unsigned long vm_entry_value;
   1.687      unsigned long old_cr0;
   1.688      unsigned long old_base_mfn;
   1.689  
   1.690 @@ -2181,17 +2231,32 @@ static int vmx_set_cr0(unsigned long val
   1.691      old_cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
   1.692      paging_enabled = old_cr0 & X86_CR0_PG;
   1.693  
   1.694 -    v->arch.hvm_vmx.cpu_cr0 = (value | X86_CR0_PE | X86_CR0_PG 
   1.695 +    v->arch.hvm_vmx.cpu_cr0 = (value | X86_CR0_PE | X86_CR0_PG
   1.696                                 | X86_CR0_NE | X86_CR0_WP);
   1.697      __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   1.698  
   1.699      v->arch.hvm_vmx.cpu_shadow_cr0 = value;
   1.700      __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   1.701  
   1.702 +    /* Trying to enable paging. */
   1.703      if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
   1.704      {
   1.705 +        if ( vmx_lme_is_set(v) && !vmx_long_mode_enabled(v) )
   1.706 +        {
   1.707 +            if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
   1.708 +            {
   1.709 +                HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
   1.710 +                            "with EFER.LME set but not CR4.PAE");
   1.711 +                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
   1.712 +            }
   1.713 +            else
   1.714 +            {
   1.715 +                HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
   1.716 +                vmx_enable_long_mode(v);
   1.717 +            }
   1.718 +        }
   1.719 +
   1.720          /*
   1.721 -         * Trying to enable guest paging.
   1.722           * The guest CR3 must be pointing to the guest physical.
   1.723           */
   1.724          mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
   1.725 @@ -2203,33 +2268,14 @@ static int vmx_set_cr0(unsigned long val
   1.726              return 0;
   1.727          }
   1.728  
   1.729 -#if defined(__x86_64__)
   1.730 -        if ( vmx_lme_is_set(v) )
   1.731 -        {
   1.732 -            if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
   1.733 -            {
   1.734 -                HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
   1.735 -                            "with EFER.LME set but not CR4.PAE");
   1.736 -                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
   1.737 -            }
   1.738 -            else
   1.739 -            {
   1.740 -                HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
   1.741 -                v->arch.hvm_vmx.efer |= EFER_LMA;
   1.742 -                vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
   1.743 -                vm_entry_value |= VM_ENTRY_IA32E_MODE;
   1.744 -                __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   1.745 -            }
   1.746 -        }
   1.747 -#endif
   1.748 -
   1.749          /*
   1.750           * Now arch.guest_table points to machine physical.
   1.751           */
   1.752          old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   1.753          v->arch.guest_table = pagetable_from_pfn(mfn);
   1.754 -        if (old_base_mfn)
   1.755 +        if ( old_base_mfn )
   1.756              put_page(mfn_to_page(old_base_mfn));
   1.757 +
   1.758          paging_update_paging_modes(v);
   1.759  
   1.760          HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
   1.761 @@ -2239,13 +2285,21 @@ static int vmx_set_cr0(unsigned long val
   1.762                      v->arch.hvm_vmx.cpu_cr3, mfn);
   1.763      }
   1.764  
   1.765 -    if ( !((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled )
   1.766 -        if ( v->arch.hvm_vmx.cpu_cr3 ) {
   1.767 +    /* Trying to disable paging. */
   1.768 +    if ( ((value & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) &&
   1.769 +         paging_enabled )
   1.770 +    {
   1.771 +        if ( v->arch.hvm_vmx.cpu_cr3 )
   1.772 +        {
   1.773              put_page(mfn_to_page(get_mfn_from_gpfn(
   1.774                        v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
   1.775              v->arch.guest_table = pagetable_null();
   1.776          }
   1.777  
   1.778 +        if ( vmx_long_mode_enabled(v) )
   1.779 +            vmx_disable_long_mode(v);
   1.780 +    }
   1.781 +
   1.782      /*
   1.783       * VMX does not implement real-mode virtualization. We emulate
   1.784       * real-mode by performing a world switch to VMXAssist whenever
   1.785 @@ -2257,18 +2311,6 @@ static int vmx_set_cr0(unsigned long val
   1.786              /* inject GP here */
   1.787              vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
   1.788              return 0;
   1.789 -        } else {
   1.790 -            /*
   1.791 -             * Disable paging here.
   1.792 -             * Same to PE == 1 && PG == 0
   1.793 -             */
   1.794 -            if ( vmx_long_mode_enabled(v) )
   1.795 -            {
   1.796 -                v->arch.hvm_vmx.efer &= ~EFER_LMA;
   1.797 -                vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
   1.798 -                vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
   1.799 -                __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   1.800 -            }
   1.801          }
   1.802  
   1.803          if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
   1.804 @@ -2292,16 +2334,7 @@ static int vmx_set_cr0(unsigned long val
   1.805          }
   1.806      }
   1.807      else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
   1.808 -    {
   1.809 -        if ( vmx_long_mode_enabled(v) )
   1.810 -        {
   1.811 -            v->arch.hvm_vmx.efer &= ~EFER_LMA;
   1.812 -            vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
   1.813 -            vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
   1.814 -            __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   1.815 -        }
   1.816          paging_update_paging_modes(v);
   1.817 -    }
   1.818  
   1.819      return 1;
   1.820  }
   1.821 @@ -2370,7 +2403,7 @@ static int mov_to_cr(int gp, int cr, str
   1.822          /*
   1.823           * If paging is not enabled yet, simply copy the value to CR3.
   1.824           */
   1.825 -        if (!vmx_paging_enabled(v)) {
   1.826 +        if ( !vmx_paging_enabled(v) ) {
   1.827              v->arch.hvm_vmx.cpu_cr3 = value;
   1.828              break;
   1.829          }
   1.830 @@ -2378,14 +2411,14 @@ static int mov_to_cr(int gp, int cr, str
   1.831          /*
   1.832           * We make a new one if the shadow does not exist.
   1.833           */
   1.834 -        if (value == v->arch.hvm_vmx.cpu_cr3) {
   1.835 +        if ( value == v->arch.hvm_vmx.cpu_cr3 ) {
   1.836              /*
   1.837               * This is simple TLB flush, implying the guest has
   1.838               * removed some translation or changed page attributes.
   1.839               * We simply invalidate the shadow.
   1.840               */
   1.841              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
   1.842 -            if (mfn != pagetable_get_pfn(v->arch.guest_table))
   1.843 +            if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
   1.844                  goto bad_cr3;
   1.845              paging_update_cr3(v);
   1.846          } else {
   1.847 @@ -2399,7 +2432,7 @@ static int mov_to_cr(int gp, int cr, str
   1.848                  goto bad_cr3;
   1.849              old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   1.850              v->arch.guest_table = pagetable_from_pfn(mfn);
   1.851 -            if (old_base_mfn)
   1.852 +            if ( old_base_mfn )
   1.853                  put_page(mfn_to_page(old_base_mfn));
   1.854              v->arch.hvm_vmx.cpu_cr3 = value;
   1.855              update_cr3(v);
   1.856 @@ -2443,7 +2476,7 @@ static int mov_to_cr(int gp, int cr, str
   1.857                  HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
   1.858                              (unsigned long) (mfn << PAGE_SHIFT));
   1.859  
   1.860 -                HVM_DBG_LOG(DBG_LEVEL_VMMU, 
   1.861 +                HVM_DBG_LOG(DBG_LEVEL_VMMU,
   1.862                              "Update CR3 value = %lx, mfn = %lx",
   1.863                              v->arch.hvm_vmx.cpu_cr3, mfn);
   1.864  #endif
   1.865 @@ -2469,6 +2502,7 @@ static int mov_to_cr(int gp, int cr, str
   1.866           */
   1.867          if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
   1.868              paging_update_paging_modes(v);
   1.869 +
   1.870          break;
   1.871  
   1.872      case 8:
   1.873 @@ -2545,7 +2579,7 @@ static int vmx_cr_access(unsigned long e
   1.874      unsigned long value;
   1.875      struct vcpu *v = current;
   1.876  
   1.877 -    switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
   1.878 +    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE ) {
   1.879      case TYPE_MOV_TO_CR:
   1.880          gp = exit_qualification & CONTROL_REG_ACCESS_REG;
   1.881          cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
   1.882 @@ -2578,7 +2612,7 @@ static int vmx_cr_access(unsigned long e
   1.883      return 1;
   1.884  }
   1.885  
   1.886 -static inline int vmx_do_msr_read(struct cpu_user_regs *regs)
   1.887 +static int vmx_do_msr_read(struct cpu_user_regs *regs)
   1.888  {
   1.889      u64 msr_content = 0;
   1.890      u32 ecx = regs->ecx, eax, edx;
   1.891 @@ -2699,7 +2733,7 @@ void vmx_vlapic_msr_changed(struct vcpu 
   1.892      vmx_vmcs_exit(v);
   1.893  }
   1.894  
   1.895 -static inline int vmx_do_msr_write(struct cpu_user_regs *regs)
   1.896 +static int vmx_do_msr_write(struct cpu_user_regs *regs)
   1.897  {
   1.898      u32 ecx = regs->ecx;
   1.899      u64 msr_content;
   1.900 @@ -2752,7 +2786,7 @@ static void vmx_do_hlt(void)
   1.901      hvm_hlt(rflags);
   1.902  }
   1.903  
   1.904 -static inline void vmx_do_extint(struct cpu_user_regs *regs)
   1.905 +static void vmx_do_extint(struct cpu_user_regs *regs)
   1.906  {
   1.907      unsigned int vector;
   1.908  
     2.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Sat Jul 07 10:31:15 2007 +0100
     2.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Sat Jul 07 11:08:57 2007 +0100
     2.3 @@ -263,28 +263,6 @@ static inline int __vmxon (u64 addr)
     2.4      return rc;
     2.5  }
     2.6  
     2.7 -static inline int vmx_paging_enabled(struct vcpu *v)
     2.8 -{
     2.9 -    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    2.10 -    return ((cr0 & (X86_CR0_PE|X86_CR0_PG)) == (X86_CR0_PE|X86_CR0_PG));
    2.11 -}
    2.12 -
    2.13 -static inline int vmx_long_mode_enabled(struct vcpu *v)
    2.14 -{
    2.15 -    return v->arch.hvm_vmx.efer & EFER_LMA;
    2.16 -}
    2.17 -
    2.18 -static inline int vmx_lme_is_set(struct vcpu *v)
    2.19 -{
    2.20 -    return v->arch.hvm_vmx.efer & EFER_LME;
    2.21 -}
    2.22 -
    2.23 -static inline int vmx_pgbit_test(struct vcpu *v)
    2.24 -{
    2.25 -    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    2.26 -    return (cr0 & X86_CR0_PG);
    2.27 -}
    2.28 -
    2.29  static inline void __vmx_inject_exception(struct vcpu *v, int trap, int type,
    2.30                                           int error_code, int ilen)
    2.31  {