ia64/xen-unstable

changeset 15668:f42ca20a1bb3

hvm: Clean up CR0 handling.

Any attempt to set the upper 32 bits should raise #GP. Reserved bits in the
lower 32 bits should be silently cleared.
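
A minimal standalone sketch of that policy (the helper name and the literal
bit mask are illustrative only; the real logic is in svm_set_cr0() and
vmx_set_cr0() in the diff below):

    #include <stdbool.h>
    #include <stdint.h>

    /* Architecturally defined bits in the low 32 bits of CR0:
     * PE|MP|EM|TS|ET|NE|WP|AM|NW|CD|PG. Everything else is reserved. */
    #define CR0_DEFINED_BITS  0xe005003fUL

    /* Returns false if the write should raise #GP; otherwise stores the
     * sanitised value (reserved low bits silently cleared) in *out. */
    static bool guest_cr0_write(unsigned long value, unsigned long *out)
    {
        if ( (uint32_t)value != value )
            return false;                   /* upper 32 bits set: #GP */
        *out = value & CR0_DEFINED_BITS;    /* clear reserved low bits */
        return true;
    }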

Check the Intel VMX capability MSRs (IA32_VMX_CR0_FIXED0/1) to verify that
the host CR0 settings are compatible with VMX operation.
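
The FIXED0/FIXED1 semantics are: a CR0 bit that is 1 in IA32_VMX_CR0_FIXED0
must be 1, and a bit that is 0 in IA32_VMX_CR0_FIXED1 must be 0. A standalone
sketch of the check added to vmx_cpu_up() (the MSR values are passed in here
rather than read with rdmsrl(), so the snippet compiles on its own):

    #include <stdbool.h>
    #include <stdint.h>

    /* True if cr0 satisfies the fixed-bit constraints for VMX operation. */
    static bool cr0_valid_for_vmx(uint64_t cr0, uint64_t fixed0, uint64_t fixed1)
    {
        /* A required-1 bit is clear, or a required-0 bit is set -> invalid. */
        return !((~cr0 & fixed0) || (cr0 & ~fixed1));
    }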

Signed-off-by: Eric E Liu <eric.e.liu@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Jul 27 09:43:07 2007 +0100 (2007-07-27)
parents 5682f899c7ae
children d9c3836e0684
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/hvm.h
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri Jul 27 09:06:58 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Fri Jul 27 09:43:07 2007 +0100
     1.3 @@ -1668,6 +1668,17 @@ static int svm_set_cr0(unsigned long val
     1.4    
     1.5      HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
     1.6  
     1.7 +    if ( (u32)value != value )
     1.8 +    {
     1.9 +        HVM_DBG_LOG(DBG_LEVEL_1,
    1.10 +                    "Guest attempts to set upper 32 bits in CR0: %lx",
    1.11 +                    value);
    1.12 +        svm_inject_exception(v, TRAP_gp_fault, 1, 0);
    1.13 +        return 0;
    1.14 +    }
    1.15 +
    1.16 +    value &= ~HVM_CR0_GUEST_RESERVED_BITS;
    1.17 +
    1.18      /* ET is reserved and should always be 1. */
    1.19      value |= X86_CR0_ET;
    1.20  
     2.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Fri Jul 27 09:06:58 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Fri Jul 27 09:43:07 2007 +0100
     2.3 @@ -240,9 +240,24 @@ int vmx_cpu_up(void)
     2.4  {
     2.5      u32 eax, edx;
     2.6      int cpu = smp_processor_id();
     2.7 +    u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1;
     2.8  
     2.9      BUG_ON(!(read_cr4() & X86_CR4_VMXE));
    2.10  
    2.11 +    /*
    2.12 +     * Ensure the current processor operating mode meets
    2.13 +     * the required CR0 fixed bits in VMX operation.
    2.14 +     */
    2.15 +    cr0 = read_cr0();
    2.16 +    rdmsrl(MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0);
    2.17 +    rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1);
    2.18 +    if ( (~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1) )
    2.19 +    {
    2.20 +        printk("CPU%d: some settings of host CR0 are " 
    2.21 +               "not allowed in VMX operation.\n", cpu);
    2.22 +        return 0;
    2.23 +    }
    2.24 +
    2.25      rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
    2.26  
    2.27      if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK )
     3.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Jul 27 09:06:58 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Jul 27 09:43:07 2007 +0100
     3.3 @@ -2200,6 +2200,17 @@ static int vmx_set_cr0(unsigned long val
     3.4  
     3.5      HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
     3.6  
     3.7 +    if ( (u32)value != value )
     3.8 +    {
     3.9 +        HVM_DBG_LOG(DBG_LEVEL_1,
    3.10 +                    "Guest attempts to set upper 32 bits in CR0: %lx",
    3.11 +                    value);
    3.12 +        vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    3.13 +        return 0;
    3.14 +    }
    3.15 +
    3.16 +    value &= ~HVM_CR0_GUEST_RESERVED_BITS;
    3.17 +
    3.18      /* ET is reserved and should always be 1. */
    3.19      value |= X86_CR0_ET;
    3.20  
     4.1 --- a/xen/include/asm-x86/hvm/hvm.h	Fri Jul 27 09:06:58 2007 +0100
     4.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Fri Jul 27 09:43:07 2007 +0100
     4.3 @@ -300,6 +300,13 @@ static inline int hvm_event_injection_fa
     4.4      return hvm_funcs.event_injection_faulted(v);
     4.5  }
     4.6  
     4.7 +/* These reserved bits in the lower 32 bits of CR0 remain 0 after any load. */
     4.8 +#define HVM_CR0_GUEST_RESERVED_BITS \
     4.9 +    ~(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | \
    4.10 +      X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | \
    4.11 +      X86_CR0_WP | X86_CR0_AM | X86_CR0_NW | \
    4.12 +      X86_CR0_CD | X86_CR0_PG)
    4.13 +
    4.14  /* These bits in CR4 are owned by the host. */
    4.15  #define HVM_CR4_HOST_MASK (mmu_cr4_features & \
    4.16      (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))