direct-io.hg

changeset 10322:8d5d225e9f34

[HVM][VMX] Provide the right view of CPUID to HVM guests.
Some CPU features, such as APIC, PAE, MTRR, and HT, are virtualized,
while others, such as TM1, TM2, and MCA, are not virtualized yet; still
others, such as MMX, need no virtualization at all. With this patch the
guest sees in CPUID only those processor features which are either
virtualized in the hypervisor or do not need any virtualization.

Signed-off-by: Nitin A Kamble <nitin.a.kamble@intel.com>
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Asit Mallick <asit.k.mallick@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jun 12 09:45:03 2006 +0100 (2006-06-12)
parents e2a2b2da92f4
children a936c9c3ea60
files xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/cpu.h
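
For readers skimming the change, the core idea is plain bit masking of the
CPUID result before it is handed back to the guest. The stand-alone sketch
below is an editorial illustration, not part of the patch: the bit positions
are the architectural CPUID leaf-1 EDX positions, the bitmaskof() macro has
the same shape as the one the patch introduces, and the set of hidden
features is chosen only for the example.

    #include <stdio.h>

    /* Architectural CPUID leaf-1 EDX feature bit positions. */
    #define FEAT_MCE    7
    #define FEAT_MCA   14
    #define FEAT_ACPI  22
    #define FEAT_HT    28
    #define FEAT_TM1   29

    /* Same shape as the patch's bitmaskof(): turn a bit index into a mask. */
    #define bitmaskof(idx) (1U << ((idx) & 31))

    /* Features this example hypervisor does not virtualize, hence hidden
     * from the guest's view of CPUID leaf 1. */
    #define HIDDEN_EDX (bitmaskof(FEAT_MCE)  | bitmaskof(FEAT_MCA) | \
                        bitmaskof(FEAT_ACPI) | bitmaskof(FEAT_HT)  | \
                        bitmaskof(FEAT_TM1))

    /* Mask the host's leaf-1 EDX so the guest sees only supportable bits. */
    static unsigned int guest_leaf1_edx(unsigned int host_edx)
    {
        return host_edx & ~HIDDEN_EDX;
    }

    int main(void)
    {
        unsigned int host_edx = 0xbfebfbffU;  /* arbitrary example host value */
        printf("host  edx = 0x%08x\n", host_edx);
        printf("guest edx = 0x%08x\n", guest_leaf1_edx(host_edx));
        return 0;
    }
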
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Jun 12 09:06:55 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Jun 12 09:45:03 2006 +0100
     1.3 @@ -38,6 +38,7 @@
     1.4  #include <asm/hvm/support.h>
     1.5  #include <asm/hvm/vmx/vmx.h>
     1.6  #include <asm/hvm/vmx/vmcs.h>
     1.7 +#include <asm/hvm/vmx/cpu.h>
     1.8  #include <asm/shadow.h>
     1.9  #if CONFIG_PAGING_LEVELS >= 3
    1.10  #include <asm/shadow_64.h>
    1.11 @@ -749,9 +750,7 @@ static void vmx_do_no_device_fault(void)
    1.12      }
    1.13  }
    1.14  
    1.15 -/* Reserved bits: [31:15], [12:11], [9], [6], [2:1] */
    1.16 -#define VMX_VCPU_CPUID_L1_RESERVED 0xffff9a46
    1.17 -
    1.18 +#define bitmaskof(idx) (1U << ((idx)&31))
    1.19  static void vmx_vmexit_do_cpuid(struct cpu_user_regs *regs)
    1.20  {
    1.21      unsigned int input = (unsigned int)regs->eax;
    1.22 @@ -768,50 +767,74 @@ static void vmx_vmexit_do_cpuid(struct c
    1.23                  (unsigned long)regs->ecx, (unsigned long)regs->edx,
    1.24                  (unsigned long)regs->esi, (unsigned long)regs->edi);
    1.25  
    1.26 -    if ( input == 4 )
    1.27 +    if ( input == CPUID_LEAF_0x4 )
    1.28 +    {
    1.29          cpuid_count(input, count, &eax, &ebx, &ecx, &edx);
    1.30 +        eax &= NUM_CORES_RESET_MASK;  
    1.31 +    }
    1.32      else
    1.33 +    {
    1.34          cpuid(input, &eax, &ebx, &ecx, &edx);
    1.35  
    1.36 -    if ( input == 1 )
    1.37 -    {
    1.38 -        if ( !hvm_apic_support(v->domain) ||
    1.39 -             !vlapic_global_enabled((VLAPIC(v))) )
    1.40 +        if ( input == CPUID_LEAF_0x1 )
    1.41          {
    1.42 -            clear_bit(X86_FEATURE_APIC, &edx);
    1.43 -            /* Since the apic is disabled, avoid any confusion about SMP cpus being available */
    1.44 -            clear_bit(X86_FEATURE_HT, &edx);  /* clear the hyperthread bit */
    1.45 -            ebx &= 0xFF00FFFF;  /* set the logical processor count to 1 */
    1.46 -            ebx |= 0x00010000;
    1.47 -        }
    1.48 +            /* mask off reserved bits */
    1.49 +            ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED; 
    1.50  
    1.51 +            if ( !hvm_apic_support(v->domain) ||
    1.52 +                 !vlapic_global_enabled((VLAPIC(v))) )
    1.53 +            {
    1.54 +                /* Since the apic is disabled, avoid any 
    1.55 +                confusion about SMP cpus being available */
    1.56  
    1.57 +                clear_bit(X86_FEATURE_APIC, &edx);
    1.58 +            }
    1.59 +    
    1.60  #if CONFIG_PAGING_LEVELS < 3
    1.61 -        clear_bit(X86_FEATURE_PAE, &edx);
    1.62 -        clear_bit(X86_FEATURE_PSE, &edx);
    1.63 -        clear_bit(X86_FEATURE_PSE36, &edx);
    1.64 +            edx &= ~(bitmaskof(X86_FEATURE_PAE)  |
    1.65 +                     bitmaskof(X86_FEATURE_PSE)  |
    1.66 +                     bitmaskof(X86_FEATURE_PSE36)); 
    1.67  #else
    1.68 -        if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
    1.69 +            if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
    1.70 +            {
    1.71 +                if ( !v->domain->arch.hvm_domain.pae_enabled )
    1.72 +                    clear_bit(X86_FEATURE_PAE, &edx);
    1.73 +                clear_bit(X86_FEATURE_PSE, &edx);
    1.74 +                clear_bit(X86_FEATURE_PSE36, &edx);
    1.75 +            }
    1.76 +#endif
    1.77 +
    1.78 +            ebx &= NUM_THREADS_RESET_MASK;  
    1.79 +
    1.80 +            /* Unsupportable for virtualised CPUs. */
    1.81 +            ecx &= ~(bitmaskof(X86_FEATURE_VMXE)  |
    1.82 +                     bitmaskof(X86_FEATURE_EST)   |
    1.83 +                     bitmaskof(X86_FEATURE_TM2)   |
    1.84 +                     bitmaskof(X86_FEATURE_CID)   |
    1.85 +                     bitmaskof(X86_FEATURE_MWAIT) );
    1.86 +
    1.87 +            edx &= ~( bitmaskof(X86_FEATURE_HT)   |
    1.88 +                     bitmaskof(X86_FEATURE_MCA)   |
    1.89 +                     bitmaskof(X86_FEATURE_MCE)   |
    1.90 +                     bitmaskof(X86_FEATURE_ACPI)  |
    1.91 +                     bitmaskof(X86_FEATURE_ACC) );
    1.92 +        }
    1.93 +        else if (  ( input == CPUID_LEAF_0x6 ) 
    1.94 +                || ( input == CPUID_LEAF_0x9 )
    1.95 +                || ( input == CPUID_LEAF_0xA ))
    1.96          {
    1.97 -            if ( !v->domain->arch.hvm_domain.pae_enabled )
    1.98 -                clear_bit(X86_FEATURE_PAE, &edx);
    1.99 -            clear_bit(X86_FEATURE_PSE, &edx);
   1.100 -            clear_bit(X86_FEATURE_PSE36, &edx);
   1.101 +            eax = ebx = ecx = edx = 0x0;
   1.102 +        }
   1.103 +#ifdef __i386__
   1.104 +        else if ( input == CPUID_LEAF_0x80000001 )
   1.105 +        {
   1.106 +            clear_bit(X86_FEATURE_LAHF_LM & 31, &ecx);
   1.107 +
   1.108 +            clear_bit(X86_FEATURE_LM & 31, &edx);
   1.109 +            clear_bit(X86_FEATURE_SYSCALL & 31, &edx);
   1.110          }
   1.111  #endif
   1.112 -
   1.113 -        /* Unsupportable for virtualised CPUs. */
   1.114 -        ecx &= ~VMX_VCPU_CPUID_L1_RESERVED; /* mask off reserved bits */
   1.115 -        clear_bit(X86_FEATURE_VMXE & 31, &ecx);
   1.116 -        clear_bit(X86_FEATURE_MWAIT & 31, &ecx);
   1.117      }
   1.118 -#ifdef __i386__
   1.119 -    else if ( input == 0x80000001 )
   1.120 -    {
   1.121 -        /* Mask feature for Intel ia32e or AMD long mode. */
   1.122 -        clear_bit(X86_FEATURE_LM & 31, &edx);
   1.123 -    }
   1.124 -#endif
   1.125  
   1.126      regs->eax = (unsigned long) eax;
   1.127      regs->ebx = (unsigned long) ebx;
     2.1 --- a/xen/include/asm-x86/hvm/vmx/cpu.h	Mon Jun 12 09:06:55 2006 +0100
     2.2 +++ b/xen/include/asm-x86/hvm/vmx/cpu.h	Mon Jun 12 09:45:03 2006 +0100
     2.3 @@ -32,4 +32,21 @@ struct arch_state_struct {
     2.4  #define VMX_MF_32       1
     2.5  #define VMX_MF_64       2
     2.6  
     2.7 +#define CPUID_LEAF_0x1        0x1
     2.8 +#define CPUID_LEAF_0x4        0x4
     2.9 +#define CPUID_LEAF_0x6        0x6
    2.10 +#define CPUID_LEAF_0x9        0x9
    2.11 +#define CPUID_LEAF_0xA        0xA
    2.12 +#define CPUID_LEAF_0x80000001 0x80000001
    2.13 +
    2.14 +#define NUM_CORES_RESET_MASK                 0x00003FFF
    2.15 +#define NUM_THREADS_RESET_MASK               0xFF00FFFF
    2.16 +
    2.17 +#define VMX_VCPU_CPUID_L1_ECX_RESERVED_18    0x00040000
    2.18 +#define VMX_VCPU_CPUID_L1_ECX_RESERVED_6     0x00000040
    2.19 +
    2.20 +#define VMX_VCPU_CPUID_L1_ECX_RESERVED             \
    2.21 +            ( VMX_VCPU_CPUID_L1_ECX_RESERVED_18  | \
    2.22 +              VMX_VCPU_CPUID_L1_ECX_RESERVED_6   )
    2.23 +
    2.24  #endif /* __ASM_X86_HVM_VMX_CPU_H__ */
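
A quick way to observe the effect of this masking is to read leaf 1 from
inside the guest and check that bits such as MCA (EDX bit 14) and HT (EDX
bit 28) now read as clear. The snippet below is an editorial sketch, not
part of the patch; it uses GCC's <cpuid.h> helper in a guest userspace
program.

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;

        /* In an HVM guest running under this patch, MCA and HT should be hidden. */
        printf("MCA: %s\n", (edx & (1u << 14)) ? "visible" : "hidden");
        printf("HT : %s\n", (edx & (1u << 28)) ? "visible" : "hidden");
        return 0;
    }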