ia64/xen-unstable

changeset 9190:3219aa25ad46

PAE guests can now run on SMP 64-bit Xen0.

Optimize the save/restore handling of the EFER MSR during context switch.

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Xiaohui Xin <xiaohui.xin@intel.com>
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Mar 08 11:53:39 2006 +0100 (2006-03-08)
parents f55d6c299c7e
children 64f890639992
files xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/shadow.c xen/include/asm-x86/shadow_64.h
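
The vmx.c hunk below adds a reserved-bit check on guest writes to EFER: any bit outside LME, LMA, NX and SCE is reserved, and setting one earns the guest a #GP. What follows is a minimal, self-contained sketch of that check; the EFER_* bit positions are architectural, but the function names are illustrative stand-ins rather than the Xen ones.

/* Sketch of the reserved-bit check the vmx.c hunk adds (hypothetical
 * names). A guest write to EFER may only set LME, LMA, NX or SCE;
 * anything else is an offending reserved bit and should raise #GP(0). */
#include <stdint.h>
#include <stdio.h>

#define EFER_SCE (1ULL << 0)   /* SYSCALL/SYSRET enable */
#define EFER_LME (1ULL << 8)   /* long mode enable */
#define EFER_LMA (1ULL << 10)  /* long mode active */
#define EFER_NX  (1ULL << 11)  /* no-execute enable */

static int efer_write_faults(uint64_t msr_content)
{
    /* Non-zero means a reserved bit is set, so the write must fault. */
    return (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) != 0;
}

int main(void)
{
    printf("%d\n", efer_write_faults(EFER_LME | EFER_NX)); /* 0: legal write */
    printf("%d\n", efer_write_faults(1ULL << 1));          /* 1: reserved bit set */
    return 0;
}
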
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Mar 08 11:51:17 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Mar 08 11:53:39 2006 +0100
     1.3 @@ -223,6 +223,11 @@ static inline int long_mode_do_msr_write
     1.4  
     1.5      switch (regs->ecx){
     1.6      case MSR_EFER:
     1.7 +        /* offending reserved bit will cause #GP */
     1.8 +        if ( msr_content &
     1.9 +                ~( EFER_LME | EFER_LMA | EFER_NX | EFER_SCE ) )
    1.10 +             vmx_inject_exception(vc, TRAP_gp_fault, 0);
    1.11 +
    1.12          if ((msr_content & EFER_LME) ^
    1.13              test_bit(VMX_CPU_STATE_LME_ENABLED,
    1.14                       &vc->arch.hvm_vmx.cpu_state)){
    1.15 @@ -236,18 +241,9 @@ static inline int long_mode_do_msr_write
    1.16          if (msr_content & EFER_LME)
    1.17              set_bit(VMX_CPU_STATE_LME_ENABLED,
    1.18                      &vc->arch.hvm_vmx.cpu_state);
    1.19 -        /* No update for LME/LMA since it have no effect */
    1.20 +
    1.21          msr->msr_items[VMX_INDEX_MSR_EFER] =
    1.22              msr_content;
    1.23 -        if (msr_content & ~(EFER_LME | EFER_LMA)){
    1.24 -            msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
    1.25 -            if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){
    1.26 -                rdmsrl(MSR_EFER,
    1.27 -                       host_state->msr_items[VMX_INDEX_MSR_EFER]);
    1.28 -                set_bit(VMX_INDEX_MSR_EFER, &host_state->flags);
    1.29 -                set_bit(VMX_INDEX_MSR_EFER, &msr->flags);
    1.30 -            }
    1.31 -        }
    1.32          break;
    1.33  
    1.34      case MSR_FS_BASE:
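
The block deleted above lazily snapshotted the host EFER in the MSR-write path: the first time a guest touched EFER, the host value was read with rdmsrl() and flag bits were set so the restore path knew a live value was latched. Below is a rough sketch of that save-on-first-use pattern, with invented names (msr_state, IDX_EFER, read_host_efer) standing in for Xen's structures; per the commit message, the patch drops this from the write path as part of optimizing EFER save/restore at context switch.

#include <stdint.h>
#include <stdio.h>

struct msr_state {
    uint64_t items[1];    /* shadowed MSR values; slot 0 = EFER here */
    unsigned long flags;  /* bit n set once items[n] holds a live value */
};

#define IDX_EFER 0

/* Stand-in for rdmsrl(MSR_EFER, ...). */
static uint64_t read_host_efer(void)
{
    return (1ULL << 11) | (1ULL << 10) | (1ULL << 8); /* NX | LMA | LME */
}

static void guest_efer_write(struct msr_state *guest, struct msr_state *host,
                             uint64_t msr_content)
{
    guest->items[IDX_EFER] = msr_content;
    if (!(guest->flags & (1UL << IDX_EFER))) {
        /* First guest use: snapshot the host EFER so the context
         * switch code knows it must restore it later. */
        host->items[IDX_EFER] = read_host_efer();
        host->flags |= 1UL << IDX_EFER;
        guest->flags |= 1UL << IDX_EFER;
    }
}

int main(void)
{
    struct msr_state guest = { {0}, 0 }, host = { {0}, 0 };
    guest_efer_write(&guest, &host, 1ULL << 8); /* first write latches host EFER */
    guest_efer_write(&guest, &host, 0);         /* later writes skip the snapshot */
    printf("host saved: %lu\n", host.flags & 1UL);
    return 0;
}
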
     2.1 --- a/xen/arch/x86/shadow.c	Wed Mar 08 11:51:17 2006 +0100
     2.2 +++ b/xen/arch/x86/shadow.c	Wed Mar 08 11:53:39 2006 +0100
     2.3 @@ -3583,6 +3583,11 @@ static inline int guest_page_fault(
     2.4  
     2.5      ASSERT( d->arch.ops->guest_paging_levels >= PAGING_L3 );
     2.6  
     2.7 +#if CONFIG_PAGING_LEVELS >= 4
     2.8 +    if ( (error_code & (ERROR_I | ERROR_P)) == (ERROR_I | ERROR_P) )
     2.9 +        return 1;
    2.10 +#endif
    2.11 +
    2.12  #if CONFIG_PAGING_LEVELS == 4
    2.13      if ( d->arch.ops->guest_paging_levels == PAGING_L4 ) 
    2.14      {
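
The early-out added to guest_page_fault() tests two bits of the architectural x86 page-fault error code: bit 0 (P, the faulting access hit a present page) and bit 4 (I/D, the access was an instruction fetch, reported when NX is in use). When both are set there is no missing shadow mapping to repair, so the function reports the fault as the guest's own. A small stand-alone sketch of that decode, using the same masks the shadow_64.h hunk below defines:

#include <stdio.h>

#define ERROR_P (1 << 0)  /* page was present; protection violation */
#define ERROR_W (1 << 1)  /* write access */
#define ERROR_U (1 << 2)  /* user-mode access */
#define ERROR_I (1 << 4)  /* instruction fetch (reported with NX enabled) */

/* Mirrors the check added to guest_page_fault(): an instruction fetch
 * that faulted on a present page is the guest's fault to handle. */
static int fault_is_guests(unsigned int error_code)
{
    return (error_code & (ERROR_I | ERROR_P)) == (ERROR_I | ERROR_P);
}

int main(void)
{
    printf("%d\n", fault_is_guests(ERROR_I | ERROR_P)); /* 1 */
    printf("%d\n", fault_is_guests(ERROR_P | ERROR_W)); /* 0 */
    return 0;
}
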
     3.1 --- a/xen/include/asm-x86/shadow_64.h	Wed Mar 08 11:51:17 2006 +0100
     3.2 +++ b/xen/include/asm-x86/shadow_64.h	Wed Mar 08 11:53:39 2006 +0100
     3.3 @@ -51,8 +51,11 @@ typedef struct { intpte_t l4; } l4_pgent
     3.4  #define READ_FAULT  0
     3.5  #define WRITE_FAULT 1
     3.6  
     3.7 -#define ERROR_W    2
     3.8 +#define ERROR_P     1
     3.9 +#define ERROR_W     2
    3.10  #define ERROR_U     4
    3.11 +#define ERROR_I     (1 << 4)
    3.12 +
    3.13  #define X86_64_SHADOW_DEBUG 0
    3.14  
    3.15  #if X86_64_SHADOW_DEBUG