ia64/xen-unstable

changeset 15729:da2c7dab1a3a

hvm: More cleanups, particularly to %cr4 handling.
Signed-off-by: Keir Fraser <keir@xensource.com>
Signed-off-by: Xin Li <xin.b.li@intel.com>
author kfraser@localhost.localdomain
date Wed Aug 08 11:28:26 2007 +0100 (2007-08-08)
parents 511c41a55045
children 35337d5c83f9
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vioapic.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm/hap/hap.c xen/arch/x86/mm/shadow/multi.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/support.h
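
In short: the CR4 emulation that svm.c and vmx.c each open-coded moves into a
single hvm_set_cr4() in hvm.c, and the vendor update_guest_cr3() hook becomes
a per-register update_guest_cr() hook. Both vendor mov-to-CR handlers therefore
reduce to the same delegation (a condensed view of the hunks below):

    case 3:
        return hvm_set_cr3(value);
    case 4:
        return hvm_set_cr4(value);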
line diff
--- a/xen/arch/x86/hvm/hvm.c	Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Wed Aug 08 11:28:26 2007 +0100
@@ -525,16 +525,9 @@ int hvm_set_cr3(unsigned long value)
     unsigned long old_base_mfn, mfn;
     struct vcpu *v = current;
 
-    if ( paging_mode_hap(v->domain) )
+    if ( paging_mode_hap(v->domain) || !hvm_paging_enabled(v) )
     {
-        /* HAP mode. HAP-specific code does all the hard work. */
-        v->arch.hvm_vcpu.guest_cr[3] = value;
-        paging_update_cr3(v);
-    }
-    else if ( !hvm_paging_enabled(v) )
-    {
-        /* Shadow-mode, paging disabled. Just update guest CR3 value. */
-        v->arch.hvm_vcpu.guest_cr[3] = value;
+        /* Nothing to do. */
     }
     else if ( value == v->arch.hvm_vcpu.guest_cr[3] )
     {
@@ -542,7 +535,6 @@ int hvm_set_cr3(unsigned long value)
         mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
         if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
             goto bad_cr3;
-        paging_update_cr3(v);
     }
     else
     {
@@ -558,11 +550,11 @@ int hvm_set_cr3(unsigned long value)
         if ( old_base_mfn )
             put_page(mfn_to_page(old_base_mfn));
 
-        v->arch.hvm_vcpu.guest_cr[3] = value;
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
-        paging_update_cr3(v);
     }
 
+    v->arch.hvm_vcpu.guest_cr[3] = value;
+    paging_update_cr3(v);
     return 1;
 
  bad_cr3:
@@ -571,6 +563,44 @@ int hvm_set_cr3(unsigned long value)
     return 0;
 }
 
+int hvm_set_cr4(unsigned long value)
+{
+    struct vcpu *v = current;
+    unsigned long old_cr;
+
+    if ( value & HVM_CR4_GUEST_RESERVED_BITS )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_1,
+                    "Guest attempts to set reserved bit in CR4: %lx",
+                    value);
+        goto gpf;
+    }
+
+    if ( !(value & X86_CR4_PAE) && hvm_long_mode_enabled(v) )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
+                    "EFER.LMA is set");
+        goto gpf;
+    }
+
+    old_cr = v->arch.hvm_vcpu.guest_cr[4];
+    v->arch.hvm_vcpu.guest_cr[4] = value;
+    v->arch.hvm_vcpu.hw_cr[4] = value | HVM_CR4_HOST_MASK;
+    if ( paging_mode_hap(v->domain) )
+        v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
+    hvm_update_guest_cr(v, 4);
+
+    /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
+    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
+        paging_update_paging_modes(v);
+
+    return 1;
+
+ gpf:
+    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    return 0;
+}
+
 /*
  * __hvm_copy():
  *  @buf  = hypervisor buffer
@@ -863,12 +893,6 @@ int hvm_do_hypercall(struct cpu_user_reg
             flush ? HVM_HCALL_invalidate : HVM_HCALL_completed);
 }
 
-void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
-{
-    v->arch.hvm_vcpu.hw_cr[3] = guest_cr3;
-    hvm_funcs.update_guest_cr3(v);
-}
-
 static void hvm_latch_shinfo_size(struct domain *d)
 {
     /*
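
Note the TLB-flush test in the new hvm_set_cr4(): XOR-ing the old and new
values isolates exactly the bits that toggled, so a CR4 write that leaves
PSE/PGE/PAE unchanged does not force a paging-mode recalculation. A small
illustration (not part of the patch; the values are made up for the example):

    /* Only OSFXSR differs, so the PSE/PGE/PAE test is false and no
     * paging_update_paging_modes() call (and hence no TLB flush) occurs. */
    unsigned long old_cr = X86_CR4_PGE | X86_CR4_PAE;
    unsigned long value  = old_cr | X86_CR4_OSFXSR;
    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
        paging_update_paging_modes(v);    /* not reached in this example */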
--- a/xen/arch/x86/hvm/svm/svm.c	Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Wed Aug 08 11:28:26 2007 +0100
@@ -578,10 +578,20 @@ static void svm_update_host_cr3(struct v
     /* SVM doesn't have a HOST_CR3 equivalent to update. */
 }
 
-static void svm_update_guest_cr3(struct vcpu *v)
+static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
 {
-    v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
-    svm_asid_inv_asid(v);
+    switch ( cr )
+    {
+    case 3:
+        v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
+        svm_asid_inv_asid(v);
+        break;
+    case 4:
+        v->arch.hvm_svm.vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4];
+        break;
+    default:
+        BUG();
+    }
 }
 
 static void svm_flush_guest_tlbs(void)
@@ -917,7 +927,7 @@ static struct hvm_function_table svm_fun
     .get_segment_base     = svm_get_segment_base,
     .get_segment_register = svm_get_segment_register,
     .update_host_cr3      = svm_update_host_cr3,
-    .update_guest_cr3     = svm_update_guest_cr3,
+    .update_guest_cr      = svm_update_guest_cr,
     .flush_guest_tlbs     = svm_flush_guest_tlbs,
     .update_vtpr          = svm_update_vtpr,
     .stts                 = svm_stts,
@@ -1684,9 +1694,6 @@ static int svm_set_cr0(unsigned long val
     return 1;
 }
 
-/*
- * Read from control registers. CR0 and CR4 are read from the shadow.
- */
 static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
 {
     unsigned long value = 0;
@@ -1725,13 +1732,9 @@ static void mov_from_cr(int cr, int gp,
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx", cr, value);
 }
 
-
-/*
- * Write to control registers
- */
 static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
 {
-    unsigned long value, old_cr;
+    unsigned long value;
     struct vcpu *v = current;
     struct vlapic *vlapic = vcpu_vlapic(v);
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -1752,69 +1755,7 @@ static int mov_to_cr(int gpreg, int cr,
         return hvm_set_cr3(value);
 
     case 4:
-        if ( value & HVM_CR4_GUEST_RESERVED_BITS )
-        {
-            HVM_DBG_LOG(DBG_LEVEL_1,
-                        "Guest attempts to set reserved bit in CR4: %lx",
-                        value);
-            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-            break;
-        }
-
-        if ( paging_mode_hap(v->domain) )
-        {
-            v->arch.hvm_vcpu.guest_cr[4] = value;
-            vmcb->cr4 = value | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
-            paging_update_paging_modes(v);
-            break;
-        }
-
-        old_cr = v->arch.hvm_vcpu.guest_cr[4];
-        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
-        {
-            if ( hvm_paging_enabled(v) )
-            {
-#if CONFIG_PAGING_LEVELS >= 3
-                /* The guest is a 32-bit PAE guest. */
-                unsigned long mfn, old_base_mfn;
-                mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
-                if ( !mfn_valid(mfn) ||
-                     !get_page(mfn_to_page(mfn), v->domain) )
-                    goto bad_cr3;
-
-                /*
-                 * Now arch.guest_table points to machine physical.
-                 */
-                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-                v->arch.guest_table = pagetable_from_pfn(mfn);
-                if ( old_base_mfn )
-                    put_page(mfn_to_page(old_base_mfn));
-                paging_update_paging_modes(v);
-
-                HVM_DBG_LOG(DBG_LEVEL_VMMU,
-                            "Update CR3 value = %lx, mfn = %lx",
-                            v->arch.hvm_vcpu.guest_cr[3], mfn);
-#endif
-            }
-        }
-        else if ( !(value & X86_CR4_PAE) )
-        {
-            if ( hvm_long_mode_enabled(v) )
-            {
-                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-            }
-        }
-
-        v->arch.hvm_vcpu.guest_cr[4] = value;
-        vmcb->cr4 = value | HVM_CR4_HOST_MASK;
-
-        /*
-         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
-         * all TLB entries except global entries.
-         */
-        if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
-            paging_update_paging_modes(v);
-        break;
+        return hvm_set_cr4(value);
 
     case 8:
         vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -1828,19 +1769,11 @@ static int mov_to_cr(int gpreg, int cr,
     }
 
     return 1;
-
- bad_cr3:
-    gdprintk(XENLOG_ERR, "Invalid CR3\n");
-    domain_crash(v->domain);
-    return 0;
 }
 
-
-#define ARR_SIZE(x) (sizeof(x) / sizeof(x[0]))
-
-
-static int svm_cr_access(struct vcpu *v, unsigned int cr, unsigned int type,
-                         struct cpu_user_regs *regs)
+static void svm_cr_access(
    struct vcpu *v, unsigned int cr, unsigned int type,
+    struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int inst_len = 0;
@@ -1865,12 +1798,12 @@ static int svm_cr_access(struct vcpu *v,
     if ( type == TYPE_MOV_TO_CR )
     {
         inst_len = __get_instruction_length_from_list(
-            v, list_a, ARR_SIZE(list_a), &buffer[index], &match);
+            v, list_a, ARRAY_SIZE(list_a), &buffer[index], &match);
     }
     else /* type == TYPE_MOV_FROM_CR */
     {
         inst_len = __get_instruction_length_from_list(
-            v, list_b, ARR_SIZE(list_b), &buffer[index], &match);
+            v, list_b, ARRAY_SIZE(list_b), &buffer[index], &match);
     }
 
     ASSERT(inst_len > 0);
@@ -1883,7 +1816,7 @@ static int svm_cr_access(struct vcpu *v,
 
     HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long) vmcb->rip);
 
-    switch (match)
+    switch ( match )
     {
     case INSTR_MOV2CR:
         gpreg = decode_src_reg(prefix, buffer[index+2]);
@@ -1974,9 +1908,8 @@ static int svm_cr_access(struct vcpu *v,
 
     ASSERT(inst_len);
 
-    __update_guest_eip(vmcb, inst_len);
-
-    return result;
+    if ( result )
+        __update_guest_eip(vmcb, inst_len);
 }
 
 static void svm_do_msr_access(
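
Taken together, the svm.c hunks give a guest MOV-to-CR4 on AMD hardware the
following call chain (a sketch assembled from this changeset, not new code):

    svm_cr_access()                      /* decode the intercepted instruction */
      -> mov_to_cr()                     /* case 4 */
        -> hvm_set_cr4()                 /* common checks and bookkeeping, hvm.c */
          -> hvm_update_guest_cr(v, 4)   /* inline wrapper, hvm.h */
            -> hvm_funcs.update_guest_cr /* == svm_update_guest_cr */
              -> vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4]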
--- a/xen/arch/x86/hvm/vioapic.c	Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/hvm/vioapic.c	Wed Aug 08 11:28:26 2007 +0100
@@ -43,10 +43,6 @@
 /* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
 #define IRQ0_SPECIAL_ROUTING 1
 
-#if defined(__ia64__)
-#define opt_hvm_debug_level opt_vmx_debug_level
-#endif
-
 static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq);
 
 static unsigned long vioapic_read_indirect(struct hvm_hw_vioapic *vioapic,
--- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 08 11:28:26 2007 +0100
@@ -1087,11 +1087,25 @@ static void vmx_update_host_cr3(struct v
     vmx_vmcs_exit(v);
 }
 
-static void vmx_update_guest_cr3(struct vcpu *v)
+static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
 {
     ASSERT((v == current) || !vcpu_runnable(v));
+
     vmx_vmcs_enter(v);
-    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
+
+    switch ( cr )
+    {
+    case 3:
+        __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
+        break;
+    case 4:
+        __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
+        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
+        break;
+    default:
+        BUG();
+    }
+
     vmx_vmcs_exit(v);
 }
 
@@ -1157,7 +1171,7 @@ static struct hvm_function_table vmx_fun
     .get_segment_base     = vmx_get_segment_base,
     .get_segment_register = vmx_get_segment_register,
     .update_host_cr3      = vmx_update_host_cr3,
-    .update_guest_cr3     = vmx_update_guest_cr3,
+    .update_guest_cr      = vmx_update_guest_cr,
     .flush_guest_tlbs     = vmx_flush_guest_tlbs,
     .update_vtpr          = vmx_update_vtpr,
     .stts                 = vmx_stts,
@@ -2263,12 +2277,9 @@ static int vmx_set_cr0(unsigned long val
     CASE_ ## T ## ET_REG(R15, r15)
 #endif
 
-/*
- * Write to control registers
- */
 static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
 {
-    unsigned long value, old_cr;
+    unsigned long value;
     struct vcpu *v = current;
     struct vlapic *vlapic = vcpu_vlapic(v);
 
@@ -2303,66 +2314,7 @@ static int mov_to_cr(int gp, int cr, str
         return hvm_set_cr3(value);
 
     case 4:
-        old_cr = v->arch.hvm_vcpu.guest_cr[4];
-
-        if ( value & HVM_CR4_GUEST_RESERVED_BITS )
-        {
-            HVM_DBG_LOG(DBG_LEVEL_1,
-                        "Guest attempts to set reserved bit in CR4: %lx",
-                        value);
-            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-            return 0;
-        }
-
-        if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
-        {
-            if ( hvm_paging_enabled(v) )
-            {
-#if CONFIG_PAGING_LEVELS >= 3
-                /* The guest is a 32-bit PAE guest. */
-                unsigned long mfn, old_base_mfn;
-                mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
-                if ( !mfn_valid(mfn) ||
-                     !get_page(mfn_to_page(mfn), v->domain) )
-                    goto bad_cr3;
-
-                /*
-                 * Now arch.guest_table points to machine physical.
-                 */
-                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-                v->arch.guest_table = pagetable_from_pfn(mfn);
-                if ( old_base_mfn )
-                    put_page(mfn_to_page(old_base_mfn));
-
-                HVM_DBG_LOG(DBG_LEVEL_VMMU,
-                            "Update CR3 value = %lx, mfn = %lx",
-                            v->arch.hvm_vcpu.guest_cr[3], mfn);
-#endif
-            }
-        }
-        else if ( !(value & X86_CR4_PAE) )
-        {
-            if ( unlikely(hvm_long_mode_enabled(v)) )
-            {
-                HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
-                            "EFER.LMA is set");
-                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-                return 0;
-            }
-        }
-
-        __vmwrite(GUEST_CR4, value | HVM_CR4_HOST_MASK);
-        v->arch.hvm_vcpu.guest_cr[4] = value;
-        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
-
-        /*
-         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
-         * all TLB entries except global entries.
-         */
-        if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
-            paging_update_paging_modes(v);
-
-        break;
+        return hvm_set_cr4(value);
 
     case 8:
         vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -2370,14 +2322,11 @@ static int mov_to_cr(int gp, int cr, str
 
     default:
         gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
-        domain_crash(v->domain);
-        return 0;
+        goto exit_and_crash;
     }
 
     return 1;
 
- bad_cr3:
-    gdprintk(XENLOG_ERR, "Invalid CR3\n");
  exit_and_crash:
     domain_crash(v->domain);
     return 0;
@@ -2438,7 +2387,8 @@ static int vmx_cr_access(unsigned long e
     unsigned long value;
     struct vcpu *v = current;
 
-    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE ) {
+    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE )
+    {
     case TYPE_MOV_TO_CR:
         gp = exit_qualification & CONTROL_REG_ACCESS_REG;
         cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
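
On the VMX side, vmx_update_guest_cr() makes the CR4 shadow split explicit:
GUEST_CR4 holds the value the processor really runs with (the guest's bits
plus HVM_CR4_HOST_MASK, staged in hw_cr[4] by hvm_set_cr4()), while
CR4_READ_SHADOW holds the value the guest wrote and will read back:

    __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);           /* what the CPU uses */
    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);  /* what the guest sees */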
--- a/xen/arch/x86/mm/hap/hap.c	Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/mm/hap/hap.c	Wed Aug 08 11:28:26 2007 +0100
@@ -605,7 +605,8 @@ static int hap_invlpg(struct vcpu *v, un
 
 static void hap_update_cr3(struct vcpu *v, int do_locking)
 {
-    hvm_update_guest_cr3(v, v->arch.hvm_vcpu.guest_cr[3]);
+    v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
+    hvm_update_guest_cr(v, 3);
 }
 
 static void hap_update_paging_modes(struct vcpu *v)
@@ -631,7 +632,7 @@ static void hap_update_paging_modes(stru
     }
 
     /* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
-    hvm_update_guest_cr3(v, v->arch.hvm_vcpu.guest_cr[3]);
+    hap_update_cr3(v, 0);
 
     hap_unlock(d);
 }
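
The hap.c hunk also shows the calling convention that replaces
hvm_update_guest_cr3(): callers first stage the hardware value in
v->arch.hvm_vcpu.hw_cr[n], then call hvm_update_guest_cr(v, n) so the vendor
code can sync it into the VMCB/VMCS. The shadow code in the next hunk follows
the same two-step pattern with the shadow table address:

    v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3]; /* HAP: guest CR3 as-is */
    hvm_update_guest_cr(v, 3); /* SVM: vmcb->cr3 = ...; VMX: __vmwrite(GUEST_CR3, ...) */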
--- a/xen/arch/x86/mm/shadow/multi.c	Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Wed Aug 08 11:28:26 2007 +0100
@@ -3483,7 +3483,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
  * Paravirtual guests should set v->arch.guest_table (and guest_table_user,
  * if appropriate).
  * HVM guests should also make sure hvm_get_guest_cntl_reg(v, 3) works;
- * this function will call hvm_update_guest_cr3() to tell them where the
+ * this function will call hvm_update_guest_cr(v, 3) to tell them where the
  * shadow tables are.
  * If do_locking != 0, assume we are being called from outside the
  * shadow code, and must take and release the shadow lock; otherwise
@@ -3725,11 +3725,14 @@ sh_update_cr3(struct vcpu *v, int do_loc
         ASSERT(is_hvm_domain(d));
 #if SHADOW_PAGING_LEVELS == 3
         /* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
-        hvm_update_guest_cr3(v, virt_to_maddr(&v->arch.paging.shadow.l3table));
+        v->arch.hvm_vcpu.hw_cr[3] =
+            virt_to_maddr(&v->arch.paging.shadow.l3table);
 #else
         /* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
-        hvm_update_guest_cr3(v, pagetable_get_paddr(v->arch.shadow_table[0]));
+        v->arch.hvm_vcpu.hw_cr[3] =
+            pagetable_get_paddr(v->arch.shadow_table[0]);
 #endif
+        hvm_update_guest_cr(v, 3);
     }
 
     /* Fix up the linear pagetable mappings */
--- a/xen/include/asm-x86/hvm/hvm.h	Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h	Wed Aug 08 11:28:26 2007 +0100
@@ -107,14 +107,14 @@ struct hvm_function_table {
                                  struct segment_register *reg);
 
     /*
-     * Re-set the value of CR3 that Xen runs on when handling VM exits
+     * Re-set the value of CR3 that Xen runs on when handling VM exits.
      */
     void (*update_host_cr3)(struct vcpu *v);
 
     /*
-     * Called to inform HVM layer that a guest cr3 has changed
+     * Called to inform HVM layer that a guest control register has changed.
      */
-    void (*update_guest_cr3)(struct vcpu *v);
+    void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
 
     /*
     * Called to ensure that all guest-specific mappings in a tagged TLB
@@ -220,7 +220,10 @@ hvm_update_vtpr(struct vcpu *v, unsigned
     hvm_funcs.update_vtpr(v, value);
 }
 
-void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);
+static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
+{
+    hvm_funcs.update_guest_cr(v, cr);
+}
 
 static inline void
 hvm_flush_guest_tlbs(void)
--- a/xen/include/asm-x86/hvm/support.h	Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h	Wed Aug 08 11:28:26 2007 +0100
@@ -235,5 +235,6 @@ void hvm_hlt(unsigned long rflags);
 void hvm_triple_fault(void);
 
 int hvm_set_cr3(unsigned long value);
+int hvm_set_cr4(unsigned long value);
 
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */