ia64/xen-unstable

changeset 13494:873884fe1827

[HVM] save restore: vcpu context support

Signed-off-by: Zhai Edwin <edwin.zhai@intel.com>

save/restore HVM vcpu context such as vmcs
author Steven Hand <steven@xensource.com>
date Tue Jan 16 16:58:16 2007 +0000 (2007-01-16)
parents 5f340f19bbb7
children 239c8504f48d
files xen/arch/x86/domain.c xen/arch/x86/domctl.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/hvm.h xen/include/public/arch-x86/xen.h xen/include/xlat.lst
line diff
     1.1 --- a/xen/arch/x86/domain.c	Thu Jan 18 16:48:05 2007 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Tue Jan 16 16:58:16 2007 +0000
     1.3 @@ -573,6 +573,7 @@ int arch_set_info_guest(
     1.4      else
     1.5      {
     1.6          hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
     1.7 +        hvm_load_cpu_context(v, &v->arch.guest_context.hvmcpu_ctxt);
     1.8      }
     1.9  
    1.10      if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
     2.1 --- a/xen/arch/x86/domctl.c	Thu Jan 18 16:48:05 2007 +0000
     2.2 +++ b/xen/arch/x86/domctl.c	Tue Jan 16 16:58:16 2007 +0000
     2.3 @@ -322,8 +322,10 @@ void arch_get_info_guest(struct vcpu *v,
     2.4  
     2.5      if ( is_hvm_vcpu(v) )
     2.6      {
     2.7 -        if ( !IS_COMPAT(v->domain) )
     2.8 +        if ( !IS_COMPAT(v->domain) ) {
     2.9              hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg);
    2.10 +            hvm_save_cpu_context(v, &c.nat->hvmcpu_ctxt);
    2.11 +        }
    2.12  #ifdef CONFIG_COMPAT
    2.13          else
    2.14          {
     3.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Jan 18 16:48:05 2007 +0000
     3.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Jan 16 16:58:16 2007 +0000
     3.3 @@ -363,6 +363,299 @@ static inline void __restore_debug_regis
     3.4      /* DR7 is loaded from the VMCS. */
     3.5  }
     3.6  
     3.7 +static int __get_instruction_length(void);
     3.8 +int vmx_vmcs_save(struct vcpu *v, struct vmcs_data *c)
     3.9 +{
    3.10 +    unsigned long inst_len;
    3.11 +
    3.12 +    inst_len = __get_instruction_length();
    3.13 +    c->eip = __vmread(GUEST_RIP);
    3.14 +
    3.15 +#ifdef HVM_DEBUG_SUSPEND
    3.16 +    printk("vmx_vmcs_save: inst_len=0x%lx, eip=0x%"PRIx64".\n", 
    3.17 +            inst_len, c->eip);
    3.18 +#endif
    3.19 +
    3.20 +    c->esp = __vmread(GUEST_RSP);
    3.21 +    c->eflags = __vmread(GUEST_RFLAGS);
    3.22 +
    3.23 +    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    3.24 +    c->cr3 = v->arch.hvm_vmx.cpu_cr3;
    3.25 +    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
    3.26 +
    3.27 +#ifdef HVM_DEBUG_SUSPEND
    3.28 +    printk("vmx_vmcs_save: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
    3.29 +            c->cr3,
    3.30 +            c->cr0,
    3.31 +            c->cr4);
    3.32 +#endif
    3.33 +
    3.34 +    c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
    3.35 +    c->idtr_base = __vmread(GUEST_IDTR_BASE);
    3.36 +
    3.37 +    c->gdtr_limit = __vmread(GUEST_GDTR_LIMIT);
    3.38 +    c->gdtr_base = __vmread(GUEST_GDTR_BASE);
    3.39 +
    3.40 +    c->cs_sel = __vmread(GUEST_CS_SELECTOR);
    3.41 +    c->cs_limit = __vmread(GUEST_CS_LIMIT);
    3.42 +    c->cs_base = __vmread(GUEST_CS_BASE);
    3.43 +    c->cs_arbytes = __vmread(GUEST_CS_AR_BYTES);
    3.44 +
    3.45 +    c->ds_sel = __vmread(GUEST_DS_SELECTOR);
    3.46 +    c->ds_limit = __vmread(GUEST_DS_LIMIT);
    3.47 +    c->ds_base = __vmread(GUEST_DS_BASE);
    3.48 +    c->ds_arbytes = __vmread(GUEST_DS_AR_BYTES);
    3.49 +
    3.50 +    c->es_sel = __vmread(GUEST_ES_SELECTOR);
    3.51 +    c->es_limit = __vmread(GUEST_ES_LIMIT);
    3.52 +    c->es_base = __vmread(GUEST_ES_BASE);
    3.53 +    c->es_arbytes = __vmread(GUEST_ES_AR_BYTES);
    3.54 +
    3.55 +    c->ss_sel = __vmread(GUEST_SS_SELECTOR);
    3.56 +    c->ss_limit = __vmread(GUEST_SS_LIMIT);
    3.57 +    c->ss_base = __vmread(GUEST_SS_BASE);
    3.58 +    c->ss_arbytes = __vmread(GUEST_SS_AR_BYTES);
    3.59 +
    3.60 +    c->fs_sel = __vmread(GUEST_FS_SELECTOR);
    3.61 +    c->fs_limit = __vmread(GUEST_FS_LIMIT);
    3.62 +    c->fs_base = __vmread(GUEST_FS_BASE);
    3.63 +    c->fs_arbytes = __vmread(GUEST_FS_AR_BYTES);
    3.64 +
    3.65 +    c->gs_sel = __vmread(GUEST_GS_SELECTOR);
    3.66 +    c->gs_limit = __vmread(GUEST_GS_LIMIT);
    3.67 +    c->gs_base = __vmread(GUEST_GS_BASE);
    3.68 +    c->gs_arbytes = __vmread(GUEST_GS_AR_BYTES);
    3.69 +
    3.70 +    c->tr_sel = __vmread(GUEST_TR_SELECTOR);
    3.71 +    c->tr_limit = __vmread(GUEST_TR_LIMIT);
    3.72 +    c->tr_base = __vmread(GUEST_TR_BASE);
    3.73 +    c->tr_arbytes = __vmread(GUEST_TR_AR_BYTES);
    3.74 +
    3.75 +    c->ldtr_sel = __vmread(GUEST_LDTR_SELECTOR);
    3.76 +    c->ldtr_limit = __vmread(GUEST_LDTR_LIMIT);
    3.77 +    c->ldtr_base = __vmread(GUEST_LDTR_BASE);
    3.78 +    c->ldtr_arbytes = __vmread(GUEST_LDTR_AR_BYTES);
    3.79 +
    3.80 +    c->sysenter_cs = __vmread(GUEST_SYSENTER_CS);
    3.81 +    c->sysenter_esp = __vmread(GUEST_SYSENTER_ESP);
    3.82 +    c->sysenter_eip = __vmread(GUEST_SYSENTER_EIP);
    3.83 +
    3.84 +    return 1;
    3.85 +}
    3.86 +
    3.87 +int vmx_vmcs_restore(struct vcpu *v, struct vmcs_data *c)
    3.88 +{
    3.89 +    unsigned long mfn, old_base_mfn;
    3.90 +
    3.91 +    vmx_vmcs_enter(v);
    3.92 +
    3.93 +    __vmwrite(GUEST_RIP, c->eip);
    3.94 +    __vmwrite(GUEST_RSP, c->esp);
    3.95 +    __vmwrite(GUEST_RFLAGS, c->eflags);
    3.96 +
    3.97 +    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
    3.98 +    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
    3.99 +
   3.100 +#ifdef HVM_DEBUG_SUSPEND
   3.101 +    printk("vmx_vmcs_restore: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   3.102 +            c->cr3,
   3.103 +            c->cr0,
   3.104 +            c->cr4);
   3.105 +#endif
   3.106 +
   3.107 +    if (!vmx_paging_enabled(v)) {
   3.108 +        printk("vmx_vmcs_restore: paging not enabled.");
   3.109 +        goto skip_cr3;
   3.110 +    }
   3.111 +
   3.112 +    if (c->cr3 == v->arch.hvm_vmx.cpu_cr3) {
   3.113 +        /*
   3.114 +         * This is simple TLB flush, implying the guest has
   3.115 +         * removed some translation or changed page attributes.
   3.116 +         * We simply invalidate the shadow.
   3.117 +         */
   3.118 +        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
   3.119 +        if (mfn != pagetable_get_pfn(v->arch.guest_table)) {
   3.120 +            goto bad_cr3;
   3.121 +        }
   3.122 +    } else {
   3.123 +        /*
   3.124 +         * If different, make a shadow. Check if the PDBR is valid
   3.125 +         * first.
   3.126 +         */
   3.127 +        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
   3.128 +        /* current!=vcpu as not called by arch_vmx_do_launch */
   3.129 +        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
   3.130 +        if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain)) {
   3.131 +            goto bad_cr3;
   3.132 +        }
   3.133 +        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   3.134 +        v->arch.guest_table = pagetable_from_pfn(mfn);
   3.135 +        if (old_base_mfn)
   3.136 +             put_page(mfn_to_page(old_base_mfn));
   3.137 +        /*
   3.138 +         * arch.shadow_table should now hold the next CR3 for shadow
   3.139 +         */
   3.140 +        v->arch.hvm_vmx.cpu_cr3 = c->cr3;
   3.141 +    }
   3.142 +
   3.143 + skip_cr3:
   3.144 +#if defined(__x86_64__)
   3.145 +    if (vmx_long_mode_enabled(v)) {
   3.146 +        unsigned long vm_entry_value;
   3.147 +        vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
   3.148 +        vm_entry_value |= VM_ENTRY_IA32E_MODE;
   3.149 +        __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   3.150 +    }
   3.151 +#endif
   3.152 +
   3.153 +    __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
   3.154 +    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
   3.155 +    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   3.156 +
   3.157 +    __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
   3.158 +    __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
   3.159 +
   3.160 +    __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
   3.161 +    __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
   3.162 +
   3.163 +    __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
   3.164 +    __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
   3.165 +    __vmwrite(GUEST_CS_BASE, c->cs_base);
   3.166 +    __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes);
   3.167 +
   3.168 +    __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
   3.169 +    __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
   3.170 +    __vmwrite(GUEST_DS_BASE, c->ds_base);
   3.171 +    __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes);
   3.172 +
   3.173 +    __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
   3.174 +    __vmwrite(GUEST_ES_LIMIT, c->es_limit);
   3.175 +    __vmwrite(GUEST_ES_BASE, c->es_base);
   3.176 +    __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes);
   3.177 +
   3.178 +    __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
   3.179 +    __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
   3.180 +    __vmwrite(GUEST_SS_BASE, c->ss_base);
   3.181 +    __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes);
   3.182 +
   3.183 +    __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
   3.184 +    __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
   3.185 +    __vmwrite(GUEST_FS_BASE, c->fs_base);
   3.186 +    __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes);
   3.187 +
   3.188 +    __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
   3.189 +    __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
   3.190 +    __vmwrite(GUEST_GS_BASE, c->gs_base);
   3.191 +    __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes);
   3.192 +
   3.193 +    __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
   3.194 +    __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
   3.195 +    __vmwrite(GUEST_TR_BASE, c->tr_base);
   3.196 +    __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes);
   3.197 +
   3.198 +    __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
   3.199 +    __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
   3.200 +    __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
   3.201 +    __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes);
   3.202 +
   3.203 +    __vmwrite(GUEST_SYSENTER_CS, c->sysenter_cs);
   3.204 +    __vmwrite(GUEST_SYSENTER_ESP, c->sysenter_esp);
   3.205 +    __vmwrite(GUEST_SYSENTER_EIP, c->sysenter_eip);
   3.206 +
   3.207 +    vmx_vmcs_exit(v);
   3.208 +
   3.209 +    shadow_update_paging_modes(v);
   3.210 +    return 0;
   3.211 +
   3.212 + bad_cr3:
   3.213 +    gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"", c->cr3);
   3.214 +    vmx_vmcs_exit(v);
   3.215 +    return -EINVAL;
   3.216 +}
   3.217 +
   3.218 +#ifdef HVM_DEBUG_SUSPEND
   3.219 +static void dump_msr_state(struct vmx_msr_state *m)
   3.220 +{
   3.221 +    int i = 0;
   3.222 +    printk("**** msr state ****\n");
   3.223 +    printk("shadow_gs=0x%lx, flags=0x%lx, msr_items:", m->shadow_gs, m->flags);
   3.224 +    for (i = 0; i < VMX_MSR_COUNT; i++)
   3.225 +        printk("0x%lx,", m->msrs[i]);
   3.226 +    printk("\n");
   3.227 +}
   3.228 +#else
   3.229 +static void dump_msr_state(struct vmx_msr_state *m)
   3.230 +{
   3.231 +}
   3.232 +#endif
   3.233 +        
   3.234 +void vmx_save_cpu_state(struct vcpu *v, struct hvmcpu_context *ctxt)
   3.235 +{
   3.236 +    struct vmcs_data *data = &ctxt->data;
   3.237 +    struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
   3.238 +    unsigned long guest_flags = guest_state->flags;
   3.239 +    int i = 0;
   3.240 +
   3.241 +    data->shadow_gs = guest_state->shadow_gs;
   3.242 +    data->vmxassist_enabled = v->arch.hvm_vmx.vmxassist_enabled;
   3.243 +    /* save msrs */
   3.244 +    data->flags = guest_flags;
   3.245 +    for (i = 0; i < VMX_MSR_COUNT; i++)
   3.246 +        data->msr_items[i] = guest_state->msrs[i];
   3.247 +
   3.248 +    dump_msr_state(guest_state);
   3.249 +}
   3.250 +
   3.251 +void vmx_load_cpu_state(struct vcpu *v, struct hvmcpu_context *ctxt)
   3.252 +{
   3.253 +    int i = 0;
   3.254 +    struct vmcs_data *data = &ctxt->data;
   3.255 +    struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
   3.256 +
   3.257 +    /* restore msrs */
   3.258 +    guest_state->flags = data->flags;
   3.259 +    for (i = 0; i < VMX_MSR_COUNT; i++)
   3.260 +        guest_state->msrs[i] = data->msr_items[i];
   3.261 +
   3.262 +    guest_state->shadow_gs = data->shadow_gs;
   3.263 +
    3.264 +    /* XXX: no need to restore msrs; current != vcpu as not called by arch_vmx_do_launch */
   3.265 +/*    vmx_restore_guest_msrs(v);*/
   3.266 +
   3.267 +    v->arch.hvm_vmx.vmxassist_enabled = data->vmxassist_enabled;
   3.268 +
   3.269 +    dump_msr_state(guest_state);
   3.270 +}
   3.271 +
   3.272 +void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvmcpu_context *ctxt)
   3.273 +{
   3.274 +    struct vmcs_data *data = &ctxt->data;
   3.275 +
   3.276 +    vmx_save_cpu_state(v, ctxt);
   3.277 +
   3.278 +    vmx_vmcs_enter(v);
   3.279 +
   3.280 +    vmx_vmcs_save(v, data);
   3.281 +
   3.282 +    vmx_vmcs_exit(v);
   3.283 +
   3.284 +}
   3.285 +
   3.286 +void vmx_load_vmcs_ctxt(struct vcpu *v, struct hvmcpu_context *ctxt)
   3.287 +{
   3.288 +    vmx_load_cpu_state(v, ctxt);
   3.289 +
   3.290 +    if (vmx_vmcs_restore(v, &ctxt->data)) {
   3.291 +        printk("vmx_vmcs restore failed!\n");
   3.292 +        domain_crash(v->domain);
   3.293 +    }
   3.294 +
   3.295 +    /* only load vmcs once */
   3.296 +    ctxt->valid = 0;
   3.297 +
   3.298 +}
   3.299 +
   3.300  /*
   3.301   * DR7 is saved and restored on every vmexit.  Other debug registers only
   3.302   * need to be restored if their value is going to affect execution -- i.e.,
   3.303 @@ -721,6 +1014,9 @@ static void vmx_setup_hvm_funcs(void)
   3.304      hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
   3.305      hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
   3.306  
   3.307 +    hvm_funcs.save_cpu_ctxt = vmx_save_vmcs_ctxt;
   3.308 +    hvm_funcs.load_cpu_ctxt = vmx_load_vmcs_ctxt;
   3.309 +
   3.310      hvm_funcs.paging_enabled = vmx_paging_enabled;
   3.311      hvm_funcs.long_mode_enabled = vmx_long_mode_enabled;
   3.312      hvm_funcs.pae_enabled = vmx_pae_enabled;
     4.1 --- a/xen/include/asm-x86/hvm/hvm.h	Thu Jan 18 16:48:05 2007 +0000
     4.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Tue Jan 16 16:58:16 2007 +0000
     4.3 @@ -79,6 +79,13 @@ struct hvm_function_table {
     4.4          struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
     4.5      void (*load_cpu_guest_regs)(
     4.6          struct vcpu *v, struct cpu_user_regs *r);
     4.7 +
     4.8 +    /* save and load hvm guest cpu context for save/restore */
     4.9 +    void (*save_cpu_ctxt)(
    4.10 +        struct vcpu *v, struct hvmcpu_context *ctxt);
    4.11 +    void (*load_cpu_ctxt)(
    4.12 +        struct vcpu *v, struct hvmcpu_context *ctxt);
    4.13 +
    4.14      /*
    4.15       * Examine specifics of the guest state:
    4.16       * 1) determine whether paging is enabled,
    4.17 @@ -157,6 +164,35 @@ hvm_load_cpu_guest_regs(struct vcpu *v, 
    4.18      hvm_funcs.load_cpu_guest_regs(v, r);
    4.19  }
    4.20  
    4.21 +void hvm_set_guest_time(struct vcpu *v, u64 gtime);
    4.22 +u64 hvm_get_guest_time(struct vcpu *v);
    4.23 +
    4.24 +static inline void
    4.25 +hvm_save_cpu_context(
    4.26 +        struct vcpu *v, struct hvmcpu_context *ctxt)
    4.27 +{
    4.28 +    hvm_funcs.save_cpu_ctxt(v, ctxt);
    4.29 +
    4.30 +    /* save guest time */
    4.31 +    ctxt->gtime = hvm_get_guest_time(v);
    4.32 +
     4.33 +    /* set valid flag so the whole vmcs is recovered on restore */
    4.34 +    ctxt->valid = 0x55885588;
    4.35 +}
    4.36 +
    4.37 +static inline void
    4.38 +hvm_load_cpu_context(
    4.39 +        struct vcpu *v, struct hvmcpu_context *ctxt)
    4.40 +{
    4.41 +    if ( ctxt->valid != 0x55885588)
    4.42 +        return;
    4.43 +
    4.44 +    hvm_funcs.load_cpu_ctxt(v, ctxt);
    4.45 +
     4.46 +    /* restore guest time */
    4.47 +    hvm_set_guest_time(v, ctxt->gtime);
    4.48 +}
    4.49 +
    4.50  static inline int
    4.51  hvm_paging_enabled(struct vcpu *v)
    4.52  {
    4.53 @@ -222,8 +258,6 @@ hvm_get_segment_register(struct vcpu *v,
    4.54  void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
    4.55                                     unsigned int *ecx, unsigned int *edx);
    4.56  void hvm_stts(struct vcpu *v);
    4.57 -void hvm_set_guest_time(struct vcpu *v, u64 gtime);
    4.58 -u64 hvm_get_guest_time(struct vcpu *v);
    4.59  void hvm_migrate_timers(struct vcpu *v);
    4.60  void hvm_do_resume(struct vcpu *v);
    4.61  
     5.1 --- a/xen/include/public/arch-x86/xen.h	Thu Jan 18 16:48:05 2007 +0000
     5.2 +++ b/xen/include/public/arch-x86/xen.h	Tue Jan 16 16:58:16 2007 +0000
     5.3 @@ -109,6 +109,70 @@ DEFINE_XEN_GUEST_HANDLE(trap_info_t);
     5.4  typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
     5.5  
     5.6  /*
     5.7 + * World vmcs state
     5.8 + */
     5.9 +struct vmcs_data {
    5.10 +    uint64_t  eip;        /* execution pointer */
    5.11 +    uint64_t  esp;        /* stack pointer */
    5.12 +    uint64_t  eflags;     /* flags register */
    5.13 +    uint64_t  cr0;
    5.14 +    uint64_t  cr3;        /* page table directory */
    5.15 +    uint64_t  cr4;
    5.16 +    uint32_t  idtr_limit; /* idt */
    5.17 +    uint64_t  idtr_base;
    5.18 +    uint32_t  gdtr_limit; /* gdt */
    5.19 +    uint64_t  gdtr_base;
    5.20 +    uint32_t  cs_sel;     /* cs selector */
    5.21 +    uint32_t  cs_limit;
    5.22 +    uint64_t  cs_base;
    5.23 +    uint32_t  cs_arbytes;
    5.24 +    uint32_t  ds_sel;     /* ds selector */
    5.25 +    uint32_t  ds_limit;
    5.26 +    uint64_t  ds_base;
    5.27 +    uint32_t  ds_arbytes;
    5.28 +    uint32_t  es_sel;     /* es selector */
    5.29 +    uint32_t  es_limit;
    5.30 +    uint64_t  es_base;
    5.31 +    uint32_t  es_arbytes;
    5.32 +    uint32_t  ss_sel;     /* ss selector */
    5.33 +    uint32_t  ss_limit;
    5.34 +    uint64_t  ss_base;
    5.35 +    uint32_t  ss_arbytes;
    5.36 +    uint32_t  fs_sel;     /* fs selector */
    5.37 +    uint32_t  fs_limit;
    5.38 +    uint64_t  fs_base;
    5.39 +    uint32_t  fs_arbytes;
    5.40 +    uint32_t  gs_sel;     /* gs selector */
    5.41 +    uint32_t  gs_limit;
    5.42 +    uint64_t  gs_base;
    5.43 +    uint32_t  gs_arbytes;
    5.44 +    uint32_t  tr_sel;     /* task selector */
    5.45 +    uint32_t  tr_limit;
    5.46 +    uint64_t  tr_base;
    5.47 +    uint32_t  tr_arbytes;
    5.48 +    uint32_t  ldtr_sel;   /* ldtr selector */
    5.49 +    uint32_t  ldtr_limit;
    5.50 +    uint64_t  ldtr_base;
    5.51 +    uint32_t  ldtr_arbytes;
    5.52 +    uint32_t  sysenter_cs;
    5.53 +    uint64_t  sysenter_esp;
    5.54 +    uint64_t  sysenter_eip;
    5.55 +    /* msr for em64t */
    5.56 +    uint64_t shadow_gs;
    5.57 +    uint64_t flags;
    5.58 +    /* same size as VMX_MSR_COUNT */
    5.59 +    uint64_t msr_items[6];
    5.60 +    uint64_t vmxassist_enabled;
    5.61 +};
    5.62 +typedef struct vmcs_data vmcs_data_t;
    5.63 +
    5.64 +struct hvmcpu_context {
    5.65 +    uint32_t valid;
    5.66 +    struct vmcs_data data;
    5.67 +    uint64_t gtime;
    5.68 +};
    5.69 +
    5.70 +/*
    5.71   * The following is all CPU context. Note that the fpu_ctxt block is filled 
    5.72   * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
    5.73   */
    5.74 @@ -154,6 +218,7 @@ struct vcpu_guest_context {
    5.75  #endif
    5.76  #endif
    5.77      unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
    5.78 +    struct hvmcpu_context hvmcpu_ctxt;      /* whole vmcs region */
    5.79  #ifdef __x86_64__
    5.80      /* Segment base addresses. */
    5.81      uint64_t      fs_base;
     6.1 --- a/xen/include/xlat.lst	Thu Jan 18 16:48:05 2007 +0000
     6.2 +++ b/xen/include/xlat.lst	Tue Jan 16 16:58:16 2007 +0000
     6.3 @@ -8,6 +8,8 @@
     6.4  ?	vcpu_time_info			xen.h
     6.5  !	cpu_user_regs			arch-x86/xen-@arch@.h
     6.6  !	trap_info			arch-x86/xen.h
     6.7 +!	hvmcpu_context			arch-x86/xen.h
     6.8 +!	vmcs_data			arch-x86/xen.h
     6.9  !	vcpu_guest_context		arch-x86/xen.h
    6.10  ?	acm_getdecision			acm_ops.h
    6.11  !	ctl_cpumap			domctl.h