ia64/xen-unstable

changeset 14830:1fa9b5f1df8f

[HVM] Save/restore: save HVM GPRs and DRs with the rest of the CPU state
and remove the use of xc_vcpu_getcontext() from HVM save/restore.
Also fixes loss of CR2, DR6 and DR7 contents across HVM save/restore.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Thu Apr 12 15:02:03 2007 +0100 (2007-04-12)
parents a839e331f06f
children 5a8cb6354df5
files tools/libxc/xc_domain_restore.c tools/libxc/xc_domain_save.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/include/public/hvm/save.h
line diff
     1.1 --- a/tools/libxc/xc_domain_restore.c	Thu Apr 12 14:13:04 2007 +0100
     1.2 +++ b/tools/libxc/xc_domain_restore.c	Thu Apr 12 15:02:03 2007 +0100
     1.3 @@ -696,26 +696,6 @@ int xc_domain_restore(int xc_handle, int
     1.4          xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
     1.5          *store_mfn = magic_pfns[2];
     1.6  
     1.7 -        /* Read vcpu contexts */
     1.8 -        for ( i = 0; i <= max_vcpu_id; i++ )
     1.9 -        {
    1.10 -            if ( !(vcpumap & (1ULL << i)) )
    1.11 -                continue;
    1.12 -
    1.13 -            if ( !read_exact(io_fd, &(ctxt), sizeof(ctxt)) )
    1.14 -            {
    1.15 -                ERROR("error read vcpu context.\n");
    1.16 -                goto out;
    1.17 -            }
    1.18 -            
    1.19 -            if ( (rc = xc_vcpu_setcontext(xc_handle, dom, i, &ctxt)) )
    1.20 -            {
    1.21 -                ERROR("Could not set vcpu context, rc=%d", rc);
    1.22 -                goto out;
    1.23 -            }
    1.24 -            rc = 1;
    1.25 -        }
    1.26 -
    1.27          /* Read HVM context */
    1.28          if ( !read_exact(io_fd, &rec_len, sizeof(uint32_t)) )
    1.29          {
     2.1 --- a/tools/libxc/xc_domain_save.c	Thu Apr 12 14:13:04 2007 +0100
     2.2 +++ b/tools/libxc/xc_domain_save.c	Thu Apr 12 15:02:03 2007 +0100
     2.3 @@ -378,8 +378,7 @@ static int analysis_phase(int xc_handle,
     2.4  
     2.5  
     2.6  static int suspend_and_state(int (*suspend)(int), int xc_handle, int io_fd,
     2.7 -                             int dom, xc_dominfo_t *info,
     2.8 -                             vcpu_guest_context_t *ctxt)
     2.9 +                             int dom, xc_dominfo_t *info)
    2.10  {
    2.11      int i = 0;
    2.12  
    2.13 @@ -397,10 +396,6 @@ static int suspend_and_state(int (*suspe
    2.14          return -1;
    2.15      }
    2.16  
    2.17 -    if ( xc_vcpu_getcontext(xc_handle, dom, 0, ctxt) )
    2.18 -        ERROR("Could not get vcpu context");
    2.19 -
    2.20 -
    2.21      if ( info->dying )
    2.22      {
    2.23          ERROR("domain is dying");
    2.24 @@ -663,10 +658,11 @@ static xen_pfn_t *xc_map_m2p(int xc_hand
    2.25  static xen_pfn_t *map_and_save_p2m_table(int xc_handle, 
    2.26                                           int io_fd, 
    2.27                                           uint32_t dom,
    2.28 -                                         vcpu_guest_context_t *ctxt,
    2.29                                           unsigned long p2m_size,
    2.30                                           shared_info_t *live_shinfo)
    2.31  {
    2.32 +    vcpu_guest_context_t ctxt;
    2.33 +
    2.34      /* Double and single indirect references to the live P2M table */
    2.35      xen_pfn_t *live_p2m_frame_list_list = NULL;
    2.36      xen_pfn_t *live_p2m_frame_list = NULL;
    2.37 @@ -730,13 +726,19 @@ static xen_pfn_t *map_and_save_p2m_table
    2.38          }
    2.39      }
    2.40  
    2.41 +    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
    2.42 +    {
    2.43 +        ERROR("Could not get vcpu context");
    2.44 +        goto out;
    2.45 +    }
    2.46 +
    2.47      /*
    2.48       * Write an extended-info structure to inform the restore code that
    2.49       * a PAE guest understands extended CR3 (PDPTs above 4GB). Turns off
    2.50       * slow paths in the restore code.
    2.51       */
    2.52      if ( (pt_levels == 3) &&
    2.53 -         (ctxt->vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3)) )
    2.54 +         (ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3)) )
    2.55      {
    2.56          unsigned long signature = ~0UL;
    2.57          uint32_t tot_sz   = sizeof(struct vcpu_guest_context) + 8;
    2.58 @@ -746,7 +748,7 @@ static xen_pfn_t *map_and_save_p2m_table
    2.59               !write_exact(io_fd, &tot_sz,    sizeof(tot_sz)) ||
    2.60               !write_exact(io_fd, &chunk_sig, 4) ||
    2.61               !write_exact(io_fd, &chunk_sz,  sizeof(chunk_sz)) ||
    2.62 -             !write_exact(io_fd, ctxt,       sizeof(*ctxt)) )
    2.63 +             !write_exact(io_fd, &ctxt,      sizeof(ctxt)) )
    2.64          {
    2.65              ERROR("write: extended info");
    2.66              goto out;
    2.67 @@ -853,11 +855,6 @@ int xc_domain_save(int xc_handle, int io
    2.68          return 1;
    2.69      }
    2.70  
    2.71 -    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
    2.72 -    {
    2.73 -        ERROR("Could not get vcpu context");
    2.74 -        goto out;
    2.75 -    }
    2.76      shared_info_frame = info.shared_info_frame;
    2.77  
    2.78      /* Map the shared info frame */
    2.79 @@ -900,7 +897,7 @@ int xc_domain_save(int xc_handle, int io
    2.80      else
    2.81      {
    2.82          /* This is a non-live suspend. Suspend the domain .*/
    2.83 -        if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info, &ctxt) )
    2.84 +        if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info) )
    2.85          {
    2.86              ERROR("Domain appears not to have suspended");
    2.87              goto out;
    2.88 @@ -999,7 +996,7 @@ int xc_domain_save(int xc_handle, int io
    2.89  
    2.90          /* Map the P2M table, and write the list of P2M frames */
    2.91          live_p2m = map_and_save_p2m_table(xc_handle, io_fd, dom, 
    2.92 -                                          &ctxt, p2m_size, live_shinfo);
    2.93 +                                          p2m_size, live_shinfo);
    2.94          if ( live_p2m == NULL )
    2.95          {
    2.96              ERROR("Failed to map/save the p2m frame list");
    2.97 @@ -1304,17 +1301,13 @@ int xc_domain_save(int xc_handle, int io
    2.98                  DPRINTF("Start last iteration\n");
    2.99                  last_iter = 1;
   2.100  
   2.101 -                if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info,
   2.102 -                                       &ctxt) )
   2.103 +                if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info) )
   2.104                  {
   2.105                      ERROR("Domain appears not to have suspended");
   2.106                      goto out;
   2.107                  }
   2.108  
   2.109 -                DPRINTF("SUSPEND shinfo %08lx eip %08lx edx %08lx\n",
   2.110 -                        info.shared_info_frame,
   2.111 -                        (unsigned long)ctxt.user_regs.eip,
   2.112 -                        (unsigned long)ctxt.user_regs.edx);
   2.113 +                DPRINTF("SUSPEND shinfo %08lx\n", info.shared_info_frame);
   2.114              }
   2.115  
   2.116              if ( xc_shadow_control(xc_handle, dom, 
   2.117 @@ -1410,27 +1403,6 @@ int xc_domain_save(int xc_handle, int io
   2.118              goto out;
   2.119          }
   2.120  
   2.121 -        /* Save vcpu contexts */
   2.122 -
   2.123 -        for ( i = 0; i <= info.max_vcpu_id; i++ )
   2.124 -        {
   2.125 -            if ( !(vcpumap & (1ULL << i)) )
   2.126 -                continue;
   2.127 -            
   2.128 -            if ( xc_vcpu_getcontext(xc_handle, dom, i, &ctxt) )
   2.129 -            {
   2.130 -                ERROR("HVM:Could not get vcpu context");
   2.131 -                goto out;
   2.132 -            }
   2.133 -            
   2.134 -            DPRINTF("write vcpu %d context.\n", i); 
   2.135 -            if ( !write_exact(io_fd, &(ctxt), sizeof(ctxt)) )
   2.136 -            {
   2.137 -                ERROR("write vcpu context failed!\n");
   2.138 -                goto out;
   2.139 -            }
   2.140 -        }
   2.141 -
   2.142          /* Get HVM context from Xen and save it too */
   2.143          if ( (rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, 
   2.144                                                    hvm_buf_size)) == -1 )
   2.145 @@ -1494,6 +1466,12 @@ int xc_domain_save(int xc_handle, int io
   2.146          }
   2.147      }
   2.148  
   2.149 +    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
   2.150 +    {
   2.151 +        ERROR("Could not get vcpu context");
   2.152 +        goto out;
   2.153 +    }
   2.154 +
   2.155      /* Canonicalise the suspend-record frame number. */
   2.156      if ( !translate_mfn_to_pfn(&ctxt.user_regs.edx) )
   2.157      {
     3.1 --- a/xen/arch/x86/hvm/hvm.c	Thu Apr 12 14:13:04 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/hvm.c	Thu Apr 12 15:02:03 2007 +0100
     3.3 @@ -191,6 +191,7 @@ static int hvm_save_cpu_ctxt(struct doma
     3.4  {
     3.5      struct vcpu *v;
     3.6      struct hvm_hw_cpu ctxt;
     3.7 +    struct vcpu_guest_context *vc;
     3.8  
     3.9      for_each_vcpu(d, v)
    3.10      {
    3.11 @@ -199,7 +200,40 @@ static int hvm_save_cpu_ctxt(struct doma
    3.12          if ( test_bit(_VPF_down, &v->pause_flags) ) 
    3.13              continue;
    3.14  
    3.15 +        /* Architecture-specific vmcs/vmcb bits */
    3.16          hvm_funcs.save_cpu_ctxt(v, &ctxt);
    3.17 +
    3.18 +        /* Other vcpu register state */
    3.19 +        vc = &v->arch.guest_context;
    3.20 +        if ( vc->flags & VGCF_i387_valid )
    3.21 +            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
    3.22 +        else 
    3.23 +            memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
    3.24 +        ctxt.rax = vc->user_regs.eax;
    3.25 +        ctxt.rbx = vc->user_regs.ebx;
    3.26 +        ctxt.rcx = vc->user_regs.ecx;
    3.27 +        ctxt.rdx = vc->user_regs.edx;
    3.28 +        ctxt.rbp = vc->user_regs.ebp;
    3.29 +        ctxt.rsi = vc->user_regs.esi;
    3.30 +        ctxt.rdi = vc->user_regs.edi;
    3.31 +        /* %rsp handled by arch-specific call above */
    3.32 +#ifdef __x86_64__        
    3.33 +        ctxt.r8  = vc->user_regs.r8;
    3.34 +        ctxt.r9  = vc->user_regs.r9;
    3.35 +        ctxt.r10 = vc->user_regs.r10;
    3.36 +        ctxt.r11 = vc->user_regs.r11;
    3.37 +        ctxt.r12 = vc->user_regs.r12;
    3.38 +        ctxt.r13 = vc->user_regs.r13;
    3.39 +        ctxt.r14 = vc->user_regs.r14;
    3.40 +        ctxt.r15 = vc->user_regs.r15;
    3.41 +#endif
    3.42 +        ctxt.dr0 = vc->debugreg[0];
    3.43 +        ctxt.dr1 = vc->debugreg[1];
    3.44 +        ctxt.dr2 = vc->debugreg[2];
    3.45 +        ctxt.dr3 = vc->debugreg[3];
    3.46 +        ctxt.dr6 = vc->debugreg[6];
    3.47 +        ctxt.dr7 = vc->debugreg[7];
    3.48 +
    3.49          if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
    3.50              return 1; 
    3.51      }
    3.52 @@ -208,9 +242,10 @@ static int hvm_save_cpu_ctxt(struct doma
    3.53  
    3.54  static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
    3.55  {
    3.56 -    int vcpuid;
    3.57 +    int vcpuid, rc;
    3.58      struct vcpu *v;
    3.59      struct hvm_hw_cpu ctxt;
    3.60 +    struct vcpu_guest_context *vc;
    3.61  
    3.62      /* Which vcpu is this? */
    3.63      vcpuid = hvm_load_instance(h);
    3.64 @@ -219,13 +254,52 @@ static int hvm_load_cpu_ctxt(struct doma
    3.65          gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
    3.66          return -EINVAL;
    3.67      }
    3.68 +    vc = &v->arch.guest_context;
    3.69 +
    3.70 +    /* Need to init this vcpu before loading its contents */
     3.71 +    LOCK_BIGLOCK(d);
     3.72 +    rc = v->is_initialised ? 0 : boot_vcpu(d, vcpuid, vc);
     3.73 +    UNLOCK_BIGLOCK(d);
     3.74 +    if ( rc != 0 )
     3.75 +        return rc;
    3.76  
    3.77      if ( hvm_load_entry(CPU, h, &ctxt) != 0 ) 
    3.78          return -EINVAL;
    3.79  
    3.80 +    /* Architecture-specific vmcs/vmcb bits */
    3.81      if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
    3.82          return -EINVAL;
    3.83  
    3.84 +    /* Other vcpu register state */
    3.85 +    memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
    3.86 +    vc->user_regs.eax = ctxt.rax;
    3.87 +    vc->user_regs.ebx = ctxt.rbx;
    3.88 +    vc->user_regs.ecx = ctxt.rcx;
    3.89 +    vc->user_regs.edx = ctxt.rdx;
    3.90 +    vc->user_regs.ebp = ctxt.rbp;
    3.91 +    vc->user_regs.esi = ctxt.rsi;
    3.92 +    vc->user_regs.edi = ctxt.rdi;
    3.93 +    vc->user_regs.esp = ctxt.rsp;
    3.94 +#ifdef __x86_64__
     3.95 +    vc->user_regs.r8  = ctxt.r8;
     3.96 +    vc->user_regs.r9  = ctxt.r9;
    3.97 +    vc->user_regs.r10 = ctxt.r10;
    3.98 +    vc->user_regs.r11 = ctxt.r11;
    3.99 +    vc->user_regs.r12 = ctxt.r12;
   3.100 +    vc->user_regs.r13 = ctxt.r13;
   3.101 +    vc->user_regs.r14 = ctxt.r14;
   3.102 +    vc->user_regs.r15 = ctxt.r15;
   3.103 +#endif
   3.104 +    vc->debugreg[0] = ctxt.dr0;
   3.105 +    vc->debugreg[1] = ctxt.dr1;
   3.106 +    vc->debugreg[2] = ctxt.dr2;
   3.107 +    vc->debugreg[3] = ctxt.dr3;
   3.108 +    vc->debugreg[6] = ctxt.dr6;
   3.109 +    vc->debugreg[7] = ctxt.dr7;
   3.110 +
   3.111 +    vc->flags = VGCF_i387_valid | VGCF_online;
   3.112 +    v->fpu_initialised = 1;
   3.113 +
   3.114      /* Auxiliary processors should be woken immediately. */
   3.115      if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
   3.116          vcpu_wake(v);
     4.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Apr 12 14:13:04 2007 +0100
     4.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Apr 12 15:02:03 2007 +0100
     4.3 @@ -233,7 +233,7 @@ int svm_vmcb_save(struct vcpu *v, struct
     4.4  {
     4.5      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     4.6  
     4.7 -    c->eip = vmcb->rip;
     4.8 +    c->rip = vmcb->rip;
     4.9  
    4.10  #ifdef HVM_DEBUG_SUSPEND
    4.11      printk("%s: eip=0x%"PRIx64".\n", 
    4.12 @@ -241,10 +241,11 @@ int svm_vmcb_save(struct vcpu *v, struct
    4.13             inst_len, c->eip);
    4.14  #endif
    4.15  
    4.16 -    c->esp = vmcb->rsp;
    4.17 -    c->eflags = vmcb->rflags;
    4.18 +    c->rsp = vmcb->rsp;
    4.19 +    c->rflags = vmcb->rflags;
    4.20  
    4.21      c->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
    4.22 +    c->cr2 = v->arch.hvm_svm.cpu_cr2;
    4.23      c->cr3 = v->arch.hvm_svm.cpu_cr3;
    4.24      c->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
    4.25  
    4.26 @@ -315,15 +316,17 @@ int svm_vmcb_restore(struct vcpu *v, str
    4.27      unsigned long mfn, old_base_mfn;
    4.28      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    4.29  
    4.30 -    vmcb->rip    = c->eip;
    4.31 -    vmcb->rsp    = c->esp;
    4.32 -    vmcb->rflags = c->eflags;
    4.33 +    vmcb->rip    = c->rip;
    4.34 +    vmcb->rsp    = c->rsp;
    4.35 +    vmcb->rflags = c->rflags;
    4.36  
    4.37      v->arch.hvm_svm.cpu_shadow_cr0 = c->cr0;
    4.38      vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET;
    4.39      if ( !paging_mode_hap(v->domain) ) 
    4.40          vmcb->cr0 |= X86_CR0_PG;
    4.41  
    4.42 +    v->arch.hvm_svm.cpu_cr2 = c->cr2;
    4.43 +
    4.44  #ifdef HVM_DEBUG_SUSPEND
    4.45      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
    4.46             __func__,
    4.47 @@ -421,6 +424,9 @@ int svm_vmcb_restore(struct vcpu *v, str
    4.48      vmcb->sysenter_esp = c->sysenter_esp;
    4.49      vmcb->sysenter_eip = c->sysenter_eip;
    4.50  
    4.51 +    vmcb->dr6 = c->dr6;
    4.52 +    vmcb->dr7 = c->dr7;
    4.53 +
    4.54      paging_update_paging_modes(v);
    4.55      return 0;
    4.56   
    4.57 @@ -440,6 +446,7 @@ void svm_save_cpu_state(struct vcpu *v, 
    4.58      data->msr_cstar        = vmcb->cstar;
    4.59      data->msr_syscall_mask = vmcb->sfmask;
    4.60      data->msr_efer         = v->arch.hvm_svm.cpu_shadow_efer;
    4.61 +    data->msr_flags        = -1ULL;
    4.62  
    4.63      data->tsc = hvm_get_guest_time(v);
    4.64  }
     5.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Apr 12 14:13:04 2007 +0100
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Apr 12 15:02:03 2007 +0100
     5.3 @@ -370,11 +370,12 @@ static inline void __restore_debug_regis
     5.4  
     5.5  int vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
     5.6  {    
     5.7 -    c->eip = __vmread(GUEST_RIP);
     5.8 -    c->esp = __vmread(GUEST_RSP);
     5.9 -    c->eflags = __vmread(GUEST_RFLAGS);
    5.10 +    c->rip = __vmread(GUEST_RIP);
    5.11 +    c->rsp = __vmread(GUEST_RSP);
    5.12 +    c->rflags = __vmread(GUEST_RFLAGS);
    5.13  
    5.14      c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    5.15 +    c->cr2 = v->arch.hvm_vmx.cpu_cr2;
    5.16      c->cr3 = v->arch.hvm_vmx.cpu_cr3;
    5.17      c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
    5.18  
    5.19 @@ -444,13 +445,15 @@ int vmx_vmcs_restore(struct vcpu *v, str
    5.20  
    5.21      vmx_vmcs_enter(v);
    5.22  
    5.23 -    __vmwrite(GUEST_RIP, c->eip);
    5.24 -    __vmwrite(GUEST_RSP, c->esp);
    5.25 -    __vmwrite(GUEST_RFLAGS, c->eflags);
    5.26 +    __vmwrite(GUEST_RIP, c->rip);
    5.27 +    __vmwrite(GUEST_RSP, c->rsp);
    5.28 +    __vmwrite(GUEST_RFLAGS, c->rflags);
    5.29  
    5.30      v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
    5.31      __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
    5.32  
    5.33 +    v->arch.hvm_vmx.cpu_cr2 = c->cr2;
    5.34 +
    5.35  #ifdef HVM_DEBUG_SUSPEND
    5.36      printk("vmx_vmcs_restore: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
    5.37              c->cr3,
    5.38 @@ -555,6 +558,8 @@ int vmx_vmcs_restore(struct vcpu *v, str
    5.39      __vmwrite(GUEST_SYSENTER_ESP, c->sysenter_esp);
    5.40      __vmwrite(GUEST_SYSENTER_EIP, c->sysenter_eip);
    5.41  
    5.42 +    __vmwrite(GUEST_DR7, c->dr7);
    5.43 +
    5.44      vmx_vmcs_exit(v);
    5.45  
    5.46      paging_update_paging_modes(v);
    5.47 @@ -590,7 +595,7 @@ void vmx_save_cpu_state(struct vcpu *v, 
    5.48      data->shadow_gs = guest_state->shadow_gs;
    5.49  
    5.50      /* save msrs */
    5.51 -    data->flags = guest_flags;
    5.52 +    data->msr_flags        = guest_flags;
    5.53      data->msr_lstar        = guest_state->msrs[VMX_INDEX_MSR_LSTAR];
    5.54      data->msr_star         = guest_state->msrs[VMX_INDEX_MSR_STAR];
    5.55      data->msr_cstar        = guest_state->msrs[VMX_INDEX_MSR_CSTAR];
    5.56 @@ -607,7 +612,7 @@ void vmx_load_cpu_state(struct vcpu *v, 
    5.57      struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
    5.58  
    5.59      /* restore msrs */
    5.60 -    guest_state->flags = data->flags;
    5.61 +    guest_state->flags = data->msr_flags;
    5.62      guest_state->msrs[VMX_INDEX_MSR_LSTAR]        = data->msr_lstar;
    5.63      guest_state->msrs[VMX_INDEX_MSR_STAR]         = data->msr_star;
    5.64      guest_state->msrs[VMX_INDEX_MSR_CSTAR]        = data->msr_cstar;
     6.1 --- a/xen/include/public/hvm/save.h	Thu Apr 12 14:13:04 2007 +0100
     6.2 +++ b/xen/include/public/hvm/save.h	Thu Apr 12 15:02:03 2007 +0100
     6.3 @@ -87,13 +87,40 @@ DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct 
     6.4   */
     6.5  
     6.6  struct hvm_hw_cpu {
     6.7 -    uint64_t eip;
     6.8 -    uint64_t esp;
     6.9 -    uint64_t eflags;
    6.10 +    uint8_t  fpu_regs[512];
    6.11 +
    6.12 +    uint64_t rax;
    6.13 +    uint64_t rbx;
    6.14 +    uint64_t rcx;
    6.15 +    uint64_t rdx;
    6.16 +    uint64_t rbp;
    6.17 +    uint64_t rsi;
    6.18 +    uint64_t rdi;
    6.19 +    uint64_t rsp;
    6.20 +    uint64_t r8;
    6.21 +    uint64_t r9;
    6.22 +    uint64_t r10;
    6.23 +    uint64_t r11;
    6.24 +    uint64_t r12;
    6.25 +    uint64_t r13;
    6.26 +    uint64_t r14;
    6.27 +    uint64_t r15;
    6.28 +
    6.29 +    uint64_t rip;
    6.30 +    uint64_t rflags;
    6.31 +
    6.32      uint64_t cr0;
    6.33 +    uint64_t cr2;
    6.34      uint64_t cr3;
    6.35      uint64_t cr4;
    6.36  
    6.37 +    uint64_t dr0;
    6.38 +    uint64_t dr1;
    6.39 +    uint64_t dr2;
    6.40 +    uint64_t dr3;
    6.41 +    uint64_t dr6;
    6.42 +    uint64_t dr7;    
    6.43 +
    6.44      uint32_t cs_sel;
    6.45      uint32_t ds_sel;
    6.46      uint32_t es_sel;
    6.47 @@ -142,9 +169,9 @@ struct hvm_hw_cpu {
    6.48  
    6.49      /* msr for em64t */
    6.50      uint64_t shadow_gs;
    6.51 -    uint64_t flags;
    6.52  
    6.53      /* msr content saved/restored. */
    6.54 +    uint64_t msr_flags;
    6.55      uint64_t msr_lstar;
    6.56      uint64_t msr_star;
    6.57      uint64_t msr_cstar;