ia64/xen-unstable

changeset 15856:a53aaea4c698

hvm: Fix up guest_table handling after p2m changes.
Fixes a host crash on HVM guest restore.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Sep 07 19:54:29 2007 +0100 (2007-09-07)
parents f8e7f06b351c
children f16bff0934d7 1a8f8f96e0cd
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm/shadow/multi.c
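The change replaces the per-caller "remember old_base_mfn, then put_page(mfn_to_page(old_base_mfn))" bookkeeping with one pattern: take a reference on the saved CR3 frame only when the saved CR0 has PG set, drop whatever reference the vcpu currently holds via put_page(pagetable_get_page(v->arch.guest_table)), and then install the new table. The sketch below is a condensed restatement of that pattern for the shadow-mode restore paths, not code added by the changeset: the standalone helper name restore_guest_table is illustrative only (the changeset open-codes this in svm_vmcb_restore, vmx_vmcs_restore and vmx_world_restore), and it uses only calls that appear in the diff.

    /* Illustrative condensation of the restore-path pattern in the diff
     * below; not a function introduced by this changeset. */
    static int restore_guest_table(struct vcpu *v, const struct hvm_hw_cpu *c)
    {
        unsigned long mfn = 0;

        /* Only a paging guest image carries a CR3 worth pinning. */
        if ( c->cr0 & X86_CR0_PG )
        {
            mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
                return -EINVAL;            /* bad CR3 in the saved image */
        }

        /* The vcpu holds a reference on its current top-level table only
         * while its own CR0.PG is set; drop it before switching. */
        if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
            put_page(pagetable_get_page(v->arch.guest_table));

        /* Install the new table; pagetable_from_pfn(0) when not paging. */
        v->arch.guest_table = pagetable_from_pfn(mfn);
        return 0;
    }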
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Sep 07 19:53:57 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Sep 07 19:54:29 2007 +0100
     1.3 @@ -586,8 +586,7 @@ int hvm_set_cr0(unsigned long value)
     1.4  
     1.5          if ( !paging_mode_hap(v->domain) )
     1.6          {
     1.7 -            put_page(mfn_to_page(get_mfn_from_gpfn(
     1.8 -                v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
     1.9 +            put_page(pagetable_get_page(v->arch.guest_table));
    1.10              v->arch.guest_table = pagetable_null();
    1.11          }
    1.12      }
    1.13 @@ -603,21 +602,11 @@ int hvm_set_cr0(unsigned long value)
    1.14  
    1.15  int hvm_set_cr3(unsigned long value)
    1.16  {
    1.17 -    unsigned long old_base_mfn, mfn;
    1.18 +    unsigned long mfn;
    1.19      struct vcpu *v = current;
    1.20  
    1.21 -    if ( paging_mode_hap(v->domain) || !hvm_paging_enabled(v) )
    1.22 -    {
    1.23 -        /* Nothing to do. */
    1.24 -    }
    1.25 -    else if ( value == v->arch.hvm_vcpu.guest_cr[3] )
    1.26 -    {
    1.27 -        /* Shadow-mode TLB flush. Invalidate the shadow. */
    1.28 -        mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
    1.29 -        if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
    1.30 -            goto bad_cr3;
    1.31 -    }
    1.32 -    else 
    1.33 +    if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
    1.34 +         (value != v->arch.hvm_vcpu.guest_cr[3]) )
    1.35      {
    1.36          /* Shadow-mode CR3 change. Check PDBR and then make a new shadow. */
    1.37          HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
    1.38 @@ -625,12 +614,9 @@ int hvm_set_cr3(unsigned long value)
    1.39          if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
    1.40              goto bad_cr3;
    1.41  
    1.42 -        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
    1.43 +        put_page(pagetable_get_page(v->arch.guest_table));
    1.44          v->arch.guest_table = pagetable_from_pfn(mfn);
    1.45  
    1.46 -        if ( old_base_mfn )
    1.47 -            put_page(mfn_to_page(old_base_mfn));
    1.48 -
    1.49          HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
    1.50      }
    1.51  
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri Sep 07 19:53:57 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Fri Sep 07 19:54:29 2007 +0100
     2.3 @@ -337,9 +337,37 @@ int svm_vmcb_save(struct vcpu *v, struct
     2.4  
     2.5  int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     2.6  {
     2.7 -    unsigned long mfn, old_base_mfn;
     2.8 +    unsigned long mfn = 0;
     2.9      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    2.10  
    2.11 +    if ( c->pending_valid &&
    2.12 +         ((c->pending_type == 1) || (c->pending_type > 6) ||
    2.13 +          (c->pending_reserved != 0)) )
    2.14 +    {
    2.15 +        gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32".\n",
    2.16 +                 c->pending_event);
    2.17 +        return -EINVAL;
    2.18 +    }
    2.19 +
    2.20 +    if ( !paging_mode_hap(v->domain) )
    2.21 +    {
    2.22 +        if ( c->cr0 & X86_CR0_PG )
    2.23 +        {
    2.24 +            mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
    2.25 +            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
    2.26 +            {
    2.27 +                gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n",
    2.28 +                         c->cr3);
    2.29 +                return -EINVAL;
    2.30 +            }
    2.31 +        }
    2.32 +
    2.33 +        if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
    2.34 +            put_page(pagetable_get_page(v->arch.guest_table));
    2.35 +
    2.36 +        v->arch.guest_table = pagetable_from_pfn(mfn);
    2.37 +    }
    2.38 +
    2.39      vmcb->rip    = c->rip;
    2.40      vmcb->rsp    = c->rsp;
    2.41      vmcb->rflags = c->rflags;
    2.42 @@ -357,18 +385,6 @@ int svm_vmcb_restore(struct vcpu *v, str
    2.43             __func__, c->cr3, c->cr0, c->cr4);
    2.44  #endif
    2.45  
    2.46 -    if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
    2.47 -    {
    2.48 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
    2.49 -        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
    2.50 -        if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) ) 
    2.51 -            goto bad_cr3;
    2.52 -        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
    2.53 -        v->arch.guest_table = pagetable_from_pfn(mfn);
    2.54 -        if ( old_base_mfn )
    2.55 -             put_page(mfn_to_page(old_base_mfn));
    2.56 -    }
    2.57 -
    2.58      vmcb->idtr.limit = c->idtr_limit;
    2.59      vmcb->idtr.base  = c->idtr_base;
    2.60  
    2.61 @@ -435,14 +451,6 @@ int svm_vmcb_restore(struct vcpu *v, str
    2.62          gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
    2.63                   c->pending_event, c->error_code);
    2.64  
    2.65 -        if ( (c->pending_type == 1) || (c->pending_type > 6) ||
    2.66 -             (c->pending_reserved != 0) )
    2.67 -        {
    2.68 -            gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32"\n", 
    2.69 -                     c->pending_event);
    2.70 -            return -EINVAL;
    2.71 -        }
    2.72 -
    2.73          if ( hvm_event_needs_reinjection(c->pending_type, c->pending_vector) )
    2.74          {
    2.75              vmcb->eventinj.bytes = c->pending_event;
    2.76 @@ -453,10 +461,6 @@ int svm_vmcb_restore(struct vcpu *v, str
    2.77      paging_update_paging_modes(v);
    2.78  
    2.79      return 0;
    2.80 - 
    2.81 - bad_cr3:
    2.82 -    gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n", c->cr3);
    2.83 -    return -EINVAL;
    2.84  }
    2.85  
    2.86          
     3.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Sep 07 19:53:57 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Sep 07 19:54:29 2007 +0100
     3.3 @@ -565,7 +565,31 @@ void vmx_vmcs_save(struct vcpu *v, struc
     3.4  
     3.5  int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     3.6  {
     3.7 -    unsigned long mfn, old_base_mfn;
     3.8 +    unsigned long mfn = 0;
     3.9 +
    3.10 +    if ( c->pending_valid &&
    3.11 +         ((c->pending_type == 1) || (c->pending_type > 6) ||
    3.12 +          (c->pending_reserved != 0)) )
    3.13 +    {
    3.14 +        gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32".\n",
    3.15 +                 c->pending_event);
    3.16 +        return -EINVAL;
    3.17 +    }
    3.18 +
    3.19 +    if ( c->cr0 & X86_CR0_PG )
    3.20 +    {
    3.21 +        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
    3.22 +        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
    3.23 +        {
    3.24 +            gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n", c->cr3);
    3.25 +            return -EINVAL;
    3.26 +        }
    3.27 +    }
    3.28 +
    3.29 +    if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
    3.30 +        put_page(pagetable_get_page(v->arch.guest_table));
    3.31 +
    3.32 +    v->arch.guest_table = pagetable_from_pfn(mfn);
    3.33  
    3.34      vmx_vmcs_enter(v);
    3.35  
    3.36 @@ -586,18 +610,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
    3.37             __func__, c->cr3, c->cr0, c->cr4);
    3.38  #endif
    3.39  
    3.40 -    if ( hvm_paging_enabled(v) )
    3.41 -    {
    3.42 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
    3.43 -        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
    3.44 -        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
    3.45 -            goto bad_cr3;
    3.46 -        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
    3.47 -        v->arch.guest_table = pagetable_from_pfn(mfn);
    3.48 -        if ( old_base_mfn )
    3.49 -            put_page(mfn_to_page(old_base_mfn));
    3.50 -    }
    3.51 -
    3.52      v->arch.hvm_vcpu.guest_efer = c->msr_efer;
    3.53      vmx_update_guest_efer(v);
    3.54  
    3.55 @@ -662,14 +674,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
    3.56          gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
    3.57                   c->pending_event, c->error_code);
    3.58  
    3.59 -        if ( (c->pending_type == 1) || (c->pending_type > 6) ||
    3.60 -             (c->pending_reserved != 0) )
    3.61 -        {
    3.62 -            gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32".\n",
    3.63 -                     c->pending_event);
    3.64 -            return -EINVAL;
    3.65 -        }
    3.66 -
    3.67          if ( hvm_event_needs_reinjection(c->pending_type, c->pending_vector) )
    3.68          {
    3.69              vmx_vmcs_enter(v);
    3.70 @@ -680,11 +684,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
    3.71      }
    3.72  
    3.73      return 0;
    3.74 -
    3.75 - bad_cr3:
    3.76 -    gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n", c->cr3);
    3.77 -    vmx_vmcs_exit(v);
    3.78 -    return -EINVAL;
    3.79  }
    3.80  
    3.81  #if defined(__x86_64__) && defined(HVM_DEBUG_SUSPEND)
    3.82 @@ -1905,7 +1904,22 @@ static void vmx_world_save(struct vcpu *
    3.83  
    3.84  static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
    3.85  {
    3.86 -    unsigned long mfn, old_base_mfn;
    3.87 +    unsigned long mfn = 0;
    3.88 +
    3.89 +    if ( c->cr0 & X86_CR0_PG )
    3.90 +    {
    3.91 +        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
    3.92 +        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
    3.93 +        {
    3.94 +            gdprintk(XENLOG_ERR, "Invalid CR3 value=%x", c->cr3);
    3.95 +            return -EINVAL;
    3.96 +        }
    3.97 +    }
    3.98 +
    3.99 +    if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
   3.100 +        put_page(pagetable_get_page(v->arch.guest_table));
   3.101 +
   3.102 +    v->arch.guest_table = pagetable_from_pfn(mfn);
   3.103  
   3.104      __vmwrite(GUEST_RIP, c->eip);
   3.105      __vmwrite(GUEST_RSP, c->esp);
   3.106 @@ -1917,18 +1931,6 @@ static int vmx_world_restore(struct vcpu
   3.107      vmx_update_guest_cr(v, 0);
   3.108      vmx_update_guest_cr(v, 4);
   3.109  
   3.110 -    if ( hvm_paging_enabled(v) )
   3.111 -    {
   3.112 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %x", c->cr3);
   3.113 -        mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
   3.114 -        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
   3.115 -            goto bad_cr3;
   3.116 -        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   3.117 -        v->arch.guest_table = pagetable_from_pfn(mfn);
   3.118 -        if ( old_base_mfn )
   3.119 -             put_page(mfn_to_page(old_base_mfn));
   3.120 -    }
   3.121 -
   3.122      __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
   3.123      __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
   3.124  
   3.125 @@ -1977,10 +1979,6 @@ static int vmx_world_restore(struct vcpu
   3.126  
   3.127      paging_update_paging_modes(v);
   3.128      return 0;
   3.129 -
   3.130 - bad_cr3:
   3.131 -    gdprintk(XENLOG_ERR, "Invalid CR3 value=%x", c->cr3);
   3.132 -    return -EINVAL;
   3.133  }
   3.134  
   3.135  enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
     4.1 --- a/xen/arch/x86/mm/shadow/multi.c	Fri Sep 07 19:53:57 2007 +0100
     4.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Fri Sep 07 19:54:29 2007 +0100
     4.3 @@ -3502,24 +3502,12 @@ sh_update_cr3(struct vcpu *v, int do_loc
     4.4      /* Double-check that the HVM code has sent us a sane guest_table */
     4.5      if ( is_hvm_domain(d) )
     4.6      {
     4.7 -        gfn_t gfn;
     4.8 -
     4.9          ASSERT(shadow_mode_external(d));
    4.10 -
    4.11 -        // Is paging enabled on this vcpu?
    4.12          if ( hvm_paging_enabled(v) )
    4.13 -        {
    4.14 -            gfn = _gfn(paddr_to_pfn(v->arch.hvm_vcpu.guest_cr[3]));
    4.15 -            gmfn = gfn_to_mfn(d, gfn);
    4.16 -            ASSERT(mfn_valid(gmfn));
    4.17 -            ASSERT(pagetable_get_pfn(v->arch.guest_table) == mfn_x(gmfn));
    4.18 -        } 
    4.19 +            ASSERT(pagetable_get_pfn(v->arch.guest_table));
    4.20          else 
    4.21 -        {
    4.22 -            /* Paging disabled: guest_table points at a 32-bit 1-to-1 map */
    4.23              ASSERT(v->arch.guest_table.pfn
    4.24                     == d->arch.paging.shadow.unpaged_pagetable.pfn);
    4.25 -        }
    4.26      }
    4.27  #endif
    4.28