ia64/xen-unstable

changeset 8319:5f94478ca9b5

VMX domains should call domain_crash_synchronous instead of domain_crash,
because domain_crash eventually returns to its caller, and a system crash
was observed after it returned.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Dec 11 00:17:11 2005 +0100 (2005-12-11)
parents c0dbb3a538f2
children bdcb115c667a
files xen/arch/x86/shadow_public.c xen/arch/x86/vmx.c xen/arch/x86/vmx_vmcs.c
line diff
     1.1 --- a/xen/arch/x86/shadow_public.c	Sun Dec 11 00:16:26 2005 +0100
     1.2 +++ b/xen/arch/x86/shadow_public.c	Sun Dec 11 00:17:11 2005 +0100
     1.3 @@ -236,26 +236,24 @@ static pagetable_t page_table_convert(st
     1.4      l4_pgentry_t *l4;
     1.5      l3_pgentry_t *l3, *pae_l3;
     1.6      int i;
     1.7 -    
     1.8 +
     1.9      l4page = alloc_domheap_page(NULL);
    1.10      if (l4page == NULL)
    1.11 -        domain_crash(d);
    1.12 +        domain_crash_synchronous();
    1.13      l4 = map_domain_page(page_to_pfn(l4page));
    1.14      memset(l4, 0, PAGE_SIZE);
    1.15  
    1.16      l3page = alloc_domheap_page(NULL);
    1.17      if (l3page == NULL)
    1.18 -        domain_crash(d);
    1.19 -    l3 =  map_domain_page(page_to_pfn(l3page));
    1.20 +        domain_crash_synchronous();
    1.21 +    l3 = map_domain_page(page_to_pfn(l3page));
    1.22      memset(l3, 0, PAGE_SIZE);
    1.23  
    1.24      l4[0] = l4e_from_page(l3page, __PAGE_HYPERVISOR);
    1.25 +
    1.26      pae_l3 = map_domain_page(pagetable_get_pfn(d->arch.phys_table));
    1.27 -
    1.28 -    for (i = 0; i < PDP_ENTRIES; i++) {
    1.29 -        l3[i] = pae_l3[i];
    1.30 -        l3e_add_flags(l3[i], 0x67);
    1.31 -    }
    1.32 +    for (i = 0; i < PDP_ENTRIES; i++)
    1.33 +        l3[i] = l3e_from_pfn(l3e_get_pfn(pae_l3[i]), __PAGE_HYPERVISOR);
    1.34  
    1.35      unmap_domain_page(l4);
    1.36      unmap_domain_page(l3);
    1.37 @@ -276,17 +274,18 @@ static void alloc_monitor_pagetable(stru
    1.38      mmfn_info = alloc_domheap_page(NULL);
    1.39      ASSERT( mmfn_info );
    1.40  
    1.41 -    mmfn = (unsigned long) (mmfn_info - frame_table);
    1.42 +    mmfn = page_to_pfn(mmfn_info);
    1.43      mpl4e = (l4_pgentry_t *) map_domain_page(mmfn);
    1.44      memcpy(mpl4e, &idle_pg_table[0], PAGE_SIZE);
    1.45      mpl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
    1.46          l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
    1.47 +
    1.48      /* map the phys_to_machine map into the per domain Read-Only MPT space */
    1.49      phys_table = page_table_convert(d);
    1.50 -
    1.51      mpl4e[l4_table_offset(RO_MPT_VIRT_START)] =
    1.52          l4e_from_paddr(pagetable_get_paddr(phys_table),
    1.53                         __PAGE_HYPERVISOR);
    1.54 +
    1.55      v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
    1.56      v->arch.monitor_vtable = (l2_pgentry_t *) mpl4e;
    1.57  }
     2.1 --- a/xen/arch/x86/vmx.c	Sun Dec 11 00:16:26 2005 +0100
     2.2 +++ b/xen/arch/x86/vmx.c	Sun Dec 11 00:17:11 2005 +0100
     2.3 @@ -196,12 +196,12 @@ static inline int long_mode_do_msr_read(
     2.4      case MSR_FS_BASE:
     2.5          if (!(VMX_LONG_GUEST(vc)))
     2.6              /* XXX should it be GP fault */
     2.7 -            domain_crash(vc->domain);
     2.8 +            domain_crash_synchronous();
     2.9          __vmread(GUEST_FS_BASE, &msr_content);
    2.10          break;
    2.11      case MSR_GS_BASE:
    2.12          if (!(VMX_LONG_GUEST(vc)))
    2.13 -            domain_crash(vc->domain);
    2.14 +            domain_crash_synchronous();
    2.15          __vmread(GUEST_GS_BASE, &msr_content);
    2.16          break;
    2.17      case MSR_SHADOW_GS_BASE:
    2.18 @@ -265,7 +265,7 @@ static inline int long_mode_do_msr_write
    2.19      case MSR_FS_BASE:
    2.20      case MSR_GS_BASE:
    2.21          if (!(VMX_LONG_GUEST(vc)))
    2.22 -            domain_crash(vc->domain);
    2.23 +            domain_crash_synchronous();
    2.24          if (!IS_CANO_ADDRESS(msr_content)){
    2.25              VMX_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
    2.26              vmx_inject_exception(vc, TRAP_gp_fault, 0);
    2.27 @@ -278,7 +278,7 @@ static inline int long_mode_do_msr_write
    2.28  
    2.29      case MSR_SHADOW_GS_BASE:
    2.30          if (!(VMX_LONG_GUEST(vc)))
    2.31 -            domain_crash(vc->domain);
    2.32 +            domain_crash_synchronous();
    2.33          vc->arch.arch_vmx.msr_content.shadow_gs = msr_content;
    2.34          wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
    2.35          break;
    2.36 @@ -1345,12 +1345,8 @@ static int mov_to_cr(int gp, int cr, str
    2.37          }
    2.38          break;
    2.39      }
    2.40 -    case 4:
    2.41 +    case 4: /* CR4 */
    2.42      {
    2.43 -        /* CR4 */
    2.44 -        unsigned long old_guest_cr;
    2.45 -
    2.46 -        __vmread(GUEST_CR4, &old_guest_cr);
    2.47          if (value & X86_CR4_PAE){
    2.48              set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.arch_vmx.cpu_state);
    2.49          } else {
     3.1 --- a/xen/arch/x86/vmx_vmcs.c	Sun Dec 11 00:16:26 2005 +0100
     3.2 +++ b/xen/arch/x86/vmx_vmcs.c	Sun Dec 11 00:17:11 2005 +0100
     3.3 @@ -157,13 +157,13 @@ static void vmx_map_io_shared_page(struc
     3.4      mpfn = get_mfn_from_pfn(E820_MAP_PAGE >> PAGE_SHIFT);
     3.5      if (mpfn == INVALID_MFN) {
     3.6          printk("Can not find E820 memory map page for VMX domain.\n");
     3.7 -        domain_crash(d);
     3.8 +        domain_crash_synchronous();
     3.9      }
    3.10  
    3.11      p = map_domain_page(mpfn);
    3.12      if (p == NULL) {
    3.13          printk("Can not map E820 memory map page for VMX domain.\n");
    3.14 -        domain_crash(d);
    3.15 +        domain_crash_synchronous();
    3.16      }
    3.17  
    3.18      e820_map_nr = *(p + E820_MAP_NR_OFFSET);
    3.19 @@ -182,7 +182,7 @@ static void vmx_map_io_shared_page(struc
    3.20          printk("Can not get io request shared page"
    3.21                 " from E820 memory map for VMX domain.\n");
    3.22          unmap_domain_page(p);
    3.23 -        domain_crash(d);
    3.24 +        domain_crash_synchronous();
    3.25      }
    3.26      unmap_domain_page(p);
    3.27  
    3.28 @@ -190,13 +190,13 @@ static void vmx_map_io_shared_page(struc
    3.29      mpfn = get_mfn_from_pfn(gpfn);
    3.30      if (mpfn == INVALID_MFN) {
    3.31          printk("Can not find io request shared page for VMX domain.\n");
    3.32 -        domain_crash(d);
    3.33 +        domain_crash_synchronous();
    3.34      }
    3.35  
    3.36      p = map_domain_page(mpfn);
    3.37      if (p == NULL) {
    3.38          printk("Can not map io request shared page for VMX domain.\n");
    3.39 -        domain_crash(d);
    3.40 +        domain_crash_synchronous();
    3.41      }
    3.42      d->arch.vmx_platform.shared_page_va = (unsigned long)p;
    3.43