ia64/xen-unstable

changeset 10110:d714f923b7cf

Several page allocations, done in the hypervisor when starting an HVM
domain, are not checked. This can cause the physical machine to crash
when starting the HVM domain under low-memory conditions.

Kudos to Charles Arnold for catching the problem with
shadow_direct_map_init.

Signed-off-by: Charles Coffing <ccoffing@novell.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri May 19 16:10:52 2006 +0100 (2006-05-19)
parents f4f2ff82e797
children 33b2ae024663
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Fri May 19 16:08:51 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri May 19 16:10:52 2006 +0100
     1.3 @@ -189,7 +189,11 @@ void hvm_setup_platform(struct domain* d
     1.4      if ( !hvm_guest(current) || (current->vcpu_id != 0) )
     1.5          return;
     1.6  
     1.7 -    shadow_direct_map_init(d);
     1.8 +    if ( shadow_direct_map_init(d) == 0 )
     1.9 +    {
    1.10 +        printk("Can not allocate shadow direct map for HVM domain.\n");
    1.11 +        domain_crash_synchronous();
    1.12 +    }
    1.13  
    1.14      hvm_map_io_shared_page(d);
    1.15      hvm_get_info(d);
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri May 19 16:08:51 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Fri May 19 16:10:52 2006 +0100
     2.3 @@ -458,6 +458,9 @@ int start_svm(void)
     2.4      
     2.5      if (!(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)))
     2.6          return 0;
     2.7 +    svm_globals[cpu].hsa = alloc_host_save_area();
     2.8 +    if (! svm_globals[cpu].hsa)
     2.9 +        return 0;
    2.10      
    2.11      rdmsr(MSR_EFER, eax, edx);
    2.12      eax |= EFER_SVME;
    2.13 @@ -466,7 +469,6 @@ int start_svm(void)
    2.14      printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
    2.15  
    2.16      /* Initialize the HSA for this core */
    2.17 -    svm_globals[cpu].hsa = alloc_host_save_area();
    2.18      phys_hsa = (u64) virt_to_maddr( svm_globals[cpu].hsa ); 
    2.19      phys_hsa_lo = (u32) phys_hsa;
    2.20      phys_hsa_hi = (u32) (phys_hsa >> 32);    
     3.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Fri May 19 16:08:51 2006 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Fri May 19 16:10:52 2006 +0100
     3.3 @@ -139,18 +139,21 @@ static int construct_vmcb_controls(struc
     3.4  
     3.5      /* The following is for I/O and MSR permision map */
     3.6      iopm = alloc_xenheap_pages(get_order_from_bytes(IOPM_SIZE));
     3.7 -
     3.8 -    ASSERT(iopm);
     3.9 -    memset(iopm, 0xff, IOPM_SIZE);
    3.10 -    clear_bit(PC_DEBUG_PORT, iopm);
    3.11 +    if (iopm)
    3.12 +    {
    3.13 +        memset(iopm, 0xff, IOPM_SIZE);
    3.14 +        clear_bit(PC_DEBUG_PORT, iopm);
    3.15 +    }
    3.16      msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE));
    3.17 -
    3.18 -    ASSERT(msrpm);
    3.19 -    memset(msrpm, 0xff, MSRPM_SIZE);
    3.20 +    if (msrpm)
    3.21 +        memset(msrpm, 0xff, MSRPM_SIZE);
    3.22  
    3.23      arch_svm->iopm = iopm;
    3.24      arch_svm->msrpm = msrpm;
    3.25  
    3.26 +    if (! iopm || ! msrpm)
    3.27 +        return 1;
    3.28 +
    3.29      vmcb->iopm_base_pa = (u64) virt_to_maddr(iopm);
    3.30      vmcb->msrpm_base_pa = (u64) virt_to_maddr(msrpm);
    3.31