ia64/xen-unstable

changeset 15193:471478a1b89e

Allow map_domain_page_global to fail

When the global mapping cache runs out of space, make
map_domain_page_global return NULL rather than trigger an assertion
failure, and update the callers to handle the error gracefully.

The only exception is the shadow pagetable code, which uses
map_domain_page_global to map v->arch.paging.shadow.guest_vtable; that
caller keeps an assertion for now, since it is not clear the mapping
needs to be global at all.
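
The new contract for callers looks like this (a minimal sketch; mfn
stands in for whatever frame the caller wants mapped, mirroring the
vlapic change below):

    void *va = map_domain_page_global(mfn);
    if ( va == NULL )
        return -ENOMEM;  /* global mapping cache exhausted */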

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
author Jeremy Fitzhardinge <jeremy@xensource.com>
date Thu May 24 10:45:03 2007 +0100 (2007-05-24)
parents 96915ca8d5f2
children 020530a6ff5c
files xen/arch/x86/hvm/vlapic.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/x86_32/domain_page.c
line diff
     1.1 --- a/xen/arch/x86/hvm/vlapic.c	Thu May 24 10:18:55 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/vlapic.c	Thu May 24 10:45:03 2007 +0100
     1.3 @@ -918,12 +918,19 @@ int vlapic_init(struct vcpu *v)
     1.4      vlapic->regs_page = alloc_domheap_page(NULL);
     1.5      if ( vlapic->regs_page == NULL )
     1.6      {
     1.7 -        dprintk(XENLOG_ERR, "malloc vlapic regs error for vcpu %x\n",
     1.8 +        dprintk(XENLOG_ERR, "malloc vlapic regs_page error for vcpu %x\n",
     1.9                  v->vcpu_id);
    1.10          return -ENOMEM;
    1.11      }
    1.12  
    1.13      vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
    1.14 +    if ( vlapic->regs == NULL )
    1.15 +    {
     1.16 +        dprintk(XENLOG_ERR, "map vlapic regs error for vcpu %x\n",
     1.17 +                v->vcpu_id);
     1.18 +        return -ENOMEM;
    1.19 +    }
    1.20 +
    1.21      memset(vlapic->regs, 0, PAGE_SIZE);
    1.22  
    1.23      vlapic_reset(vlapic);
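
Note that, as written, the new vlapic error path returns without
freeing regs_page; unless the caller's failure path releases it, a
more careful version would free the page first, along these lines (a
sketch using the usual free_domheap_page pairing for
alloc_domheap_page):

    if ( vlapic->regs == NULL )
    {
        /* release the page allocated above before bailing out */
        free_domheap_page(vlapic->regs_page);
        vlapic->regs_page = NULL;
        return -ENOMEM;
    }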
     2.1 --- a/xen/arch/x86/mm/shadow/multi.c	Thu May 24 10:18:55 2007 +0100
     2.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Thu May 24 10:45:03 2007 +0100
     2.3 @@ -3485,6 +3485,8 @@ sh_update_cr3(struct vcpu *v, int do_loc
     2.4          if ( v->arch.paging.shadow.guest_vtable )
     2.5              sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
     2.6          v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
     2.7 +        /* PAGING_LEVELS==4 implies 64-bit, which means that
     2.8 +         * map_domain_page_global can't fail */
     2.9      }
    2.10      else
    2.11          v->arch.paging.shadow.guest_vtable = __linear_l4_table;
    2.12 @@ -3515,6 +3517,9 @@ sh_update_cr3(struct vcpu *v, int do_loc
    2.13          if ( v->arch.paging.shadow.guest_vtable )
    2.14              sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
    2.15          v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
    2.16 +        /* Does this really need map_domain_page_global?  Handle the
    2.17 +         * error properly if so. */
    2.18 +        ASSERT( v->arch.paging.shadow.guest_vtable );
    2.19      }
    2.20      else
    2.21          v->arch.paging.shadow.guest_vtable = __linear_l2_table;
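
If guest_vtable turns out not to need a global mapping, one direction
the comment above suggests is to drop the cached pointer and map the
guest table transiently at each use with the per-CPU primitives, e.g.
(purely illustrative, not part of this patch):

    guest_l2e_t *gl2t = sh_map_domain_page(gmfn);
    /* ... walk the guest table for this operation only ... */
    sh_unmap_domain_page(gl2t);

Per-CPU mappings come from a separate cache and cannot run the global
map out of space, which would sidestep the failure case entirely.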
     3.1 --- a/xen/arch/x86/x86_32/domain_page.c	Thu May 24 10:18:55 2007 +0100
     3.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Thu May 24 10:45:03 2007 +0100
     3.3 @@ -218,17 +218,25 @@ void *map_domain_page_global(unsigned lo
     3.4  
     3.5          idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
     3.6          va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
     3.7 -        ASSERT(va < FIXADDR_START);
     3.8 +        if ( va >= FIXADDR_START )
     3.9 +        {
    3.10 +            va = 0;
    3.11 +            goto fail;
    3.12 +        }
    3.13      }
    3.14  
    3.15      set_bit(idx, inuse);
    3.16      inuse_cursor = idx + 1;
    3.17  
    3.18 +  fail:
    3.19      spin_unlock(&globalmap_lock);
    3.20  
    3.21 -    pl2e = virt_to_xen_l2e(va);
    3.22 -    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
    3.23 -    l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
    3.24 +    if ( likely(va != 0) )
    3.25 +    {
     3.26 +        pl2e = virt_to_xen_l2e(va);
     3.27 +        pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
     3.28 +        l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
    3.29 +    }
    3.30  
    3.31      return (void *)va;
    3.32  }
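
For reference, the failure condition in map_domain_page_global is pure
address arithmetic: slot idx maps to va = IOREMAP_VIRT_START +
(idx << PAGE_SHIFT), so with 4 KiB pages the cache holds
(FIXADDR_START - IOREMAP_VIRT_START) >> PAGE_SHIFT entries (assuming
GLOBALMAP_BITS covers at least that range). Once every slot below
FIXADDR_START is in use, find_first_zero_bit returns an index whose va
crosses FIXADDR_START, and the function now returns NULL instead of
asserting:

    /* illustrative capacity of the global mapping cache, in 4 KiB slots */
    unsigned int nr_slots =
        (FIXADDR_START - IOREMAP_VIRT_START) >> PAGE_SHIFT;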