ia64/xen-unstable

changeset 11383:6bd1a39dbfc8

[XEN][POWERPC] split out an allocate_rma() function from arch_domain_create()
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Hollis Blanchard <hollisb@us.ibm.com>
date Fri Aug 25 15:09:36 2006 -0500 (2006-08-25)
parents 215d5eae720c
children bc349d862a5d
files xen/arch/powerpc/domain.c xen/arch/powerpc/mm.c xen/include/asm-powerpc/mm.h
line diff
--- a/xen/arch/powerpc/domain.c	Fri Aug 25 14:48:07 2006 -0500
+++ b/xen/arch/powerpc/domain.c	Fri Aug 25 15:09:36 2006 -0500
@@ -76,8 +76,9 @@ int arch_domain_create(struct domain *d)
 {
     unsigned long rma_base;
     unsigned long rma_sz;
-    uint htab_order;
-    uint nr_pages;
+    uint rma_order_pages;
+    uint htab_order_pages;
+    int rc;
 
     if (d->domain_id == IDLE_DOMAIN_ID) {
         d->shared_info = (void *)alloc_xenheap_page();
@@ -86,23 +87,16 @@ int arch_domain_create(struct domain *d)
         return 0;
     }
 
-    d->arch.rma_order = cpu_default_rma_order_pages();
-    rma_sz = rma_size(d->arch.rma_order);
-
     /* allocate the real mode area */
-    nr_pages =  1UL << d->arch.rma_order;
-    d->max_pages = nr_pages;
+    rma_order_pages = cpu_default_rma_order_pages();
+    d->max_pages = 1UL << rma_order_pages;
     d->tot_pages = 0;
-    d->arch.rma_page = alloc_domheap_pages(d, d->arch.rma_order, 0);
-    if (NULL == d->arch.rma_page)
-        return 1;
 
+    rc = allocate_rma(d, rma_order_pages);
+    if (rc)
+        return rc;
     rma_base = page_to_maddr(d->arch.rma_page);
-
-    BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
-
-    printk("clearing RMO: 0x%lx[0x%lx]\n", rma_base, rma_sz);
-    memset((void *)rma_base, 0, rma_sz);
+    rma_sz = rma_size(rma_order_pages);
 
     d->shared_info = (shared_info_t *)
         (rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);
@@ -113,12 +107,12 @@ int arch_domain_create(struct domain *d)
     /* FIXME: we need to know the maximum addressable memory for this
      * domain to calculate this correctly. It should probably be set
      * by the management tools */
-    htab_order = d->arch.rma_order - 6; /* (1/64) */
+    htab_order_pages = rma_order_pages - 6; /* (1/64) */
     if (test_bit(_DOMF_privileged, &d->domain_flags)) {
         /* bump the htab size of privileged domains */
-        ++htab_order;
+        ++htab_order_pages;
     }
-    htab_alloc(d, htab_order);
+    htab_alloc(d, htab_order_pages);
 
     return 0;
 }
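
A note on the htab sizing above: it is order arithmetic. Subtracting 6 from a power-of-two page order divides the size by 2^6 = 64, so the hash table gets 1/64 of the RMA (privileged domains get one extra order, i.e. 1/32). A standalone sketch of the arithmetic, not Xen code, assuming 4 KiB pages (PAGE_SHIFT = 12) and that rma_size(order) is simply 2^order pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumed 4 KiB pages */

    int main(void)
    {
        unsigned int rma_order_pages = 14; /* hypothetical: 2^14 pages = 64 MiB RMA */
        unsigned int htab_order_pages = rma_order_pages - 6; /* (1/64) */
        unsigned long rma_sz  = 1UL << (rma_order_pages + PAGE_SHIFT);
        unsigned long htab_sz = 1UL << (htab_order_pages + PAGE_SHIFT);

        printf("RMA %lu MiB, htab %lu KiB, ratio 1/%lu\n",
               rma_sz >> 20, htab_sz >> 10, rma_sz / htab_sz);
        /* prints: RMA 64 MiB, htab 1024 KiB, ratio 1/64 */
        return 0;
    }
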
--- a/xen/arch/powerpc/mm.c	Fri Aug 25 14:48:07 2006 -0500
+++ b/xen/arch/powerpc/mm.c	Fri Aug 25 15:09:36 2006 -0500
@@ -239,6 +239,29 @@ static int mfn_in_hole(ulong mfn)
     return 0;
 }
 
+int allocate_rma(struct domain *d, unsigned int order_pages)
+{
+    ulong rma_base;
+    ulong rma_sz = rma_size(order_pages);
+
+    d->arch.rma_page = alloc_domheap_pages(d, order_pages, 0);
+    if (d->arch.rma_page == NULL) {
+        DPRINTK("Could not allocate order_pages=%d RMA for domain %u\n",
+                order_pages, d->domain_id);
+        return -ENOMEM;
+    }
+    d->arch.rma_order = order_pages;
+
+    rma_base = page_to_maddr(d->arch.rma_page);
+    BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
+
+    /* XXX */
+    printk("clearing RMA: 0x%lx[0x%lx]\n", rma_base, rma_sz);
+    memset((void *)rma_base, 0, rma_sz);
+
+    return 0;
+}
+
 ulong pfn2mfn(struct domain *d, long pfn, int *type)
 {
     ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
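
The BUG_ON in allocate_rma() checks natural alignment with a mask: the domain heap allocator returns order-aligned blocks, and because rma_sz is a power of two, (rma_sz - 1) is exactly the set of low bits that must be zero in an aligned base. A minimal standalone sketch of the same test, with hypothetical addresses:

    #include <assert.h>
    #include <stdio.h>

    static int aligned(unsigned long base, unsigned long size)
    {
        assert((size & (size - 1)) == 0); /* mask trick needs a power of two */
        return (base & (size - 1)) == 0;  /* low bits clear => aligned */
    }

    int main(void)
    {
        unsigned long rma_sz = 1UL << 26;              /* hypothetical 64 MiB RMA */
        printf("%d\n", aligned(0x14000000UL, rma_sz)); /* 1: a multiple of 64 MiB */
        printf("%d\n", aligned(0x14100000UL, rma_sz)); /* 0: off by 1 MiB */
        return 0;
    }

The memset that follows casts the machine address rma_base straight to a pointer, which presumably relies on the hypervisor's real-mode (1:1) addressing on this platform.
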
--- a/xen/include/asm-powerpc/mm.h	Fri Aug 25 14:48:07 2006 -0500
+++ b/xen/include/asm-powerpc/mm.h	Fri Aug 25 15:09:36 2006 -0500
@@ -258,6 +258,8 @@ static inline unsigned long gmfn_to_mfn(
 
 #define mfn_to_gmfn(_d, mfn) (mfn)
 
+extern int allocate_rma(struct domain *d, unsigned int order_pages);
+
 extern int steal_page(struct domain *d, struct page_info *page,
                         unsigned int memflags);
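
The refactoring also tightens the failure convention: the old inline code returned a bare 1, while allocate_rma() returns 0 or -ENOMEM and arch_domain_create() simply propagates the code. A standalone sketch of the resulting shape; the struct and allocator here are stand-ins for the Xen ones, and PAGE_SHIFT = 12 is an assumption:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct domain { void *rma_page; unsigned int rma_order; };

    static int allocate_rma(struct domain *d, unsigned int order_pages)
    {
        d->rma_page = malloc((size_t)1 << (order_pages + 12));
        if (d->rma_page == NULL)
            return -ENOMEM;           /* symbolic error, not a bare 1 */
        d->rma_order = order_pages;
        return 0;
    }

    int main(void)
    {
        struct domain d = { 0, 0 };
        int rc = allocate_rma(&d, 4); /* mirrors: rc = allocate_rma(d, ...); */
        if (rc)
            return 1;                 /* caller just propagates failure */
        printf("allocated order-%u RMA\n", d.rma_order);
        free(d.rma_page);
        return 0;
    }

An errno-style return tells the caller what went wrong, where the old bare 1 carried no information, and it matches the convention used by most other Xen interfaces.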