ia64/xen-unstable

changeset 14239:b75609e1fa81

[POWERPC][XEN] Implement guest_physmap_{add,remove}_page().
- Use p2m array in pfn2mfn() and DOMCTL_getmemlist.
- Remove domain extent list.
- Create and use an m2p array for mfn_to_gmfn().
Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Hollis Blanchard <hollisb@us.ibm.com>
date Fri Mar 02 17:07:59 2007 -0600 (2007-03-02)
parents f56981f78d73
children 4c08045ff57c
files xen/arch/powerpc/domain.c xen/arch/powerpc/domain_build.c xen/arch/powerpc/domctl.c xen/arch/powerpc/iommu.c xen/arch/powerpc/memory.c xen/arch/powerpc/mm.c xen/arch/powerpc/ofd_fixup_memory.c xen/include/asm-powerpc/domain.h xen/include/asm-powerpc/mm.h
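
The change summarized above replaces the per-domain extent list with two flat arrays: a per-domain p2m array indexed by guest PFN (holding MFNs) and a global m2p array indexed by MFN (holding guest PFNs). The following minimal, self-contained C sketch illustrates that scheme only; the names (toy_domain, toy_physmap_add, toy_physmap_remove) and the table sizes are illustrative assumptions, not the actual Xen interfaces, which appear in the diff below.

/* Hypothetical sketch (not the Xen code): a per-domain p2m array and a
 * global m2p array, mirroring what guest_physmap_add_page() and
 * guest_physmap_remove_page() maintain in this changeset. */
#include <stdio.h>
#include <stdlib.h>

#define INVALID_MFN        (~0UL)
#define INVALID_M2P_ENTRY  (~0UL)
#define TOY_MACHINE_PAGES  64            /* toy machine size (assumption) */

static unsigned long machine_to_phys[TOY_MACHINE_PAGES];  /* m2p: MFN -> PFN */

struct toy_domain {
    unsigned long *p2m;                  /* p2m: guest PFN -> MFN */
    unsigned long p2m_entries;
};

/* Record a mapping in both directions, as guest_physmap_add_page() does. */
static void toy_physmap_add(struct toy_domain *d, unsigned long gpfn,
                            unsigned long mfn)
{
    if (gpfn >= d->p2m_entries) {
        printf("won't map PFN 0x%lx beyond the p2m table\n", gpfn);
        return;
    }
    if (d->p2m[gpfn] != INVALID_MFN)
        printf("warning: PFN 0x%lx already mapped to MFN 0x%lx\n",
               gpfn, d->p2m[gpfn]);
    d->p2m[gpfn] = mfn;                  /* forward map (p2m) */
    machine_to_phys[mfn] = gpfn;         /* reverse map (m2p) */
}

/* Invalidate both directions, as guest_physmap_remove_page() does. */
static void toy_physmap_remove(struct toy_domain *d, unsigned long gpfn,
                               unsigned long mfn)
{
    if (gpfn >= d->p2m_entries)
        return;
    d->p2m[gpfn] = INVALID_MFN;
    machine_to_phys[mfn] = INVALID_M2P_ENTRY;
}

int main(void)
{
    struct toy_domain d;
    unsigned long i;

    d.p2m_entries = 16;
    d.p2m = malloc(d.p2m_entries * sizeof(*d.p2m));
    if (d.p2m == NULL)
        return 1;
    for (i = 0; i < d.p2m_entries; i++)
        d.p2m[i] = INVALID_MFN;
    for (i = 0; i < TOY_MACHINE_PAGES; i++)
        machine_to_phys[i] = INVALID_M2P_ENTRY;

    /* Map a 4-page extent starting at MFN 32 at guest PFN 0, much like
     * allocate_extents() building the p2m for a newly allocated extent. */
    for (i = 0; i < 4; i++)
        toy_physmap_add(&d, i, 32 + i);

    printf("p2m[2]  = 0x%lx\n", d.p2m[2]);            /* pfn2mfn-style lookup */
    printf("m2p[33] = 0x%lx\n", machine_to_phys[33]); /* mfn_to_gmfn-style lookup */

    toy_physmap_remove(&d, 2, d.p2m[2]);
    free(d.p2m);
    return 0;
}

The real functions additionally check page ownership via page_get_owner() before touching either table, as shown in the mm.c hunks below.
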
line diff
     1.1 --- a/xen/arch/powerpc/domain.c	Fri Mar 02 17:07:01 2007 -0600
     1.2 +++ b/xen/arch/powerpc/domain.c	Fri Mar 02 17:07:59 2007 -0600
     1.3 @@ -88,8 +88,6 @@ int arch_domain_create(struct domain *d)
     1.4      d->arch.large_page_sizes = cpu_large_page_orders(
     1.5          d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));
     1.6  
     1.7 -    INIT_LIST_HEAD(&d->arch.extent_list);
     1.8 -
     1.9      d->arch.foreign_mfn_count = 1024;
    1.10      d->arch.foreign_mfns = xmalloc_array(uint, d->arch.foreign_mfn_count);
    1.11      BUG_ON(d->arch.foreign_mfns == NULL);
    1.12 @@ -311,7 +309,6 @@ void domain_relinquish_resources(struct 
    1.13  {
    1.14      relinquish_memory(d, &d->xenpage_list);
    1.15      relinquish_memory(d, &d->page_list);
    1.16 -    free_extents(d);
    1.17      xfree(d->arch.foreign_mfns);
    1.18      xfree(d->arch.p2m);
    1.19      return;
     2.1 --- a/xen/arch/powerpc/domain_build.c	Fri Mar 02 17:07:01 2007 -0600
     2.2 +++ b/xen/arch/powerpc/domain_build.c	Fri Mar 02 17:07:59 2007 -0600
     2.3 @@ -16,6 +16,8 @@
     2.4   * Copyright IBM Corp. 2005, 2007
     2.5   *
     2.6   * Authors: Jimi Xenidis <jimix@watson.ibm.com>
     2.7 + *          Ryan Harper <ryanh@us.ibm.com>
     2.8 + *          Hollis Blanchard <hollisb@us.ibm.com>
     2.9   */
    2.10  
    2.11  #include <xen/config.h>
    2.12 @@ -27,7 +29,9 @@
    2.13  #include <xen/shadow.h>
    2.14  #include <xen/domain.h>
    2.15  #include <xen/version.h>
    2.16 +#include <xen/shadow.h>
    2.17  #include <asm/processor.h>
    2.18 +#include <asm/platform.h>
    2.19  #include <asm/papr.h>
    2.20  #include <public/arch-powerpc.h>
    2.21  #include <public/libelf.h>
    2.22 @@ -73,6 +77,7 @@ int construct_dom0(struct domain *d,
    2.23      ulong mod_start = 0;
    2.24      ulong mod_len = 0;
    2.25      ulong shared_info_addr;
    2.26 +    uint extent_size = 1 << cpu_extent_order();
    2.27  
    2.28      /* Sanity! */
    2.29      BUG_ON(d->domain_id != 0);
    2.30 @@ -110,13 +115,32 @@ int construct_dom0(struct domain *d,
    2.31              dom0_nrpages = CONFIG_MIN_DOM0_PAGES;
    2.32      }
    2.33  
    2.34 -    /* DOM0 has to be at least RMA size. */
    2.35 +    /* Dom0 has to be at least RMA size. */
    2.36      if (dom0_nrpages < rma_nrpages) {
    2.37          dom0_nrpages = rma_nrpages;
    2.38 -        printk("Forcing DOM0 memory size to %u MiB\n", 
    2.39 +        printk("Increasing DOM0 memory size to %u MiB for RMA.\n", 
    2.40                  ((rma_nrpages << PAGE_SHIFT) >> 20));
    2.41      }
    2.42  
    2.43 +    /* Ensure Dom0 is cpu_extent_order aligned. Round up if
    2.44 +       it is not and let the user know we did so. */
    2.45 +    if (dom0_nrpages != ALIGN_UP(dom0_nrpages, extent_size)) {
    2.46 +        dom0_nrpages = ALIGN_UP(dom0_nrpages, extent_size);
    2.47 +        printk("Increasing DOM0 memory size to %u MiB for large pages.\n", 
    2.48 +                ((dom0_nrpages << PAGE_SHIFT) >> 20));
    2.49 +    }
    2.50 +
    2.51 +    /* XXX Dom0 currently can't extend past the IO hole. */
    2.52 +    if (dom0_nrpages > (platform_iohole_base() >> PAGE_SHIFT)) {
    2.53 +        dom0_nrpages = (platform_iohole_base() >> PAGE_SHIFT);
    2.54 +        printk("Limiting DOM0 memory size to %u MiB to avoid IO hole.\n", 
    2.55 +                ((dom0_nrpages << PAGE_SHIFT) >> 20));
    2.56 +    }
    2.57 +
    2.58 +    /* Set Dom0 max mem, triggering p2m table creation. */
    2.59 +    if ((guest_physmap_max_mem_pages(d, dom0_nrpages)) != 0)
    2.60 +        panic("Failed to set DOM0 max mem pages value\n");
    2.61 +
    2.62      d->max_pages = dom0_nrpages;
    2.63      if (0 > allocate_rma(d, cpu_default_rma_order_pages()))
    2.64          panic("Error allocating domain 0 RMA\n");
     3.1 --- a/xen/arch/powerpc/domctl.c	Fri Mar 02 17:07:01 2007 -0600
     3.2 +++ b/xen/arch/powerpc/domctl.c	Fri Mar 02 17:07:59 2007 -0600
     3.3 @@ -13,9 +13,10 @@
     3.4   * along with this program; if not, write to the Free Software
     3.5   * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
     3.6   *
     3.7 - * Copyright (C) IBM Corp. 2005
     3.8 + * Copyright IBM Corp. 2005, 2007
     3.9   *
    3.10   * Authors: Jimi Xenidis <jimix@watson.ibm.com>
    3.11 + *          Ryan Harper <ryanh@us.ibm.com>
    3.12   */
    3.13  
    3.14  #include <xen/config.h>
    3.15 @@ -50,7 +51,6 @@ long arch_do_domctl(struct xen_domctl *d
    3.16          struct domain *d = get_domain_by_id(domctl->domain);
    3.17          unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
    3.18          uint64_t mfn;
    3.19 -        struct list_head *list_ent;
    3.20  
    3.21          ret = -EINVAL;
    3.22          if ( d != NULL )
    3.23 @@ -58,18 +58,20 @@ long arch_do_domctl(struct xen_domctl *d
    3.24              ret = 0;
    3.25  
    3.26              spin_lock(&d->page_alloc_lock);
    3.27 -            list_ent = d->page_list.next;
    3.28 -            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
    3.29 -            {
    3.30 -                mfn = page_to_mfn(list_entry(
    3.31 -                    list_ent, struct page_info, list));
    3.32 -                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
    3.33 -                                          i, &mfn, 1) )
    3.34 +            for (i = 0; i < max_pfns; i++) {
    3.35 +                /* bail if index is beyond p2m size */
    3.36 +                if (i >= d->arch.p2m_entries)
    3.37 +                    break;
    3.38 +
    3.39 +                /* translate */
    3.40 +                mfn = d->arch.p2m[i];
    3.41 +
    3.42 +                if (copy_to_guest_offset(domctl->u.getmemlist.buffer,
    3.43 +                                          i, &mfn, 1))
    3.44                  {
    3.45                      ret = -EFAULT;
    3.46                      break;
    3.47                  }
    3.48 -                list_ent = mfn_to_page(mfn)->list.next;
    3.49              }
    3.50              spin_unlock(&d->page_alloc_lock);
    3.51  
     4.1 --- a/xen/arch/powerpc/iommu.c	Fri Mar 02 17:07:01 2007 -0600
     4.2 +++ b/xen/arch/powerpc/iommu.c	Fri Mar 02 17:07:59 2007 -0600
     4.3 @@ -13,7 +13,7 @@
     4.4   * along with this program; if not, write to the Free Software
     4.5   * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
     4.6   *
     4.7 - * Copyright (C) IBM Corp. 2005
     4.8 + * Copyright IBM Corp. 2005, 2007
     4.9   *
    4.10   * Authors: Jimi Xenidis <jimix@watson.ibm.com>
    4.11   */
    4.12 @@ -62,7 +62,6 @@ int iommu_put(u32 buid, ulong ioba, unio
    4.13          mfn = pfn2mfn(d, gmfn, &mtype);
    4.14          if (mfn != INVALID_MFN) {
    4.15              switch (mtype) {
    4.16 -            case PFN_TYPE_RMA:
    4.17              case PFN_TYPE_LOGICAL:
    4.18                  break;
    4.19              case PFN_TYPE_FOREIGN:
     5.1 --- a/xen/arch/powerpc/memory.c	Fri Mar 02 17:07:01 2007 -0600
     5.2 +++ b/xen/arch/powerpc/memory.c	Fri Mar 02 17:07:59 2007 -0600
     5.3 @@ -176,6 +176,7 @@ void memory_init(module_t *mod, int mcou
     5.4      DBG("total_pages: 0x%016lx\n", total_pages);
     5.5  
     5.6      init_frametable();
     5.7 +    init_machine_to_phys_table();
     5.8  
     5.9      numa_initmem_init(0, max_page);
    5.10  
     6.1 --- a/xen/arch/powerpc/mm.c	Fri Mar 02 17:07:01 2007 -0600
     6.2 +++ b/xen/arch/powerpc/mm.c	Fri Mar 02 17:07:59 2007 -0600
     6.3 @@ -17,6 +17,7 @@
     6.4   *
     6.5   * Authors: Hollis Blanchard <hollisb@us.ibm.com>
     6.6   *          Jimi Xenidis <jimix@watson.ibm.com>
     6.7 + *          Ryan Harper <ryanh@us.ibm.com>
     6.8   */
     6.9  
    6.10  #include <xen/config.h>
    6.11 @@ -29,6 +30,7 @@
    6.12  #include <asm/page.h>
    6.13  #include <asm/platform.h>
    6.14  #include <asm/string.h>
    6.15 +#include <asm/platform.h>
    6.16  #include <public/arch-powerpc.h>
    6.17  
    6.18  #ifdef VERBOSE
    6.19 @@ -44,6 +46,9 @@ struct page_info *frame_table;
    6.20  unsigned long max_page;
    6.21  unsigned long total_pages;
    6.22  
    6.23 +/* machine to phys mapping used by all domains */
    6.24 +unsigned long *machine_phys_mapping;
    6.25 +
    6.26  void __init init_frametable(void)
    6.27  {
    6.28      unsigned long p;
    6.29 @@ -61,6 +66,24 @@ void __init init_frametable(void)
    6.30          clear_page((void *)((p + i) << PAGE_SHIFT));
    6.31  }
    6.32  
    6.33 +/* Array of PFNs, indexed by MFN. */
    6.34 +void __init init_machine_to_phys_table(void)
    6.35 +{
    6.36 +    unsigned long p;
    6.37 +    unsigned long nr_pages;
    6.38 +    int i;
    6.39 +
    6.40 +    nr_pages = PFN_UP(max_page * sizeof(unsigned long));
    6.41 +
    6.42 +    p = alloc_boot_pages(nr_pages, 1);
    6.43 +    if (p == 0)
    6.44 +        panic("Not enough memory for machine phys mapping table\n");
    6.45 +
    6.46 +    machine_phys_mapping = (unsigned long *)(p << PAGE_SHIFT);
    6.47 +    for (i = 0; i < nr_pages; i += 1)
    6.48 +        clear_page((void *)((p + i) << PAGE_SHIFT));
    6.49 +}
    6.50 +
    6.51  void share_xen_page_with_guest(
    6.52      struct page_info *page, struct domain *d, int readonly)
    6.53  {
    6.54 @@ -290,46 +313,16 @@ extern void copy_page(void *dp, void *sp
    6.55      }
    6.56  }
    6.57  
    6.58 -/* XXX should probably replace with faster data structure */
    6.59 -static uint add_extent(struct domain *d, struct page_info *pg, uint order)
    6.60 -{
    6.61 -    struct page_extents *pe;
    6.62 -
    6.63 -    pe = xmalloc(struct page_extents);
    6.64 -    if (pe == NULL)
    6.65 -        return -ENOMEM;
    6.66 -
    6.67 -    pe->pg = pg;
    6.68 -    pe->order = order;
    6.69 -
    6.70 -    list_add_tail(&pe->pe_list, &d->arch.extent_list);
    6.71 -
    6.72 -    return 0;
    6.73 -}
    6.74 -
    6.75 -void free_extents(struct domain *d)
    6.76 -{
    6.77 -    /* we just need to free the memory behind list */
    6.78 -    struct list_head *list;
    6.79 -    struct list_head *ent;
    6.80 -    struct list_head *next;
    6.81 -
    6.82 -    list = &d->arch.extent_list;
    6.83 -    ent = list->next;
    6.84 -
    6.85 -    while (ent != list) {
    6.86 -        next = ent->next;
    6.87 -        xfree(ent);
    6.88 -        ent = next;
    6.89 -    }
    6.90 -}
    6.91 -
    6.92 +/* Allocate (nrpages - rma_nrpages) more memory for domain in proper size. */
    6.93  uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
    6.94  {
    6.95 +    struct page_info *pg;
    6.96 +    ulong mfn;
    6.97 +    ulong gpfn = rma_nrpages; /* starting PFN at end of RMA */
    6.98      uint ext_order;
    6.99      uint ext_nrpages;
   6.100      uint total_nrpages;
   6.101 -    struct page_info *pg;
   6.102 +    int i;
   6.103  
   6.104      ext_order = cpu_extent_order();
   6.105      ext_nrpages = 1 << ext_order;
   6.106 @@ -337,16 +330,20 @@ uint allocate_extents(struct domain *d, 
   6.107      total_nrpages = rma_nrpages;
   6.108  
   6.109      /* We only allocate in nr_extsz chunks so if you are not divisible
   6.110 -     * you get more than you asked for */
   6.111 +     * you get more than you asked for. */
   6.112      while (total_nrpages < nrpages) {
   6.113          pg = alloc_domheap_pages(d, ext_order, 0);
   6.114          if (pg == NULL)
   6.115              return total_nrpages;
   6.116  
   6.117 -        if (add_extent(d, pg, ext_order) < 0) {
   6.118 -            free_domheap_pages(pg, ext_order);
   6.119 -            return total_nrpages;
   6.120 -        }
   6.121 +        /* Build p2m mapping for newly allocated extent. */
   6.122 +        mfn = page_to_mfn(pg);
   6.123 +        for (i = 0; i < (1 << ext_order); i++)
   6.124 +            guest_physmap_add_page(d, gpfn + i, mfn + i);
   6.125 +
   6.126 +        /* Bump starting PFN by extent size pages. */
   6.127 +        gpfn += ext_nrpages;
   6.128 +
   6.129          total_nrpages += ext_nrpages;
   6.130      }
   6.131  
   6.132 @@ -358,6 +355,7 @@ int allocate_rma(struct domain *d, unsig
   6.133      struct vcpu *v;
   6.134      ulong rma_base;
   6.135      ulong rma_sz;
   6.136 +    ulong mfn;
   6.137      int i;
   6.138  
   6.139      if (d->arch.rma_page)
   6.140 @@ -379,10 +377,14 @@ int allocate_rma(struct domain *d, unsig
   6.141      printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
   6.142             d->domain_id, rma_base, rma_sz);
   6.143  
   6.144 +    mfn = page_to_mfn(d->arch.rma_page);
   6.145 +
   6.146      for (i = 0; i < (1 << d->arch.rma_order); i++ ) {
   6.147 -        /* Add in any extra CPUs that need flushing because of this page. */
   6.148          d->arch.rma_page[i].count_info |= PGC_page_RMA;
   6.149          clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));
   6.150 +
   6.151 +        /* Set up p2m mapping for RMA. */
   6.152 +        guest_physmap_add_page(d, i, mfn+i);
   6.153      }
   6.154  
   6.155      /* shared_info uses last page of RMA */
   6.156 @@ -406,9 +408,6 @@ void free_rma_check(struct page_info *pa
   6.157  
   6.158  ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
   6.159  {
   6.160 -    ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
   6.161 -    ulong rma_size_mfn = 1UL << d->arch.rma_order;
   6.162 -    struct page_extents *pe;
   6.163      ulong mfn = INVALID_MFN;
   6.164      int t = PFN_TYPE_NONE;
   6.165      ulong foreign_map_pfn = 1UL << cpu_foreign_map_order();
   6.166 @@ -431,23 +430,9 @@ ulong pfn2mfn(struct domain *d, ulong pf
   6.167          t = PFN_TYPE_IO;
   6.168          mfn = pfn;
   6.169      } else {
   6.170 -        if (pfn < rma_size_mfn) {
   6.171 -            t = PFN_TYPE_RMA;
   6.172 -            mfn = pfn + rma_base_mfn;
   6.173 -        } else {
   6.174 -            ulong cur_pfn = rma_size_mfn;
   6.175 -
   6.176 -            list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
   6.177 -                uint pe_pages = 1UL << pe->order;
   6.178 -                uint end_pfn = cur_pfn + pe_pages;
   6.179 -
   6.180 -                if (pfn >= cur_pfn && pfn < end_pfn) {
   6.181 -                    t = PFN_TYPE_LOGICAL;
   6.182 -                    mfn = page_to_mfn(pe->pg) + (pfn - cur_pfn);
   6.183 -                    break;
   6.184 -                }
   6.185 -                cur_pfn += pe_pages;
   6.186 -            }
   6.187 +        if (pfn < d->arch.p2m_entries) {
   6.188 +            t = PFN_TYPE_LOGICAL;
   6.189 +            mfn = d->arch.p2m[pfn];
   6.190          }
   6.191  #ifdef DEBUG
   6.192          if (t != PFN_TYPE_NONE &&
   6.193 @@ -496,10 +481,12 @@ ulong pfn2mfn(struct domain *d, ulong pf
   6.194  
   6.195  unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn)
   6.196  {
   6.197 -    struct page_extents *pe;
   6.198 -    ulong cur_pfn;
   6.199 +    struct page_info *pg = mfn_to_page(mfn);
   6.200      ulong gnttab_mfn;
   6.201 -    ulong rma_mfn;
   6.202 +
   6.203 +    /* is this our mfn? */
   6.204 +    if (page_get_owner(pg) != d)
   6.205 +        return INVALID_M2P_ENTRY;
   6.206  
   6.207      /* XXX access d->grant_table->nr_grant_frames without lock.
   6.208       * Currently on powerpc dynamic expanding grant table is
   6.209 @@ -516,24 +503,8 @@ unsigned long mfn_to_gmfn(struct domain 
   6.210      if (d->is_privileged && platform_io_mfn(mfn))
   6.211          return mfn;
   6.212  
   6.213 -    rma_mfn = page_to_mfn(d->arch.rma_page);
   6.214 -    if (mfn >= rma_mfn &&
   6.215 -        mfn < (rma_mfn + (1 << d->arch.rma_order)))
   6.216 -        return mfn - rma_mfn;
   6.217 -
   6.218 -    /* Extent? */
   6.219 -    cur_pfn = 1UL << d->arch.rma_order;
   6.220 -    list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
   6.221 -        uint pe_pages = 1UL << pe->order;
   6.222 -        uint b_mfn = page_to_mfn(pe->pg);
   6.223 -        uint e_mfn = b_mfn + pe_pages;
   6.224 -
   6.225 -        if (mfn >= b_mfn && mfn < e_mfn) {
   6.226 -            return cur_pfn + (mfn - b_mfn);
   6.227 -        }
   6.228 -        cur_pfn += pe_pages;
   6.229 -    }
   6.230 -    return INVALID_M2P_ENTRY;
   6.231 +    /* check m2p table */
   6.232 +    return get_gpfn_from_mfn(mfn);
   6.233  }
   6.234  
   6.235  /* NB: caller holds d->page_alloc lock, sets d->max_pages = new_max */
   6.236 @@ -580,21 +551,53 @@ int guest_physmap_max_mem_pages(struct d
   6.237  void guest_physmap_add_page(
   6.238      struct domain *d, unsigned long gpfn, unsigned long mfn)
   6.239  {
   6.240 -    printk("%s(%d, 0x%lx, 0x%lx)\n", __func__, d->domain_id, gpfn, mfn);
   6.241 +    if (page_get_owner(mfn_to_page(mfn)) != d) {
   6.242 +        printk("Won't map foreign MFN 0x%lx for DOM%d\n", mfn, d->domain_id);
   6.243 +        return;
   6.244 +    }
   6.245 +
   6.246 +    /* Check that pfn is within guest table. */
   6.247 +    if (gpfn >= d->arch.p2m_entries) {
   6.248 +        printk("Won't map invalid PFN 0x%lx for DOM%d\n", gpfn, d->domain_id);
   6.249 +        return;
   6.250 +    }
   6.251 +
   6.252 +    /* Warn if there is an existing mapping. */
   6.253 +    /* XXX: probably shouldn't let this happen, but
   6.254 +       current interface doesn't throw errors.  =( */
   6.255 +    if (d->arch.p2m[gpfn] != INVALID_MFN)
   6.256 +        printk("Ack! PFN aliased. PFN=0x%lx, old MFN=0x%lx, new MFN=0x%lx\n",
   6.257 +                gpfn, d->arch.p2m[gpfn], mfn);
   6.258 +
   6.259 +    /* PFN and MFN ok, map in p2m table. */
   6.260 +    d->arch.p2m[gpfn] = mfn;
   6.261 +
   6.262 +    /* Map in m2p table. */
   6.263 +    set_gpfn_from_mfn(mfn, gpfn);
   6.264  }
   6.265 +
   6.266  void guest_physmap_remove_page(
   6.267      struct domain *d, unsigned long gpfn, unsigned long mfn)
   6.268  {
   6.269 -    panic("%s\n", __func__);
   6.270 +    if (page_get_owner(mfn_to_page(mfn)) != d) {
   6.271 +        printk("Won't unmap foreign MFN 0x%lx for DOM%d\n", mfn, d->domain_id);
   6.272 +        return;
   6.273 +    }
   6.274 +
   6.275 +    /* check that pfn is within guest table */
   6.276 +    if (gpfn >= d->arch.p2m_entries) {
   6.277 +        printk("Won't unmap invalid PFN 0x%lx for DOM%d\n", gpfn, d->domain_id);
   6.278 +        return;
   6.279 +    }
   6.280 +
   6.281 +    /* PFN and MFN ok, unmap from p2m table. */
   6.282 +    d->arch.p2m[gpfn] = INVALID_MFN;
   6.283 +
   6.284 +    /* Unmap from m2p table. */
   6.285 +    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
   6.286  }
   6.287 +
   6.288  void shadow_drop_references(
   6.289      struct domain *d, struct page_info *page)
   6.290  {
   6.291  }
   6.292 -
   6.293 -int arch_domain_add_extent(struct domain *d, struct page_info *page, int order)
   6.294 -{
   6.295 -    if (add_extent(d, page, order) < 0)
   6.296 -        return -ENOMEM;
   6.297 -    return 0;
   6.298 -}
     7.1 --- a/xen/arch/powerpc/ofd_fixup_memory.c	Fri Mar 02 17:07:01 2007 -0600
     7.2 +++ b/xen/arch/powerpc/ofd_fixup_memory.c	Fri Mar 02 17:07:59 2007 -0600
     7.3 @@ -13,14 +13,16 @@
     7.4   * along with this program; if not, write to the Free Software
     7.5   * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
     7.6   *
     7.7 - * Copyright (C) IBM Corp. 2006
     7.8 + * Copyright IBM Corp. 2006, 2007
     7.9   *
    7.10   * Authors: Jimi Xenidis <jimix@watson.ibm.com>
    7.11 + *          Ryan Harper <ryanh@us.ibm.com>
    7.12   */
    7.13  
    7.14  #include <xen/config.h>
    7.15  #include <xen/lib.h>
    7.16  #include <xen/sched.h>
    7.17 +#include <asm/platform.h>
    7.18  #include <public/xen.h>
    7.19  #include "of-devtree.h"
    7.20  #include "oftree.h"
    7.21 @@ -87,19 +89,34 @@ static void ofd_memory_extent_nodes(void
    7.22      ulong start;
    7.23      ulong size;
    7.24      ofdn_t n;
    7.25 -    struct page_extents *pe;
    7.26      ulong cur_pfn = 1UL << d->arch.rma_order;
    7.27  
    7.28 -    start = cur_pfn << PAGE_SHIFT;
    7.29 -    size = 0;
    7.30 -    list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
    7.31 +    /* if dom0 > 2G, shift ram past IO hole */
    7.32 +    if ((d->tot_pages << PAGE_SHIFT) > platform_iohole_base()) {
    7.33 +        /* memory@RMA up to IO hole */
    7.34 +        start = cur_pfn << PAGE_SHIFT;
    7.35 +        size = platform_iohole_base() - (cur_pfn << PAGE_SHIFT);
    7.36 +        n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
    7.37 +                                   start, size);
    7.38  
    7.39 -        size += 1UL << (pe->order + PAGE_SHIFT);
    7.40 -        if (pe->order != cpu_extent_order())
    7.41 -            panic("we don't handle this yet\n");
    7.42 +        BUG_ON(n <= 0);
    7.43 +
    7.44 +        /* XXX Our p2m translation currently doesn't allow dom0 memory above
    7.45 +         * the IO hole. */
    7.46 +#if 0
    7.47 +        /* remaining memory shifted up to memory@IOHOLE_END */
    7.48 +        start = platform_iohole_base()+platform_iohole_size();
    7.49 +        size = (d->tot_pages << PAGE_SHIFT) - platform_iohole_base();
    7.50 +        n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
    7.51 +                                   start, size);
    7.52 +#endif
    7.53 +    } else {
    7.54 +        /* we fit beneath the IO hole as one chunk */
    7.55 +        start = cur_pfn << PAGE_SHIFT;
    7.56 +        size = (d->tot_pages - cur_pfn) << PAGE_SHIFT;
    7.57 +        n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
    7.58 +                                   start, size);
    7.59      }
    7.60 -    n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
    7.61 -                               start, size);
    7.62      BUG_ON(n <= 0);
    7.63  }
    7.64  
     8.1 --- a/xen/include/asm-powerpc/domain.h	Fri Mar 02 17:07:01 2007 -0600
     8.2 +++ b/xen/include/asm-powerpc/domain.h	Fri Mar 02 17:07:59 2007 -0600
     8.3 @@ -38,9 +38,6 @@ struct arch_domain {
     8.4      struct page_info *rma_page;
     8.5      uint rma_order;
     8.6  
     8.7 -    /* list of extents beyond RMA */
     8.8 -    struct list_head extent_list;
     8.9 -
    8.10      uint foreign_mfn_count;
    8.11      uint *foreign_mfns;
    8.12  
     9.1 --- a/xen/include/asm-powerpc/mm.h	Fri Mar 02 17:07:01 2007 -0600
     9.2 +++ b/xen/include/asm-powerpc/mm.h	Fri Mar 02 17:07:59 2007 -0600
     9.3 @@ -81,15 +81,6 @@ struct page_info
     9.4  
     9.5  };
     9.6  
     9.7 -struct page_extents {
     9.8 -    /* Each frame can be threaded onto a doubly-linked list. */
     9.9 -    struct list_head pe_list;
    9.10 -
    9.11 -    /* page extent */
    9.12 -    struct page_info *pg;
    9.13 -    uint order;
    9.14 -};
    9.15 -
    9.16   /* The following page types are MUTUALLY EXCLUSIVE. */
    9.17  #define PGT_none            (0UL<<29) /* no special uses of this page */
    9.18  #define PGT_RMA             (1UL<<29) /* This page is an RMA page? */
    9.19 @@ -145,6 +136,7 @@ extern struct page_info *frame_table;
    9.20  extern unsigned long max_page;
    9.21  extern unsigned long total_pages;
    9.22  void init_frametable(void);
    9.23 +void init_machine_to_phys_table(void);
    9.24  void free_rma_check(struct page_info *page);
    9.25  
    9.26  static inline void put_page(struct page_info *page)
    9.27 @@ -226,14 +218,12 @@ typedef struct {
    9.28  } vm_assist_info_t;
    9.29  extern vm_assist_info_t vm_assist_info[];
    9.30  
    9.31 -
    9.32 -/* hope that accesses to this will fail spectacularly */
    9.33 -#undef machine_to_phys_mapping
    9.34 +extern unsigned long *machine_phys_mapping;
    9.35 +#define machine_to_phys_mapping  (machine_phys_mapping)
    9.36  #define INVALID_M2P_ENTRY        (~0UL)
    9.37  
    9.38 -/* do nothing, its all calculated */
    9.39 -#define set_gpfn_from_mfn(mfn, pfn) do { } while (0)
    9.40 -#define get_gpfn_from_mfn(mfn) (mfn)
    9.41 +#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
    9.42 +#define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
    9.43  
    9.44  extern unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn);
    9.45  
    9.46 @@ -243,7 +233,6 @@ extern unsigned long paddr_to_maddr(unsi
    9.47  #define INVALID_MFN (~0U)
    9.48  
    9.49  #define PFN_TYPE_NONE 0
    9.50 -#define PFN_TYPE_RMA 1
    9.51  #define PFN_TYPE_LOGICAL 2
    9.52  #define PFN_TYPE_IO 3
    9.53  #define PFN_TYPE_FOREIGN 4
    9.54 @@ -258,7 +247,6 @@ static inline unsigned long gmfn_to_mfn(
    9.55      mfn = pfn2mfn(d, gmfn, &mtype);
    9.56      if (mfn != INVALID_MFN) {
    9.57          switch (mtype) {
    9.58 -        case PFN_TYPE_RMA:
    9.59          case PFN_TYPE_LOGICAL:
    9.60              break;
    9.61          default:
    9.62 @@ -280,10 +268,6 @@ long arch_memory_op(int op, XEN_GUEST_HA
    9.63  
    9.64  extern int allocate_rma(struct domain *d, unsigned int order_pages);
    9.65  extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
    9.66 -extern void free_extents(struct domain *d);
    9.67 -
    9.68 -extern int arch_domain_add_extent(struct domain *d, struct page_info *page,
    9.69 -        int order);
    9.70  
    9.71  extern int steal_page(struct domain *d, struct page_info *page,
    9.72                          unsigned int memflags);