direct-io.hg

changeset 12942:79bb96e0ba73

[XEN][POWERPC] Create a Domain Foreign Map space
The following patch creates a Domain Foreign Map space that is used to
map granted memory into the Linear Map of the domain. The Linear Map
of Linux is the kernel virtual address space, where VA = PA +
PAGE_OFFSET.
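
As a rough illustration of that relation (a sketch only; the
PAGE_OFFSET value is illustrative, not taken from this patch):

    /* In the Linux linear map a kernel virtual address is just the
     * physical address plus a constant offset, so translation needs
     * no page-table walk. */
    #define PAGE_OFFSET 0xc000000000000000UL  /* illustrative value */

    static inline unsigned long pa_to_linear_va(unsigned long pa)
    {
        return pa + PAGE_OFFSET;   /* VA = PA + PAGE_OFFSET */
    }
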
Also:
- lots of grant_* interfaces work now
- mm.[ch] cleanups
- first pass at extracting Page Table operations from PAPR interfaces
- fix a logic bug in get_page_type()
- recognize a grant table mapping by placing its gmfn at the end of
real memory (see the sketch after this list)
- the grant table is usually mapped like an IO page, so force the WIMG
bit I=0
- mfn_to_gmfn and pfn2mfn have gotten WAY too complex; we need a
simpler model
- communicate the Domain Foreign Map to domains using /xen/foreign-map
- make sure all bit definitions are UL where possible
- now that we actually assign Xen heap pages to domains, they must be
relinquished
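
A minimal sketch of the pfn classification these changes give pfn2mfn
(simplified; classify_pfn is a hypothetical helper, and the RMA and
extent cases handled by the real code are omitted):

    static int classify_pfn(ulong pfn)
    {
        ulong foreign_map_pfn = 1UL << cpu_foreign_map_order();

        if (pfn & foreign_map_pfn)
            return PFN_TYPE_FOREIGN;  /* granted page in the foreign map */
        if (pfn >= max_page && pfn < max_page + NR_GRANT_FRAMES)
            return PFN_TYPE_GNTTAB;   /* grant table frame, placed at the
                                         end of real memory */
        return PFN_TYPE_LOGICAL;      /* ordinary RAM (simplified) */
    }

Grant table frames recognized this way are entered into the hash table
with the WIMG I bit forced to 0 in h_enter, since they live in regular
cacheable memory even though guests tend to map them like IO pages.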
Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Sun Oct 08 11:34:24 2006 -0400 (2006-10-08)
parents 067bf06057cc
children 96670c42df39
files xen/arch/powerpc/domain.c xen/arch/powerpc/mm.c xen/arch/powerpc/ofd_fixup.c xen/arch/powerpc/papr/xlate.c xen/include/asm-powerpc/grant_table.h xen/include/asm-powerpc/mm.h
line diff
     1.1 --- a/xen/arch/powerpc/domain.c	Sat Oct 07 16:25:46 2006 -0400
     1.2 +++ b/xen/arch/powerpc/domain.c	Sun Oct 08 11:34:24 2006 -0400
     1.3 @@ -94,6 +94,7 @@ int arch_domain_create(struct domain *d)
     1.4  void arch_domain_destroy(struct domain *d)
     1.5  {
     1.6      shadow_teardown(d);
     1.7 +    /* shared_info is part of the RMA so no need to release it */
     1.8  }
     1.9  
    1.10  static void machine_fail(const char *s)
    1.11 @@ -290,6 +291,7 @@ static void relinquish_memory(struct dom
    1.12  
    1.13  void domain_relinquish_resources(struct domain *d)
    1.14  {
    1.15 +    relinquish_memory(d, &d->xenpage_list);
    1.16      relinquish_memory(d, &d->page_list);
    1.17      free_extents(d);
    1.18      return;
     2.1 --- a/xen/arch/powerpc/mm.c	Sat Oct 07 16:25:46 2006 -0400
     2.2 +++ b/xen/arch/powerpc/mm.c	Sun Oct 08 11:34:24 2006 -0400
     2.3 @@ -41,18 +41,107 @@ struct page_info *frame_table;
     2.4  unsigned long max_page;
     2.5  unsigned long total_pages;
     2.6  
     2.7 +void __init init_frametable(void)
     2.8 +{
     2.9 +    unsigned long p;
    2.10 +    unsigned long nr_pages;
    2.11 +    int i;
    2.12 +
    2.13 +    nr_pages = PFN_UP(max_page * sizeof(struct page_info));
    2.14 +
    2.15 +    p = alloc_boot_pages(nr_pages, 1);
    2.16 +    if (p == 0)
    2.17 +        panic("Not enough memory for frame table\n");
    2.18 +
    2.19 +    frame_table = (struct page_info *)(p << PAGE_SHIFT);
    2.20 +    for (i = 0; i < nr_pages; i += 1)
    2.21 +        clear_page((void *)((p + i) << PAGE_SHIFT));
    2.22 +}
    2.23 +
    2.24 +void share_xen_page_with_guest(
    2.25 +    struct page_info *page, struct domain *d, int readonly)
    2.26 +{
    2.27 +    if ( page_get_owner(page) == d )
    2.28 +        return;
    2.29 +
     2.30 +    /* this causes us to leak pages in the Domain and results in
     2.31 +     * Zombie domains; I think we are missing a piece, and until we
     2.32 +     * find it we disable the following code */
    2.33 +    set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
    2.34 +
    2.35 +    spin_lock(&d->page_alloc_lock);
    2.36 +
    2.37 +    /* The incremented type count pins as writable or read-only. */
    2.38 +    page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
    2.39 +    page->u.inuse.type_info |= PGT_validated | 1;
    2.40 +
    2.41 +    page_set_owner(page, d);
    2.42 +    wmb(); /* install valid domain ptr before updating refcnt. */
    2.43 +    ASSERT(page->count_info == 0);
    2.44 +    page->count_info |= PGC_allocated | 1;
    2.45 +
    2.46 +    if ( unlikely(d->xenheap_pages++ == 0) )
    2.47 +        get_knownalive_domain(d);
    2.48 +    list_add_tail(&page->list, &d->xenpage_list);
    2.49 +
    2.50 +    spin_unlock(&d->page_alloc_lock);
    2.51 +}
    2.52 +
    2.53 +void share_xen_page_with_privileged_guests(
    2.54 +    struct page_info *page, int readonly)
    2.55 +{
     2.56 +    unimplemented();
    2.57 +}
    2.58 +
    2.59 +static int create_grant_va_mapping(
    2.60 +    unsigned long va, unsigned long frame, struct vcpu *v)
    2.61 +{
    2.62 +    if (v->domain->domain_id != 0) {
    2.63 +        printk("only Dom0 can map a grant entry\n");
    2.64 +        BUG();
    2.65 +        return GNTST_permission_denied;
    2.66 +    }
    2.67 +    return GNTST_okay;
    2.68 +}
    2.69 +
    2.70 +static int destroy_grant_va_mapping(
    2.71 +    unsigned long addr, unsigned long frame, struct domain *d)
    2.72 +{
    2.73 +    if (d->domain_id != 0) {
     2.74 +        printk("only Dom0 can unmap a grant entry\n");
    2.75 +        BUG();
    2.76 +        return GNTST_permission_denied;
    2.77 +    }
    2.78 +    return GNTST_okay;
    2.79 +}
    2.80 +
    2.81  int create_grant_host_mapping(
    2.82      unsigned long addr, unsigned long frame, unsigned int flags)
    2.83  {
    2.84 -    panic("%s called\n", __func__);
    2.85 -    return 1;
    2.86 +    if (flags & GNTMAP_application_map) {
    2.87 +        printk("%s: GNTMAP_application_map not supported\n", __func__);
    2.88 +        BUG();
    2.89 +        return GNTST_general_error;
    2.90 +    }
    2.91 +    if (flags & GNTMAP_contains_pte) {
    2.92 +        printk("%s: GNTMAP_contains_pte not supported\n", __func__);
    2.93 +        BUG();
    2.94 +        return GNTST_general_error;
    2.95 +    }
    2.96 +    return create_grant_va_mapping(addr, frame, current);
    2.97  }
    2.98  
    2.99  int destroy_grant_host_mapping(
   2.100      unsigned long addr, unsigned long frame, unsigned int flags)
   2.101  {
   2.102 -    panic("%s called\n", __func__);
   2.103 -    return 1;
   2.104 +    if (flags & GNTMAP_contains_pte) {
   2.105 +        printk("%s: GNTMAP_contains_pte not supported\n", __func__);
   2.106 +        BUG();
   2.107 +        return GNTST_general_error;
   2.108 +    }
   2.109 +
    2.110 +    /* may have to force the remove here */
   2.111 +    return destroy_grant_va_mapping(addr, frame, current->domain);
   2.112  }
   2.113  
   2.114  int steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
   2.115 @@ -138,7 +227,7 @@ int get_page_type(struct page_info *page
   2.116          {
   2.117              return 0;
   2.118          }
   2.119 -        if ( unlikely(!(x & PGT_validated)) )
   2.120 +        else if ( unlikely(!(x & PGT_validated)) )
   2.121          {
   2.122              /* Someone else is updating validation of this page. Wait... */
   2.123              while ( (y = page->u.inuse.type_info) == x )
   2.124 @@ -157,23 +246,6 @@ int get_page_type(struct page_info *page
   2.125      return 1;
   2.126  }
   2.127  
   2.128 -void __init init_frametable(void)
   2.129 -{
   2.130 -    unsigned long p;
   2.131 -    unsigned long nr_pages;
   2.132 -    int i;
   2.133 -
   2.134 -    nr_pages = PFN_UP(max_page * sizeof(struct page_info));
   2.135 -
   2.136 -    p = alloc_boot_pages(nr_pages, 1);
   2.137 -    if (p == 0)
   2.138 -        panic("Not enough memory for frame table\n");
   2.139 -
   2.140 -    frame_table = (struct page_info *)(p << PAGE_SHIFT);
   2.141 -    for (i = 0; i < nr_pages; i += 1)
   2.142 -        clear_page((void *)((p + i) << PAGE_SHIFT));
   2.143 -}
   2.144 -
   2.145  long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
   2.146  {
   2.147      printk("%s: no PPC specific memory ops\n", __func__);
   2.148 @@ -311,9 +383,18 @@ ulong pfn2mfn(struct domain *d, ulong pf
   2.149      struct page_extents *pe;
   2.150      ulong mfn = INVALID_MFN;
   2.151      int t = PFN_TYPE_NONE;
   2.152 +    ulong foreign_map_pfn = 1UL << cpu_foreign_map_order();
   2.153  
   2.154      /* quick tests first */
   2.155 -    if (d->is_privileged && cpu_io_mfn(pfn)) {
   2.156 +    if (pfn & foreign_map_pfn) {
   2.157 +        t = PFN_TYPE_FOREIGN;
   2.158 +        mfn = pfn & ~(foreign_map_pfn);
   2.159 +    } else if (pfn >= max_page && pfn < (max_page + NR_GRANT_FRAMES)) {
    2.160 +        /* It's a grant table access */
   2.161 +        t = PFN_TYPE_GNTTAB;
   2.162 +        mfn = gnttab_shared_mfn(d, d->grant_table, (pfn - max_page));
   2.163 +    } else if (test_bit(_DOMF_privileged, &d->domain_flags) &&
   2.164 +               cpu_io_mfn(pfn)) {
   2.165          t = PFN_TYPE_IO;
   2.166          mfn = pfn;
   2.167      } else {
   2.168 @@ -365,6 +446,43 @@ ulong pfn2mfn(struct domain *d, ulong pf
   2.169      return mfn;
   2.170  }
   2.171  
   2.172 +unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn)
   2.173 +{
   2.174 +    struct page_extents *pe;
   2.175 +    ulong cur_pfn;
   2.176 +    ulong gnttab_mfn;
   2.177 +    ulong rma_mfn;
   2.178 +
   2.179 +    /* grant? */
   2.180 +    gnttab_mfn = gnttab_shared_mfn(d, d->grant_table, 0);
   2.181 +    if (mfn >= gnttab_mfn && mfn < (gnttab_mfn + NR_GRANT_FRAMES))
   2.182 +        return max_page + (mfn - gnttab_mfn);
   2.183 +
   2.184 +    /* IO? */
   2.185 +    if (test_bit(_DOMF_privileged, &d->domain_flags) &&
   2.186 +        cpu_io_mfn(mfn))
   2.187 +        return mfn;
   2.188 +
   2.189 +    rma_mfn = page_to_mfn(d->arch.rma_page);
   2.190 +    if (mfn >= rma_mfn &&
   2.191 +        mfn < (rma_mfn + (1 << d->arch.rma_order)))
   2.192 +        return mfn - rma_mfn;
   2.193 +
   2.194 +    /* Extent? */
   2.195 +    cur_pfn = 1UL << d->arch.rma_order;
   2.196 +    list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
   2.197 +        uint pe_pages = 1UL << pe->order;
   2.198 +        uint b_mfn = page_to_mfn(pe->pg);
   2.199 +        uint e_mfn = b_mfn + pe_pages;
   2.200 +
   2.201 +        if (mfn >= b_mfn && mfn < e_mfn) {
   2.202 +            return cur_pfn + (mfn - b_mfn);
   2.203 +        }
   2.204 +        cur_pfn += pe_pages;
   2.205 +    }
   2.206 +    return INVALID_M2P_ENTRY;
   2.207 +}
   2.208 +
   2.209  void guest_physmap_add_page(
   2.210      struct domain *d, unsigned long gpfn, unsigned long mfn)
   2.211  {
     3.1 --- a/xen/arch/powerpc/ofd_fixup.c	Sat Oct 07 16:25:46 2006 -0400
     3.2 +++ b/xen/arch/powerpc/ofd_fixup.c	Sun Oct 08 11:34:24 2006 -0400
     3.3 @@ -352,6 +352,11 @@ static ofdn_t ofd_xen_props(void *m, str
     3.4          if (!rtas_entry)
     3.5              ofd_prop_add(m, n, "power-control", NULL, 0);
     3.6  
      3.7 +        /* tell dom0 where granted pages go in the linear map */
     3.8 +        val[0] = cpu_foreign_map_order();
     3.9 +        val[1] = max_page;
    3.10 +        ofd_prop_add(m, n, "foreign-map", val, sizeof (val));
    3.11 +
    3.12          n = ofd_node_add(m, n, console, sizeof (console));
    3.13          if (n > 0) {
    3.14              val[0] = 0;
     4.1 --- a/xen/arch/powerpc/papr/xlate.c	Sat Oct 07 16:25:46 2006 -0400
     4.2 +++ b/xen/arch/powerpc/papr/xlate.c	Sun Oct 08 11:34:24 2006 -0400
     4.3 @@ -117,11 +117,8 @@ static void pte_tlbie(union pte volatile
     4.4  
     4.5  }
     4.6  
     4.7 -static void h_enter(struct cpu_user_regs *regs)
     4.8 +long pte_enter(ulong flags, ulong ptex, ulong vsid, ulong rpn)
     4.9  {
    4.10 -    ulong flags = regs->gprs[4];
    4.11 -    ulong ptex = regs->gprs[5];
    4.12 -
    4.13      union pte pte;
    4.14      union pte volatile *ppte;
    4.15      struct domain_htab *htab;
    4.16 @@ -141,13 +138,12 @@ static void h_enter(struct cpu_user_regs
    4.17      htab = &d->arch.htab;
    4.18      if (ptex > (1UL << htab->log_num_ptes)) {
    4.19          DBG("%s: bad ptex: 0x%lx\n", __func__, ptex);
    4.20 -        regs->gprs[3] = H_Parameter;
    4.21 -        return;
    4.22 +        return H_Parameter;
    4.23      }
    4.24  
    4.25      /* use local HPTE to avoid manual shifting & masking */
    4.26 -    pte.words.vsid = regs->gprs[6];
    4.27 -    pte.words.rpn = regs->gprs[7];
    4.28 +    pte.words.vsid = vsid;
    4.29 +    pte.words.rpn = rpn;
    4.30  
    4.31      if ( pte.bits.l ) {        /* large page? */
    4.32          /* figure out the page size for the selected large page */
    4.33 @@ -163,8 +159,7 @@ static void h_enter(struct cpu_user_regs
    4.34          if ( lp_size >= d->arch.large_page_sizes ) {
    4.35              DBG("%s: attempt to use unsupported lp_size %d\n",
    4.36                  __func__, lp_size);
    4.37 -            regs->gprs[3] = H_Parameter;
    4.38 -            return;
    4.39 +            return H_Parameter;
    4.40          }
    4.41  
    4.42          /* get correct pgshift value */
    4.43 @@ -180,19 +175,16 @@ static void h_enter(struct cpu_user_regs
    4.44      mfn = pfn2mfn(d, pfn, &mtype);
    4.45      if (mfn == INVALID_MFN) {
    4.46          DBG("%s: Bad PFN: 0x%lx\n", __func__, pfn);
    4.47 -        regs->gprs[3] =  H_Parameter;
    4.48 -        return;
    4.49 +        return H_Parameter;
    4.50      }
    4.51  
    4.52 -    if (mtype == PFN_TYPE_IO) {
     4.53 +    if (mtype == PFN_TYPE_IO && !test_bit(_DOMF_privileged, &d->domain_flags)) {
     4.54          /* only a privileged dom can access outside IO space */
    4.55 -        if ( !d->is_privileged ) {
    4.56 -            DBG("%s: unprivileged access to physical page: 0x%lx\n",
    4.57 -                __func__, pfn);
    4.58 -            regs->gprs[3] =  H_Privilege;
    4.59 -            return;
    4.60 -        }
    4.61 -
    4.62 +        DBG("%s: unprivileged access to physical page: 0x%lx\n",
    4.63 +            __func__, pfn);
    4.64 +        return H_Privilege;
    4.65 +    }
    4.66 +    if (mtype == PFN_TYPE_IO) {
    4.67          if ( !((pte.bits.w == 0)
    4.68               && (pte.bits.i == 1)
    4.69               && (pte.bits.g == 1)) ) {
    4.70 @@ -200,10 +192,14 @@ static void h_enter(struct cpu_user_regs
    4.71                  "w=%x i=%d m=%d, g=%d\n word 0x%lx\n", __func__,
    4.72                  pte.bits.w, pte.bits.i, pte.bits.m, pte.bits.g,
    4.73                  pte.words.rpn);
    4.74 -            regs->gprs[3] =  H_Parameter;
    4.75 -            return;
    4.76 +            return H_Parameter;
    4.77          }
    4.78      }
    4.79 +    if (mtype == PFN_TYPE_GNTTAB) {
    4.80 +        DBG("%s: Dom[%d] mapping grant table: 0x%lx\n",
    4.81 +            __func__, d->domain_id, pfn << PAGE_SHIFT);
    4.82 +        pte.bits.i = 0;
    4.83 +    }
    4.84      /* fixup the RPN field of our local PTE copy */
    4.85      pte.bits.rpn = mfn | lp_bits;
    4.86  
    4.87 @@ -224,14 +220,12 @@ static void h_enter(struct cpu_user_regs
    4.88  
    4.89          if (unlikely(!get_domain(f))) {
    4.90              DBG("%s: Rescinded, no domain: 0x%lx\n",  __func__, pfn);
    4.91 -            regs->gprs[3] = H_Rescinded;
    4.92 -            return;
    4.93 +            return H_Rescinded;
    4.94          }
    4.95          if (unlikely(!get_page(pg, f))) {
    4.96              put_domain(f);
    4.97              DBG("%s: Rescinded, no page: 0x%lx\n",  __func__, pfn);
    4.98 -            regs->gprs[3] = H_Rescinded;
    4.99 -            return;
   4.100 +            return H_Rescinded;
   4.101          }
   4.102      }
   4.103  
   4.104 @@ -288,10 +282,7 @@ static void h_enter(struct cpu_user_regs
   4.105                  : "b" (ppte), "r" (pte.words.rpn), "r" (pte.words.vsid)
   4.106                  : "memory");
   4.107  
   4.108 -            regs->gprs[3] = H_Success;
   4.109 -            regs->gprs[4] = idx;
   4.110 -
   4.111 -            return;
   4.112 +            return idx;
   4.113          }
   4.114      }
   4.115  
   4.116 @@ -304,7 +295,24 @@ static void h_enter(struct cpu_user_regs
   4.117      if (f != NULL)
   4.118          put_domain(f);
   4.119  
   4.120 -    regs->gprs[3] = H_PTEG_Full;
   4.121 +    return H_PTEG_Full;
   4.122 +}
   4.123 +
   4.124 +static void h_enter(struct cpu_user_regs *regs)
   4.125 +{
   4.126 +    ulong flags = regs->gprs[4];
   4.127 +    ulong ptex = regs->gprs[5];
   4.128 +    ulong vsid = regs->gprs[6];
   4.129 +    ulong rpn = regs->gprs[7];
   4.130 +    long ret;
   4.131 +
   4.132 +    ret = pte_enter(flags, ptex, vsid, rpn);
   4.133 +
   4.134 +    if (ret >= 0) {
   4.135 +        regs->gprs[3] = H_Success;
   4.136 +        regs->gprs[4] = ret;
   4.137 +    } else
   4.138 +        regs->gprs[3] = ret;
   4.139  }
   4.140  
   4.141  static void h_protect(struct cpu_user_regs *regs)
   4.142 @@ -332,7 +340,7 @@ static void h_protect(struct cpu_user_re
   4.143  
   4.144      /* the AVPN param occupies the bit-space of the word */
   4.145      if ( (flags & H_AVPN) && lpte.bits.avpn != avpn >> 7 ) {
   4.146 -        DBG("%s: %p: AVPN check failed: 0x%lx, 0x%lx\n", __func__,
   4.147 +        DBG_LOW("%s: %p: AVPN check failed: 0x%lx, 0x%lx\n", __func__,
   4.148              ppte, lpte.words.vsid, lpte.words.rpn);
   4.149          regs->gprs[3] = H_Not_Found;
   4.150          return;
   4.151 @@ -469,11 +477,8 @@ static void h_clear_mod(struct cpu_user_
   4.152      }
   4.153  }
   4.154  
   4.155 -static void h_remove(struct cpu_user_regs *regs)
   4.156 +long pte_remove(ulong flags, ulong ptex, ulong avpn, ulong *hi, ulong *lo)
   4.157  {
   4.158 -    ulong flags = regs->gprs[4];
   4.159 -    ulong ptex = regs->gprs[5];
   4.160 -    ulong avpn = regs->gprs[6];
   4.161      struct vcpu *v = get_current();
   4.162      struct domain *d = v->domain;
   4.163      struct domain_htab *htab = &d->arch.htab;
   4.164 @@ -485,29 +490,25 @@ static void h_remove(struct cpu_user_reg
   4.165  
   4.166      if ( ptex > (1UL << htab->log_num_ptes) ) {
   4.167          DBG("%s: bad ptex: 0x%lx\n", __func__, ptex);
   4.168 -        regs->gprs[3] = H_Parameter;
   4.169 -        return;
   4.170 +        return H_Parameter;
   4.171      }
   4.172      pte = &htab->map[ptex];
   4.173      lpte.words.vsid = pte->words.vsid;
   4.174      lpte.words.rpn = pte->words.rpn;
   4.175  
   4.176      if ((flags & H_AVPN) && lpte.bits.avpn != (avpn >> 7)) {
   4.177 -        DBG("%s: avpn doesn not match\n", __func__);
   4.178 -        regs->gprs[3] = H_Not_Found;
   4.179 -        return;
   4.180 +        DBG_LOW("%s: AVPN does not match\n", __func__);
   4.181 +        return H_Not_Found;
   4.182      }
   4.183  
   4.184      if ((flags & H_ANDCOND) && ((avpn & pte->words.vsid) != 0)) {
   4.185          DBG("%s: andcond does not match\n", __func__);
   4.186 -        regs->gprs[3] = H_Not_Found;
   4.187 -        return;
   4.188 +        return H_Not_Found;
   4.189      }
   4.190  
   4.191 -    regs->gprs[3] = H_Success;
   4.192      /* return old PTE in regs 4 and 5 */
   4.193 -    regs->gprs[4] = lpte.words.vsid;
   4.194 -    regs->gprs[5] = lpte.words.rpn;
   4.195 +    *hi = lpte.words.vsid;
   4.196 +    *lo = lpte.words.rpn;
   4.197  
   4.198  #ifdef DEBUG_LOW
   4.199      /* XXX - I'm very skeptical of doing ANYTHING if not bits.v */
   4.200 @@ -522,7 +523,7 @@ static void h_remove(struct cpu_user_reg
   4.201          if (!cpu_io_mfn(mfn)) {
   4.202              struct page_info *pg = mfn_to_page(mfn);
   4.203              struct domain *f = page_get_owner(pg);
   4.204 -
   4.205 +            
   4.206              if (f != d) {
   4.207                  put_domain(f);
   4.208                  put_page(pg);
   4.209 @@ -536,6 +537,27 @@ static void h_remove(struct cpu_user_reg
   4.210              : "memory");
   4.211  
   4.212      pte_tlbie(&lpte, ptex);
   4.213 +
   4.214 +    return H_Success;
   4.215 +}
   4.216 +
   4.217 +static void h_remove(struct cpu_user_regs *regs)
   4.218 +{
   4.219 +    ulong flags = regs->gprs[4];
   4.220 +    ulong ptex = regs->gprs[5];
   4.221 +    ulong avpn = regs->gprs[6];
   4.222 +    ulong hi, lo;
   4.223 +    long ret;
   4.224 +
   4.225 +    ret = pte_remove(flags, ptex, avpn, &hi, &lo);
   4.226 +
   4.227 +    regs->gprs[3] = ret;
   4.228 +
   4.229 +    if (ret == H_Success) {
   4.230 +        regs->gprs[4] = hi;
   4.231 +        regs->gprs[5] = lo;
   4.232 +    }
   4.233 +    return;
   4.234  }
   4.235  
   4.236  static void h_read(struct cpu_user_regs *regs)
     5.1 --- a/xen/include/asm-powerpc/grant_table.h	Sat Oct 07 16:25:46 2006 -0400
     5.2 +++ b/xen/include/asm-powerpc/grant_table.h	Sun Oct 08 11:34:24 2006 -0400
     5.3 @@ -29,6 +29,10 @@
     5.4   * Caller must own caller's BIGLOCK, is responsible for flushing the TLB, and
     5.5   * must hold a reference to the page.
     5.6   */
     5.7 +extern long pte_enter(ulong flags, ulong ptex, ulong vsid, ulong rpn);
     5.8 +extern long pte_remove(ulong flags, ulong ptex, ulong avpn,
     5.9 +                       ulong *hi, ulong *lo);
    5.10 +
    5.11  int create_grant_host_mapping(
    5.12      unsigned long addr, unsigned long frame, unsigned int flags);
    5.13  int destroy_grant_host_mapping(
    5.14 @@ -41,8 +45,7 @@ int destroy_grant_host_mapping(
    5.15              (d), XENSHARE_writable);                                     \
    5.16      } while ( 0 )
    5.17  
    5.18 -#define gnttab_shared_mfn(d, t, i)                      \
    5.19 -    ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i))
    5.20 +#define gnttab_shared_mfn(d, t, i) (((ulong)((t)->shared) >> PAGE_SHIFT) + (i))
    5.21  
    5.22  #define gnttab_shared_gmfn(d, t, i)                     \
    5.23      (mfn_to_gmfn(d, gnttab_shared_mfn(d, t, i)))
    5.24 @@ -61,4 +64,13 @@ static inline void gnttab_clear_flag(uns
    5.25      clear_bit(lnr, laddr);
    5.26  }
    5.27  
    5.28 +static inline uint cpu_foreign_map_order(void)
    5.29 +{
    5.30 +    /* 16 GiB */
    5.31 +    return 34 - PAGE_SHIFT;
    5.32 +}
    5.33 +
    5.34 +#define GNTTAB_DEV_BUS(f) \
    5.35 +    ((f) | (1UL << (cpu_foreign_map_order() + PAGE_SHIFT)))
    5.36 +
    5.37  #endif  /* __ASM_PPC_GRANT_TABLE_H__ */
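
(An illustrative aside, not part of the changeset: with the usual 4 KiB
pages PAGE_SHIFT is 12, so cpu_foreign_map_order() above returns
34 - 12 = 22, and a foreign pfn is one with bit 22 set:

    foreign_map_pfn  = 1UL << 22              /* the bit pfn2mfn tests */
    foreign_map_base = foreign_map_pfn << 12  /* = 1UL << 34 = 16 GiB */

which is the 16 GiB boundary noted in the comment, and the same bit 34
that GNTTAB_DEV_BUS() sets in a bus address.)
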
     6.1 --- a/xen/include/asm-powerpc/mm.h	Sat Oct 07 16:25:46 2006 -0400
     6.2 +++ b/xen/include/asm-powerpc/mm.h	Sun Oct 08 11:34:24 2006 -0400
     6.3 @@ -13,9 +13,10 @@
     6.4   * along with this program; if not, write to the Free Software
     6.5   * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
     6.6   *
     6.7 - * Copyright (C) IBM Corp. 2005
     6.8 + * Copyright (C) IBM Corp. 2005, 2006
     6.9   *
    6.10   * Authors: Hollis Blanchard <hollisb@us.ibm.com>
    6.11 + *          Jimi Xenidis <jimix@watson.ibm.com>
    6.12   */
    6.13  
    6.14  #ifndef _ASM_MM_H_
    6.15 @@ -90,35 +91,35 @@ struct page_extents {
    6.16  };
    6.17  
    6.18   /* The following page types are MUTUALLY EXCLUSIVE. */
    6.19 -#define PGT_none            (0<<29) /* no special uses of this page */
    6.20 -#define PGT_RMA             (1<<29) /* This page is an RMA page? */
    6.21 -#define PGT_writable_page   (7<<29) /* has writable mappings of this page? */
    6.22 -#define PGT_type_mask       (7<<29) /* Bits 29-31. */
    6.23 +#define PGT_none            (0UL<<29) /* no special uses of this page */
    6.24 +#define PGT_RMA             (1UL<<29) /* This page is an RMA page? */
    6.25 +#define PGT_writable_page   (7UL<<29) /* has writable mappings of this page? */
    6.26 +#define PGT_type_mask       (7UL<<29) /* Bits 29-31. */
    6.27  
    6.28   /* Owning guest has pinned this page to its current type? */
    6.29  #define _PGT_pinned         28
    6.30 -#define PGT_pinned          (1U<<_PGT_pinned)
    6.31 +#define PGT_pinned          (1UL<<_PGT_pinned)
    6.32   /* Has this page been validated for use as its current type? */
    6.33  #define _PGT_validated      27
    6.34 -#define PGT_validated       (1U<<_PGT_validated)
    6.35 +#define PGT_validated       (1UL<<_PGT_validated)
    6.36  
    6.37   /* 16-bit count of uses of this frame as its current type. */
    6.38 -#define PGT_count_mask      ((1U<<16)-1)
    6.39 +#define PGT_count_mask      ((1UL<<16)-1)
    6.40  
    6.41   /* Cleared when the owning guest 'frees' this page. */
    6.42  #define _PGC_allocated      31
    6.43 -#define PGC_allocated       (1U<<_PGC_allocated)
    6.44 +#define PGC_allocated       (1UL<<_PGC_allocated)
    6.45   /* Set on a *guest* page to mark it out-of-sync with its shadow */
    6.46  #define _PGC_out_of_sync     30
    6.47 -#define PGC_out_of_sync     (1U<<_PGC_out_of_sync)
    6.48 +#define PGC_out_of_sync     (1UL<<_PGC_out_of_sync)
    6.49   /* Set when is using a page as a page table */
    6.50  #define _PGC_page_table      29
    6.51 -#define PGC_page_table      (1U<<_PGC_page_table)
    6.52 +#define PGC_page_table      (1UL<<_PGC_page_table)
    6.53  /* Set when using page for RMA */
    6.54  #define _PGC_page_RMA      28
    6.55 -#define PGC_page_RMA      (1U<<_PGC_page_RMA)
    6.56 +#define PGC_page_RMA      (1UL<<_PGC_page_RMA)
    6.57   /* 29-bit count of references to this frame. */
    6.58 -#define PGC_count_mask      ((1U<<28)-1)
    6.59 +#define PGC_count_mask      ((1UL<<28)-1)
    6.60  
    6.61  #define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
    6.62  
    6.63 @@ -133,6 +134,13 @@ static inline u32 pickle_domptr(struct d
    6.64  #define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
    6.65  #define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
    6.66  
    6.67 +#define XENSHARE_writable 0
    6.68 +#define XENSHARE_readonly 1
    6.69 +extern void share_xen_page_with_guest(
    6.70 +    struct page_info *page, struct domain *d, int readonly);
    6.71 +extern void share_xen_page_with_privileged_guests(
    6.72 +    struct page_info *page, int readonly);
    6.73 +
    6.74  extern struct page_info *frame_table;
    6.75  extern unsigned long max_page;
    6.76  extern unsigned long total_pages;
    6.77 @@ -218,11 +226,47 @@ typedef struct {
    6.78  } vm_assist_info_t;
    6.79  extern vm_assist_info_t vm_assist_info[];
    6.80  
    6.81 -#define share_xen_page_with_guest(p, d, r) do { } while (0)
    6.82 -#define share_xen_page_with_privileged_guests(p, r) do { } while (0)
    6.83  
    6.84  /* hope that accesses to this will fail spectacularly */
    6.85 -#define machine_to_phys_mapping ((u32 *)-1UL)
    6.86 +#undef machine_to_phys_mapping
    6.87 +#define INVALID_M2P_ENTRY        (~0UL)
    6.88 +
     6.89 +/* do nothing, it's all calculated */
    6.90 +#define set_gpfn_from_mfn(mfn, pfn) do { } while (0)
    6.91 +#define get_gpfn_from_mfn(mfn) (mfn)
    6.92 +
    6.93 +extern unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn);
    6.94 +
    6.95 +extern unsigned long paddr_to_maddr(unsigned long paddr);
    6.96 +
    6.97 +#define INVALID_MFN (~0UL)
    6.98 +#define PFN_TYPE_NONE 0
    6.99 +#define PFN_TYPE_RMA 1
   6.100 +#define PFN_TYPE_LOGICAL 2
   6.101 +#define PFN_TYPE_IO 3
   6.102 +#define PFN_TYPE_FOREIGN 4
   6.103 +#define PFN_TYPE_GNTTAB 5
   6.104 +
   6.105 +extern ulong pfn2mfn(struct domain *d, ulong pfn, int *type);
   6.106 +static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gmfn)
   6.107 +{
   6.108 +    int mtype;
   6.109 +    ulong mfn;
   6.110 +    
   6.111 +    mfn = pfn2mfn(d, gmfn, &mtype);
   6.112 +    if (mfn != INVALID_MFN) {
   6.113 +        switch (mtype) {
   6.114 +        case PFN_TYPE_RMA:
   6.115 +        case PFN_TYPE_LOGICAL:
   6.116 +            break;
   6.117 +        default:
   6.118 +            WARN();
   6.119 +            mfn = INVALID_MFN;
   6.120 +            break;
   6.121 +        }
   6.122 +    }
   6.123 +    return mfn;
   6.124 +}
   6.125  
   6.126  extern int update_grant_va_mapping(unsigned long va,
   6.127                                     unsigned long val,