ia64/xen-unstable

changeset 10000:4fc1110f09c9

[IA64] xen: balloon driver support

Preliminary changes for balloon driver support on xen/ia64.
This temporarily breaks domain destruction; it will be fixed later.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Fri May 12 08:47:07 2006 -0600 (2006-05-12)
parents 7bba3c5af9a8
children 8c0d89f8d0f7
files xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/xen/domain.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/grant_table.h xen/include/asm-ia64/mm.h
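
In outline: before this change, P2M entries were installed without taking a
reference on the backing page, so nothing pinned a mapped frame while the
guest used it. A balloon driver must be able to hand pages back to Xen and
repopulate them later, so every P2M mapping now takes a reference with
get_page() and every unmapping drops it (and clears PGC_allocated) via
put_page(). A minimal sketch of the resulting pairing, using the functions
touched below (the balloon_* wrapper names are hypothetical, not part of this
patch; locking and error handling are omitted):

    /* sketch only -- wrapper names are illustrative */
    static void balloon_out_one(struct domain *d, unsigned long gpfn,
                                unsigned long mfn)
    {
        /* zap_domain_page_one(d, gpfn << PAGE_SHIFT, 1): clears the pte,
         * flushes the tlb, clears PGC_allocated and put_page()s, so the
         * frame can really be returned to the heap. */
        guest_physmap_remove_page(d, gpfn, mfn);
    }

    static void balloon_in_one(struct domain *d, unsigned long gpfn,
                               unsigned long mfn)
    {
        /* get_page() is taken inside, then the pte and m2p entry are set */
        guest_physmap_add_page(d, gpfn, mfn);
    }
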
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Fri May 12 08:27:51 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Fri May 12 08:47:07 2006 -0600
     1.3 @@ -346,7 +346,7 @@ int vmx_build_physmap_table(struct domai
     1.4  	    for (j = io_ranges[i].start;
     1.5  		 j < io_ranges[i].start + io_ranges[i].size;
     1.6  		 j += PAGE_SIZE)
     1.7 -		assign_domain_page(d, j, io_ranges[i].type);
     1.8 +		__assign_domain_page(d, j, io_ranges[i].type);
     1.9  	}
    1.10  
    1.11  	/* Map normal memory below 3G */
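
Note on this hunk: the I/O ranges mapped here are not backed by real
struct page_info frames, so they must not be reference-counted. The patch
therefore splits the old assign_domain_page() in two (see the domain.c
section below): __assign_domain_page() just installs the pte, while
assign_domain_page() additionally pins the backing page. A sketch of the
distinction (simplified from the hunks below):

    /* I/O pseudo-page: raw pte install, no reference taken */
    __assign_domain_page(d, mpaddr, physaddr);

    /* ordinary RAM: pin the backing page first */
    if (get_page(mfn_to_page(physaddr >> PAGE_SHIFT), d))
        __assign_domain_page(d, mpaddr, physaddr);
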
     2.1 --- a/xen/arch/ia64/xen/domain.c	Fri May 12 08:27:51 2006 -0600
     2.2 +++ b/xen/arch/ia64/xen/domain.c	Fri May 12 08:47:07 2006 -0600
     2.3 @@ -395,6 +395,8 @@ static void relinquish_memory(struct dom
     2.4          /* Follow the list chain and /then/ potentially free the page. */
     2.5          ent = ent->next;
     2.6  #ifdef CONFIG_XEN_IA64_DOM0_VP
     2.7 +        //XXX this should be done when traversing the P2M table.
     2.8 +        //BUG_ON(get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
     2.9          if (page_get_owner(page) == d)
    2.10              set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
    2.11  #endif
    2.12 @@ -408,9 +410,9 @@ void domain_relinquish_resources(struct 
    2.13  {
    2.14      /* Relinquish every page of memory. */
    2.15  
    2.16 -    /* xenheap_list is not used in ia64. */
    2.17 -    BUG_ON(!list_empty(&d->xenpage_list));
    2.18 +    //XXX release pages by traversing d->arch.mm.
    2.19  
    2.20 +    relinquish_memory(d, &d->xenpage_list);
    2.21      relinquish_memory(d, &d->page_list);
    2.22  }
    2.23  
    2.24 @@ -487,6 +489,53 @@ void new_thread(struct vcpu *v,
    2.25  	}
    2.26  }
    2.27  
    2.28 +// stolen from share_xen_page_with_guest() in xen/arch/x86/mm.c
    2.29 +void
    2.30 +share_xen_page_with_guest(struct page_info *page,
    2.31 +                          struct domain *d, int readonly)
    2.32 +{
    2.33 +    if ( page_get_owner(page) == d )
    2.34 +        return;
    2.35 +
    2.36 +#if 1
    2.37 +    if (readonly) {
    2.38 +        printk("%s:%d readonly is not supported yet\n", __func__, __LINE__);
    2.39 +    }
    2.40 +#endif
    2.41 +
    2.42 +    // alloc_xenheap_pages() doesn't initialize page owner.
    2.43 +    //BUG_ON(page_get_owner(page) != NULL);
    2.44 +#if 0
    2.45 +    if (get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY) {
    2.46 +        printk("%s:%d page 0x%p mfn 0x%lx gpfn 0x%lx\n", __func__, __LINE__,
    2.47 +               page, page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)));
    2.48 +    }
    2.49 +#endif
    2.50 +    // grant_table_destroy() releases these pages,
    2.51 +    // but it doesn't clear the m2p entry, so a stale entry may remain.
    2.52 +    // We clear such a stale entry here.
    2.53 +    set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
    2.54 +
    2.55 +    spin_lock(&d->page_alloc_lock);
    2.56 +
    2.57 +#ifndef __ia64__
    2.58 +    /* The incremented type count pins as writable or read-only. */
    2.59 +    page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
    2.60 +    page->u.inuse.type_info |= PGT_validated | 1;
    2.61 +#endif
    2.62 +
    2.63 +    page_set_owner(page, d);
    2.64 +    wmb(); /* install valid domain ptr before updating refcnt. */
    2.65 +    ASSERT(page->count_info == 0);
    2.66 +    page->count_info |= PGC_allocated | 1;
    2.67 +
    2.68 +    if ( unlikely(d->xenheap_pages++ == 0) )
    2.69 +        get_knownalive_domain(d);
    2.70 +    list_add_tail(&page->list, &d->xenpage_list);
    2.71 +
    2.72 +    spin_unlock(&d->page_alloc_lock);
    2.73 +}
    2.74 +
    2.75  //XXX !xxx_present() should be used instead of !xxx_none()?
    2.76  static pte_t*
    2.77  lookup_alloc_domain_pte(struct domain* d, unsigned long mpaddr)
    2.78 @@ -586,6 +635,7 @@ struct page_info *
    2.79  {
    2.80      struct page_info *p = NULL;
    2.81      unsigned long maddr;
    2.82 +    int ret;
    2.83  
    2.84      BUG_ON(!pte_none(*pte));
    2.85  
    2.86 @@ -606,14 +656,13 @@ struct page_info *
    2.87  #endif
    2.88  
    2.89      p = alloc_domheap_page(d);
    2.90 -    // zero out pages for security reasons
    2.91 -    if (p)
    2.92 -        clear_page(page_to_virt(p));
    2.93 -
    2.94      if (unlikely(!p)) {
    2.95          printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
    2.96          return(p);
    2.97      }
    2.98 +
    2.99 +    // zero out pages for security reasons
   2.100 +    clear_page(page_to_virt(p));
   2.101      maddr = page_to_maddr (p);
   2.102      if (unlikely(maddr > __get_cpu_var(vhpt_paddr)
   2.103                   && maddr < __get_cpu_var(vhpt_pend))) {
   2.104 @@ -623,13 +672,14 @@ struct page_info *
   2.105                 maddr);
   2.106      }
   2.107  
   2.108 +    ret = get_page(p, d);
   2.109 +    BUG_ON(ret == 0);
   2.110      set_pte(pte, pfn_pte(maddr >> PAGE_SHIFT,
   2.111                           __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
   2.112  
   2.113      //XXX CONFIG_XEN_IA64_DOM0_VP
   2.114      //    TODO racy
   2.115 -    if ((mpaddr & GPFN_IO_MASK) == GPFN_MEM)
   2.116 -        set_gpfn_from_mfn(page_to_mfn(p), mpaddr >> PAGE_SHIFT);
   2.117 +    set_gpfn_from_mfn(page_to_mfn(p), mpaddr >> PAGE_SHIFT);
   2.118      return p;
   2.119  }
   2.120  
   2.121 @@ -673,21 +723,37 @@ assign_new_domain0_page(struct domain *d
   2.122  }
   2.123  
   2.124  /* map a physical address to the specified metaphysical addr */
   2.125 -void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
   2.126 +void
   2.127 +__assign_domain_page(struct domain *d,
   2.128 +                     unsigned long mpaddr, unsigned long physaddr)
   2.129  {
   2.130 -	pte_t *pte;
   2.131 +    pte_t *pte;
   2.132  
   2.133 -	pte = lookup_alloc_domain_pte(d, mpaddr);
   2.134 -	if (pte_none(*pte)) {
   2.135 -		set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
   2.136 -			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
   2.137 +    pte = lookup_alloc_domain_pte(d, mpaddr);
   2.138 +    if (pte_none(*pte)) {
   2.139 +        set_pte(pte,
   2.140 +                pfn_pte(physaddr >> PAGE_SHIFT,
   2.141 +                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
   2.142 +    } else
   2.143 +        printk("%s: mpaddr %lx already mapped!\n", __func__, mpaddr);
   2.144 +}
   2.145  
   2.146 -	//XXX CONFIG_XEN_IA64_DOM0_VP
   2.147 -	//    TODO racy
   2.148 -	if ((physaddr & GPFN_IO_MASK) == GPFN_MEM)
   2.149 -		set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
   2.150 -	}
   2.151 -	else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
   2.152 +/* get_page() and map a physical address to the specified metaphysical addr */
   2.153 +void
   2.154 +assign_domain_page(struct domain *d,
   2.155 +                   unsigned long mpaddr, unsigned long physaddr)
   2.156 +{
   2.157 +    struct page_info* page = mfn_to_page(physaddr >> PAGE_SHIFT);
   2.158 +    int ret;
   2.159 +
   2.160 +    ret = get_page(page, d);
   2.161 +    BUG_ON(ret == 0);
   2.162 +    __assign_domain_page(d, mpaddr, physaddr);
   2.163 +
   2.164 +    //XXX CONFIG_XEN_IA64_DOM0_VP
   2.165 +    //    TODO racy
   2.166 +    if ((physaddr & GPFN_IO_MASK) == GPFN_MEM)
   2.167 +        set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
   2.168  }
   2.169  
   2.170  #ifdef CONFIG_XEN_IA64_DOM0_VP
   2.171 @@ -698,7 +764,7 @@ assign_domain_same_page(struct domain *d
   2.172      //XXX optimization
   2.173      unsigned long end = mpaddr + size;
   2.174      for (; mpaddr < end; mpaddr += PAGE_SIZE) {
   2.175 -        assign_domain_page(d, mpaddr, mpaddr);
   2.176 +        __assign_domain_page(d, mpaddr, mpaddr);
   2.177      }
   2.178  }
   2.179  
   2.180 @@ -740,9 +806,51 @@ domain_page_flush(struct domain* d, unsi
   2.181      // flush tlb
   2.182      flush_tlb_all();
   2.183  }
   2.184 +#endif
   2.185  
   2.186 +//XXX heavily depends on the struct page_info layout.
   2.187 +//
   2.188 +// if (page_get_owner(page) == d &&
   2.189 +//     test_and_clear_bit(_PGC_allocated, &page->count_info)) {
   2.190 +//     put_page(page);
   2.191 +// }
   2.192  static void
   2.193 -zap_domain_page_one(struct domain *d, unsigned long mpaddr)
   2.194 +try_to_clear_PGC_allocate(struct domain* d, struct page_info* page)
   2.195 +{
   2.196 +    u32 _d, _nd;
   2.197 +    u64 x, nx, y;
   2.198 +
   2.199 +    _d = pickle_domptr(d);
   2.200 +    y = *((u64*)&page->count_info);
   2.201 +    do {
   2.202 +        x = y;
   2.203 +        _nd = x >> 32;
   2.204 +        nx = x - 1;
   2.205 +        __clear_bit(_PGC_allocated, &nx);
   2.206 +
   2.207 +        if (unlikely(!(x & PGC_allocated)) || unlikely(_nd != _d)) {
   2.208 +            struct domain* nd = unpickle_domptr(_nd);
   2.209 +            if (nd == NULL) {
    2.210 +                DPRINTK("try_to_clear_PGC_allocate: Bad page %p: ed=%p(%u) 0x%x, "
   2.211 +                        "sd=%p 0x%x,"
   2.212 +                        " caf=%016lx, taf=%" PRtype_info "\n",
   2.213 +                        (void *) page_to_mfn(page),
   2.214 +                        d, d->domain_id, _d,
   2.215 +                        nd, _nd,
   2.216 +                        x,
   2.217 +                        page->u.inuse.type_info);
   2.218 +            }
   2.219 +            break;
   2.220 +        }
   2.221 +
   2.222 +        BUG_ON((nx & PGC_count_mask) < 1);
   2.223 +        y = cmpxchg((u64*)&page->count_info, x, nx);
   2.224 +    } while (unlikely(y != x));
   2.225 +}
   2.226 +
   2.227 +#ifdef CONFIG_XEN_IA64_DOM0_VP
   2.228 +static void
   2.229 +zap_domain_page_one(struct domain *d, unsigned long mpaddr, int do_put_page)
   2.230  {
   2.231      struct mm_struct *mm = d->arch.mm;
   2.232      pte_t *pte;
   2.233 @@ -760,6 +868,7 @@ zap_domain_page_one(struct domain *d, un
   2.234      old_pte = ptep_get_and_clear(mm, mpaddr, pte);
   2.235      mfn = pte_pfn(old_pte);
   2.236      page = mfn_to_page(mfn);
   2.237 +    BUG_ON((page->count_info & PGC_count_mask) == 0);
   2.238  
   2.239      if (page_get_owner(page) == d) {
   2.240          BUG_ON(get_gpfn_from_mfn(mfn) != (mpaddr >> PAGE_SHIFT));
   2.241 @@ -768,7 +877,10 @@ zap_domain_page_one(struct domain *d, un
   2.242  
   2.243      domain_page_flush(d, mpaddr, mfn, INVALID_MFN);
   2.244  
   2.245 -    put_page(page);
   2.246 +    if (do_put_page) {
   2.247 +        try_to_clear_PGC_allocate(d, page);
   2.248 +        put_page(page);
   2.249 +    }
   2.250  }
   2.251  #endif
   2.252  
   2.253 @@ -942,12 +1054,14 @@ dom0vp_zap_physmap(struct domain *d, uns
   2.254          goto out;
   2.255      }
   2.256  
   2.257 -    zap_domain_page_one(d, gpfn << PAGE_SHIFT);
   2.258 +    zap_domain_page_one(d, gpfn << PAGE_SHIFT, 1);
   2.259  
   2.260  out:
   2.261      return ret;
   2.262  }
   2.263  
    2.264 +// the caller must get_page(mfn_to_page(mfn)) beforehand,
    2.265 +// and must call set_gpfn_from_mfn() itself.
   2.266  static void
   2.267  assign_domain_page_replace(struct domain *d, unsigned long mpaddr,
   2.268                             unsigned long mfn, unsigned int flags)
   2.269 @@ -978,8 +1092,10 @@ assign_domain_page_replace(struct domain
   2.270  
   2.271          domain_page_flush(d, mpaddr, old_mfn, mfn);
   2.272  
   2.273 +        try_to_clear_PGC_allocate(d, old_page);
   2.274          put_page(old_page);
   2.275      } else {
   2.276 +        BUG_ON(!mfn_valid(mfn));
   2.277          BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
   2.278                 get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
   2.279      }
   2.280 @@ -1007,6 +1123,7 @@ dom0vp_add_physmap(struct domain* d, uns
   2.281      }
   2.282  
   2.283      assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, 0/* flags:XXX */);
    2.284 +    // don't update the p2m table: this page belongs to rd, not d.
   2.285  out1:
   2.286      put_domain(rd);
   2.287  out0:
   2.288 @@ -1022,6 +1139,8 @@ create_grant_host_mapping(unsigned long 
   2.289  			  unsigned long mfn, unsigned int flags)
   2.290  {
   2.291      struct domain* d = current->domain;
   2.292 +    struct page_info* page;
   2.293 +    int ret;
   2.294  
   2.295      if (flags & (GNTMAP_application_map | GNTMAP_contains_pte)) {
   2.296          DPRINTK("%s: flags 0x%x\n", __func__, flags);
   2.297 @@ -1035,6 +1154,9 @@ create_grant_host_mapping(unsigned long 
   2.298          flags &= ~GNTMAP_readonly;
   2.299      }
   2.300  
   2.301 +    page = mfn_to_page(mfn);
   2.302 +    ret = get_page(page, page_get_owner(page));
   2.303 +    BUG_ON(ret == 0);
   2.304      assign_domain_page_replace(d, gpaddr, mfn, flags);
   2.305  
   2.306      return GNTST_okay;
   2.307 @@ -1049,6 +1171,7 @@ destroy_grant_host_mapping(unsigned long
   2.308      pte_t* pte;
   2.309      pte_t old_pte;
   2.310      unsigned long old_mfn = INVALID_MFN;
   2.311 +    struct page_info* old_page;
   2.312  
   2.313      if (flags & (GNTMAP_application_map | GNTMAP_contains_pte)) {
   2.314          DPRINTK("%s: flags 0x%x\n", __func__, flags);
   2.315 @@ -1062,9 +1185,6 @@ destroy_grant_host_mapping(unsigned long
   2.316          flags &= ~GNTMAP_readonly;
   2.317      }
   2.318  
   2.319 -    // get_page(mfn_to_page(mfn)) is not needed.
   2.320 -    // the caller, __gnttab_map_grant_ref() does it.
   2.321 -
   2.322      pte = lookup_noalloc_domain_pte(d, gpaddr);
   2.323      if (pte == NULL || !pte_present(*pte) || pte_pfn(*pte) != mfn)
   2.324          return GNTST_general_error;//XXX GNTST_bad_pseudo_phys_addr
   2.325 @@ -1076,6 +1196,10 @@ destroy_grant_host_mapping(unsigned long
   2.326      }
   2.327      domain_page_flush(d, gpaddr, old_mfn, INVALID_MFN);
   2.328  
   2.329 +    old_page = mfn_to_page(old_mfn);
    2.330 +    BUG_ON(page_get_owner(old_page) == d); // try_to_clear_PGC_allocate(d, page) is not needed.
   2.331 +    put_page(old_page);
   2.332 +
   2.333      return GNTST_okay;
   2.334  }
   2.335  
   2.336 @@ -1093,13 +1217,8 @@ steal_page_for_grant_transfer(struct dom
   2.337      unsigned long mpaddr = get_gpfn_from_mfn(page_to_mfn(page)) << PAGE_SHIFT;
   2.338      struct page_info *new;
   2.339  
   2.340 -    // zap_domain_page_one() does put_page(page)
   2.341 -    if (get_page(page, d) == 0) {
   2.342 -        DPRINTK("%s:%d page %p mfn %ld d 0x%p id %d\n",
   2.343 -                __func__, __LINE__, page, page_to_mfn(page), d, d->domain_id);
   2.344 -        return -1;
   2.345 -    }
   2.346 -    zap_domain_page_one(d, mpaddr);
   2.347 +    zap_domain_page_one(d, mpaddr, 0);
   2.348 +    put_page(page);
   2.349  
   2.350      spin_lock(&d->page_alloc_lock);
   2.351  
   2.352 @@ -1170,8 +1289,14 @@ void
   2.353  guest_physmap_add_page(struct domain *d, unsigned long gpfn,
   2.354                         unsigned long mfn)
   2.355  {
   2.356 +    int ret;
   2.357 +
   2.358 +    ret = get_page(mfn_to_page(mfn), d);
   2.359 +    BUG_ON(ret == 0);
   2.360      assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, 0/* XXX */);
   2.361 -    set_gpfn_from_mfn(mfn, gpfn);
   2.362 +    set_gpfn_from_mfn(mfn, gpfn);//XXX SMP
   2.363 +
   2.364 +    //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT));
   2.365  }
   2.366  
   2.367  void
   2.368 @@ -1179,7 +1304,7 @@ guest_physmap_remove_page(struct domain 
   2.369                            unsigned long mfn)
   2.370  {
   2.371      BUG_ON(mfn == 0);//XXX
   2.372 -    zap_domain_page_one(d, gpfn << PAGE_SHIFT);
   2.373 +    zap_domain_page_one(d, gpfn << PAGE_SHIFT, 1);
   2.374  }
   2.375  #endif
   2.376  
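
The heart of the teardown side is try_to_clear_PGC_allocate(). Logically it
is the sequence quoted in its leading comment; a sketch of that naive form,
for comparison:

    /* what the cmpxchg loop implements atomically -- sketch only */
    if (page_get_owner(page) == d &&
        test_and_clear_bit(_PGC_allocated, &page->count_info))
        put_page(page);

Because count_info and the pickled owner occupy one 64-bit word on ia64
(this is what "heavily depends on the struct page_info layout" refers to),
the loop can check the owner, clear PGC_allocated and drop one reference in
a single cmpxchg, avoiding the window between test_and_clear_bit() and
put_page().
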
     3.1 --- a/xen/include/asm-ia64/domain.h	Fri May 12 08:27:51 2006 -0600
     3.2 +++ b/xen/include/asm-ia64/domain.h	Fri May 12 08:47:07 2006 -0600
     3.3 @@ -119,6 +119,7 @@ extern struct mm_struct init_mm;
     3.4  
     3.5  struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
     3.6  void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
     3.7 +void __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
     3.8  void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
     3.9  void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
    3.10  #ifdef CONFIG_XEN_IA64_DOM0_VP
     4.1 --- a/xen/include/asm-ia64/grant_table.h	Fri May 12 08:27:51 2006 -0600
     4.2 +++ b/xen/include/asm-ia64/grant_table.h	Fri May 12 08:47:07 2006 -0600
     4.3 @@ -14,6 +14,7 @@
     4.4  
     4.5  // for grant transfer
     4.6  #define steal_page_for_grant_transfer(d, p)  0
     4.7 +
     4.8  #else
     4.9  // for grant map/unmap
    4.10  int create_grant_host_mapping(unsigned long gpaddr, unsigned long mfn, unsigned int flags);
    4.11 @@ -22,9 +23,17 @@ int destroy_grant_host_mapping(unsigned 
    4.12  // for grant transfer
    4.13  int steal_page_for_grant_transfer(struct domain *d, struct page_info *page);
    4.14  void guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
    4.15 +
    4.16  #endif
    4.17  
    4.18 -#define gnttab_create_shared_page(d, t, i) ((void)0)
    4.19 +// for grant table shared page
    4.20 +#define gnttab_create_shared_page(d, t, i)                              \
    4.21 +    do {                                                                \
    4.22 +        share_xen_page_with_guest(                                      \
    4.23 +            virt_to_page((char *)(t)->shared + ((i) << PAGE_SHIFT)),    \
    4.24 +            (d), XENSHARE_writable);                                    \
    4.25 +    } while (0)
    4.26 +
    4.27  
    4.28  /* Guest physical address of the grant table.  */
    4.29  #define IA64_GRANT_TABLE_PADDR (1UL << 40)
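
With this, gnttab_create_shared_page() is no longer a no-op on ia64: each
frame of t->shared is handed to the guest as a writable xenheap page, which
is what lands on d->xenpage_list and is why domain_relinquish_resources()
in the domain.c section above now walks xenpage_list too. A sketch of the
expected call site in the common grant-table code (the loop bound name is
illustrative):

    /* grant_table_create()-style setup -- sketch, not the actual code */
    for (i = 0; i < NR_GRANT_FRAMES; i++)
        gnttab_create_shared_page(d, t, i);  /* share_xen_page_with_guest()
                                                on the i-th shared frame */
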
     5.1 --- a/xen/include/asm-ia64/mm.h	Fri May 12 08:27:51 2006 -0600
     5.2 +++ b/xen/include/asm-ia64/mm.h	Fri May 12 08:47:07 2006 -0600
     5.3 @@ -128,8 +128,10 @@ static inline u32 pickle_domptr(struct d
     5.4  #define page_get_owner(_p)	(unpickle_domptr((_p)->u.inuse._domain))
     5.5  #define page_set_owner(_p, _d)	((_p)->u.inuse._domain = pickle_domptr(_d))
     5.6  
     5.7 -/* Dummy now */
     5.8 -#define share_xen_page_with_guest(p, d, r) do { } while (0)
     5.9 +#define XENSHARE_writable 0
    5.10 +#define XENSHARE_readonly 1
    5.11 +void share_xen_page_with_guest(struct page_info *page,
    5.12 +                               struct domain *d, int readonly);
    5.13  #define share_xen_page_with_privileged_guests(p, r) do { } while (0)
    5.14  
    5.15  extern struct page_info *frame_table;
    5.16 @@ -471,6 +473,4 @@ extern unsigned long ____lookup_domain_m
    5.17  /* Arch-specific portion of memory_op hypercall. */
    5.18  #define arch_memory_op(op, arg) (-ENOSYS)
    5.19  
    5.20 -extern void assign_domain_page(struct domain *d, unsigned long mpaddr,
    5.21 -			       unsigned long physaddr);
    5.22  #endif /* __ASM_IA64_MM_H__ */
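
share_xen_page_with_guest() thus becomes a real function on ia64 (previously
a dummy macro). Note that the implementation in the domain.c section above
accepts the readonly argument but does not honour it yet; it only prints a
warning. A hypothetical call site:

    /* sketch: hand a xenheap page to the guest read-write */
    share_xen_page_with_guest(virt_to_page(p), d, XENSHARE_writable);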