ia64/xen-unstable

xen/include/asm-ia64/mm.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use the domain's max_vcpus field instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 1c01814f9a25

/*
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 * VA Linux Systems Japan K.K.
 * dom0 vp model support
 */
#ifndef __ASM_IA64_MM_H__
#define __ASM_IA64_MM_H__

#include <xen/config.h>
#ifdef LINUX_2_6
#include <linux/gfp.h>
#endif
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <public/xen.h>

/*
 * The following is for page_alloc.c.
 */

typedef unsigned long page_flags_t;

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 *  1. 'struct page_info' contains a 'struct list_head list'.
 *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

#define PRtype_info "016lx"

#ifdef CONFIG_IA64_SHRINK_PAGE_LIST
/*
 * See include/xen/mm.h.
 * To compress page_list_entry, all physical addresses must be
 * representable in (32 + PAGE_SHIFT) bits.
 * Note that this is lower than IA64_MAX_PHYS_BITS = 50.
 */
#undef page_list_entry
struct page_list_entry
{
    u32 next, prev;
};
#endif
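
/*
 * Editor's illustration, not part of the original header: with 32-bit
 * next/prev fields holding page frame numbers, the highest physical
 * address the compressed list can refer to is 1UL << (32 + PAGE_SHIFT).
 * Assuming the usual 16KB Xen/ia64 page size (PAGE_SHIFT == 14), that is
 * 2^46 bytes (64 TiB), which is indeed below the architectural maximum
 * of 2^50 bytes noted above; the smaller page_list_entry trades away the
 * top of the physical address space.  The helper below is a hypothetical
 * sketch of that limit, for illustration only.
 */
static inline unsigned long shrink_page_list_paddr_limit(void)
{
    /* Largest physical address representable with 32-bit frame numbers. */
    return 1UL << (32 + PAGE_SHIFT);
}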

#ifdef CONFIG_IA64_PICKLE_DOMAIN
typedef u32 __ia64_domain_t;
#else
typedef unsigned long __ia64_domain_t;
#endif

struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct page_list_entry list;

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
            /* Owner of this page (NULL if page is anonymous). */
            __ia64_domain_t _domain; /* pickled format */
        } inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            u32 order;
            /* Do TLBs need flushing for safety before next page use? */
            bool_t need_tlbflush;
        } free;

    } u;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;
};
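
/*
 * Editor's illustration, not part of the original header: for a page at
 * the head of a free chunk, the PFN_ORDER() accessor defined above reads
 * u.free.order, so the chunk spans 1UL << PFN_ORDER() frames.  The helper
 * below is a hypothetical sketch of that relationship; it is only
 * meaningful for pages that are currently on a free list.
 */
static inline unsigned long free_chunk_pages(const struct page_info *pg)
{
    /* Number of frames in the free chunk headed by 'pg'. */
    return 1UL << PFN_ORDER(pg);
}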

#define set_page_count(p,v) atomic_set(&(p)->_count, v - 1)

/*
 * Only a small set of flags is defined so far on IA-64.
 * IA-64 should eventually adopt the same definitions as x86_64.
 */
#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))

/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none          PG_mask(0, 3) /* no special uses of this page */
#define PGT_l1_page_table PG_mask(1, 3) /* using as an L1 page table? */
#define PGT_l2_page_table PG_mask(2, 3) /* using as an L2 page table? */
#define PGT_l3_page_table PG_mask(3, 3) /* using as an L3 page table? */
#define PGT_l4_page_table PG_mask(4, 3) /* using as an L4 page table? */
/* Value 5 reserved. See asm-x86/mm.h */
/* Value 6 reserved. See asm-x86/mm.h */
#define PGT_writable_page PG_mask(7, 3) /* has writable mappings? */
#define PGT_type_mask     PG_mask(7, 3) /* 3 most significant bits. */

/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned       PG_shift(4)
#define PGT_pinned        PG_mask(1, 4)
/* Has this page been validated for use as its current type? */
#define _PGT_validated    PG_shift(5)
#define PGT_validated     PG_mask(1, 5)

/* Count of uses of this frame as its current type. */
#define PGT_count_width   PG_shift(7)
#define PGT_count_mask    ((1UL<<PGT_count_width)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated    PG_shift(1)
#define PGC_allocated     PG_mask(1, 1)
/* Page is Xen heap? */
# define _PGC_xen_heap    PG_shift(2)
# define PGC_xen_heap     PG_mask(1, 2)
/* bit PG_shift(3) reserved. See asm-x86/mm.h */
/* PG_mask(7, 6) reserved. See asm-x86/mm.h */

/* Page is broken? */
#define _PGC_broken       PG_shift(7)
#define PGC_broken        PG_mask(1, 7)
/* Page is offline pending? */
#define _PGC_offlining    PG_shift(8)
#define PGC_offlining     PG_mask(1, 8)
/* Page is offlined */
#define _PGC_offlined     PG_shift(9)
#define PGC_offlined      PG_mask(1, 9)
#define PGC_offlined_broken (PGC_offlined | PGC_broken)

#define is_page_offlining(page)  ((page)->count_info & PGC_offlining)
#define is_page_offlined(page)   ((page)->count_info & PGC_offlined)
#define is_page_broken(page)     ((page)->count_info & PGC_broken)
#define is_page_online(page)     (!is_page_offlined(page))

/* Count of references to this frame. */
#define PGC_count_width   PG_shift(9)
#define PGC_count_mask    ((1UL<<PGC_count_width)-1)
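
/*
 * Editor's illustration, not part of the original header: BITS_PER_LONG
 * is 64 on ia64, so PG_shift(idx) selects the idx'th bit position from
 * the top: PG_shift(1) == 63, PG_shift(3) == 61, and so on.  The flags
 * above therefore live in the most significant bits of count_info (and
 * type_info) while the reference counts occupy the low bits, e.g.:
 *
 *   PGC_allocated  = 1UL << 63      PGC_xen_heap   = 1UL << 62
 *   PGC_broken     = 1UL << 57      PGC_offlining  = 1UL << 56
 *   PGC_offlined   = 1UL << 55      PGC_count_mask = bits 0..54
 *   PGT_type_mask  = 7UL << 61 (top three bits of type_info)
 *
 * The helper below is a hypothetical sketch of how the plain reference
 * count is recovered from count_info.
 */
static inline unsigned long page_refcount_sketch(const struct page_info *pg)
{
    /* Strip the PGC_xxx flag bits, leaving only the reference count. */
    return pg->count_info & PGC_count_mask;
}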

extern unsigned long xen_fixed_mfn_start;
extern unsigned long xen_fixed_mfn_end;
#define is_xen_heap_page(page)  ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn)    (mfn_valid(mfn) &&                      \
                                 is_xen_heap_page(mfn_to_page(mfn)))
#define is_xen_fixed_mfn(mfn)                                           \
    (xen_fixed_mfn_start <= (mfn) && (mfn) <= xen_fixed_mfn_end)

#ifdef CONFIG_IA64_PICKLE_DOMAIN
#define page_get_owner(_p)                                              \
    ((struct domain *)((_p)->v.inuse._domain ?                          \
                       mfn_to_virt((_p)->v.inuse._domain) : NULL))
#define page_set_owner(_p,_d)                                           \
    ((_p)->v.inuse._domain = (_d) ? virt_to_mfn(_d) : 0)
#else
#define page_get_owner(_p)      ((struct domain *)(_p)->u.inuse._domain)
#define page_set_owner(_p, _d)  ((_p)->u.inuse._domain = (unsigned long)(_d))
#endif
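
/*
 * Editor's illustration, not part of the original header: with
 * CONFIG_IA64_PICKLE_DOMAIN the owner field is "pickled" by storing the
 * MFN of the xenheap memory holding the struct domain, which fits the
 * 32-bit __ia64_domain_t; a NULL owner is encoded as 0 and recovered via
 * mfn_to_virt().  The round-trip check below is a hypothetical sketch
 * using only the macros defined above.
 */
static inline void page_owner_pickle_check(struct page_info *pg,
                                           struct domain *d)
{
    page_set_owner(pg, d);            /* store the (possibly pickled) owner */
    ASSERT(page_get_owner(pg) == d);  /* reading it back recovers the pointer */
}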

#define XENSHARE_writable 0
#define XENSHARE_readonly 1
void share_xen_page_with_guest(struct page_info *page,
                               struct domain *d, int readonly);
void share_xen_page_with_privileged_guests(struct page_info *page,
                                           int readonly);

extern unsigned long frametable_pg_dir[];
extern struct page_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;

extern void __init init_frametable(void);
void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);

static inline void put_page(struct page_info *page)
{
    unsigned long nx, x, y = page->count_info;

    do {
        ASSERT((y & PGC_count_mask) != 0);
        x = y;
        nx = x - 1;
    }
    while (unlikely((y = cmpxchg_rel(&page->count_info, x, nx)) != x));

    if (unlikely((nx & PGC_count_mask) == 0))
        free_domheap_page(page);
}

static inline struct domain *page_get_owner_and_reference(
    struct page_info *page)
{
    unsigned long x, y = page->count_info;

    do {
        x = y;
        /*
         * Count == 0: Page is not allocated, so we cannot take a reference.
         * Count == -1: Reference count would wrap, which is invalid.
         * Count == -2: Remaining unused ref is reserved for get_page_light().
         */
        /*
         * On ia64, get_page_light() is not defined, so there is no real
         * need to handle the Count == -2 case; it is checked anyway for
         * consistency with x86.
         */
        if ( unlikely(((x + 2) & PGC_count_mask) <= 2) )
            return NULL;
        y = cmpxchg_acq(&page->count_info, x, x + 1);
    } while (unlikely(y != x));

    return page_get_owner(page);
}

/* count_info and ownership are checked atomically. */
static inline int get_page(struct page_info *page,
                           struct domain *domain)
{
    struct domain *owner = page_get_owner_and_reference(page);

    if (likely(owner == domain))
        return 1;

    if (owner != NULL)
        put_page(page);

    /* if (!domain->is_dying) */ /* XXX: header inclusion hell */
    gdprintk(XENLOG_INFO,
             "Error pfn %lx: rd=%p, od=%p, caf=%016lx, taf=%" PRtype_info "\n",
             page_to_mfn(page), domain,
             owner, page->count_info, page->u.inuse.type_info);
    return 0;
}

int is_iomem_page(unsigned long mfn);

extern void put_page_type(struct page_info *page);
extern int get_page_type(struct page_info *page, unsigned long type);

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
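
/*
 * Editor's illustration, not part of the original header: callers pair
 * get_page()/put_page() (or the _and_type variants above) around any use
 * of a guest frame.  The sketch below is hypothetical; 'd' and 'mfn' are
 * assumed to come from the caller.
 */
static inline int use_guest_frame_sketch(struct domain *d, unsigned long mfn)
{
    struct page_info *page = mfn_to_page(mfn);

    if ( !get_page(page, d) )   /* fails if 'd' does not own the frame */
        return 0;

    /* ... the frame may be used safely while the reference is held ... */

    put_page(page);             /* drop the reference taken above */
    return 1;
}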

#define set_machinetophys(_mfn, _pfn) do { } while(0);

#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s)             (_s)
#define memguard_guard_stack(_p)      ((void)0)
#define memguard_guard_range(_p,_l)   ((void)0)
#define memguard_unguard_range(_p,_l) ((void)0)
#endif

// prototype of misc memory stuff
//unsigned long __get_free_pages(unsigned int mask, unsigned int order);
//void __free_pages(struct page_info *page, unsigned int order);
void *pgtable_quicklist_alloc(void);
void pgtable_quicklist_free(void *pgtable_entry);

// FOLLOWING FROM linux-2.6.7/include/mm.h

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
    struct mm_struct * vm_mm;   /* The address space we belong to. */
    unsigned long vm_start;     /* Our start address within vm_mm. */
    unsigned long vm_end;       /* The first byte after our end address
                                   within vm_mm. */

    /* linked list of VM areas per task, sorted by address */
    struct vm_area_struct *vm_next;

    pgprot_t vm_page_prot;      /* Access permissions of this VMA. */
    unsigned long vm_flags;     /* Flags, listed below. */

#ifndef XEN
    struct rb_node vm_rb;

    // XEN doesn't need all the backing store stuff
    /*
     * For areas with an address space and backing store,
     * linkage into the address_space->i_mmap prio tree, or
     * linkage to the list of like vmas hanging off its node, or
     * linkage of vma in the address_space->i_mmap_nonlinear list.
     */
    union {
        struct {
            struct list_head list;
            void *parent;   /* aligns with prio_tree_node parent */
            struct vm_area_struct *head;
        } vm_set;

        struct prio_tree_node prio_tree_node;
    } shared;

    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages. A MAP_SHARED vma
     * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
    struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

    /* Function pointers to deal with this struct. */
    struct vm_operations_struct * vm_ops;

    /* Information about our backing store: */
    unsigned long vm_pgoff;     /* Offset (within vm_file) in PAGE_SIZE
                                   units, *not* PAGE_CACHE_SIZE */
    struct file * vm_file;      /* File we map to (can be NULL). */
    void * vm_private_data;     /* was vm_pte (shared mem) */

#ifdef CONFIG_NUMA
    struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
#endif
};
/*
 * vm_flags..
 */
#define VM_READ         0x00000001  /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

#define VM_MAYREAD      0x00000010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100  /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_SHM          0x00000400  /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x00000800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000  /* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000  /* App will access data sequentially */
#define VM_RAND_READ    0x00010000  /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000  /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000  /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000  /* Don't unmap it from swap_out */
#define VM_ACCOUNT      0x00100000  /* Is a VM accounted object */
#define VM_HUGETLB      0x00400000  /* Huge TLB Page VM */
#define VM_NONLINEAR    0x00800000  /* Is non-linear (remap_file_pages) */

#ifndef VM_STACK_DEFAULT_FLAGS      /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#if 0 /* removed when rebasing to 2.6.13 */
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)

static inline unsigned long page_zonenum(struct page_info *page)
{
    return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
}
static inline unsigned long page_to_nid(struct page_info *page)
{
    return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page_info *page)
{
    return zone_table[page->flags >> NODEZONE_SHIFT];
}

static inline void set_page_zone(struct page_info *page, unsigned long nodezone_num)
{
    page->flags &= ~(~0UL << NODEZONE_SHIFT);
    page->flags |= nodezone_num << NODEZONE_SHIFT;
}
#endif

#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

static inline void *lowmem_page_address(struct page_info *page)
{
    return __va(page_to_mfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)                 \
    do {                                                \
        (page)->virtual = (address);                    \
    } while(0)
#define page_address_init() do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page_info *page);
void set_page_address(struct page_info *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init() do { } while(0)
#endif

#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page_info *page, int numpages, int enable)
{
}
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern int nr_swap_pages;

extern void alloc_dom_xen_and_dom_io(void);
extern int mm_teardown(struct domain* d);
extern void mm_final_teardown(struct domain* d);
extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
extern int __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);
extern void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
extern void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
extern int deassign_domain_mmio_page(struct domain *d, unsigned long mpaddr,
                                     unsigned long phys_addr, unsigned long size);
struct p2m_entry;
extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr, struct p2m_entry* entry);
extern void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr);
extern volatile pte_t *lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr);
extern unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr, unsigned long phys_addr, unsigned long size, unsigned long flags);
extern unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size, unsigned long flags);
int domain_page_mapped(struct domain *d, unsigned long mpaddr);
int efi_mmio(unsigned long physaddr, unsigned long size);
extern unsigned long ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
extern unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3);
extern unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order);
extern unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned long flags, domid_t domid);
extern unsigned long dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn, unsigned long gmfn, unsigned long flags, domid_t domid);
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
extern void expose_p2m_init(void);
extern unsigned long dom0vp_expose_p2m(struct domain* d, unsigned long conv_start_gpfn, unsigned long assign_start_gpfn, unsigned long expose_size, unsigned long granule_pfn);
extern void foreign_p2m_init(struct domain* d);
extern void foreign_p2m_destroy(struct domain* d);
extern unsigned long dom0vp_expose_foreign_p2m(struct domain* dest_dom, unsigned long dest_gpfn, domid_t domid, XEN_GUEST_HANDLE(char) buffer, unsigned long flags);
extern unsigned long dom0vp_unexpose_foreign_p2m(struct domain* dest_dom, unsigned long dest_gpfn, domid_t domid);
extern unsigned long dom0vp_get_memmap(domid_t domid, XEN_GUEST_HANDLE(char) buffer);
#else
#define expose_p2m_init() do { } while (0)
#define dom0vp_expose_p2m(d, conv_start_gpfn, assign_start_gpfn, expose_size, granule_pfn) (-ENOSYS)
#define foreign_p2m_init(d) do { } while (0)
#define foreign_p2m_destroy(d) do { } while (0)
#define dom0vp_expose_foreign_p2m(dest_dom, dest_gpfn, domid, buffer, flags) (-ENOSYS)
#define dom0vp_unexpose_foreign_p2m(dest_dom, dest_gpfn, domid) (-ENOSYS)
#define __dom0vp_add_memdesc(d, memmap_info, memdesc) (-ENOSYS)
#define dom0vp_get_memmap(domid, buffer) (-ENOSYS)
#endif

int
p2m_pod_decrease_reservation(struct domain *d,
                             xen_pfn_t gpfn, unsigned int order);
int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
                                          unsigned int order);

extern volatile unsigned long *mpt_table;
extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__,
                                u64* itir, struct p2m_entry* entry);
#define machine_to_phys_mapping mpt_table

#define INVALID_M2P_ENTRY (~0UL)
#define VALID_M2P(_e)     (!((_e) & (1UL<<63)))

#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
#define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])

/* If the pmt table is provided by the control panel later, we need
 * __get_user here.  However, if it is allocated by the hypervisor, we
 * can access it directly.
 */

#define mfn_to_gmfn(_d, mfn)                    \
    get_gpfn_from_mfn(mfn)

#define gmfn_to_mfn(_d, gpfn)                   \
    gmfn_to_mfn_foreign((_d), (gpfn))

#define __gpfn_invalid(_d, gpfn)                                        \
    (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL) == INVALID_MFN)

#define __gmfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)

#define __gpa_to_mpa(_d, gpa)                                           \
    ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))

#define __mpa_to_gpa(madr)                                              \
    ((get_gpfn_from_mfn((madr) >> PAGE_SHIFT) << PAGE_SHIFT) |          \
     ((madr) & ~PAGE_MASK))
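
/*
 * Editor's illustration, not part of the original header: the macros
 * above translate between guest-physical and machine addresses in both
 * directions.  The round trip below is a hypothetical sketch; it assumes
 * 'gpa' is a guest-physical address of domain 'd' whose M2P entry is
 * valid.
 */
static inline void gpa_translation_sketch(struct domain *d, unsigned long gpa)
{
    unsigned long maddr = __gpa_to_mpa(d, gpa);     /* guest -> machine */
    unsigned long gpfn  = get_gpfn_from_mfn(maddr >> PAGE_SHIFT);

    /* The M2P entry is only meaningful when VALID_M2P() accepts it. */
    if ( VALID_M2P(gpfn) )
        ASSERT(__mpa_to_gpa(maddr) == gpa);         /* machine -> guest */
}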

/* Internal use only: returns 0 in case of bad address. */
extern unsigned long paddr_to_maddr(unsigned long paddr);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);

int steal_page(
    struct domain *d, struct page_info *page, unsigned int memflags);
int donate_page(
    struct domain *d, struct page_info *page, unsigned int memflags);

#define domain_clamp_alloc_bitsize(d, b) (b)

unsigned long domain_get_maximum_gpfn(struct domain *d);

extern struct domain *dom_xen, *dom_io; /* for vmcoreinfo */

#endif /* __ASM_IA64_MM_H__ */