ia64/xen-unstable

annotate xen/include/asm-ia64/mm.h @ 12794:9787cb7262e8

[IA64] Changed foreign domain page mapping semantics.

The x86 foreign HVM domain page mapping semantics were changed to use gmfn
instead of mfn. This applies to domains with auto_translated_mode enabled,
and all ia64 domains enable auto_translated_mode. This patch changes
ia64 foreign domain page mapping to use gmfn and fixes the ia64 domU builder.
However, this patch breaks domain save/restore/dump-core; they should also
be fixed up. (A hedged usage sketch of the new interface follows the
changeset metadata below.)

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Tue Dec 05 10:59:32 2006 -0700 (2006-12-05)
parents b39844e292f6
children 0deb53ff4b67 1e5a83fb928b
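
To make the new semantics concrete, here is a minimal caller sketch (not part
of the changeset): it maps a foreign domain's frame by gmfn through
dom0vp_add_physmap_with_gmfn(), which is declared in this header. The wrapper
name, the flags value, and the error handling are assumptions for illustration
only.

    /* Hypothetical wrapper, illustration only: map a foreign domain's frame
     * into this domain's physmap by gmfn (guest frame number) rather than by
     * raw mfn, since all ia64 domains run with auto_translated_mode enabled. */
    static unsigned long
    map_foreign_gmfn(struct domain *d, unsigned long gpfn,
                     unsigned long foreign_gmfn, domid_t foreign_domid)
    {
        /* Declared later in this header; the flags value is an assumption. */
        return dom0vp_add_physmap_with_gmfn(d, gpfn, foreign_gmfn,
                                            /* flags */ 0, foreign_domid);
    }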
rev   line source
awilliam@10240 1 /*
awilliam@10240 2 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
awilliam@10240 3 * VA Linux Systems Japan K.K.
awilliam@10240 4 * dom0 vp model support
awilliam@10240 5 */
iap10@3833 6 #ifndef __ASM_IA64_MM_H__
iap10@3833 7 #define __ASM_IA64_MM_H__
iap10@3833 8
iap10@3833 9 #include <xen/config.h>
iap10@3833 10 #ifdef LINUX_2_6
djm@7733 11 #include <linux/gfp.h>
iap10@3833 12 #endif
iap10@3833 13 #include <xen/list.h>
iap10@3833 14 #include <xen/spinlock.h>
iap10@3833 15 #include <xen/perfc.h>
iap10@3833 16 #include <xen/sched.h>
iap10@3833 17
iap10@3833 18 #include <asm/processor.h>
iap10@3833 19 #include <asm/atomic.h>
awilliam@10013 20 #include <asm/tlbflush.h>
awilliam@11812 21 #include <asm/flushtlb.h>
iap10@3833 22 #include <asm/io.h>
iap10@3833 23
iap10@3833 24 #include <public/xen.h>
iap10@3833 25
iap10@3833 26 /*
iap10@3833 27 * The following is for page_alloc.c.
iap10@3833 28 */
iap10@3833 29
iap10@3833 30 typedef unsigned long page_flags_t;
iap10@3833 31
iap10@3833 32 /*
iap10@3833 33 * Per-page-frame information.
djm@5452 34 *
djm@5452 35 * Every architecture must ensure the following:
kaf24@8726 36 * 1. 'struct page_info' contains a 'struct list_head list'.
djm@5452 37 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
iap10@3833 38 */
djm@5452 39 #define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
iap10@3833 40
awilliam@9398 41 #define PRtype_info "016lx"
djm@6461 42
awilliam@9271 43 struct page_info
iap10@3833 44 {
iap10@3833 45 /* Each frame can be threaded onto a doubly-linked list. */
iap10@3833 46 struct list_head list;
djm@4282 47
djm@4282 48 /* Reference count and various PGC_xxx flags and fields. */
djm@4282 49 u32 count_info;
djm@4282 50
iap10@3833 51 /* Context-dependent fields follow... */
iap10@3833 52 union {
iap10@3833 53
awilliam@9162 54 /* Page is in use: ((count_info & PGC_count_mask) != 0). */
iap10@3833 55 struct {
awilliam@9162 56 /* Owner of this page (NULL if page is anonymous). */
awilliam@9162 57 u32 _domain; /* pickled format */
iap10@3833 58 /* Type reference count and various PGT_xxx flags and fields. */
awilliam@9162 59 unsigned long type_info;
awilliam@9162 60 } __attribute__ ((packed)) inuse;
iap10@3833 61
awilliam@9162 62 /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
iap10@3833 63 struct {
awilliam@9162 64 /* Order-size of the free chunk this page is the head of. */
awilliam@9162 65 u32 order;
iap10@3833 66 /* Mask of possibly-tainted TLBs. */
djm@5305 67 cpumask_t cpumask;
awilliam@9162 68 } __attribute__ ((packed)) free;
iap10@3833 69
iap10@3833 70 } u;
awilliam@9162 71
awilliam@9162 72 /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
awilliam@9162 73 u32 tlbflush_timestamp;
awilliam@9162 74
djm@8478 75 #if 0
iap10@3833 76 // following added for Linux compiling
iap10@3833 77 page_flags_t flags;
iap10@3833 78 atomic_t _count;
iap10@3833 79 struct list_head lru; // is this the same as above "list"?
djm@8478 80 #endif
iap10@3833 81 };
iap10@3833 82
iap10@3833 83 #define set_page_count(p,v) atomic_set(&(p)->_count, v - 1)
iap10@3833 84
awilliam@9170 85 /*
awilliam@9170 86 * Only a small set of flags is defined so far on IA-64.
awilliam@9170 87 * IA-64 should eventually use the same definitions as x86_64.
awilliam@9170 88 */
djm@5452 89 /* The following page types are MUTUALLY EXCLUSIVE. */
iap10@3833 90 #define PGT_none (0<<29) /* no special uses of this page */
iap10@3833 91 #define PGT_l1_page_table (1<<29) /* using this page as an L1 page table? */
iap10@3833 92 #define PGT_l2_page_table (2<<29) /* using this page as an L2 page table? */
iap10@3833 93 #define PGT_l3_page_table (3<<29) /* using this page as an L3 page table? */
iap10@3833 94 #define PGT_l4_page_table (4<<29) /* using this page as an L4 page table? */
awilliam@9170 95 /* Value 5 reserved. See asm-x86/mm.h */
awilliam@9170 96 /* Value 6 reserved. See asm-x86/mm.h */
awilliam@9170 97 #define PGT_writable_page (7<<29) /* has writable mappings of this page? */
awilliam@9170 98 #define PGT_type_mask (7<<29) /* Bits 29-31. */
djm@5452 99
iap10@3833 100 /* Has this page been validated for use as its current type? */
iap10@3833 101 #define _PGT_validated 28
iap10@3833 102 #define PGT_validated (1<<_PGT_validated)
awilliam@9170 103 /* Owning guest has pinned this page to its current type? */
djm@5452 104 #define _PGT_pinned 27
djm@5452 105 #define PGT_pinned (1U<<_PGT_pinned)
djm@5452 106
awilliam@9170 107 /* 16-bit count of uses of this frame as its current type. */
awilliam@9162 108 #define PGT_count_mask ((1U<<16)-1)
iap10@3833 109
awilliam@9170 110 /* Cleared when the owning guest 'frees' this page. */
djm@4282 111 #define _PGC_allocated 31
djm@4282 112 #define PGC_allocated (1U<<_PGC_allocated)
awilliam@9170 113 /* Bit 30 reserved. See asm-x86/mm.h */
awilliam@9170 114 /* Bit 29 reserved. See asm-x86/mm.h */
awilliam@9170 115 /* 29-bit count of references to this frame. */
awilliam@9170 116 #define PGC_count_mask ((1U<<29)-1)
djm@4282 117
kaf24@8726 118 #define IS_XEN_HEAP_FRAME(_pfn) ((page_to_maddr(_pfn) < xenheap_phys_end) \
kaf24@8726 119 && (page_to_maddr(_pfn) >= xen_pstart))
djm@4282 120
awilliam@11606 121 extern void *xen_heap_start;
awilliam@11606 122 #define __pickle(a) ((unsigned long)a - (unsigned long)xen_heap_start)
awilliam@11606 123 #define __unpickle(a) (void *)(a + xen_heap_start)
awilliam@11606 124
awilliam@11606 125 static inline struct domain *unpickle_domptr(u64 _d)
awilliam@11606 126 { return (_d == 0) ? NULL : __unpickle(_d); }
djm@5452 127 static inline u32 pickle_domptr(struct domain *_d)
awilliam@11606 128 { return (_d == NULL) ? 0 : (u32)__pickle(_d); }
djm@4282 129
djm@4282 130 #define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
djm@4282 131 #define page_set_owner(_p, _d) ((_p)->u.inuse._domain = pickle_domptr(_d))
djm@4282 132
awilliam@10000 133 #define XENSHARE_writable 0
awilliam@10000 134 #define XENSHARE_readonly 1
awilliam@10000 135 void share_xen_page_with_guest(struct page_info *page,
awilliam@10000 136 struct domain *d, int readonly);
awilliam@10151 137 void share_xen_page_with_privileged_guests(struct page_info *page,
awilliam@10151 138 int readonly);
djm@5452 139
kaf24@8726 140 extern struct page_info *frame_table;
iap10@3833 141 extern unsigned long frame_table_size;
iap10@3833 142 extern struct list_head free_list;
iap10@3833 143 extern spinlock_t free_list_lock;
iap10@3833 144 extern unsigned int free_pfns;
iap10@3833 145 extern unsigned long max_page;
djm@4282 146
djm@4282 147 extern void __init init_frametable(void);
iap10@3833 148 void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
iap10@3833 149
kaf24@8726 150 static inline void put_page(struct page_info *page)
iap10@3833 151 {
djm@5452 152 u32 nx, x, y = page->count_info;
djm@5452 153
djm@5452 154 do {
djm@5452 155 x = y;
djm@5452 156 nx = x - 1;
djm@5452 157 }
awilliam@10266 158 while (unlikely((y = cmpxchg_rel(&page->count_info, x, nx)) != x));
djm@5452 159
djm@5452 160 if (unlikely((nx & PGC_count_mask) == 0))
djm@5452 161 free_domheap_page(page);
iap10@3833 162 }
iap10@3833 163
djm@5452 164 /* count_info and ownership are checked atomically. */
kaf24@8726 165 static inline int get_page(struct page_info *page,
iap10@3833 166 struct domain *domain)
iap10@3833 167 {
djm@5452 168 u64 x, nx, y = *((u64*)&page->count_info);
djm@5452 169 u32 _domain = pickle_domptr(domain);
djm@5452 170
djm@5452 171 do {
djm@5452 172 x = y;
djm@5452 173 nx = x + 1;
djm@5452 174 if (unlikely((x & PGC_count_mask) == 0) || /* Not allocated? */
djm@5452 175 unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
djm@5452 176 unlikely((x >> 32) != _domain)) { /* Wrong owner? */
awilliam@9398 177
kaf24@12038 178 gdprintk(XENLOG_INFO, "Error pfn %lx: rd=%p, od=%p, caf=%016lx, taf=%"
awilliam@9398 179 PRtype_info "\n", page_to_mfn(page), domain,
awilliam@9398 180 unpickle_domptr(x >> 32), x, page->u.inuse.type_info);
djm@5452 181 return 0;
djm@5452 182 }
djm@5452 183 }
awilliam@10266 184 while(unlikely((y = cmpxchg_acq((u64*)&page->count_info, x, nx)) != x));
djm@5452 185 return 1;
iap10@3833 186 }
iap10@3833 187
awilliam@9162 188 extern void put_page_type(struct page_info *page);
awilliam@9162 189 extern int get_page_type(struct page_info *page, u32 type);
awilliam@9162 190
kaf24@8726 191 static inline void put_page_and_type(struct page_info *page)
djm@5483 192 {
djm@5483 193 put_page_type(page);
djm@5483 194 put_page(page);
djm@5483 195 }
djm@5483 196
djm@5483 197
kaf24@8726 198 static inline int get_page_and_type(struct page_info *page,
djm@5483 199 struct domain *domain,
djm@5483 200 u32 type)
djm@5483 201 {
djm@5483 202 int rc = get_page(page, domain);
djm@5483 203
djm@5483 204 if ( likely(rc) && unlikely(!get_page_type(page, type)) )
djm@5483 205 {
djm@5483 206 put_page(page);
djm@5483 207 rc = 0;
djm@5483 208 }
djm@5483 209
djm@5483 210 return rc;
djm@5483 211 }
djm@5452 212
kfraser@10823 213 static inline int page_is_removable(struct page_info *page)
kfraser@10823 214 {
kfraser@10823 215 return ((page->count_info & PGC_count_mask) == 2);
kfraser@10823 216 }
kfraser@10823 217
iap10@3833 218 #define set_machinetophys(_mfn, _pfn) do { } while(0);
iap10@3833 219
djm@4282 220 #ifdef MEMORY_GUARD
djm@4282 221 void *memguard_init(void *heap_start);
djm@4282 222 void memguard_guard_stack(void *p);
djm@4282 223 void memguard_guard_range(void *p, unsigned long l);
djm@4282 224 void memguard_unguard_range(void *p, unsigned long l);
djm@4282 225 #else
djm@4282 226 #define memguard_init(_s) (_s)
djm@4282 227 #define memguard_guard_stack(_p) ((void)0)
djm@4282 228 #define memguard_guard_range(_p,_l) ((void)0)
djm@4282 229 #define memguard_unguard_range(_p,_l) ((void)0)
djm@4282 230 #endif
djm@4282 231
djm@6462 232 // prototypes of miscellaneous memory functions
awilliam@9011 233 //unsigned long __get_free_pages(unsigned int mask, unsigned int order);
awilliam@9271 234 //void __free_pages(struct page_info *page, unsigned int order);
djm@6462 235 void *pgtable_quicklist_alloc(void);
djm@6462 236 void pgtable_quicklist_free(void *pgtable_entry);
djm@6462 237
iap10@3833 238 // FOLLOWING FROM linux-2.6.7/include/mm.h
iap10@3833 239
iap10@3833 240 /*
iap10@3833 241 * This struct defines a VMM memory area. There is one of these
iap10@3833 242 * per VM-area/task. A VM area is any part of the process virtual memory
iap10@3833 243 * space that has a special rule for the page-fault handlers (i.e. a shared
iap10@3833 244 * library, the executable area, etc.).
iap10@3833 245 */
iap10@3833 246 struct vm_area_struct {
iap10@3833 247 struct mm_struct * vm_mm; /* The address space we belong to. */
iap10@3833 248 unsigned long vm_start; /* Our start address within vm_mm. */
iap10@3833 249 unsigned long vm_end; /* The first byte after our end address
iap10@3833 250 within vm_mm. */
iap10@3833 251
iap10@3833 252 /* linked list of VM areas per task, sorted by address */
iap10@3833 253 struct vm_area_struct *vm_next;
iap10@3833 254
iap10@3833 255 pgprot_t vm_page_prot; /* Access permissions of this VMA. */
iap10@3833 256 unsigned long vm_flags; /* Flags, listed below. */
iap10@3833 257
iap10@3833 258 #ifndef XEN
iap10@3833 259 struct rb_node vm_rb;
iap10@3833 260
iap10@3833 261 // XEN doesn't need all the backing store stuff
iap10@3833 262 /*
iap10@3833 263 * For areas with an address space and backing store,
iap10@3833 264 * linkage into the address_space->i_mmap prio tree, or
iap10@3833 265 * linkage to the list of like vmas hanging off its node, or
iap10@3833 266 * linkage of vma in the address_space->i_mmap_nonlinear list.
iap10@3833 267 */
iap10@3833 268 union {
iap10@3833 269 struct {
iap10@3833 270 struct list_head list;
iap10@3833 271 void *parent; /* aligns with prio_tree_node parent */
iap10@3833 272 struct vm_area_struct *head;
iap10@3833 273 } vm_set;
iap10@3833 274
iap10@3833 275 struct prio_tree_node prio_tree_node;
iap10@3833 276 } shared;
iap10@3833 277
iap10@3833 278 /*
iap10@3833 279 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
iap10@3833 280 * list, after a COW of one of the file pages. A MAP_SHARED vma
iap10@3833 281 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
iap10@3833 282 * or brk vma (with NULL file) can only be in an anon_vma list.
iap10@3833 283 */
iap10@3833 284 struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
iap10@3833 285 struct anon_vma *anon_vma; /* Serialized by page_table_lock */
iap10@3833 286
iap10@3833 287 /* Function pointers to deal with this struct. */
iap10@3833 288 struct vm_operations_struct * vm_ops;
iap10@3833 289
iap10@3833 290 /* Information about our backing store: */
iap10@3833 291 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
iap10@3833 292 units, *not* PAGE_CACHE_SIZE */
iap10@3833 293 struct file * vm_file; /* File we map to (can be NULL). */
iap10@3833 294 void * vm_private_data; /* was vm_pte (shared mem) */
iap10@3833 295
iap10@3833 296 #ifdef CONFIG_NUMA
iap10@3833 297 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
iap10@3833 298 #endif
iap10@3833 299 #endif
iap10@3833 300 };
iap10@3833 301 /*
iap10@3833 302 * vm_flags..
iap10@3833 303 */
iap10@3833 304 #define VM_READ 0x00000001 /* currently active flags */
iap10@3833 305 #define VM_WRITE 0x00000002
iap10@3833 306 #define VM_EXEC 0x00000004
iap10@3833 307 #define VM_SHARED 0x00000008
iap10@3833 308
iap10@3833 309 #define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
iap10@3833 310 #define VM_MAYWRITE 0x00000020
iap10@3833 311 #define VM_MAYEXEC 0x00000040
iap10@3833 312 #define VM_MAYSHARE 0x00000080
iap10@3833 313
iap10@3833 314 #define VM_GROWSDOWN 0x00000100 /* general info on the segment */
iap10@3833 315 #define VM_GROWSUP 0x00000200
iap10@3833 316 #define VM_SHM 0x00000400 /* shared memory area, don't swap out */
iap10@3833 317 #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
iap10@3833 318
iap10@3833 319 #define VM_EXECUTABLE 0x00001000
iap10@3833 320 #define VM_LOCKED 0x00002000
iap10@3833 321 #define VM_IO 0x00004000 /* Memory mapped I/O or similar */
iap10@3833 322
iap10@3833 323 /* Used by sys_madvise() */
iap10@3833 324 #define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
iap10@3833 325 #define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
iap10@3833 326
iap10@3833 327 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
iap10@3833 328 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
iap10@3833 329 #define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */
iap10@3833 330 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
iap10@3833 331 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
iap10@3833 332 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
iap10@3833 333
iap10@3833 334 #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
iap10@3833 335 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
iap10@3833 336 #endif
iap10@3833 337
iap10@3833 338 #ifdef CONFIG_STACK_GROWSUP
iap10@3833 339 #define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
iap10@3833 340 #else
iap10@3833 341 #define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
iap10@3833 342 #endif
iap10@3833 343
djm@6458 344 #if 0 /* removed when rebasing to 2.6.13 */
iap10@3833 345 /*
iap10@3833 346 * The zone field is never updated after free_area_init_core()
iap10@3833 347 * sets it, so none of the operations on it need to be atomic.
iap10@3833 348 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
iap10@3833 349 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
iap10@3833 350 */
iap10@3833 351 #define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
iap10@3833 352 #define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)
iap10@3833 353
awilliam@9271 354 static inline unsigned long page_zonenum(struct page_info *page)
iap10@3833 355 {
iap10@3833 356 return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
iap10@3833 357 }
awilliam@9271 358 static inline unsigned long page_to_nid(struct page_info *page)
iap10@3833 359 {
iap10@3833 360 return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
iap10@3833 361 }
iap10@3833 362
iap10@3833 363 struct zone;
iap10@3833 364 extern struct zone *zone_table[];
iap10@3833 365
awilliam@9271 366 static inline struct zone *page_zone(struct page_info *page)
iap10@3833 367 {
iap10@3833 368 return zone_table[page->flags >> NODEZONE_SHIFT];
iap10@3833 369 }
iap10@3833 370
awilliam@9271 371 static inline void set_page_zone(struct page_info *page, unsigned long nodezone_num)
iap10@3833 372 {
iap10@3833 373 page->flags &= ~(~0UL << NODEZONE_SHIFT);
iap10@3833 374 page->flags |= nodezone_num << NODEZONE_SHIFT;
iap10@3833 375 }
djm@6458 376 #endif
iap10@3833 377
iap10@3833 378 #ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
iap10@3833 379 extern unsigned long max_mapnr;
iap10@3833 380 #endif
iap10@3833 381
awilliam@9271 382 static inline void *lowmem_page_address(struct page_info *page)
iap10@3833 383 {
kaf24@8726 384 return __va(page_to_mfn(page) << PAGE_SHIFT);
iap10@3833 385 }
iap10@3833 386
iap10@3833 387 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
iap10@3833 388 #define HASHED_PAGE_VIRTUAL
iap10@3833 389 #endif
iap10@3833 390
iap10@3833 391 #if defined(WANT_PAGE_VIRTUAL)
iap10@3833 392 #define page_address(page) ((page)->virtual)
iap10@3833 393 #define set_page_address(page, address) \
iap10@3833 394 do { \
iap10@3833 395 (page)->virtual = (address); \
iap10@3833 396 } while(0)
iap10@3833 397 #define page_address_init() do { } while(0)
iap10@3833 398 #endif
iap10@3833 399
iap10@3833 400 #if defined(HASHED_PAGE_VIRTUAL)
awilliam@9271 401 void *page_address(struct page_info *page);
awilliam@9271 402 void set_page_address(struct page_info *page, void *virtual);
iap10@3833 403 void page_address_init(void);
iap10@3833 404 #endif
iap10@3833 405
iap10@3833 406 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
iap10@3833 407 #define page_address(page) lowmem_page_address(page)
iap10@3833 408 #define set_page_address(page, address) do { } while(0)
iap10@3833 409 #define page_address_init() do { } while(0)
iap10@3833 410 #endif
iap10@3833 411
iap10@3833 412
iap10@3833 413 #ifndef CONFIG_DEBUG_PAGEALLOC
iap10@3833 414 static inline void
awilliam@9271 415 kernel_map_pages(struct page_info *page, int numpages, int enable)
iap10@3833 416 {
iap10@3833 417 }
iap10@3833 418 #endif
iap10@3833 419
iap10@3833 420 extern unsigned long num_physpages;
iap10@3833 421 extern unsigned long totalram_pages;
iap10@3833 422 extern int nr_swap_pages;
iap10@3833 423
awilliam@10240 424 extern void alloc_dom_xen_and_dom_io(void);
awilliam@12524 425 extern void mm_teardown(struct domain* d);
awilliam@12524 426 extern void mm_final_teardown(struct domain* d);
awilliam@10240 427 extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
awilliam@10240 428 extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
awilliam@11726 429 extern int __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);
awilliam@10240 430 extern void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
awilliam@10240 431 extern void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
awilliam@10423 432 struct p2m_entry;
awilliam@10423 433 extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr, struct p2m_entry* entry);
awilliam@10255 434 extern void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr);
awilliam@10570 435 extern volatile pte_t *lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr);
awilliam@10240 436 extern unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr, unsigned long size);
awilliam@10240 437 extern unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size, unsigned long flags);
awilliam@10250 438 int domain_page_mapped(struct domain *d, unsigned long mpaddr);
awilliam@10250 439 int efi_mmio(unsigned long physaddr, unsigned long size);
awilliam@10240 440 extern unsigned long ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
awilliam@10240 441 extern unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3);
awilliam@10240 442 extern unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order);
awilliam@10240 443 extern unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned long flags, domid_t domid);
awilliam@12794 444 extern unsigned long dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn, unsigned long gmfn, unsigned long flags, domid_t domid);
awilliam@11726 445 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
awilliam@11726 446 extern void expose_p2m_init(void);
awilliam@11726 447 extern unsigned long dom0vp_expose_p2m(struct domain* d, unsigned long conv_start_gpfn, unsigned long assign_start_gpfn, unsigned long expose_size, unsigned long granule_pfn);
awilliam@11726 448 #else
awilliam@11726 449 #define expose_p2m_init() do { } while (0)
awilliam@11726 450 #define dom0vp_expose_p2m(d, conv_start_gpfn, assign_start_gpfn, expose_size, granule_pfn) (-ENOSYS)
awilliam@11726 451 #endif
awilliam@10240 452
awilliam@10420 453 extern volatile unsigned long *mpt_table;
awilliam@9002 454 extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
awilliam@10423 455 extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps, struct p2m_entry* entry);
adsharma@4993 456 #define machine_to_phys_mapping mpt_table
adsharma@4993 457
awilliam@9082 458 #define INVALID_M2P_ENTRY (~0UL)
awilliam@9082 459 #define VALID_M2P(_e) (!((_e) & (1UL<<63)))
kaf24@8708 460
kaf24@8736 461 #define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
kaf24@8736 462 #define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)])
kaf24@8708 463
adsharma@4993 464 /* If the pmt table is provided by the control panel later, we need __get_user
adsharma@4993 465 * here. However, if it's allocated by the HV, we should access it directly.
adsharma@4993 466 */
adsharma@4993 467
kaf24@8726 468 #define mfn_to_gmfn(_d, mfn) \
awilliam@10264 469 get_gpfn_from_mfn(mfn)
adsharma@4993 470
kaf24@8726 471 #define gmfn_to_mfn(_d, gpfn) \
kaf24@8726 472 gmfn_to_mfn_foreign((_d), (gpfn))
djm@5455 473
djm@5455 474 #define __gpfn_invalid(_d, gpfn) \
awilliam@10423 475 (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL) & GPFN_INV_MASK)
djm@5455 476
kaf24@8726 477 #define __gmfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)
djm@5455 478
djm@5455 479 /* Return I/O type if true */
djm@5455 480 #define __gpfn_is_io(_d, gpfn) \
awilliam@9011 481 ({ \
awilliam@9011 482 u64 pte, ret=0; \
awilliam@10423 483 pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL); \
awilliam@9011 484 if(!(pte&GPFN_INV_MASK)) \
awilliam@9011 485 ret = pte & GPFN_IO_MASK; \
awilliam@9011 486 ret; \
awilliam@9011 487 })
djm@5455 488
djm@5455 489 #define __gpfn_is_mem(_d, gpfn) \
awilliam@9011 490 ({ \
awilliam@9011 491 u64 pte, ret=0; \
awilliam@10423 492 pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL); \
awilliam@9011 493 if((!(pte&GPFN_INV_MASK))&&((pte & GPFN_IO_MASK)==GPFN_MEM)) \
awilliam@9011 494 ret = 1; \
awilliam@9011 495 ret; \
awilliam@9011 496 })
djm@5455 497
djm@5455 498
djm@7333 499 #define __gpa_to_mpa(_d, gpa) \
kaf24@8726 500 ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
adsharma@4993 501
awilliam@11932 502 #define __mpa_to_gpa(madr) \
awilliam@11932 503 ((get_gpfn_from_mfn((madr) >> PAGE_SHIFT) << PAGE_SHIFT) | \
awilliam@11932 504 ((madr) & ~PAGE_MASK))
awilliam@11932 505
kaf24@8059 506 /* Arch-specific portion of memory_op hypercall. */
awilliam@11336 507 long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
kaf24@8059 508
kfraser@10418 509 int steal_page(
kfraser@10418 510 struct domain *d, struct page_info *page, unsigned int memflags);
kfraser@10418 511
iap10@3833 512 #endif /* __ASM_IA64_MM_H__ */