ia64/xen-unstable

xen/include/asm-ia64/mm.h @ 9756:14a34d811e81

[IA64] introduce P2M conversion

Introduce the P2M conversion functions necessary for the dom0vp model.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author: awilliam@ldap.hp.com
date: Tue Apr 25 13:06:57 2006 -0600
parents: 4ed269e73e95
children: 4fc1110f09c9
#ifndef __ASM_IA64_MM_H__
#define __ASM_IA64_MM_H__

#include <xen/config.h>
#ifdef LINUX_2_6
#include <linux/gfp.h>
#endif
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <public/xen.h>

/*
 * The following is for page_alloc.c.
 */

typedef unsigned long page_flags_t;

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct list_head list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

#define PRtype_info "016lx"

struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            u32 _domain; /* pickled format */
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } __attribute__ ((packed)) inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            u32 order;
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
        } __attribute__ ((packed)) free;

    } u;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

#if 0
    // The following were added for Linux compilation.
    page_flags_t flags;
    atomic_t _count;
    struct list_head lru; // is this the same as the "list" field above?
#endif
};
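
/*
 * Usage sketch (assuming 'page' points at a valid frame_table entry): the
 * union above is discriminated by the allocation state of the frame, e.g.
 *
 *     if ( (page->count_info & PGC_count_mask) != 0 )
 *         owner = page_get_owner(page);     // in-use fields are valid
 *     else
 *         order = page->u.free.order;       // free-list fields are valid
 */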
#define set_page_count(p,v)  atomic_set(&(p)->_count, v - 1)

/*
 * Only a small set of flags is defined on IA-64 so far.
 * IA-64 should eventually use the same definitions as x86_64.
 */
/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
/* Value 5 reserved. See asm-x86/mm.h */
/* Value 6 reserved. See asm-x86/mm.h */
#define PGT_writable_page   (7<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7<<29) /* Bits 29-31. */

/* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1<<_PGT_validated)
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         27
#define PGT_pinned          (1U<<_PGT_pinned)

/* The 27 most significant bits of virt address if this is a page table. */
#define PGT_va_shift        32
#define PGT_va_mask         ((unsigned long)((1U<<28)-1)<<PGT_va_shift)
/* Is the back pointer still mutable (i.e. not fixed yet)? */
#define PGT_va_mutable      ((unsigned long)((1U<<28)-1)<<PGT_va_shift)
/* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
#define PGT_va_unknown      ((unsigned long)((1U<<28)-2)<<PGT_va_shift)

/* 16-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1U<<16)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1U<<_PGC_allocated)
/* Bit 30 reserved. See asm-x86/mm.h */
/* Bit 29 reserved. See asm-x86/mm.h */
/* 29-bit count of references to this frame. */
#define PGC_count_mask      ((1U<<29)-1)
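
/*
 * Bit-layout summary of the definitions above: count_info holds PGC_allocated
 * in bit 31 and the 29-bit reference count in bits 0-28; type_info holds the
 * PGT type in bits 29-31, the validated/pinned flags in bits 28/27, the
 * page-table back pointer from bit 32 upward (PGT_va_*), and the 16-bit
 * type-use count in bits 0-15. Minimal sketch of a type test (with a
 * hypothetical 'page'):
 *
 *     if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
 *         ... the frame has writable guest mappings ...
 */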
#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_maddr(_pfn) < xenheap_phys_end) \
                                 && (page_to_maddr(_pfn) >= xen_pstart))

static inline struct domain *unpickle_domptr(u32 _d)
{ return (_d == 0) ? NULL : __va(_d); }

static inline u32 pickle_domptr(struct domain *_d)
{ return (_d == NULL) ? 0 : (u32)__pa(_d); }

#define page_get_owner(_p)      (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p, _d)  ((_p)->u.inuse._domain = pickle_domptr(_d))
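
/*
 * A domain pointer is "pickled" into the 32-bit _domain field by storing its
 * physical address (assuming the domain structure lives at a physical address
 * that fits in 32 bits); unpickling maps it back with __va(). Minimal sketch
 * (with a hypothetical in-use 'page'):
 *
 *     struct domain *owner = page_get_owner(page);
 *     if ( owner != NULL )
 *         page_set_owner(page, owner);   // round-trips through pickling
 */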
/* Dummy for now */
#define share_xen_page_with_guest(p, d, r) do { } while (0)
#define share_xen_page_with_privileged_guests(p, r) do { } while (0)

extern struct page_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;

extern void __init init_frametable(void);
void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);

extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);

static inline void put_page(struct page_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x = y;
        nx = x - 1;
    }
    while (unlikely((y = cmpxchg(&page->count_info, x, nx)) != x));

    if (unlikely((nx & PGC_count_mask) == 0))
        free_domheap_page(page);
}

/* count_info and ownership are checked atomically. */
static inline int get_page(struct page_info *page,
                           struct domain *domain)
{
    u64 x, nx, y = *((u64*)&page->count_info);
    u32 _domain = pickle_domptr(domain);

    do {
        x = y;
        nx = x + 1;
        if (unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
            unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
            unlikely((x >> 32) != _domain)) {       /* Wrong owner? */

            DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%016lx, taf=%"
                    PRtype_info "\n", page_to_mfn(page), domain,
                    unpickle_domptr(x >> 32), x, page->u.inuse.type_info);
            return 0;
        }
    }
    while (unlikely((y = cmpxchg((u64*)&page->count_info, x, nx)) != x));

    return 1;
}
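
/*
 * get_page() atomically takes a reference while checking, in a single 64-bit
 * cmpxchg, that the frame is allocated and owned by the expected domain;
 * put_page() drops the reference and frees the frame when the count reaches
 * zero. Minimal usage sketch (with hypothetical 'page' and 'd'):
 *
 *     if ( get_page(page, d) )
 *     {
 *         ... access the frame on behalf of domain d ...
 *         put_page(page);
 *     }
 */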
extern void put_page_type(struct page_info *page);
extern int get_page_type(struct page_info *page, u32 type);

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}


static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    u32 type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
#define set_machinetophys(_mfn, _pfn) do { } while(0);

#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s)              (_s)
#define memguard_guard_stack(_p)       ((void)0)
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)
#endif
// prototypes of miscellaneous memory routines
//unsigned long __get_free_pages(unsigned int mask, unsigned int order);
//void __free_pages(struct page_info *page, unsigned int order);
void *pgtable_quicklist_alloc(void);
void pgtable_quicklist_free(void *pgtable_entry);
// FOLLOWING FROM linux-2.6.7/include/mm.h

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
    struct mm_struct * vm_mm;   /* The address space we belong to. */
    unsigned long vm_start;     /* Our start address within vm_mm. */
    unsigned long vm_end;       /* The first byte after our end address
                                   within vm_mm. */

    /* linked list of VM areas per task, sorted by address */
    struct vm_area_struct *vm_next;

    pgprot_t vm_page_prot;      /* Access permissions of this VMA. */
    unsigned long vm_flags;     /* Flags, listed below. */

#ifndef XEN
    struct rb_node vm_rb;

    // XEN doesn't need all the backing store stuff
    /*
     * For areas with an address space and backing store,
     * linkage into the address_space->i_mmap prio tree, or
     * linkage to the list of like vmas hanging off its node, or
     * linkage of vma in the address_space->i_mmap_nonlinear list.
     */
    union {
        struct {
            struct list_head list;
            void *parent;   /* aligns with prio_tree_node parent */
            struct vm_area_struct *head;
        } vm_set;

        struct prio_tree_node prio_tree_node;
    } shared;

    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages. A MAP_SHARED vma
     * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
    struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

    /* Function pointers to deal with this struct. */
    struct vm_operations_struct * vm_ops;

    /* Information about our backing store: */
    unsigned long vm_pgoff;     /* Offset (within vm_file) in PAGE_SIZE
                                   units, *not* PAGE_CACHE_SIZE */
    struct file * vm_file;      /* File we map to (can be NULL). */
    void * vm_private_data;     /* was vm_pte (shared mem) */

#ifdef CONFIG_NUMA
    struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
#endif
};
/*
 * vm_flags..
 */
#define VM_READ         0x00000001  /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

#define VM_MAYREAD      0x00000010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100  /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_SHM          0x00000400  /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x00000800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000  /* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000  /* App will access data sequentially */
#define VM_RAND_READ    0x00010000  /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000  /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000  /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000  /* Don't unmap it from swap_out */
#define VM_ACCOUNT      0x00100000  /* Is a VM accounted object */
#define VM_HUGETLB      0x00400000  /* Huge TLB Page VM */
#define VM_NONLINEAR    0x00800000  /* Is non-linear (remap_file_pages) */

#ifndef VM_STACK_DEFAULT_FLAGS  /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif
#if 0   /* removed when rebasing to 2.6.13 */
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)

static inline unsigned long page_zonenum(struct page_info *page)
{
    return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
}

static inline unsigned long page_to_nid(struct page_info *page)
{
    return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page_info *page)
{
    return zone_table[page->flags >> NODEZONE_SHIFT];
}

static inline void set_page_zone(struct page_info *page, unsigned long nodezone_num)
{
    page->flags &= ~(~0UL << NODEZONE_SHIFT);
    page->flags |= nodezone_num << NODEZONE_SHIFT;
}
#endif
#ifndef CONFIG_DISCONTIGMEM    /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

static inline void *lowmem_page_address(struct page_info *page)
{
    return __va(page_to_mfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)      \
    do {                                     \
        (page)->virtual = (address);         \
    } while(0)
#define page_address_init() do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page_info *page);
void set_page_address(struct page_info *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init() do { } while(0)
#endif


#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page_info *page, int numpages, int enable)
{
}
#endif
extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern int nr_swap_pages;

extern unsigned long *mpt_table;
extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps);
extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
#ifdef CONFIG_XEN_IA64_DOM0_VP
extern unsigned long __lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
extern unsigned long ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
#endif
#define machine_to_phys_mapping mpt_table

#define INVALID_M2P_ENTRY        (~0UL)
#define VALID_M2P(_e)            (!((_e) & (1UL<<63)))
#define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))

#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
#define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
/* If the pmt table is provided by the control panel later, we will need
 * __get_user here. However, if it is allocated by the HV, we can access it
 * directly.
 */

#define mfn_to_gmfn(_d, mfn) \
    machine_to_phys_mapping[(mfn)]

#define gmfn_to_mfn(_d, gpfn) \
    gmfn_to_mfn_foreign((_d), (gpfn))
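
/*
 * mpt_table gives the machine-to-physical (M2P) direction, indexed by
 * machine frame number; the physical-to-machine (P2M) direction goes
 * through gmfn_to_mfn_foreign(). Minimal round-trip sketch (with a
 * hypothetical 'mfn' owned by domain 'd'):
 *
 *     unsigned long gpfn = get_gpfn_from_mfn(mfn);
 *     if ( !IS_INVALID_M2P_ENTRY(gpfn) )
 *         ASSERT(gmfn_to_mfn(d, gpfn) == mfn);
 */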
#define __gpfn_invalid(_d, gpfn)                            \
    (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_INV_MASK)

#define __gmfn_valid(_d, gpfn)   !__gpfn_invalid(_d, gpfn)

/* Return the I/O type if the gpfn maps I/O space. */
#define __gpfn_is_io(_d, gpfn)                              \
({                                                          \
    u64 pte, ret = 0;                                       \
    pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT));    \
    if (!(pte & GPFN_INV_MASK))                             \
        ret = pte & GPFN_IO_MASK;                           \
    ret;                                                    \
})

#define __gpfn_is_mem(_d, gpfn)                             \
({                                                          \
    u64 pte, ret = 0;                                       \
    pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT));    \
    if ((!(pte & GPFN_INV_MASK)) && ((pte & GPFN_IO_MASK) == GPFN_MEM)) \
        ret = 1;                                            \
    ret;                                                    \
})

#define __gpa_to_mpa(_d, gpa) \
    ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
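
/*
 * __gpa_to_mpa() translates a guest physical address to a machine physical
 * address by converting the frame number with gmfn_to_mfn() and re-attaching
 * the page offset. Minimal sketch (with a hypothetical 'gpa'):
 *
 *     unsigned long mpa = __gpa_to_mpa(d, gpa);
 *     // mpa == (mfn << PAGE_SHIFT) | (gpa & ~PAGE_MASK)
 */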
/* Arch-specific portion of memory_op hypercall. */
#define arch_memory_op(op, arg) (-ENOSYS)

extern void assign_domain_page(struct domain *d, unsigned long mpaddr,
                               unsigned long physaddr);

#endif /* __ASM_IA64_MM_H__ */