ia64/xen-unstable: xen/include/asm-ia64/mm.h @ 6462:af3750d1ec53

Bug fixes from Kevin (x2) and Anthony:
  - Missing prototypes (Kevin)
  - Bad n_rid_blocks computation (Anthony)
  - Bad pte when single-entry dtlb lookup is successful (Kevin)

author:   djm@kirby.fc.hp.com
date:     Fri Sep 02 11:59:08 2005 -0600
parents:  68d8a0a1aeb7
children: bf3fdeeba48b
#ifndef __ASM_IA64_MM_H__
#define __ASM_IA64_MM_H__

#include <xen/config.h>
#ifdef LINUX_2_6
#include <xen/gfp.h>
#endif
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <linux/rbtree.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <public/xen.h>

/*
 * The following is for page_alloc.c.
 */

typedef unsigned long page_flags_t;

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 *  1. 'struct pfn_info' contains a 'struct list_head list'.
 *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

#define PRtype_info "08x"

struct page
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use by a domain. */
        struct {
            /* Owner of this page. */
            u32 _domain;
            /* Type reference count and various PGT_xxx flags and fields. */
            u32 type_info;
        } inuse;

        /* Page is on a free list. */
        struct {
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

    // The following fields were added so that Linux code compiles.
    page_flags_t flags;
    atomic_t _count;
    struct list_head lru;   // is this the same as "list" above?
};

#define set_page_count(p,v)  atomic_set(&(p)->_count, v - 1)

/* Only a small set of flags is defined so far on IA-64. */

/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
#define PGT_writeable_page  (5<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (5<<29) /* Bits 29-31. */

/* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1<<_PGT_validated)
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         27
#define PGT_pinned          (1U<<_PGT_pinned)

/* 27-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1U<<27)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1U<<_PGC_allocated)
/* Set when the page is used as a page table. */
#define _PGC_page_table     30
#define PGC_page_table      (1U<<_PGC_page_table)
/* 30-bit count of references to this frame. */
#define PGC_count_mask      ((1U<<30)-1)
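
/*
 * Illustrative sketch, not part of the original header: how the PGT_* and
 * PGC_* definitions above are typically decoded.  The mutually exclusive
 * page type lives in the top bits of type_info, the type-use count in its
 * low bits, and the general reference count in the low bits of count_info.
 * The example_* helper is hypothetical and kept compiled out.
 */
#if 0   /* example only */
static inline int example_is_writable_type(struct page *pg)
{
    u32 type  = pg->u.inuse.type_info & PGT_type_mask;   /* which type?    */
    u32 tuses = pg->u.inuse.type_info & PGT_count_mask;  /* type refcount  */
    u32 refs  = pg->count_info & PGC_count_mask;         /* general refs   */

    return (type == PGT_writeable_page) && (tuses > 0) && (refs > 0);
}
#endif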
#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
                                 && (page_to_phys(_pfn) >= xen_pstart))

static inline struct domain *unpickle_domptr(u32 _d)
{ return (_d == 0) ? NULL : __va(_d); }
static inline u32 pickle_domptr(struct domain *_d)
{ return (_d == NULL) ? 0 : (u32)__pa(_d); }

#define page_get_owner(_p)      (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p, _d)  ((_p)->u.inuse._domain = pickle_domptr(_d))
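
/*
 * Illustrative sketch, not part of the original header: the owner field is
 * "pickled" by storing the low 32 bits of the domain structure's physical
 * address, so setting and then reading the owner round-trips as long as the
 * domain structure lives at a physical address that fits in 32 bits (which
 * is what pickle_domptr() assumes).  The example_* helper is hypothetical
 * and kept compiled out.
 */
#if 0   /* example only */
static inline int example_owner_roundtrip(struct page *pg, struct domain *d)
{
    page_set_owner(pg, d);              /* stores (u32)__pa(d)           */
    return page_get_owner(pg) == d;     /* __va((u32)__pa(d)) yields d   */
}
#endif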
/* Dummy now */
#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) do { } while (0)

extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;

#ifdef CONFIG_VIRTUAL_MEM_MAP
void __init init_frametable(void *frametable_vstart, unsigned long nr_pages);
#else
extern void __init init_frametable(void);
#endif
void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);

static inline void put_page(struct pfn_info *page)
{
#ifdef CONFIG_VTI   // doesn't work with non-VTI in grant tables yet
    u32 nx, x, y = page->count_info;

    /* Atomically decrement the reference count. */
    do {
        x  = y;
        nx = x - 1;
    }
    while (unlikely((y = cmpxchg(&page->count_info, x, nx)) != x));

    /* Free the page once the last reference is gone. */
    if (unlikely((nx & PGC_count_mask) == 0))
        free_domheap_page(page);
#endif
}

/* count_info and ownership are checked atomically. */
static inline int get_page(struct pfn_info *page,
                           struct domain *domain)
{
#ifdef CONFIG_VTI
    /* Read count_info and the owner field as a single 64-bit value. */
    u64 x, nx, y = *((u64*)&page->count_info);
    u32 _domain = pickle_domptr(domain);

    do {
        x  = y;
        nx = x + 1;
        if (unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
            unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
            unlikely((x >> 32) != _domain)) {       /* Wrong owner? */
            DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%016lx, taf=%08x\n",
                    page_to_pfn(page), domain, unpickle_domptr(_domain),
                    x, page->u.inuse.type_info);
            return 0;
        }
    }
    /* 64-bit compare-and-swap covering both count_info and _domain. */
    while (unlikely((y = cmpxchg((u64 *)&page->count_info, x, nx)) != x));
#endif
    return 1;
}

/* No type info now */
#define put_page_type(page)
#define get_page_type(page, type) 1

static inline void put_page_and_type(struct pfn_info *page)
{
    put_page_type(page);
    put_page(page);
}

static inline int get_page_and_type(struct pfn_info *page,
                                    struct domain *domain,
                                    u32 type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
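
/*
 * Illustrative sketch, not part of the original header: the usual pairing of
 * the helpers above.  A general reference (and, once type tracking exists on
 * IA-64, a type reference) is taken before touching a guest frame and dropped
 * again afterwards.  The example_* helper is hypothetical and kept compiled out.
 */
#if 0   /* example only */
static inline void example_use_frame(struct pfn_info *pg, struct domain *d)
{
    if (!get_page_and_type(pg, d, PGT_writeable_page))
        return;                 /* not allocated, wrong owner, or count overflow */

    /* ... safely access the frame here ... */

    put_page_and_type(pg);      /* drop the references taken above */
}
#endif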
#define set_machinetophys(_mfn, _pfn) do { } while(0)

#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s)              (_s)
#define memguard_guard_stack(_p)       ((void)0)
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)
#endif

// prototypes of miscellaneous memory routines
unsigned long __get_free_pages(unsigned int mask, unsigned int order);
void __free_pages(struct page *page, unsigned int order);
void *pgtable_quicklist_alloc(void);
void pgtable_quicklist_free(void *pgtable_entry);

// FOLLOWING FROM linux-2.6.7/include/mm.h

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
    struct mm_struct * vm_mm;   /* The address space we belong to. */
    unsigned long vm_start;     /* Our start address within vm_mm. */
    unsigned long vm_end;       /* The first byte after our end address
                                   within vm_mm. */

    /* linked list of VM areas per task, sorted by address */
    struct vm_area_struct *vm_next;

    pgprot_t vm_page_prot;      /* Access permissions of this VMA. */
    unsigned long vm_flags;     /* Flags, listed below. */

#ifndef XEN
    struct rb_node vm_rb;

    // XEN doesn't need all the backing store stuff
    /*
     * For areas with an address space and backing store,
     * linkage into the address_space->i_mmap prio tree, or
     * linkage to the list of like vmas hanging off its node, or
     * linkage of vma in the address_space->i_mmap_nonlinear list.
     */
    union {
        struct {
            struct list_head list;
            void *parent;   /* aligns with prio_tree_node parent */
            struct vm_area_struct *head;
        } vm_set;

        struct prio_tree_node prio_tree_node;
    } shared;

    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages.  A MAP_SHARED vma
     * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
    struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

    /* Function pointers to deal with this struct. */
    struct vm_operations_struct * vm_ops;

    /* Information about our backing store: */
    unsigned long vm_pgoff;     /* Offset (within vm_file) in PAGE_SIZE
                                   units, *not* PAGE_CACHE_SIZE */
    struct file * vm_file;      /* File we map to (can be NULL). */
    void * vm_private_data;     /* was vm_pte (shared mem) */

#ifdef CONFIG_NUMA
    struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
#endif
};

/*
 * vm_flags..
 */
#define VM_READ         0x00000001  /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

#define VM_MAYREAD      0x00000010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100  /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_SHM          0x00000400  /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x00000800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000  /* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000  /* App will access data sequentially */
#define VM_RAND_READ    0x00010000  /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000  /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000  /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000  /* Don't unmap it from swap_out */
#define VM_ACCOUNT      0x00100000  /* Is a VM accounted object */
#define VM_HUGETLB      0x00400000  /* Huge TLB Page VM */
#define VM_NONLINEAR    0x00800000  /* Is non-linear (remap_file_pages) */

#ifndef VM_STACK_DEFAULT_FLAGS      /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#if 0   /* removed when rebasing to 2.6.13 */
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
#define NODEZONE(node, zone)    ((node << ZONES_SHIFT) | zone)

static inline unsigned long page_zonenum(struct page *page)
{
    return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
}
static inline unsigned long page_to_nid(struct page *page)
{
    return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page *page)
{
    return zone_table[page->flags >> NODEZONE_SHIFT];
}

static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
{
    page->flags &= ~(~0UL << NODEZONE_SHIFT);
    page->flags |= nodezone_num << NODEZONE_SHIFT;
}
#endif

#ifndef CONFIG_DISCONTIGMEM     /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

static inline void *lowmem_page_address(struct page *page)
{
    return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page)  ((page)->virtual)
#define set_page_address(page, address)     \
    do {                                    \
        (page)->virtual = (address);        \
    } while(0)
#define page_address_init() do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page)  lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init() do { } while(0)
#endif

#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
}
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern int nr_swap_pages;

#ifdef CONFIG_VTI
extern unsigned long *mpt_table;
#undef machine_to_phys_mapping
#define machine_to_phys_mapping mpt_table

#define INVALID_M2P_ENTRY        (~0U)
#define VALID_M2P(_e)            (!((_e) & (1UL<<63)))
#define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))

/* If the pmt table is later provided by the control panel, we would need
 * __get_user here.  However, if it is allocated by the HV, we can access
 * it directly.
 */
#define phys_to_machine_mapping(d, gpfn)                    \
    ((d) == dom0 ? gpfn :                                   \
     (gpfn <= d->arch.max_pfn ? (d)->arch.pmt[(gpfn)] :     \
      INVALID_MFN))

#define __mfn_to_gpfn(_d, mfn)      \
    machine_to_phys_mapping[(mfn)]

#define __gpfn_to_mfn(_d, gpfn)     \
    phys_to_machine_mapping((_d), (gpfn))

#define __gpfn_invalid(_d, gpfn)    \
    (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)

#define __gpfn_valid(_d, gpfn)      !__gpfn_invalid(_d, gpfn)

/* Return the I/O type if true. */
#define __gpfn_is_io(_d, gpfn)      \
    (__gpfn_valid(_d, gpfn) ?       \
     (__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) : 0)

#define __gpfn_is_mem(_d, gpfn)     \
    (__gpfn_valid(_d, gpfn) ?       \
     ((__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) == GPFN_MEM) : 0)

#define __gpa_to_mpa(_d, gpa)       \
    ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
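
/*
 * Illustrative sketch, not part of the original header: how the macros above
 * compose.  A guest physical address is split into a frame number and a page
 * offset, the frame number is translated through phys_to_machine_mapping()
 * (a pass-through for dom0), and the offset is re-attached, as __gpa_to_mpa()
 * does.  The example_* helper is hypothetical and kept compiled out.
 */
#if 0   /* example only */
static inline unsigned long example_gpa_to_mpa(struct domain *d, unsigned long gpa)
{
    unsigned long gpfn = gpa >> PAGE_SHIFT;     /* guest page frame number */

    if (__gpfn_invalid(d, gpfn))                /* no backing machine frame? */
        return INVALID_MFN;

    return (__gpfn_to_mfn(d, gpfn) << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}
#endif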
#endif // CONFIG_VTI

#endif /* __ASM_IA64_MM_H__ */