ia64/xen-unstable

view xen/include/asm-ia64/mm.h @ 5305:1bc9c3554d61

bitkeeper revision 1.1665 (42a07586HA9yxpF1JLzGs-3hmLbG5g)

More include file cleanup fixes (for CONFIG_VTI)
Also a cpumask cleanup fix
Signed-off-by: Dan Magenheimer <dan.magenheimer@hp.com>
author djm@sportsman.spdomain
date Fri Jun 03 15:21:42 2005 +0000 (2005-06-03)
#ifndef __ASM_IA64_MM_H__
#define __ASM_IA64_MM_H__

#include <xen/config.h>
#ifdef LINUX_2_6
#include <xen/gfp.h>
#endif
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <linux/rbtree.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <public/xen.h>

/*
 * The following is for page_alloc.c.
 */

typedef unsigned long page_flags_t;

/*
 * Per-page-frame information.
 */

//FIXME: This can go away when common/dom0_ops.c is fully arch-independent
#if 0
struct pfn_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use by a domain. */
        struct {
            /* Owner of this page. */
            struct domain *domain;
            /* Reference count and various PGC_xxx flags and fields. */
            u32 count_info;
            /* Type reference count and various PGT_xxx flags and fields. */
            u32 type_info;
        } inuse;

        /* Page is on a free list. */
        struct {
            /* Mask of possibly-tainted TLBs. */
            unsigned long cpu_mask;
            /* Must be at same offset as 'u.inuse.count_flags'. */
            u32 __unavailable;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;
};
#endif

struct page
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use by a domain. */
        struct {
            /* Owner of this page. */
            u64 _domain;
            /* Type reference count and various PGT_xxx flags and fields. */
            u32 type_info;
        } inuse;

        /* Page is on a free list. */
        struct {
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

    // following added for Linux compiling
    page_flags_t flags;
    atomic_t _count;
    struct list_head lru;   // is this the same as above "list"?
};

#define set_page_count(p,v)     atomic_set(&(p)->_count, v - 1)
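
/*
 * Illustrative sketch (not part of the original header): how the two views of
 * the union above might be used.  'pg' is a hypothetical frame; the
 * surrounding allocator logic is omitted.
 */
#if 0
static inline void example_struct_page_usage(struct page *pg)
{
    /* Frame handed out to a guest: the 'inuse' view is live. */
    pg->u.inuse._domain   = 0;      /* owner is pickled via the macros below */
    pg->u.inuse.type_info = 0;      /* no special type yet */
    set_page_count(pg, 1);          /* Linux-compat '_count' helper above */

    /* Frame returned to the allocator: the 'free' view is live instead. */
    pg->u.free.order = 0;           /* head of an order-0 free chunk */
}
#endif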
//FIXME: These can go away when common/dom0_ops.c is fully arch-independent
/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
#define PGT_gdt_page        (5<<29) /* using this page in a GDT? */
#define PGT_ldt_page        (6<<29) /* using this page in an LDT? */
#define PGT_writeable_page  (7<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7<<29) /* Bits 29-31. */

/* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1<<_PGT_validated)

/* 28-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1<<28)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1U<<_PGC_allocated)
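
/*
 * Illustrative sketch (not part of the original header): how the PGT_/PGC_
 * bitfields packed into type_info and count_info are typically decoded.
 * 'pg' is a hypothetical frame.
 */
#if 0
static inline int example_type_info_checks(struct page *pg)
{
    u32 type  = pg->u.inuse.type_info & PGT_type_mask;   /* which use? */
    u32 tcnt  = pg->u.inuse.type_info & PGT_count_mask;  /* uses as that type */
    int valid = !!(pg->u.inuse.type_info & PGT_validated);
    int alloc = !!(pg->count_info & PGC_allocated);      /* still owned by guest? */

    return (type == PGT_l1_page_table) && valid && (tcnt > 0) && alloc;
}
#endif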
#define PFN_ORDER(_pfn)     ((_pfn)->u.free.order)

#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
                                 && (page_to_phys(_pfn) >= xen_pstart))

#define pickle_domptr(_d)   ((u64)(_d))
#define unpickle_domptr(_d) ((struct domain*)(_d))

#define page_get_owner(_p)      (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p, _d)  ((_p)->u.inuse._domain = pickle_domptr(_d))
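
/*
 * Illustrative sketch (not part of the original header): the owner pointer is
 * "pickled" into the u64 _domain field rather than stored as a raw pointer,
 * so reads and writes should go through the accessors above.  'pg' and 'd'
 * are hypothetical.
 */
#if 0
static inline void example_owner_accessors(struct page *pg, struct domain *d)
{
    page_set_owner(pg, d);                    /* stores pickle_domptr(d) */
    if ( page_get_owner(pg) == d && !IS_XEN_HEAP_FRAME(pg) )
    {
        /* frame belongs to 'd' and lives outside the Xen heap */
    }
}
#endif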
extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;

#ifdef CONFIG_VIRTUAL_MEM_MAP
void __init init_frametable(void *frametable_vstart, unsigned long nr_pages);
#else
extern void __init init_frametable(void);
#endif
void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
/* Page reference counting is not implemented yet on ia64; these are stubs. */
static inline void put_page(struct pfn_info *page)
{
    dummy();
}

static inline int get_page(struct pfn_info *page,
                           struct domain *domain)
{
    dummy();
    return 0;   /* stub: no refcounting yet */
}

#define set_machinetophys(_mfn, _pfn) do { } while(0)
#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s)               (_s)
#define memguard_guard_stack(_p)        ((void)0)
#define memguard_guard_range(_p,_l)     ((void)0)
#define memguard_unguard_range(_p,_l)   ((void)0)
#endif
// FOLLOWING FROM linux-2.6.7/include/mm.h

/*
 * This struct defines a memory VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
    struct mm_struct * vm_mm;   /* The address space we belong to. */
    unsigned long vm_start;     /* Our start address within vm_mm. */
    unsigned long vm_end;       /* The first byte after our end address
                                   within vm_mm. */

    /* linked list of VM areas per task, sorted by address */
    struct vm_area_struct *vm_next;

    pgprot_t vm_page_prot;      /* Access permissions of this VMA. */
    unsigned long vm_flags;     /* Flags, listed below. */

#ifndef XEN
    struct rb_node vm_rb;

// XEN doesn't need all the backing store stuff
    /*
     * For areas with an address space and backing store,
     * linkage into the address_space->i_mmap prio tree, or
     * linkage to the list of like vmas hanging off its node, or
     * linkage of vma in the address_space->i_mmap_nonlinear list.
     */
    union {
        struct {
            struct list_head list;
            void *parent;   /* aligns with prio_tree_node parent */
            struct vm_area_struct *head;
        } vm_set;

        struct prio_tree_node prio_tree_node;
    } shared;

    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages.  A MAP_SHARED vma
     * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
    struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

    /* Function pointers to deal with this struct. */
    struct vm_operations_struct * vm_ops;

    /* Information about our backing store: */
    unsigned long vm_pgoff;     /* Offset (within vm_file) in PAGE_SIZE
                                   units, *not* PAGE_CACHE_SIZE */
    struct file * vm_file;      /* File we map to (can be NULL). */
    void * vm_private_data;     /* was vm_pte (shared mem) */

#ifdef CONFIG_NUMA
    struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
#endif
};
/*
 * vm_flags..
 */
#define VM_READ         0x00000001  /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

#define VM_MAYREAD      0x00000010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100  /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_SHM          0x00000400  /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x00000800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000  /* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000  /* App will access data sequentially */
#define VM_RAND_READ    0x00010000  /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000  /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000  /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000  /* Don't unmap it from swap_out */
#define VM_ACCOUNT      0x00100000  /* Is a VM accounted object */
#define VM_HUGETLB      0x00400000  /* Huge TLB Page VM */
#define VM_NONLINEAR    0x00800000  /* Is non-linear (remap_file_pages) */

#ifndef VM_STACK_DEFAULT_FLAGS      /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif
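
/*
 * Illustrative sketch (not part of the original header): how a minimal
 * vm_area_struct might be filled in with the flags above.  'vma', 'mm' and
 * the address range are hypothetical; only fields visible under XEN are
 * touched.
 */
#if 0
static inline void example_setup_stack_vma(struct vm_area_struct *vma,
                                           struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
    vma->vm_mm    = mm;
    vma->vm_start = start;
    vma->vm_end   = end;            /* first byte *after* the area */
    vma->vm_flags = VM_STACK_FLAGS; /* grows-down stack unless CONFIG_STACK_GROWSUP */
    vma->vm_next  = NULL;
}
#endif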
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
#define NODEZONE(node, zone)    ((node << ZONES_SHIFT) | zone)

static inline unsigned long page_zonenum(struct page *page)
{
    return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
}
static inline unsigned long page_to_nid(struct page *page)
{
    return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page *page)
{
    return zone_table[page->flags >> NODEZONE_SHIFT];
}

static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
{
    page->flags &= ~(~0UL << NODEZONE_SHIFT);
    page->flags |= nodezone_num << NODEZONE_SHIFT;
}
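
/*
 * Illustrative sketch (not part of the original header): the node and zone
 * numbers are packed into the top bits of page->flags; NODEZONE() composes
 * them and set_page_zone() installs the packed value, which the accessors
 * above then pull back apart.  'pg', 'nid' and 'zid' are hypothetical.
 */
#if 0
static inline void example_nodezone_roundtrip(struct page *pg,
                                              unsigned long nid,
                                              unsigned long zid)
{
    set_page_zone(pg, NODEZONE(nid, zid));
    /* page_to_nid(pg) now yields nid, and page_zonenum(pg) yields zid. */
}
#endif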
#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

static inline void *lowmem_page_address(struct page *page)
{
    return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page)      ((page)->virtual)
#define set_page_address(page, address)         \
    do {                                        \
        (page)->virtual = (address);            \
    } while(0)
#define page_address_init()     do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page)      lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()     do { } while(0)
#endif
#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
}
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern int nr_swap_pages;
#ifdef CONFIG_VTI
extern unsigned long *mpt_table;
#undef machine_to_phys_mapping
#define machine_to_phys_mapping mpt_table

/* If the pmt table is provided by the control panel later, we need __get_user
 * here. However, if it's allocated by the HV, we should access it directly.
 */
#define phys_to_machine_mapping(d, gpfn) \
    ((d) == dom0 ? gpfn : (d)->arch.pmt[(gpfn)])

#define __mfn_to_gpfn(_d, mfn) \
    machine_to_phys_mapping[(mfn)]

#define __gpfn_to_mfn(_d, gpfn) \
    phys_to_machine_mapping((_d), (gpfn))
#endif // CONFIG_VTI
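
/*
 * Illustrative sketch (not part of the original header): round-tripping a
 * guest frame number through the CONFIG_VTI translation macros above.
 * 'd' and 'gpfn' are hypothetical; dom0 guest frames map 1:1.
 */
#if 0
static inline unsigned long example_vti_translate(struct domain *d,
                                                  unsigned long gpfn)
{
    unsigned long mfn = __gpfn_to_mfn(d, gpfn);  /* guest pfn -> machine frame */
    return __mfn_to_gpfn(d, mfn);                /* machine frame -> guest pfn */
}
#endif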
#endif /* __ASM_IA64_MM_H__ */