direct-io.hg

view xen/include/asm-ia64/mm.h @ 5511:14b5ff859896

bitkeeper revision 1.1713.2.10 (42b6e03fU-_0SObu_XHzbqce6VSEBw)

Fixed non-CONFIG_VTI change that affects CONFIG-VTI
author djm@kirby.fc.hp.com
date Mon Jun 20 15:26:55 2005 +0000 (2005-06-20)
parents bb1b5a578752
children 78b0596ef957
#ifndef __ASM_IA64_MM_H__
#define __ASM_IA64_MM_H__

#include <xen/config.h>
#ifdef LINUX_2_6
#include <xen/gfp.h>
#endif
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <linux/rbtree.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <public/xen.h>

/*
 * The following is for page_alloc.c.
 */

typedef unsigned long page_flags_t;

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct pfn_info' contains a 'struct list_head list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

struct page
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use by a domain. */
        struct {
            /* Owner of this page. */
            u32 _domain;
            /* Type reference count and various PGT_xxx flags and fields. */
            u32 type_info;
        } inuse;

        /* Page is on a free list. */
        struct {
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

    // The following fields were added so that Linux code compiles.
    page_flags_t flags;
    atomic_t _count;
    struct list_head lru;       // XXX is this the same as 'list' above?
};

#define set_page_count(p,v)     atomic_set(&(p)->_count, (v) - 1)
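
/*
 * Illustrative sketch (not part of the original header): how the buddy
 * allocator metadata above is meant to be read. 'pg' is a hypothetical
 * pointer to the head page of a free chunk.
 */
#if 0
static inline unsigned long example_free_chunk_pages(struct page *pg)
{
    /* PFN_ORDER() is valid only while the page sits on a free list. */
    return 1UL << PFN_ORDER(pg);
}
#endif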

/* Only a small set of flags is defined so far on IA-64. */
/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
#define PGT_writeable_page  (5<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7<<29) /* Bits 29-31. */

/* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1U<<_PGT_validated)
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         27
#define PGT_pinned          (1U<<_PGT_pinned)

/* 27-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1U<<27)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1U<<_PGC_allocated)
/* Set when the page is used as a page table. */
#define _PGC_page_table     30
#define PGC_page_table      (1U<<_PGC_page_table)
/* 30-bit count of references to this frame. */
#define PGC_count_mask      ((1U<<30)-1)
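
/*
 * Illustrative sketch (not part of the original header): how the PGT_/PGC_
 * fields above are meant to be decoded. 'pg' is a hypothetical struct page
 * pointer.
 */
#if 0
static inline int example_is_l1_pagetable(struct page *pg)
{
    /* Mask off the mutually exclusive type bits (29-31) and compare. */
    return (pg->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table;
}

static inline u32 example_type_use_count(struct page *pg)
{
    /* Low 27 bits of type_info count uses of the page as its current type. */
    return pg->u.inuse.type_info & PGT_count_mask;
}
#endif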

#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
                                 && (page_to_phys(_pfn) >= xen_pstart))

static inline struct domain *unpickle_domptr(u32 _d)
{ return (_d == 0) ? NULL : __va(_d); }
static inline u32 pickle_domptr(struct domain *_d)
{ return (_d == NULL) ? 0 : (u32)__pa(_d); }

#define page_get_owner(_p)      (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p, _d)  ((_p)->u.inuse._domain = pickle_domptr(_d))
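
/*
 * Illustrative sketch (not part of the original header): the owner pointer
 * is 'pickled' to a 32-bit physical offset so it fits the u32 _domain field;
 * setting and then getting the owner must round-trip the pointer.
 */
#if 0
static inline void example_owner_roundtrip(struct page *pg, struct domain *d)
{
    page_set_owner(pg, d);              /* stores (u32)__pa(d) */
    BUG_ON(page_get_owner(pg) != d);    /* __va() recovers the pointer */
}
#endif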

/* Dummy now */
#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) do { } while (0)

extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;

#ifdef CONFIG_VIRTUAL_MEM_MAP
void __init init_frametable(void *frametable_vstart, unsigned long nr_pages);
#else
extern void __init init_frametable(void);
#endif
void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);

static inline void put_page(struct pfn_info *page)
{
#ifdef CONFIG_VTI   // doesn't work with non-VTI in grant tables yet
    u32 nx, x, y = page->count_info;

    do {
        x  = y;
        nx = x - 1;
    }
    while (unlikely((y = cmpxchg(&page->count_info, x, nx)) != x));

    if (unlikely((nx & PGC_count_mask) == 0))
        free_domheap_page(page);
#endif
}

/* count_info and ownership are checked atomically. */
static inline int get_page(struct pfn_info *page,
                           struct domain *domain)
{
#ifdef CONFIG_VTI
    u64 x, nx, y = *((u64*)&page->count_info);
    u32 _domain = pickle_domptr(domain);

    do {
        x  = y;
        nx = x + 1;
        if (unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
            unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
            unlikely((x >> 32) != _domain)) {       /* Wrong owner? */
            DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%016lx, taf=%08x\n",
                    page_to_pfn(page), domain, unpickle_domptr(_domain),
                    x, page->u.inuse.type_info);
            return 0;
        }
    }
    while (unlikely((y = cmpxchg((u64*)&page->count_info, x, nx)) != x));
#endif
    return 1;
}
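
/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for temporarily holding a reference on a domain's page. The function name
 * example_access_guest_page() is hypothetical.
 */
#if 0
static inline int example_access_guest_page(struct pfn_info *page,
                                            struct domain *d)
{
    if (!get_page(page, d))   /* fails if free, overflowed, or not owned by d */
        return 0;
    /* ... safely access the page while the reference is held ... */
    put_page(page);           /* may free the page if this was the last ref */
    return 1;
}
#endif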

/* No type info now */
#define put_page_type(page)
#define get_page_type(page, type) 1
static inline void put_page_and_type(struct pfn_info *page)
{
    put_page_type(page);
    put_page(page);
}


static inline int get_page_and_type(struct pfn_info *page,
                                    struct domain *domain,
                                    u32 type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
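
/*
 * Illustrative sketch (not part of the original header): get_page_and_type()
 * composes the general and type reference counts; since the type half is
 * currently a stub on IA-64, it reduces to get_page(). 'pg' and 'd' are
 * hypothetical.
 */
#if 0
static inline void example_pin_as_l1(struct pfn_info *pg, struct domain *d)
{
    if (get_page_and_type(pg, d, PGT_l1_page_table)) {
        /* ... page is referenced and validated as an L1 table ... */
        put_page_and_type(pg);  /* drop both references */
    }
}
#endif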

#define set_machinetophys(_mfn, _pfn) do { } while(0)

#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s)              (_s)
#define memguard_guard_stack(_p)       ((void)0)
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)
#endif

// FOLLOWING FROM linux-2.6.7/include/linux/mm.h

/*
 * This struct defines a VM memory area. There is one of these per
 * VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
    struct mm_struct *vm_mm;    /* The address space we belong to. */
    unsigned long vm_start;     /* Our start address within vm_mm. */
    unsigned long vm_end;       /* The first byte after our end address
                                   within vm_mm. */

    /* linked list of VM areas per task, sorted by address */
    struct vm_area_struct *vm_next;

    pgprot_t vm_page_prot;      /* Access permissions of this VMA. */
    unsigned long vm_flags;     /* Flags, listed below. */

#ifndef XEN
    struct rb_node vm_rb;

    // XEN doesn't need all the backing store stuff
    /*
     * For areas with an address space and backing store,
     * linkage into the address_space->i_mmap prio tree, or
     * linkage to the list of like vmas hanging off its node, or
     * linkage of vma in the address_space->i_mmap_nonlinear list.
     */
    union {
        struct {
            struct list_head list;
            void *parent;       /* aligns with prio_tree_node parent */
            struct vm_area_struct *head;
        } vm_set;

        struct prio_tree_node prio_tree_node;
    } shared;

    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages. A MAP_SHARED vma
     * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_node;  /* Serialized by anon_vma->lock */
    struct anon_vma *anon_vma;       /* Serialized by page_table_lock */

    /* Function pointers to deal with this struct. */
    struct vm_operations_struct *vm_ops;

    /* Information about our backing store: */
    unsigned long vm_pgoff;     /* Offset (within vm_file) in PAGE_SIZE
                                   units, *not* PAGE_CACHE_SIZE */
    struct file *vm_file;       /* File we map to (can be NULL). */
    void *vm_private_data;      /* was vm_pte (shared mem) */

#ifdef CONFIG_NUMA
    struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
#endif
};

/*
 * vm_flags..
 */
#define VM_READ         0x00000001      /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

#define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100      /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_SHM          0x00000400      /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000      /* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
#define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000      /* Don't unmap it from swap_out */
#define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
#define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
#define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */

#ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif
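
/*
 * Illustrative sketch (not part of the original header): vm_flags values are
 * combined with bitwise OR and tested with AND. A typical private read/write
 * data mapping might carry the flags below; purely an example value.
 */
#if 0
#define EXAMPLE_DATA_VM_FLAGS \
    (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_ACCOUNT)
#endif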

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
#define NODEZONE(node, zone) (((node) << ZONES_SHIFT) | (zone))

static inline unsigned long page_zonenum(struct page *page)
{
    return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
}
static inline unsigned long page_to_nid(struct page *page)
{
    return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page *page)
{
    return zone_table[page->flags >> NODEZONE_SHIFT];
}

static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
{
    page->flags &= ~(~0UL << NODEZONE_SHIFT);
    page->flags |= nodezone_num << NODEZONE_SHIFT;
}
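
/*
 * Illustrative sketch (not part of the original header): node and zone are
 * packed into the top bits of page->flags, so encoding with NODEZONE() and
 * decoding with page_to_nid()/page_zonenum() must round-trip.
 */
#if 0
static inline void example_nodezone_roundtrip(struct page *pg)
{
    set_page_zone(pg, NODEZONE(1, 2));  /* node 1, zone 2 */
    BUG_ON(page_to_nid(pg) != 1);
    BUG_ON(page_zonenum(pg) != 2);
}
#endif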

#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

static inline void *lowmem_page_address(struct page *page)
{
    return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)                 \
    do {                                                \
        (page)->virtual = (address);                    \
    } while(0)
#define page_address_init() do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init() do { } while(0)
#endif


#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
}
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern int nr_swap_pages;

#ifdef CONFIG_VTI
extern unsigned long *mpt_table;
#undef machine_to_phys_mapping
#define machine_to_phys_mapping mpt_table

#define INVALID_M2P_ENTRY        (~0UL)
#define VALID_M2P(_e)            (!((_e) & (1UL<<63)))
#define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))
/* If the pmt table is provided by the control panel later, we need __get_user
 * here. However, if it's allocated by the HV, we can access it directly.
 */
#define phys_to_machine_mapping(d, gpfn) \
    ((d) == dom0 ? (gpfn) : \
     ((gpfn) <= (d)->arch.max_pfn ? (d)->arch.pmt[(gpfn)] : \
      INVALID_MFN))

#define __mfn_to_gpfn(_d, mfn) \
    machine_to_phys_mapping[(mfn)]

#define __gpfn_to_mfn(_d, gpfn) \
    phys_to_machine_mapping((_d), (gpfn))

#define __gpfn_invalid(_d, gpfn) \
    (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)

#define __gpfn_valid(_d, gpfn) (!__gpfn_invalid((_d), (gpfn)))

/* Returns the I/O type if the gpfn is valid. */
#define __gpfn_is_io(_d, gpfn) \
    (__gpfn_valid(_d, gpfn) ? \
     (__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) : 0)

#define __gpfn_is_mem(_d, gpfn) \
    (__gpfn_valid(_d, gpfn) ? \
     ((__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) == GPFN_MEM) : 0)


#define __gpa_to_mpa(_d, gpa) \
    ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
#endif // CONFIG_VTI
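
/*
 * Illustrative sketch (not part of the original header): __gpa_to_mpa()
 * translates a guest-physical address by mapping its frame number and
 * re-attaching the page offset. 'd' and 'gpa' are hypothetical inputs;
 * this expands the macro step by step under CONFIG_VTI.
 */
#if 0
static inline unsigned long example_translate_gpa(struct domain *d,
                                                  unsigned long gpa)
{
    unsigned long gpfn = gpa >> PAGE_SHIFT;     /* guest frame number */

    if (!__gpfn_valid(d, gpfn))
        return INVALID_MFN;                     /* no backing machine frame */

    /* Equivalent to __gpa_to_mpa(d, gpa): frame translation + page offset. */
    return (__gpfn_to_mfn(d, gpfn) << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}
#endif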

#endif /* __ASM_IA64_MM_H__ */