xen/include/asm-ia64/mm.h @ 17871:0df3bf8aac1e (ia64/xen-unstable)

[IA64] Trivial compilation fix for breakage caused by c/s 17847:8a0415fac759.

This patch fixes the following compilation error introduced
by c/s 17847:8a0415fac759:

machine_kexec.c: In function 'arch_crash_save_vmcoreinfo':
machine_kexec.c:201: error: 'frametable_pg_dir' undeclared (first use in this function)
machine_kexec.c:201: error: (Each undeclared identifier is reported only once
machine_kexec.c:201: error: for each function it appears in.)
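
For context, arch_crash_save_vmcoreinfo() in machine_kexec.c records the
symbol for the frametable page directory when emitting vmcoreinfo. A
minimal sketch of such a call site (illustrative only, not quoted from
c/s 17847) looks like:

    void arch_crash_save_vmcoreinfo(void)
    {
        VMCOREINFO_SYMBOL(dom_xen);           /* already declared in this header */
        VMCOREINFO_SYMBOL(dom_io);
        VMCOREINFO_SYMBOL(frametable_pg_dir); /* the missing declaration */
    }

The fix is simply to add an extern declaration for frametable_pg_dir to
this header (see the declaration further down).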

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author: Keir Fraser <keir.fraser@citrix.com>
date: Mon Jun 16 16:35:17 2008 +0100
/*
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *                    dom0 vp model support
 */
#ifndef __ASM_IA64_MM_H__
#define __ASM_IA64_MM_H__

#include <xen/config.h>
#ifdef LINUX_2_6
#include <linux/gfp.h>
#endif
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <public/xen.h>

/*
 * The following is for page_alloc.c.
 */

typedef unsigned long page_flags_t;

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct list_head list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

#define PRtype_info "016lx"

struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            u32 _domain; /* pickled format */
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } __attribute__ ((packed)) inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            u32 order;
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
        } __attribute__ ((packed)) free;

    } u;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

#if 0
// following added for Linux compiling
    page_flags_t flags;
    atomic_t _count;
    struct list_head lru; // is this the same as above "list"?
#endif
};

#define set_page_count(p,v) atomic_set(&(p)->_count, v - 1)
/*
 * Only a small set of page-type flags is defined so far on IA-64.
 * IA-64 should eventually adopt the same definitions as x86_64.
 */
/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0UL<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1UL<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2UL<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3UL<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4UL<<29) /* using this page as an L4 page table? */
 /* Value 5 reserved. See asm-x86/mm.h */
 /* Value 6 reserved. See asm-x86/mm.h */
#define PGT_writable_page   (7UL<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7UL<<29) /* Bits 29-31. */

 /* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1UL<<_PGT_validated)
 /* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         27
#define PGT_pinned          (1UL<<_PGT_pinned)

 /* 16-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1UL<<16)-1)

 /* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1UL<<_PGC_allocated)
 /* Bit 30 reserved. See asm-x86/mm.h */
 /* Bit 29 reserved. See asm-x86/mm.h */
 /* 29-bit count of references to this frame. */
#define PGC_count_mask      ((1UL<<29)-1)
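
/*
 * Illustrative only (not part of the original header): how a type_info
 * word decomposes under the masks above.  For example, a page validated
 * as an L1 page table with two type references:
 */
#if 0
    unsigned long ti = PGT_l1_page_table | PGT_validated | 2;
    unsigned long type  = ti & PGT_type_mask;   /* == PGT_l1_page_table */
    unsigned long users = ti & PGT_count_mask;  /* == 2 */
    int validated = !!(ti & PGT_validated);     /* == 1 */
#endif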
#define is_xen_heap_mfn(mfn)   (((mfn) < paddr_to_pfn(xenheap_phys_end)) \
                                && ((mfn) >= paddr_to_pfn(xen_pstart)))
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))

extern void* xen_pickle_offset;
#define __pickle(a)   ((unsigned long)a - (unsigned long)xen_pickle_offset)
#define __unpickle(a) (void *)(a + xen_pickle_offset)

static inline struct domain *unpickle_domptr(u64 _d)
{ return (_d == 0) ? NULL : __unpickle(_d); }
static inline u32 pickle_domptr(struct domain *_d)
{ return (_d == NULL) ? 0 : (u32)__pickle(_d); }

#define page_get_owner(_p)     (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p, _d) ((_p)->u.inuse._domain = pickle_domptr(_d))
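
/*
 * Illustrative only: domain pointers are "pickled" into 32 bits by
 * storing their offset from xen_pickle_offset, so the owner fits in the
 * u32 _domain field of struct page_info.  A typical round trip (assumes
 * 'pg' and 'd' are in scope):
 */
#if 0
    page_set_owner(pg, d);                      /* stores (u32)__pickle(d) */
    struct domain *owner = page_get_owner(pg);  /* __unpickle()s back to d */
    ASSERT(owner == d);
#endif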
#define XENSHARE_writable 0
#define XENSHARE_readonly 1
void share_xen_page_with_guest(struct page_info *page,
                               struct domain *d, int readonly);
void share_xen_page_with_privileged_guests(struct page_info *page,
                                           int readonly);

extern unsigned long frametable_pg_dir[];
extern struct page_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;

extern void __init init_frametable(void);
void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
static inline void put_page(struct page_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x = y;
        nx = x - 1;
    }
    while (unlikely((y = cmpxchg_rel(&page->count_info, x, nx)) != x));

    if (unlikely((nx & PGC_count_mask) == 0))
        free_domheap_page(page);
}

/* count_info and ownership are checked atomically. */
static inline int get_page(struct page_info *page,
                           struct domain *domain)
{
    u64 x, nx, y = *((u64*)&page->count_info);
    u32 _domain = pickle_domptr(domain);

    do {
        x = y;
        nx = x + 1;
        if (unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
            unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
            unlikely((x >> 32) != _domain)) {       /* Wrong owner? */

            gdprintk(XENLOG_INFO, "Error pfn %lx: rd=%p, od=%p, caf=%016lx, taf=%"
                     PRtype_info "\n", page_to_mfn(page), domain,
                     unpickle_domptr(x >> 32), x, page->u.inuse.type_info);
            return 0;
        }
    }
    while (unlikely((y = cmpxchg_acq((u64*)&page->count_info, x, nx)) != x));
    return 1;
}
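
/*
 * Illustrative only: the usual pattern for holding a transient reference
 * to a guest page (assumes 'pg' and 'd' are in scope):
 */
#if 0
    if (get_page(pg, d)) {   /* refcount++, owner verified atomically */
        /* ... safely access the frame here ... */
        put_page(pg);        /* refcount--; frees the page if it hits 0 */
    }
#endif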
int is_iomem_page(unsigned long mfn);

extern void put_page_type(struct page_info *page);
extern int get_page_type(struct page_info *page, u32 type);

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    u32 type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
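
/*
 * Illustrative only: taking both a general and a type reference in one
 * step, e.g. before using a frame as a writable mapping target (assumes
 * 'pg' and 'd' are in scope):
 */
#if 0
    if (get_page_and_type(pg, d, PGT_writable_page)) {
        /* ... the frame is held as writable for this span ... */
        put_page_and_type(pg);
    }
#endif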
/* Note: no trailing semicolon, so the macro composes safely in if/else. */
#define set_machinetophys(_mfn, _pfn) do { } while(0)
#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s)             (_s)
#define memguard_guard_stack(_p)      ((void)0)
#define memguard_guard_range(_p,_l)   ((void)0)
#define memguard_unguard_range(_p,_l) ((void)0)
#endif

// prototype of misc memory stuff
//unsigned long __get_free_pages(unsigned int mask, unsigned int order);
//void __free_pages(struct page_info *page, unsigned int order);
void *pgtable_quicklist_alloc(void);
void pgtable_quicklist_free(void *pgtable_entry);
// FOLLOWING FROM linux-2.6.7/include/mm.h

/*
 * This struct defines a VM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area, etc.).
 */
struct vm_area_struct {
    struct mm_struct *vm_mm;    /* The address space we belong to. */
    unsigned long vm_start;     /* Our start address within vm_mm. */
    unsigned long vm_end;       /* The first byte after our end address
                                   within vm_mm. */

    /* linked list of VM areas per task, sorted by address */
    struct vm_area_struct *vm_next;

    pgprot_t vm_page_prot;      /* Access permissions of this VMA. */
    unsigned long vm_flags;     /* Flags, listed below. */

#ifndef XEN
    struct rb_node vm_rb;

// XEN doesn't need all the backing store stuff
    /*
     * For areas with an address space and backing store,
     * linkage into the address_space->i_mmap prio tree, or
     * linkage to the list of like vmas hanging off its node, or
     * linkage of vma in the address_space->i_mmap_nonlinear list.
     */
    union {
        struct {
            struct list_head list;
            void *parent;   /* aligns with prio_tree_node parent */
            struct vm_area_struct *head;
        } vm_set;

        struct prio_tree_node prio_tree_node;
    } shared;

    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages. A MAP_SHARED vma
     * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
    struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

    /* Function pointers to deal with this struct. */
    struct vm_operations_struct *vm_ops;

    /* Information about our backing store: */
    unsigned long vm_pgoff;     /* Offset (within vm_file) in PAGE_SIZE
                                   units, *not* PAGE_CACHE_SIZE */
    struct file *vm_file;       /* File we map to (can be NULL). */
    void *vm_private_data;      /* was vm_pte (shared mem) */

#ifdef CONFIG_NUMA
    struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
#endif
};
/*
 * vm_flags..
 */
#define VM_READ         0x00000001  /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

#define VM_MAYREAD      0x00000010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100  /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_SHM          0x00000400  /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x00000800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000  /* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000  /* App will access data sequentially */
#define VM_RAND_READ    0x00010000  /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000  /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000  /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000  /* Don't unmap it from swap_out */
#define VM_ACCOUNT      0x00100000  /* Is a VM accounted object */
#define VM_HUGETLB      0x00400000  /* Huge TLB Page VM */
#define VM_NONLINEAR    0x00800000  /* Is non-linear (remap_file_pages) */

#ifndef VM_STACK_DEFAULT_FLAGS  /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif
#if 0 /* removed when rebasing to 2.6.13 */
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)

static inline unsigned long page_zonenum(struct page_info *page)
{
    return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
}
static inline unsigned long page_to_nid(struct page_info *page)
{
    return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page_info *page)
{
    return zone_table[page->flags >> NODEZONE_SHIFT];
}

static inline void set_page_zone(struct page_info *page, unsigned long nodezone_num)
{
    page->flags &= ~(~0UL << NODEZONE_SHIFT);
    page->flags |= nodezone_num << NODEZONE_SHIFT;
}
#endif

#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif
static inline void *lowmem_page_address(struct page_info *page)
{
    return __va(page_to_mfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)     \
    do {                                    \
        (page)->virtual = (address);        \
    } while(0)
#define page_address_init() do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page_info *page);
void set_page_address(struct page_info *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init() do { } while(0)
#endif

#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page_info *page, int numpages, int enable)
{
}
#endif
extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern int nr_swap_pages;

extern void alloc_dom_xen_and_dom_io(void);
extern int mm_teardown(struct domain* d);
extern void mm_final_teardown(struct domain* d);
extern struct page_info *assign_new_domain_page(struct domain *d, unsigned long mpaddr);
extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
extern int __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);
extern void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
extern void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
struct p2m_entry;
extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr, struct p2m_entry* entry);
extern void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr);
extern volatile pte_t *lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr);
extern unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr, unsigned long phys_addr, unsigned long size, unsigned long flags);
extern unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size, unsigned long flags);
int domain_page_mapped(struct domain *d, unsigned long mpaddr);
int efi_mmio(unsigned long physaddr, unsigned long size);
extern unsigned long ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
extern unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3);
extern unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order);
extern unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned long flags, domid_t domid);
extern unsigned long dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn, unsigned long gmfn, unsigned long flags, domid_t domid);
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
extern void expose_p2m_init(void);
extern unsigned long dom0vp_expose_p2m(struct domain* d, unsigned long conv_start_gpfn, unsigned long assign_start_gpfn, unsigned long expose_size, unsigned long granule_pfn);
extern void foreign_p2m_init(struct domain* d);
extern void foreign_p2m_destroy(struct domain* d);
extern unsigned long dom0vp_expose_foreign_p2m(struct domain* dest_dom, unsigned long dest_gpfn, domid_t domid, XEN_GUEST_HANDLE(char) buffer, unsigned long flags);
extern unsigned long dom0vp_unexpose_foreign_p2m(struct domain* dest_dom, unsigned long dest_gpfn, domid_t domid);
#else
#define expose_p2m_init() do { } while (0)
#define dom0vp_expose_p2m(d, conv_start_gpfn, assign_start_gpfn, expose_size, granule_pfn) (-ENOSYS)
#define foreign_p2m_init(d) do { } while (0)
#define foreign_p2m_destroy(d) do { } while (0)
#define dom0vp_expose_foreign_p2m(dest_dom, dest_gpfn, domid, buffer, flags) (-ENOSYS)
#define dom0vp_unexpose_foreign_p2m(dest_dom, dest_gpfn, domid) (-ENOSYS)
#endif
extern volatile unsigned long *mpt_table;
extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__,
                                u64* itir, struct p2m_entry* entry);
#define machine_to_phys_mapping mpt_table

#define INVALID_M2P_ENTRY (~0UL)
#define VALID_M2P(_e)     (!((_e) & (1UL<<63)))

#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
#define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
/* If the pmt table is provided by the control panel later, we need __get_user
 * here. However, if it's allocated by the HV, we can access it directly.
 */

#define mfn_to_gmfn(_d, mfn) \
    get_gpfn_from_mfn(mfn)

#define gmfn_to_mfn(_d, gpfn) \
    gmfn_to_mfn_foreign((_d), (gpfn))
#define __gpfn_invalid(_d, gpfn) \
    (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL) == INVALID_MFN)

#define __gmfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)

#define __gpa_to_mpa(_d, gpa) \
    ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))

#define __mpa_to_gpa(madr) \
    ((get_gpfn_from_mfn((madr) >> PAGE_SHIFT) << PAGE_SHIFT) | \
     ((madr) & ~PAGE_MASK))
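
/*
 * Illustrative only: how the address-translation macros above compose.
 * Assuming 16KB pages (PAGE_SHIFT == 14, the usual Xen/ia64 setup), a
 * guest physical address splits into a frame number plus an offset:
 */
#if 0
    unsigned long gpa  = 0x40123;               /* guest physical address */
    unsigned long gpfn = gpa >> PAGE_SHIFT;     /* 0x40123 >> 14 == 0x10 */
    unsigned long off  = gpa & ~PAGE_MASK;      /* == 0x123, offset in page */
    unsigned long mpa  = __gpa_to_mpa(d, gpa);  /* (mfn << PAGE_SHIFT) | off */
#endif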
/* Internal use only: returns 0 in case of bad address. */
extern unsigned long paddr_to_maddr(unsigned long paddr);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);

int steal_page(
    struct domain *d, struct page_info *page, unsigned int memflags);

#define domain_clamp_alloc_bitsize(d, b) (b)

unsigned long domain_get_maximum_gpfn(struct domain *d);

extern struct domain *dom_xen, *dom_io; /* for vmcoreinfo */

#endif /* __ASM_IA64_MM_H__ */