ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/page.h @ 13341:3040ba0f2d3d

When booting via xm, only run the bootloader if it's in non-interactive mode:
otherwise we lose the user's named kernel and try to bootload the temporary
file pygrub returned.

Signed-off-by: John Levon <john.levon@sun.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Tue Jan 09 13:24:45 2007 +0000 (2007-01-09)
parents ade94aa072c5
children 4fad820a2233
line source
#ifndef _I386_PAGE_H
#define _I386_PAGE_H

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE-1))

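/*
 * With CONFIG_X86_PAE the processor can address 36 bits of physical
 * memory, so the physical masks below must be 64-bit (ULL) values;
 * without PAE the plain 32-bit PAGE_MASK already covers it.
 */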
#ifdef CONFIG_X86_PAE
#define __PHYSICAL_MASK_SHIFT 36
#define __PHYSICAL_MASK       ((1ULL << __PHYSICAL_MASK_SHIFT) - 1)
#define PHYSICAL_PAGE_MASK    (~((1ULL << PAGE_SHIFT) - 1) & __PHYSICAL_MASK)
#else
#define __PHYSICAL_MASK_SHIFT 32
#define __PHYSICAL_MASK       (~0UL)
#define PHYSICAL_PAGE_MASK    (PAGE_MASK & __PHYSICAL_MASK)
#endif
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/config.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/bug.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/foreign_page.h>

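/*
 * A "foreign" page belongs to another domain (e.g. a page mapped via a
 * grant reference) and must not be handed back to the normal page
 * allocator.  arch_free_page() calls the destructor registered for such
 * a page and evaluates to non-zero, letting the generic free path skip it.
 */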
#define arch_free_page(_page,_order)                    \
({      int foreign = PageForeign(_page);               \
        if (foreign)                                    \
                (PageForeignDestructor(_page))(_page);  \
        foreign;                                        \
})
#define HAVE_ARCH_FREE_PAGE

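/*
 * With CONFIG_XEN_SCRUB_PAGES set, pages are zeroed before being
 * released (e.g. handed back to the hypervisor by the balloon driver)
 * so their previous contents cannot leak to other domains.
 */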
#ifdef CONFIG_XEN_SCRUB_PAGES
#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
#else
#define scrub_pages(_p,_n) ((void)0)
#endif

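/*
 * clear_page()/copy_page() use the MMX-optimized routines when
 * CONFIG_X86_USE_3DNOW is set, and plain memset()/memcpy() otherwise.
 */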
#ifdef CONFIG_X86_USE_3DNOW

#include <asm/mmx.h>

#define clear_page(page)   mmx_clear_page((void *)(page))
#define copy_page(to,from) mmx_copy_page(to,from)

#else

#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/*
 * On older X86 processors it's not a win to use MMX here it seems.
 * Maybe the K6-III ?
 */

#define clear_page(page)   memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)

#endif

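/*
 * i386 caches are physically indexed, so there is no aliasing to worry
 * about: the user-page variants can ignore the virtual address and the
 * struct page and simply defer to clear_page()/copy_page().
 */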
#define clear_user_page(page, vaddr, pg)    clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

/*
 * These are used to make use of C type-checking..
 */
extern int nx_enabled;
#ifdef CONFIG_X86_PAE
extern unsigned long long __supported_pte_mask;
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pmd; } pmd_t;
typedef struct { unsigned long long pgd; } pgd_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#include <asm/maddr.h>
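/*
 * Under Xen the page tables hold machine frame addresses, while the
 * kernel otherwise works with pseudo-physical addresses.  The
 * constructors below therefore run phys_to_machine() (from
 * <asm/maddr.h>) on present entries, i.e. those with bit 0
 * (_PAGE_PRESENT) set; non-present values are stored unchanged.
 */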
#define __pte(x) ({ unsigned long long _x = (x);        \
        if (_x & 1) _x = phys_to_machine(_x);           \
        ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
#define __pgd(x) ({ unsigned long long _x = (x);        \
        (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
#define __pmd(x) ({ unsigned long long _x = (x);        \
        (((_x)&1) ? ((pmd_t) {phys_to_machine(_x)}) : ((pmd_t) {(_x)})); })
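/*
 * Reading an entry goes the other way: pte_val(), pmd_val() and
 * pgd_val() translate a non-empty entry back from machine to
 * pseudo-physical form, while pte_val_ma() returns the raw
 * machine-address pte untranslated.
 */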
static inline unsigned long long pte_val(pte_t x)
{
        unsigned long long ret;

        if (x.pte_low) {
                ret = x.pte_low | (unsigned long long)x.pte_high << 32;
                ret = pte_machine_to_phys(ret) | 1;
        } else {
                ret = 0;
        }
        return ret;
}
static inline unsigned long long pmd_val(pmd_t x)
{
        unsigned long long ret = x.pmd;
        if (ret) ret = pte_machine_to_phys(ret) | 1;
        return ret;
}
static inline unsigned long long pgd_val(pgd_t x)
{
        unsigned long long ret = x.pgd;
        if (ret) ret = pte_machine_to_phys(ret) | 1;
        return ret;
}
static inline unsigned long long pte_val_ma(pte_t x)
{
        return (unsigned long long)x.pte_high << 32 | x.pte_low;
}
#define HPAGE_SHIFT 21
#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#include <asm/maddr.h>
#define boot_pte_t pte_t /* or would you rather have a typedef */
#define pte_val(x) (((x).pte_low & 1) ?                 \
                    pte_machine_to_phys((x).pte_low) :  \
                    (x).pte_low)
#define pte_val_ma(x) ((x).pte_low)
#define __pte(x) ({ unsigned long _x = (x);             \
        (((_x)&1) ? ((pte_t) {phys_to_machine(_x)}) : ((pte_t) {(_x)})); })
#define __pgd(x) ({ unsigned long _x = (x);             \
        (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
static inline unsigned long pgd_val(pgd_t x)
{
        unsigned long ret = x.pgd;
        if (ret) ret = pte_machine_to_phys(ret) | 1;
        return ret;
}
#define HPAGE_SHIFT 22
#endif
#define PTE_MASK PAGE_MASK

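/*
 * Huge pages correspond to a single PMD entry: 2MB (HPAGE_SHIFT 21)
 * with PAE, 4MB (HPAGE_SHIFT 22) without.
 */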
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE          ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK          (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

#define __pgprot(x) ((pgprot_t) { (x) } )

#endif /* !__ASSEMBLY__ */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
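/* e.g. PAGE_ALIGN(0xC0000001) == 0xC0001000; already-aligned addresses are unchanged */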

/*
 * This handles the memory map.. We could make this a config
 * option, but too many people screw it up, and too few need
 * it.
 *
 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 950MB.
 *
 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
 * and CONFIG_HIGHMEM64G options in the kernel configuration.
 */
#ifndef __ASSEMBLY__

/*
 * This much address space is reserved for vmalloc() and iomap()
 * as well as fixmap mappings.
 */
extern unsigned int __VMALLOC_RESERVE;

extern int sysctl_legacy_va_layout;

extern int page_is_ram(unsigned long pagenr);

#endif /* __ASSEMBLY__ */

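/*
 * The kernel image lives at __KERNEL_START: CONFIG_PHYSICAL_START
 * (typically 0x100000, the 1MB mark) offset into the virtual mapping
 * that begins at CONFIG_PAGE_OFFSET.
 */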
#ifdef __ASSEMBLY__
#define __PAGE_OFFSET     CONFIG_PAGE_OFFSET
#define __PHYSICAL_START  CONFIG_PHYSICAL_START
#else
#define __PAGE_OFFSET     ((unsigned long)CONFIG_PAGE_OFFSET)
#define __PHYSICAL_START  ((unsigned long)CONFIG_PHYSICAL_START)
#endif
#define __KERNEL_START    (__PAGE_OFFSET + __PHYSICAL_START)

#ifdef CONFIG_XEN_COMPAT_030002
#undef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif /* CONFIG_XEN_COMPAT_030002 */

#define PAGE_OFFSET       ((unsigned long)__PAGE_OFFSET)
#define VMALLOC_RESERVE   ((unsigned long)__VMALLOC_RESERVE)
#define MAXMEM            (__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
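/*
 * __pa()/__va() convert between direct-mapped kernel virtual addresses
 * and (pseudo-)physical addresses by subtracting/adding PAGE_OFFSET.
 * They are only valid for the lowmem direct mapping; machine addresses
 * require the conversions in <asm/maddr.h>.
 */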
#define __pa(x)           ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)           ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
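/*
 * With CONFIG_FLATMEM, mem_map is one contiguous array of struct page,
 * so pfn <-> struct page conversion is simple pointer arithmetic and
 * pfn_valid() is just a bounds check against max_mapnr.
 */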
#ifdef CONFIG_FLATMEM
#define pfn_to_page(pfn)  (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)    ((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */
#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS \
        (VM_READ | VM_WRITE | \
        ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
         VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define __HAVE_ARCH_GATE_AREA 1

#endif /* __KERNEL__ */

#include <asm-generic/page.h>

#endif /* _I386_PAGE_H */