linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h @ 6432:b54144915ae6 (ia64/xen-unstable)

summary  merge?
author   cl349@firebug.cl.cam.ac.uk
date     Thu Aug 25 16:26:30 2005 +0000
parents  522bc50588ed b88c5350de57
children 0610add7c3fe
#ifndef _I386_PAGE_H
#define _I386_PAGE_H

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/config.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/bug.h>
#include <asm-xen/xen-public/xen.h>
#include <asm-xen/foreign_page.h>

#define arch_free_page(_page,_order)			\
({	int foreign = PageForeign(_page);		\
	if (foreign)					\
		(PageForeignDestructor(_page))(_page);	\
	foreign;					\
})
#define HAVE_ARCH_FREE_PAGE
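
/*
 * Illustrative sketch, not part of the original header: how the
 * generic free path is expected to use arch_free_page().  A foreign
 * page belongs to another domain, so instead of entering the buddy
 * allocator it is handed to its registered destructor and the free is
 * aborted.  The function name below is hypothetical.
 */
#if 0
static void example_free_path(struct page *page, unsigned int order)
{
	if (arch_free_page(page, order))
		return;	/* foreign page: the destructor consumed it */
	/* ...normal buddy-allocator freeing would continue here... */
}
#endif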

#ifdef CONFIG_XEN_SCRUB_PAGES
#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
#else
#define scrub_pages(_p,_n) ((void)0)
#endif
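
/*
 * Illustrative sketch, not in the original source: a caller such as a
 * balloon driver would scrub pages before releasing them to the
 * hypervisor, so that their old contents cannot leak into another
 * domain.  When CONFIG_XEN_SCRUB_PAGES is off this compiles away.
 * The function and variable names below are hypothetical.
 */
#if 0
static void example_scrub_before_release(struct page *page)
{
	unsigned long vaddr = (unsigned long)page_address(page);
	scrub_pages(vaddr, 1);	/* zero 1 page starting at vaddr */
}
#endif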

#ifdef CONFIG_X86_USE_3DNOW

#include <asm/mmx.h>

#define clear_page(page)	mmx_clear_page((void *)(page))
#define copy_page(to,from)	mmx_copy_page(to,from)

#else

#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/*
 * On older x86 processors it doesn't seem to be a win to use MMX here;
 * maybe the K6-III?
 */

#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)

#endif

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0U)
#define FOREIGN_FRAME(m)	((m) | 0x80000000U)
extern unsigned int *phys_to_machine_mapping;
#define pfn_to_mfn(pfn) \
	((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL)
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned int pfn;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	asm (
		"1:	movl %1,%0\n"
		"2:\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 1b,2b\n"
		".previous"
		: "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );

	return (unsigned long)pfn;
}
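
/*
 * Illustrative sketch, not in the original source: the p2m table maps
 * pseudo-physical frames to machine frames, with bit 31 (the
 * FOREIGN_FRAME tag) marking entries that refer to another domain's
 * frames; pfn_to_mfn() masks that bit off.  The values below are
 * hypothetical.
 */
#if 0
static void example_p2m_usage(void)
{
	unsigned long pfn = 0x100;		/* hypothetical frame number */
	unsigned long mfn = pfn_to_mfn(pfn);	/* machine frame backing pfn */
	/* For ordinary RAM the m2p table round-trips: mfn_to_pfn(mfn) == pfn. */
	phys_to_machine_mapping[pfn] = FOREIGN_FRAME(mfn);	/* tag as foreign */
}
#endif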

/* Definitions for machine and pseudophysical addresses. */
#ifdef CONFIG_X86_PAE
typedef unsigned long long paddr_t;
typedef unsigned long long maddr_t;
#else
typedef unsigned long paddr_t;
typedef unsigned long maddr_t;
#endif

static inline maddr_t phys_to_machine(paddr_t phys)
{
	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
	return machine;
}
static inline paddr_t machine_to_phys(maddr_t machine)
{
	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
	return phys;
}
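
/*
 * Worked example (illustrative): for phys == 0x00101234 the frame
 * number 0x101 is translated through the p2m table while the offset
 * 0x234 inside the page is preserved; if pfn_to_mfn(0x101) were 0x5a3,
 * phys_to_machine() would return (0x5a3 << 12) | 0x234 == 0x005a3234.
 */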

/*
 * These are used to provide C type-checking on page-table values.
 */
extern int nx_enabled;
#ifdef CONFIG_X86_PAE
extern unsigned long long __supported_pte_mask;
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pmd; } pmd_t;
typedef struct { unsigned long long pgd; } pgd_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
#define __pte(x) ({ unsigned long long _x = (x); \
	if (_x & 1) _x = phys_to_machine(_x); \
	((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
#define __pgd(x) ({ unsigned long long _x = (x); \
	(((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
#define __pmd(x) ({ unsigned long long _x = (x); \
	(((_x)&1) ? ((pmd_t) {phys_to_machine(_x)}) : ((pmd_t) {(_x)})); })
static inline unsigned long long pte_val(pte_t x)
{
	unsigned long long ret;

	if (x.pte_low) {
		ret = x.pte_low | (unsigned long long)x.pte_high << 32;
		ret = machine_to_phys(ret) | 1;
	} else {
		ret = 0;
	}
	return ret;
}
static inline unsigned long long pmd_val(pmd_t x)
{
	unsigned long long ret = x.pmd;
	if (ret) ret = machine_to_phys(ret) | 1;
	return ret;
}
static inline unsigned long long pgd_val(pgd_t x)
{
	unsigned long long ret = x.pgd;
	if (ret) ret = machine_to_phys(ret) | 1;
	return ret;
}
static inline unsigned long long pte_val_ma(pte_t x)
{
	return (unsigned long long)x.pte_high << 32 | x.pte_low;
}
#define HPAGE_SHIFT	21
#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define boot_pte_t pte_t /* or would you rather have a typedef */
#define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
		    (x).pte_low)
#define pte_val_ma(x)	((x).pte_low)
#define __pte(x) ({ unsigned long _x = (x); \
	(((_x)&1) ? ((pte_t) {phys_to_machine(_x)}) : ((pte_t) {(_x)})); })
#define __pgd(x) ({ unsigned long _x = (x); \
	(((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
static inline unsigned long pgd_val(pgd_t x)
{
	unsigned long ret = x.pgd;
	if (ret) ret = machine_to_phys(ret) | 1;
	return ret;
}
#define HPAGE_SHIFT	22
#endif
#define PTE_MASK	PAGE_MASK
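
/*
 * Illustrative sketch, not in the original source: __pte() translates
 * a pseudo-physical PTE to a machine PTE when the present bit (bit 0)
 * is set, and pte_val() translates back, so generic code keeps seeing
 * pseudo-physical addresses.  The frame number and flags below are
 * hypothetical.
 */
#if 0
static void example_pte_round_trip(void)
{
	/* 0x63 == present | rw | accessed | dirty */
	pte_t pte = __pte((0x101UL << PAGE_SHIFT) | 0x63);
	/* pte now holds a machine frame; for ordinary RAM pte_val()
	 * recovers the pseudo-physical value:
	 * pte_val(pte) >> PAGE_SHIFT == 0x101. */
}
#endif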

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

#define pgprot_val(x)	((x).pgprot)

#define __pte_ma(x)	((pte_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#endif /* !__ASSEMBLY__ */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
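
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096,
 * PAGE_ALIGN(0x1001) == 0x2000, while an already aligned address is
 * unchanged: PAGE_ALIGN(0x3000) == 0x3000.
 */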

/*
 * This handles the memory map.  We could make this a config
 * option, but too many people screw it up, and too few need
 * it.
 *
 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 950MB.
 *
 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
 * and CONFIG_HIGHMEM64G options in the kernel configuration.
 */

#ifndef __ASSEMBLY__

/*
 * This much address space is reserved for vmalloc() and iomap()
 * as well as fixmap mappings.
 */
extern unsigned int __VMALLOC_RESERVE;

/* Pure 2^n version of get_order */
static __inline__ int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
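
/*
 * Worked examples (illustrative), with PAGE_SHIFT == 12:
 * get_order(1) == 0, get_order(4096) == 0,
 * get_order(4097) == 1, get_order(8192) == 1.
 */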

extern int sysctl_legacy_va_layout;

#endif /* __ASSEMBLY__ */

#ifdef __ASSEMBLY__
#define __PAGE_OFFSET		(0xC0000000)
#else
#define __PAGE_OFFSET		(0xC0000000UL)
#endif

#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
#define MAXMEM			(HYPERVISOR_VIRT_START-__PAGE_OFFSET-__VMALLOC_RESERVE)
#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#ifndef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn)	(mem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif /* !CONFIG_DISCONTIGMEM */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | \
	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
#define machine_to_virt(m)	(__va(machine_to_phys(m)))
#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
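
/*
 * Illustrative sketch, not in the original source: anything handed to
 * the hypervisor or to real hardware must be a machine address, since
 * pseudo-physical addresses are meaningless outside this guest.  The
 * function name and buffer below are hypothetical.
 */
#if 0
static void example_machine_address(void)
{
	char *buf = (char *)__get_free_page(GFP_KERNEL);
	unsigned long mfn = virt_to_mfn(buf);	/* machine frame number */
	maddr_t maddr = virt_to_machine(buf);	/* full machine address */
	/* mfn/maddr would be passed to the hypervisor or a device here. */
}
#endif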

#endif /* __KERNEL__ */

#endif /* _I386_PAGE_H */